Dataset schema:
- filename: string (length 4–198)
- content: string (length 25–939k)
- environment: list
- variablearg: list
- constarg: list
- variableargjson: string (1 class)
- constargjson: string (length 2–3.9k)
- lang: string (3 classes)
- constargcount: float64 (0–129)
- variableargcount: float64 (0–0)
- sentence: string (1 class)
tests/ignite/contrib/handlers/test_polyaxon_logger.py
import os from unittest.mock import MagicMock, call import pytest import torch from ignite.contrib.handlers.polyaxon_logger import * from ignite.engine import Engine, Events, State os.environ["POLYAXON_NO_OP"] = "1" def test_output_handler_with_wrong_logger_type(): wrapper = OutputHandler("tag", output_transform=lambda x: x) mock_logger = MagicMock() mock_engine = MagicMock() with pytest.raises(RuntimeError, match="Handler 'OutputHandler' works only with PolyaxonLogger"): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) def test_output_handler_output_transform(): wrapper = OutputHandler("tag", output_transform=lambda x: x) mock_logger = MagicMock(spec=PolyaxonLogger) mock_logger.log_metrics = MagicMock() mock_engine = MagicMock() mock_engine.state = State() mock_engine.state.output = 12345 mock_engine.state.iteration = 123 wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) mock_logger.log_metrics.assert_called_once_with(step=123, **{"tag/output": 12345}) wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x}) mock_logger = MagicMock(spec=PolyaxonLogger) mock_logger.log_metrics = MagicMock() wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) mock_logger.log_metrics.assert_called_once_with(step=123, **{"another_tag/loss": 12345}) def test_output_handler_metric_names(): wrapper = OutputHandler("tag", metric_names=["a", "b", "c"]) mock_logger = MagicMock(spec=PolyaxonLogger) mock_logger.log_metrics = MagicMock() mock_engine = MagicMock() mock_engine.state = State(metrics={"a": 12.23, "b": 23.45, "c": torch.tensor(10.0)}) mock_engine.state.iteration = 5 wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.log_metrics.call_count == 1 mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0}) wrapper = OutputHandler("tag", metric_names=["a",]) mock_engine = MagicMock() mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])}) mock_engine.state.iteration = 5 mock_logger = MagicMock(spec=PolyaxonLogger) mock_logger.log_metrics = MagicMock() wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.log_metrics.call_count == 1 mock_logger.log_metrics.assert_has_calls( [call(step=5, **{"tag/a/0": 0.0, "tag/a/1": 1.0, "tag/a/2": 2.0, "tag/a/3": 3.0}),], any_order=True ) wrapper = OutputHandler("tag", metric_names=["a", "c"]) mock_engine = MagicMock() mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"}) mock_engine.state.iteration = 7 mock_logger = MagicMock(spec=PolyaxonLogger) mock_logger.log_metrics = MagicMock() with pytest.warns(UserWarning): wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.log_metrics.call_count == 1 mock_logger.log_metrics.assert_has_calls([call(step=7, **{"tag/a": 55.56})], any_order=True) # all metrics wrapper = OutputHandler("tag", metric_names="all") mock_logger = MagicMock(spec=PolyaxonLogger) mock_logger.log_metrics = MagicMock() mock_engine = MagicMock() mock_engine.state = State(metrics={"a": 12.23, "b": 23.45, "c": torch.tensor(10.0)}) mock_engine.state.iteration = 5 wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) assert mock_logger.log_metrics.call_count == 1 mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0}) def test_output_handler_both(): wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x}) mock_logger = MagicMock(spec=PolyaxonLogger) 
mock_logger.log_metrics = MagicMock() mock_engine = MagicMock() mock_engine.state = State(metrics={"a": 12.23, "b": 23.45}) mock_engine.state.epoch = 5 mock_engine.state.output = 12345 wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.log_metrics.call_count == 1 mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/loss": 12345}) def test_output_handler_with_wrong_global_step_transform_output(): def global_step_transform(*args, **kwargs): return "a" wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform) mock_logger = MagicMock(spec=PolyaxonLogger) mock_logger.log_metrics = MagicMock() mock_engine = MagicMock() mock_engine.state = State() mock_engine.state.epoch = 5 mock_engine.state.output = 12345 with pytest.raises(TypeError, match="global_step must be int"): wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) def test_output_handler_with_global_step_transform(): def global_step_transform(*args, **kwargs): return 10 wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform) mock_logger = MagicMock(spec=PolyaxonLogger) mock_logger.log_metrics = MagicMock() mock_engine = MagicMock() mock_engine.state = State() mock_engine.state.epoch = 5 mock_engine.state.output = 12345 wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) mock_logger.log_metrics.assert_called_once_with(step=10, **{"tag/loss": 12345}) def test_output_handler_with_global_step_from_engine(): mock_another_engine = MagicMock() mock_another_engine.state = State() mock_another_engine.state.epoch = 10 mock_another_engine.state.output = 12.345 wrapper = OutputHandler( "tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_from_engine(mock_another_engine), ) mock_logger = MagicMock(spec=PolyaxonLogger) mock_logger.log_metrics = MagicMock() mock_engine = MagicMock() mock_engine.state = State() mock_engine.state.epoch = 1 mock_engine.state.output = 0.123 wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.log_metrics.call_count == 1 mock_logger.log_metrics.assert_has_calls( [call(step=mock_another_engine.state.epoch, **{"tag/loss": mock_engine.state.output})] ) mock_another_engine.state.epoch = 11 mock_engine.state.output = 1.123 wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED) assert mock_logger.log_metrics.call_count == 2 mock_logger.log_metrics.assert_has_calls( [call(step=mock_another_engine.state.epoch, **{"tag/loss": mock_engine.state.output})] ) def test_optimizer_params_handler_wrong_setup(): with pytest.raises(TypeError): OptimizerParamsHandler(optimizer=None) optimizer = MagicMock(spec=torch.optim.Optimizer) handler = OptimizerParamsHandler(optimizer=optimizer) mock_logger = MagicMock() mock_engine = MagicMock() with pytest.raises(RuntimeError, match="Handler OptimizerParamsHandler works only with PolyaxonLogger"): handler(mock_engine, mock_logger, Events.ITERATION_STARTED) def test_optimizer_params(): optimizer = torch.optim.SGD([torch.Tensor(0)], lr=0.01) wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr") mock_logger = MagicMock(spec=PolyaxonLogger) mock_logger.log_metrics = MagicMock() mock_engine = MagicMock() mock_engine.state = State() mock_engine.state.iteration = 123 wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) mock_logger.log_metrics.assert_called_once_with(**{"lr/group_0": 0.01, "step": 123}) wrapper = OptimizerParamsHandler(optimizer, 
param_name="lr", tag="generator") mock_logger = MagicMock(spec=PolyaxonLogger) mock_logger.log_metrics = MagicMock() wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED) mock_logger.log_metrics.assert_called_once_with(**{"generator/lr/group_0": 0.01, "step": 123}) def test_integration(): n_epochs = 5 data = list(range(50)) losses = torch.rand(n_epochs * len(data)) losses_iter = iter(losses) def update_fn(engine, batch): return next(losses_iter) trainer = Engine(update_fn) plx_logger = PolyaxonLogger() def dummy_handler(engine, logger, event_name): global_step = engine.state.get_event_attrib_value(event_name) logger.log_metrics(step=global_step, **{"{}".format("test_value"): global_step}) plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED) trainer.run(data, max_epochs=n_epochs) def test_integration_as_context_manager(): n_epochs = 5 data = list(range(50)) losses = torch.rand(n_epochs * len(data)) losses_iter = iter(losses) def update_fn(engine, batch): return next(losses_iter) with PolyaxonLogger() as plx_logger: trainer = Engine(update_fn) def dummy_handler(engine, logger, event_name): global_step = engine.state.get_event_attrib_value(event_name) logger.log_metrics(step=global_step, **{"{}".format("test_value"): global_step}) plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED) trainer.run(data, max_epochs=n_epochs) @pytest.fixture def no_site_packages(): import sys polyaxon_client_modules = {} for k in sys.modules: if "polyaxon" in k: polyaxon_client_modules[k] = sys.modules[k] for k in polyaxon_client_modules: del sys.modules[k] prev_path = list(sys.path) sys.path = [p for p in sys.path if "site-packages" not in p] yield "no_site_packages" sys.path = prev_path for k in polyaxon_client_modules: sys.modules[k] = polyaxon_client_modules[k] def test_no_polyaxon_client(no_site_packages): with pytest.raises(RuntimeError, match=r"This contrib module requires polyaxon-client to be installed"): PolyaxonLogger()
[]
[]
[ "POLYAXON_NO_OP" ]
[]
["POLYAXON_NO_OP"]
python
1
0
tests/python/open_data/daal/test_daal_regression.py
# -*- encoding: utf-8 -*- """ :copyright: 2017-2018 H2O.ai, Inc. :license: Apache License Version 2.0 (see LICENSE for details) """ try: __import__('daal') except ImportError: import platform print("Daal is not supported. Architecture detected {}".format(platform.architecture())) else: from numpy.random import RandomState import time import os import numpy as np import logging from daal.data_management import HomogenNumericTable from daal.algorithms.linear_regression import training as linear_training from daal.algorithms.linear_regression import prediction as linear_prediction from h2o4gpu.solvers.daal_solver.daal_data import getNumpyArray from h2o4gpu import LinearMethod from numpy.linalg.tests.test_linalg import assert_almost_equal from numpy.ma.testutils import assert_array_almost_equal import h2o4gpu logging.basicConfig(level=logging.DEBUG) seeded = RandomState(42) from h2o4gpu.solvers.linear_regression import LinearRegression def test_fit_linear_regression_daal_vs_sklearn(): trainData = seeded.rand(200,10) trainDependentVariables = seeded.rand(200,2) solver_daal = LinearRegression( fit_intercept=True, normalize=False, verbose=True, backend='daal') start_daal = time.time() solver_daal.fit(trainData, trainDependentVariables) end_daal = time.time() solver_sk = LinearRegression(normalize=True) start_sklearn = time.time() solver_sk.fit(trainData, trainDependentVariables) end_sklearn = time.time() print("TEST FIT Sklearn vs Daal") print("Time taken in daal: {}".format(end_daal-start_daal)) print("Time taken in sklearn: {}".format(end_sklearn-start_sklearn)) print("DONE.") if os.getenv("CHECKPERFORMANCE") is not None: assert end_daal - start_daal <= end_sklearn - start_sklearn def test_linear_regression_simple(): # calculate beta coefficients x = np.array([0.,2.,3.]).reshape(3,1) nt_x = nt_y = HomogenNumericTable(x) lr_alg = linear_training.Batch(method=linear_training.qrDense) lr_alg.input.set(linear_training.data, nt_x) lr_alg.input.set(linear_training.dependentVariables, nt_y) result = lr_alg.compute() model = result.get(linear_training.model) beta_coeff = model.getBeta() np_beta_coeff = getNumpyArray(beta_coeff) res_beta_coeff = np.array([0,1]).reshape(1,2) assert_almost_equal(res_beta_coeff, np_beta_coeff) # predict lr_alg_predict = linear_prediction.Batch() lr_alg_predict.input.setModel(linear_prediction.model, model) lr_alg_predict.input.setTable(linear_prediction.data, nt_x) result = lr_alg_predict.compute() np_predict = getNumpyArray(result.get(linear_prediction.prediction)) assert_array_almost_equal(x, np_predict) def get_random_array(rows=10, columns=9): x = np.random.rand(rows, columns) y = np.random.rand(rows, 1) return (x,y) def test_overfitting(rows=10, columns=9): ''' overfitting - more features than data points for n(number of observation) > p (number of variables) in this case, the least squares estimates tend to have low variance, and hence performs well on test observations for n <= p, in this case a lot of variability in the least squares fit for n << p, no longer a unique least squares coefficient estimate, the variance is infinite so the method cannot be used at all. for the last second cases, one has to use ridgit regression, lasso, or reduct dimension (subset selection, e.g. scikit does this approach) ''' assert rows > columns, "More features than data points in linear regression!" 
def get_daal_prediction(x=np.array([1,2,3]), y=np.array([1,2,3])): ntX = HomogenNumericTable(x) ntY = HomogenNumericTable(y) lr_train = linear_training.Batch() lr_train.input.set(linear_training.data, ntX) lr_train.input.set(linear_training.dependentVariables, ntY) result = lr_train.compute() model = result.get(linear_training.model) lr_predict = linear_prediction.Batch() lr_predict.input.setModel(linear_prediction.model, model) lr_predict.input.setTable(linear_prediction.data, ntX) result = lr_predict.compute() np_predicted = getNumpyArray(result.get(linear_prediction.prediction)) # assert the same as the initial dependent variable assert_array_almost_equal(y, np_predicted) return np_predicted def get_scikit_prediction(x=np.array([1,2,3]), y=np.array([1,2,3])): from sklearn.linear_model.base import LinearRegression as ScikitLinearRegression regression = ScikitLinearRegression() regression.fit(x, y) return regression.predict(x) def test_linear_regression_against_scikit(rows=10, columns=9): ''' Test prediction daal against scikit Test for overfitting :param rows: :param columns: ''' inout = get_random_array(rows, columns) test_overfitting(rows, columns) x = inout[0] y = inout[1] daal_predicted = get_daal_prediction(x, y) scik_predicted = get_scikit_prediction(x, y) assert_array_almost_equal(daal_predicted, scik_predicted) def test_coeff_size(rows=10, columns=9): ''' number of beta coefficients (with intercept flag on) is the same number as size of data sample ''' inout = get_random_array(rows, columns) test_overfitting(rows, columns) x = inout[0] y = inout[1] ntX = HomogenNumericTable(x) ntY = HomogenNumericTable(y) lr_train = linear_training.Batch() lr_train.input.set(linear_training.data, ntX) lr_train.input.set(linear_training.dependentVariables, ntY) result = lr_train.compute() model = result.get(linear_training.model) beta_coeff = model.getBeta() np_beta = getNumpyArray(beta_coeff) assert y.transpose().shape == np_beta.shape, "Dependent variable size must have\ the same size as Beta coefficient" def test_intercept_flag(rows=10, columns=9): inout = get_random_array(rows, columns) test_overfitting(rows, columns) x = inout[0] y = inout[1] ntX = HomogenNumericTable(x) ntY = HomogenNumericTable(y) lr_train = linear_training.Batch() lr_train.input.set(linear_training.data, ntX) lr_train.input.set(linear_training.dependentVariables, ntY) result = lr_train.compute() model = result.get(linear_training.model) beta_coeff = model.getBeta() np_beta = getNumpyArray(beta_coeff) daal_intercept = np_beta[0,0] from sklearn.linear_model.base import LinearRegression as ScikitLinearRegression regression = ScikitLinearRegression() regression.fit(x, y) scikit_intercept = regression.intercept_ assert_array_almost_equal(scikit_intercept, [daal_intercept]) def test_linear_regression_daal_vs_sklearn(rows=10, columns=9,verbose=False): inout = get_random_array(rows, columns) x = inout[0] y = inout[1] start_sklearn = time.time() lin_solver_sklearn = h2o4gpu.LinearRegression(verbose=True, backend='sklearn') lin_solver_sklearn.fit(x, y) sklearn_predicted = lin_solver_sklearn.predict(x) end_sklearn = time.time() print(("Sklearn prediction: ", sklearn_predicted) if verbose else "", end="") start_daal = time.time() lin_solver_daal = h2o4gpu.LinearRegression(fit_intercept=True, verbose=True, backend='daal', method=LinearMethod.normal_equation) lin_solver_daal.fit(x, y) daal_predicted = lin_solver_daal.predict(x) end_daal = time.time() print(("Daal prediction: ", daal_predicted) if verbose else "", end="") 
print("Prediction calculated:") print("+ Sklearn: {}".format(end_sklearn-start_sklearn)) print("+ Daal: {}".format(end_daal-start_daal)) assert_array_almost_equal(daal_predicted, sklearn_predicted, decimal=4) assert_array_almost_equal(daal_predicted, y, decimal=4) if os.getenv("CHECKPERFORMANCE") is not None: assert end_daal - start_daal <= end_sklearn - start_sklearn sklearn_score = lin_solver_sklearn.score(x, y) daal_score = lin_solver_daal.score(x, y) print("Score calculated: ") print("+ Sklearn: {}".format(sklearn_score)) print("+ Daal: {}".format(daal_score)) assert daal_score == sklearn_score def test_linear_regression_normalized(): test_fit_linear_regression_daal_vs_sklearn() def test_linear_regression(): test_linear_regression_simple() def test_linear_regression_param_3_2(): test_linear_regression_against_scikit(rows=3, columns=2) def test_linear_regression_with_sc(): test_linear_regression_against_scikit() def test_beta(): test_coeff_size(rows=10, columns=9) test_intercept_flag(rows=10, columns=9) def test_daal_linear_regression_wrapper(): test_linear_regression_daal_vs_sklearn(rows=10, columns=9,verbose=True) #test_linear_regression_daal_vs_sklearn(rows=100, columns=99,verbose=False) #test_linear_regression_daal_vs_sklearn(rows=1000, columns=999,verbose=False) if __name__ == '__main__': test_linear_regression_simple() test_daal_linear_regression_wrapper()
[]
[]
[ "CHECKPERFORMANCE" ]
[]
["CHECKPERFORMANCE"]
python
1
0
pkg/job_control/jenkins_test.go
package jobcontrol import ( "fmt" "io/ioutil" "log" "os" "strings" "testing" "github.com/ifosch/synthetic/pkg/synthetic" ) func disableLogs() { log.SetFlags(0) log.SetOutput(ioutil.Discard) } type parsingTC struct { input string command string expectedJob string expectedArgs map[string]string expectedError string } func TestParsing(t *testing.T) { disableLogs() j := &Jenkins{ js: NewMockJobServer( map[string]string{ "deploy": "Deploy project", }, ), } tcs := []parsingTC{ { input: "build deploy INDEX=users", command: "build", expectedJob: "deploy", expectedArgs: map[string]string{"INDEX": "users"}, expectedError: "", }, { input: "build deploy INDEX=\"users ducks\"", command: "build", expectedJob: "deploy", expectedArgs: map[string]string{"INDEX": "\"users ducks\""}, expectedError: "", }, { input: "describe", command: "describe", expectedJob: "", expectedArgs: map[string]string{}, expectedError: "you must specify, at least, one job. You can use `list` to get a list of defined jobs and `describe <job>` to get all details about a job", }, { input: "describe missingjob", command: "describe", expectedJob: "", // Job does not exist so it returns empty expectedArgs: map[string]string{}, expectedError: "the job `missingjob` doesn't exist in current job list. If it's new addition, try using `reload` to refresh the list of jobs", }, } for _, test := range tcs { t.Run(test.input, func(t *testing.T) { job, args, err := j.ParseArgs(test.input, test.command) // Unexpected error happened if test.expectedError == "" && err != nil { t.Logf("Unexpected error %v", err) t.Fail() } // Expected error did not happen if test.expectedError != "" && err == nil { t.Logf("Expected error '%v' didn't happen", test.expectedError) t.Fail() } // Job parsing did not match. if job != test.expectedJob { t.Logf("Wrong job parsed '%v' should be '%v'", job, test.expectedJob) t.Fail() } // Parsed arguments did not match for expectedName, expectedValue := range test.expectedArgs { value, ok := args[expectedName] if !ok { t.Logf("Missing argument '%v'", expectedName) t.Fail() } if value != expectedValue { t.Logf("Wrong value '%v' for '%v' should be '%v'", value, expectedName, expectedValue) t.Fail() } } }) } } type loadTC struct { expectedJobs map[string]string expectedReplyOnReload string expectedRepliesOnBuild []string } func TestLoadReload(t *testing.T) { disableLogs() tc := loadTC{ expectedJobs: map[string]string{ "build": "Build the project", "test": "Run test suit on the project", "deploy": "Deploy project", }, expectedReplyOnReload: "3 Jenkins jobs reloaded", } j := &Jenkins{ js: NewMockJobServer( tc.expectedJobs, ), } if j.js.GetJobs().Len() != len(tc.expectedJobs) { t.Logf("Wrong number of jobs loaded %v but expected %v", j.js.GetJobs().Len(), len(tc.expectedJobs)) t.Fail() } i := 0 for job := range tc.expectedJobs { if j.js.GetJob(job).Describe() != tc.expectedJobs[job] { t.Logf("Wrong job loaded %v expected %v", j.js.GetJob(job), tc.expectedJobs[job]) t.Fail() } i++ } msg := synthetic.NewMockMessage("", false) j.Reload(msg) if j.js.GetJobs().Len() != len(tc.expectedJobs) { t.Logf("Wrong number of jobs loaded %v but expected %v", j.js.GetJobs().Len(), len(tc.expectedJobs)) t.Fail() } i = 0 for job := range tc.expectedJobs { if j.js.GetJob(job).Describe() != tc.expectedJobs[job] { t.Logf("Wrong job loaded %v expected %v", j.js.GetJob(job).Name(), tc.expectedJobs[job]) t.Fail() } i++ } if len(msg.Replies()) != 1 { t.Logf("Wrong number of replies received %v should be 1", len(msg.Replies())) t.Fail() } if msg.Replies()[0] != 
tc.expectedReplyOnReload { t.Logf("Wrong reply '%v' should be '%v'", msg.Replies()[0], tc.expectedReplyOnReload) t.Fail() } } func TestDescribe(t *testing.T) { disableLogs() tc := loadTC{ expectedJobs: map[string]string{ "build": "Build the project", "test": "Run test suit on the project", "deploy": "Deploy project", }, } j := &Jenkins{ js: NewMockJobServer( tc.expectedJobs, ), } msg := synthetic.NewMockMessage("describe test", true) j.Describe(msg) if len(msg.Replies()) != 1 { t.Logf("Wrong number of replies %v but expected 1", len(msg.Replies())) t.Fail() } if msg.Replies()[0] != tc.expectedJobs["test"] { t.Logf("Wrong reply '%v' but expected '%v'", msg.Replies()[0], tc.expectedJobs["test"]) t.Fail() } } func TestList(t *testing.T) { disableLogs() tc := loadTC{ expectedJobs: map[string]string{ "build": "Build the project", "test": "Run test suit on the project", "deploy": "Deploy project", }, } j := &Jenkins{ js: NewMockJobServer( tc.expectedJobs, ), } msg := synthetic.NewMockMessage("", false) j.List(msg) if len(msg.Replies()) != 1 { t.Logf("Wrong number of replies %v but expected 1", len(msg.Replies())) t.Fail() } for jobName := range tc.expectedJobs { if !strings.Contains(msg.Replies()[0], jobName) { t.Logf("Job named '%v' not found in '%v'", jobName, msg.Replies()[0]) t.Fail() } } } func TestBuild(t *testing.T) { disableLogs() tc := loadTC{ expectedJobs: map[string]string{ "build": "Build the project", "test": "Run test suit on the project", "deploy": "Deploy project", }, expectedRepliesOnBuild: []string{ "Execution for job `test` was queued", fmt.Sprintf("Building `test` with parameters `map[]` (%v/job/test)", os.Getenv("JENKINS_URL")), "Job test completed", }, } j := &Jenkins{ js: NewMockJobServer( tc.expectedJobs, ), } msg := synthetic.NewMockMessage("build test", true) j.Build(msg) if len(msg.Replies()) != len(tc.expectedRepliesOnBuild) { t.Logf("Wrong number of replies %v but expected %v", len(msg.Replies()), len(tc.expectedRepliesOnBuild)) t.Fail() } for i, reply := range msg.Replies() { if reply != tc.expectedRepliesOnBuild[i] { t.Logf("Wrong reply '%v' but expected '%v'", reply, tc.expectedRepliesOnBuild[i]) t.Fail() } } } func TestTokenizeParams(t *testing.T) { tt := []struct { input string result []string }{ { input: "", result: []string{}, }, { input: "build deploy", result: []string{"build", "deploy"}, }, { input: "build deploy INDEX=users", result: []string{"build", "deploy", "INDEX=users"}, }, { input: "build deploy INDEX=\"users\"", result: []string{"build", "deploy", "INDEX=\"users\""}, }, { input: "build deploy INDEX=\"users ducks\"", result: []string{"build", "deploy", "INDEX=\"users ducks\""}, }, } for _, tc := range tt { t.Run(tc.input, func(t *testing.T) { result := tokenizeParams(tc.input) if len(result) != len(tc.result) { t.Errorf("expected %d results but got %d", len(tc.result), len(result)) } for i, value := range result { if value != tc.result[i] { t.Errorf("expected element %d to be %s but was %s", i, tc.result[i], value) } } }) } }
[ "\"JENKINS_URL\"" ]
[]
[ "JENKINS_URL" ]
[]
["JENKINS_URL"]
go
1
0
mortar_rdb/tests/test_testing.py
import os from mortar_rdb.testing import register_session, TestingBase from mortar_rdb import get_session, declarative_base from mortar_rdb.controlled import Config, Source from testfixtures.components import TestComponents from mock import Mock from sqlalchemy.pool import StaticPool from sqlalchemy.engine.reflection import Inspector from sqlalchemy.ext.declarative import declarative_base as sa_declarative_base from sqlalchemy.orm import relationship from sqlalchemy.schema import Column, ForeignKey from sqlalchemy.types import Integer, String from testfixtures import Replacer, compare, TempDirectory, OutputCapture from unittest import TestCase class TestRegisterSessionFunctional(TestCase): def setUp(self): self.dir = TempDirectory() self.components = TestComponents() def tearDown(self): self.components.uninstall() self.dir.cleanup() def test_functional(self): Base = sa_declarative_base() class Model(Base): __tablename__ = 'model' id = Column('id', Integer, primary_key=True) name = Column('name', String(50)) register_session( transactional=False, config=Config(Source(Model.__table__))) session = get_session() session.add(Model(name='foo')) session.commit() def test_functional_metadata(self): Base = sa_declarative_base() class Model(Base): __tablename__ = 'model' id = Column('id', Integer, primary_key=True) name = Column('name', String(50)) register_session( transactional=False, metadata=Base.metadata ) session = get_session() session.add(Model(name='foo')) session.commit() def test_functional_echo_but_no_url(self): with Replacer() as r: # make sure there's no DB_URL r.replace('os.environ', dict()) # hoover up the logging ;-) with OutputCapture(): register_session(echo=True) def test_tricky_to_delete(self): # respect any DB_URL set here so that # we sure the real db here to make sure # delete works across all our DB types... db_path = ( os.environ.get('DB_URL', '').strip() or 'sqlite:///'+os.path.join(self.dir.path, 'test.db') ) Base = sa_declarative_base() class Model1(Base): __tablename__ = 'model1' id = Column(Integer, primary_key=True) model2_id = Column(Integer, ForeignKey('model2.id')) model2 = relationship("Model2") class Model2(Base): __tablename__ = 'model2' id = Column('id', Integer, primary_key=True) # create in one session register_session(db_path, name='create', transactional=False, metadata=Base.metadata) m1 = Model1() m2 = Model2() m1.model2 = m2 session = get_session('create') if db_path.startswith('sqlite:'): session.execute('PRAGMA foreign_keys = ON') session.add(m1) session.add(m2) session.commit() compare(session.query(Model1).count(), 1) compare(session.query(Model2).count(), 1) session.rollback() # now register another session which should # blow the above away register_session(db_path,name='read', transactional=False, metadata=Base.metadata) session = get_session('read') compare(session.query(Model1).count(), 0) compare(session.query(Model2).count(), 0) session.rollback() def test_only_some_packages(self): Base = sa_declarative_base() class Model1(Base): __tablename__ = 'model1' id = Column('id', Integer, primary_key=True) name = Column('name', String(50)) class Model2(Base): __tablename__ = 'model2' id = Column('id', Integer, primary_key=True) name = Column('name', String(50)) register_session( transactional=False, config=Config(Source(Model1.__table__))) # only table1 should have been created! 
compare( [u'model1'], Inspector.from_engine(get_session().bind).get_table_names() ) class TestRegisterSessionCalls(TestCase): def setUp(self): self.components = TestComponents() self.r = Replacer() self.m = Mock() self.r.replace('mortar_rdb.testing.real_register_session', self.m.realRegisterSession) self.r.replace('mortar_rdb.testing.create_engine', self.m.create_engine) # mock out for certainty # self.r.replace('mortar_rdb.testing.???',Mock()) # mock out for table destruction get_session = Mock() bind = get_session.return_value.bind bind.dialect.inspector.return_value = inspector = Mock() inspector.get_table_names.return_value = () self.r.replace('mortar_rdb.testing.get_session', get_session) def tearDown(self): self.r.restore() self.components.uninstall() def test_default_params(self): # ie: no DB_URL! self.r.replace('os.environ',dict()) register_session() compare([ ('create_engine', ('sqlite://',), {'poolclass': StaticPool, 'echo': False}), ('realRegisterSession', (None, u'', self.m.create_engine.return_value, False, True, True, None), {}), ],self.m.method_calls) def test_specified_params(self): register_session( url='x://', name='foo', echo=True, transactional=False, scoped=False, ) compare([ ('realRegisterSession', ('x://', u'foo', None, True, False, False, None), {}), ],self.m.method_calls) def test_echo_but_no_url(self): # make sure there's no DBURL self.r.replace('os.environ',dict()) register_session(echo=True) compare([ ('create_engine', ('sqlite://',), {'poolclass': StaticPool, 'echo': True}), ('realRegisterSession', (None, u'', self.m.create_engine.return_value, False, True, True, None), {}), ],self.m.method_calls) def test_engine_passed(self): engine = object() register_session( engine=engine, ) compare([ ('realRegisterSession', (None, u'', engine, False, True, True, None), {}), ],self.m.method_calls) def test_url_from_environment(self): self.r.replace('os.environ',dict( DB_URL = 'x://' )) register_session() compare([ ('realRegisterSession', ('x://', u'', None, False, True, True, None), {}), ],self.m.method_calls) def test_empty_environment_url(self): self.r.replace('os.environ',dict( DB_URL = '' )) register_session() compare([ ('create_engine', ('sqlite://',), {'poolclass': StaticPool, 'echo': False}), ('realRegisterSession', ('', u'', self.m.create_engine.return_value, False, True, True, None), {}), ],self.m.method_calls) def test_engine_overrides_environment(self): self.r.replace('os.environ',dict( DB_URL = 'x://' )) engine = object() register_session(engine=engine) compare([ ('realRegisterSession', (None, u'', engine, False, True, True, None), {}), ],self.m.method_calls) class TestTestingBase(TestCase): def test_manual(self): b1 = declarative_base() tb = TestingBase() b2 = declarative_base() tb.restore() b3 = declarative_base() # checks self.assertFalse(b1 is b2) self.assertFalse(b3 is b2) self.assertTrue(b1 is b3) def test_context_manager(self): b1 = declarative_base() with TestingBase(): b2 = declarative_base() b3 = declarative_base() # checks self.assertFalse(b1 is b2) self.assertFalse(b3 is b2) self.assertTrue(b1 is b3)
[]
[]
[ "DB_URL" ]
[]
["DB_URL"]
python
1
0
halici/wsgi.py
""" WSGI config for halici project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'halici.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
api/api.go
package api import ( "fmt" "log" "net/http" "os" "github.com/RoadTripppin/wazzup/config" "github.com/RoadTripppin/wazzup/controllers" "github.com/RoadTripppin/wazzup/helpers" "github.com/gorilla/handlers" "github.com/gorilla/mux" ) //var Contex = context.Background() func StartApi() { helpers.LoadEnv() config.CreateRedisClient() db := config.InitDB() defer db.Close() router := mux.NewRouter() wsServer := controllers.NewWebsocketServer(&helpers.RoomRepository{Db: db}, &helpers.UserRepository{Db: db}) go wsServer.Run() // CORS Handler headersOk := handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type", "Authorization"}) originsOk := handlers.AllowedOrigins([]string{os.Getenv("ORIGIN_ALLOWED")}) methodsOk := handlers.AllowedMethods([]string{"GET", "POST", "PUT", "HEAD", "OPTIONS"}) //routes router.HandleFunc("/ws", func(w http.ResponseWriter, r *http.Request) { controllers.ServeWs(wsServer, w, r) }) router.HandleFunc("/login", controllers.Login).Methods("POST") router.HandleFunc("/user", controllers.SearchUser).Methods("POST") router.HandleFunc("/register", controllers.Register).Methods("POST") router.HandleFunc("/user/update", controllers.UpdateUser).Methods("POST") router.HandleFunc("/user/delete", controllers.DeleteUser).Methods("POST") router.HandleFunc("/user/interacted", controllers.GetInteractedUsers).Methods("GET") router.HandleFunc("/messages", controllers.GetMessages).Methods("POST") port := os.Getenv("SERVER_PORT") fmt.Println("App is working on port :" + port) log.Fatal(http.ListenAndServe(":"+port, handlers.CORS(headersOk, methodsOk, originsOk)(router))) }
[ "\"ORIGIN_ALLOWED\"", "\"SERVER_PORT\"" ]
[]
[ "ORIGIN_ALLOWED", "SERVER_PORT" ]
[]
["ORIGIN_ALLOWED", "SERVER_PORT"]
go
2
0
tests/e2e/kubetest2-kops/deployer/up.go
/* Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package deployer import ( "os" osexec "os/exec" "strings" "k8s.io/klog/v2" "k8s.io/kops/tests/e2e/kubetest2-kops/aws" "k8s.io/kops/tests/e2e/kubetest2-kops/gce" "k8s.io/kops/tests/e2e/kubetest2-kops/util" "sigs.k8s.io/kubetest2/pkg/exec" ) func (d *deployer) Up() error { if err := d.init(); err != nil { return err } publicIP, err := util.ExternalIPRange() if err != nil { return err } adminAccess := d.AdminAccess if adminAccess == "" { adminAccess = publicIP } args := []string{ d.KopsBinaryPath, "create", "cluster", "--name", d.ClusterName, "--admin-access", adminAccess, "--cloud", d.CloudProvider, "--kubernetes-version", d.KubernetesVersion, "--master-count", "1", "--master-volume-size", "48", "--node-count", "4", "--node-volume-size", "48", "--override", "cluster.spec.nodePortAccess=0.0.0.0/0", "--ssh-public-key", d.SSHPublicKeyPath, "--yes", } if d.CloudProvider == "aws" { zones, err := aws.RandomZones(1) if err != nil { return err } args = append(args, "--zones", strings.Join(zones, ",")) args = append(args, "--master-size", "c5.large") } if d.CloudProvider == "gce" { zones, err := gce.RandomZones(1) if err != nil { return err } args = append(args, "--zones", strings.Join(zones, ",")) args = append(args, "--master-size", "e2-standard-2") } klog.Info(strings.Join(args, " ")) cmd := exec.Command(args[0], args[1:]...) cmd.SetEnv(d.env()...) exec.InheritOutput(cmd) err = cmd.Run() if err != nil { return err } isUp, err := d.IsUp() if err != nil { return err } else if isUp { klog.V(1).Infof("cluster reported as up") } else { klog.Errorf("cluster reported as down") } return nil } func (d *deployer) IsUp() (bool, error) { args := []string{ d.KopsBinaryPath, "validate", "cluster", "--name", d.ClusterName, "--wait", "15m", } klog.Info(strings.Join(args, " ")) cmd := exec.Command(args[0], args[1:]...) cmd.SetEnv(d.env()...) exec.InheritOutput(cmd) err := cmd.Run() // `kops validate cluster` exits 2 if validation failed if exitErr, ok := err.(*osexec.ExitError); ok && exitErr.ExitCode() == 2 { return false, nil } return err == nil, err } // verifyUpFlags ensures fields are set for creation of the cluster func (d *deployer) verifyUpFlags() error { // These environment variables are defined by the "preset-aws-ssh" prow preset // https://github.com/kubernetes/test-infra/blob/3d3b325c98b739b526ba5d93ce21c90a05e1f46d/config/prow/config.yaml#L653-L670 if d.SSHPrivateKeyPath == "" { d.SSHPrivateKeyPath = os.Getenv("AWS_SSH_PRIVATE_KEY_FILE") } if d.SSHPublicKeyPath == "" { d.SSHPublicKeyPath = os.Getenv("AWS_SSH_PUBLIC_KEY_FILE") } return nil }
[ "\"AWS_SSH_PRIVATE_KEY_FILE\"", "\"AWS_SSH_PUBLIC_KEY_FILE\"" ]
[]
[ "AWS_SSH_PRIVATE_KEY_FILE", "AWS_SSH_PUBLIC_KEY_FILE" ]
[]
["AWS_SSH_PRIVATE_KEY_FILE", "AWS_SSH_PUBLIC_KEY_FILE"]
go
2
0
program/image-classification-tf-py/classify.py
# # Copyright (c) 2017-2018 cTuning foundation. # See CK COPYRIGHT.txt for copyright details. # # SPDX-License-Identifier: BSD-3-Clause. # See CK LICENSE.txt for licensing details. # import imp import time import json import os import sys import numpy as np import tensorflow as tf MODEL_MODULE = os.getenv('CK_ENV_TENSORFLOW_MODEL_MODULE') MODEL_WEIGHTS = os.getenv('CK_ENV_TENSORFLOW_MODEL_WEIGHTS') MODEL_WEIGHTS_ARE_CHECKPOINTS = os.getenv('CK_ENV_TENSORFLOW_MODEL_WEIGHTS_ARE_CHECKPOINTS') == 'YES' MODEL_NORMALIZE_DATA = os.getenv("CK_ENV_TENSORFLOW_MODEL_NORMALIZE_DATA") == "YES" MODEL_MEAN_VALUE = np.array([0, 0, 0], dtype=np.float32) # to be populated BATCH_COUNT = int(os.getenv('CK_BATCH_COUNT', 1)) BATCH_SIZE = int(os.getenv('CK_BATCH_SIZE', 1)) IMAGE_LIST = os.getenv('RUN_OPT_IMAGE_LIST') IMAGE_DIR = os.getenv('RUN_OPT_IMAGE_DIR') RESULT_DIR = os.getenv('RUN_OPT_RESULT_DIR') SUBTRACT_MEAN = os.getenv("CK_SUBTRACT_MEAN") == "YES" USE_MODEL_MEAN = os.getenv("CK_USE_MODEL_MEAN") == "YES" IMAGE_SIZE = int(os.getenv('RUN_OPT_IMAGE_SIZE')) FULL_REPORT = int(os.getenv('RUN_OPT_SILENT_MODE', '0')) == 0 # Load images batch def load_batch(image_list, image_index): batch_data = [] for _ in range(BATCH_SIZE): img_file = os.path.join(IMAGE_DIR, image_list[image_index]) img = np.fromfile(img_file, np.uint8) img = img.reshape((IMAGE_SIZE, IMAGE_SIZE, 3)) img = img.astype(np.float) # Normalize if MODEL_NORMALIZE_DATA: img = img / 255.0 img = img - 0.5 img = img * 2 # Subtract mean value if SUBTRACT_MEAN: if USE_MODEL_MEAN: img = img - MODEL_MEAN_VALUE else: img = img - np.mean(img) # Put to batch batch_data.append(img) image_index += 1 return batch_data, image_index def main(_): global MODEL_MEAN_VALUE global USE_MODEL_MEAN # Print settings print('Model module: ' + MODEL_MODULE) print('Model weights: ' + MODEL_WEIGHTS) print('Images dir: ' + IMAGE_DIR) print('Image list: ' + IMAGE_LIST) print('Image size: {}'.format(IMAGE_SIZE)) print('Batch size: {}'.format(BATCH_SIZE)) print('Batch count: {}'.format(BATCH_COUNT)) print('Result dir: ' + RESULT_DIR) print('Normalize: {}'.format(MODEL_NORMALIZE_DATA)) print('Subtract mean: {}'.format(SUBTRACT_MEAN)) print('Use model mean: {}'.format(USE_MODEL_MEAN)) # Load model implementation module model = imp.load_source('tf_model', MODEL_MODULE) # Load mean value from model is presented if hasattr(model, 'get_mean_value'): MODEL_MEAN_VALUE = model.get_mean_value() else: USE_MODEL_MEAN = False # Load processing image filenames image_list = [] with open(IMAGE_LIST, 'r') as f: for s in f: image_list.append(s.strip()) # Prepare TF config options config = tf.ConfigProto() config.gpu_options.allow_growth = True config.gpu_options.allocator_type = 'BFC' config.gpu_options.per_process_gpu_memory_fraction = float(os.getenv('CK_TF_GPU_MEMORY_PERCENT', 33)) / 100.0 num_processors = int(os.getenv('CK_TF_CPU_NUM_OF_PROCESSORS', 0)) if num_processors > 0: config.device_count["CPU"] = num_processors setup_time_begin = time.time() # Load weights # Model implementation should store weights data somewhere into its # internal structure as main test is not interested in weights details. # If weights are not checkpoints then they are static data (e.g. numpy array files) # and can be loaded preliminary before network will have constructed. 
if not MODEL_WEIGHTS_ARE_CHECKPOINTS: begin_time = time.time() model.load_weights(MODEL_WEIGHTS) weights_load_time = time.time() - begin_time print("Weights loaded in %fs" % weights_load_time) with tf.Graph().as_default(), tf.Session(config=config) as sess: # Build net begin_time = time.time() input_shape = (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3) input_node = tf.placeholder(dtype=tf.float32, shape=input_shape, name="input") output_node = model.inference(input_node) net_create_time = time.time() - begin_time print("Net created in %fs" % net_create_time) # Load weights # If weights are checkpoints they only can be restored after network has been built if MODEL_WEIGHTS_ARE_CHECKPOINTS: begin_time = time.time() model.load_checkpoints(sess, MODEL_WEIGHTS) weights_load_time = time.time() - begin_time print("Weights loaded in %fs" % weights_load_time) setup_time = time.time() - setup_time_begin # Run batched mode test_time_begin = time.time() image_index = 0 total_load_time = 0 total_classification_time = 0 first_classification_time = 0 images_loaded = 0 for batch_index in range(BATCH_COUNT): batch_number = batch_index+1 if FULL_REPORT or (batch_number % 10 == 0): print("\nBatch {} of {}".format(batch_number, BATCH_COUNT)) begin_time = time.time() batch_data, image_index = load_batch(image_list, image_index) load_time = time.time() - begin_time total_load_time += load_time images_loaded += BATCH_SIZE if FULL_REPORT: print("Batch loaded in %fs" % (load_time)) # Classify batch begin_time = time.time() feed = { input_node: batch_data } batch_results = output_node.eval(feed_dict=feed) classification_time = time.time() - begin_time if FULL_REPORT: print("Batch classified in %fs" % (classification_time)) total_classification_time += classification_time # Remember first batch prediction time if batch_index == 0: first_classification_time = classification_time # Process results for index_in_batch in range(BATCH_SIZE): all_probs = model.get_image_scores(batch_results, index_in_batch) global_index = batch_index * BATCH_SIZE + index_in_batch res_file = os.path.join(RESULT_DIR, image_list[global_index]) with open(res_file + '.txt', 'w') as f: for prob in all_probs: f.write('{}\n'.format(prob)) test_time = time.time() - test_time_begin if BATCH_COUNT > 1: avg_classification_time = (total_classification_time - first_classification_time) / (images_loaded - BATCH_SIZE) else: avg_classification_time = total_classification_time / images_loaded avg_load_time = total_load_time / images_loaded # Store benchmark results openme = {} openme['setup_time_s'] = setup_time openme['test_time_s'] = test_time openme['net_create_time_s'] = net_create_time openme['weights_load_time_s'] = weights_load_time openme['images_load_time_total_s'] = total_load_time openme['images_load_time_avg_s'] = avg_load_time openme['prediction_time_total_s'] = total_classification_time openme['prediction_time_avg_s'] = avg_classification_time openme['avg_time_ms'] = avg_classification_time * 1000 openme['avg_fps'] = 1.0 / avg_classification_time openme['batch_time_ms'] = avg_classification_time * 1000 * BATCH_SIZE openme['batch_size'] = BATCH_SIZE with open('tmp-ck-timer.json', 'w') as o: json.dump(openme, o, indent=2, sort_keys=True) if __name__ == '__main__': tf.app.run()
[]
[]
[ "RUN_OPT_IMAGE_DIR", "CK_USE_MODEL_MEAN", "CK_ENV_TENSORFLOW_MODEL_WEIGHTS", "CK_BATCH_SIZE", "CK_SUBTRACT_MEAN", "RUN_OPT_SILENT_MODE", "CK_TF_CPU_NUM_OF_PROCESSORS", "CK_ENV_TENSORFLOW_MODEL_NORMALIZE_DATA", "RUN_OPT_RESULT_DIR", "RUN_OPT_IMAGE_SIZE", "RUN_OPT_IMAGE_LIST", "CK_TF_GPU_MEMORY_PERCENT", "CK_ENV_TENSORFLOW_MODEL_MODULE", "CK_ENV_TENSORFLOW_MODEL_WEIGHTS_ARE_CHECKPOINTS", "CK_BATCH_COUNT" ]
[]
["RUN_OPT_IMAGE_DIR", "CK_USE_MODEL_MEAN", "CK_ENV_TENSORFLOW_MODEL_WEIGHTS", "CK_BATCH_SIZE", "CK_SUBTRACT_MEAN", "RUN_OPT_SILENT_MODE", "CK_TF_CPU_NUM_OF_PROCESSORS", "CK_ENV_TENSORFLOW_MODEL_NORMALIZE_DATA", "RUN_OPT_RESULT_DIR", "RUN_OPT_IMAGE_SIZE", "RUN_OPT_IMAGE_LIST", "CK_TF_GPU_MEMORY_PERCENT", "CK_ENV_TENSORFLOW_MODEL_MODULE", "CK_ENV_TENSORFLOW_MODEL_WEIGHTS_ARE_CHECKPOINTS", "CK_BATCH_COUNT"]
python
15
0
config/os.go
// +build !darwin,!windows package config import ( "os" "os/user" "path/filepath" ) func getDefaultConfigDirectory() (string, error) { configDir := os.Getenv("XDG_CONFIG_HOME") if configDir != "" { return filepath.Join(configDir, AppNameLowercase), nil } currentUser, userError := user.Current() if userError != nil { return "", userError } return filepath.Join(currentUser.HomeDir, ".config", AppNameLowercase), nil }
[ "\"XDG_CONFIG_HOME\"" ]
[]
[ "XDG_CONFIG_HOME" ]
[]
["XDG_CONFIG_HOME"]
go
1
0
src/tgclient/src/com/alachisoft/tayzgrid/util/DirectoryUtil.java
/* * Copyright (c) 2015, Alachisoft. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.alachisoft.tayzgrid.util; import com.alachisoft.tayzgrid.common.AppUtil; import com.alachisoft.tayzgrid.common.exceptions.ManagementException; import com.alachisoft.tayzgrid.common.ServicePropValues; import com.alachisoft.tayzgrid.config.dom.CacheServerConfig; import com.alachisoft.tayzgrid.management.ThinClientConfigManager; import com.alachisoft.tayzgrid.web.caching.TayzGrid; import java.io.File; public class DirectoryUtil { private static String _installDir = null; /** * search for the specified file in the executing assembly's working folder if the file is found, then a path string is returned back. otherwise it returns null. * * @param fileName * @return */ public static String GetFileLocalPath(String fileName) { try { String path = getConfigPath(fileName); if ((new java.io.File(path)).isFile()) { return path; } } catch (Exception e) { } return null; } /** * search for the specified file in TayzGrid install directory. if the file is found then returns the path string from where the file can be loaded. otherwise it returns null. * * @param fileName * @return */ public static String GetFileGlobalPath(String fileName, String directoryName) { String directoryPath = ""; String filePath = ""; tangible.RefObject<String> tempRef_directoryPath = new tangible.RefObject<String>(directoryPath); try { boolean tempVar = !SearchGlobalDirectory(directoryName, false, directoryPath); directoryPath = tempRef_directoryPath.argvalue; if (tempVar) { return null; } } catch (Exception e) { } filePath = new File(directoryPath, fileName).getPath();//Path.Combine(directoryPath, fileName); if (!(new java.io.File(filePath)).isFile()) { return null; } return filePath; } public static java.util.ArrayList GetCacheConfig(String cacheId, boolean inproc) { String filePath = GetFileLocalPath("cache.conf"); java.util.ArrayList configurationList = null; if (filePath != null) { try { configurationList = ThinClientConfigManager.GetCacheConfig(cacheId, filePath, inproc); } catch (Exception exception) { } } return configurationList; } public static CacheServerConfig GetCacheDom(String cacheId, boolean inproc) throws ManagementException { String filePath = GetFileLocalPath("cache.conf"); CacheServerConfig dom = null; if (filePath != null) { dom = ThinClientConfigManager.GetConfigDom(cacheId, filePath, inproc); } return dom; } public static boolean SearchLocalDirectory(String directoryName, boolean createNew, tangible.RefObject<String> path) throws Exception { path.argvalue = (new java.io.File(System.getProperty("user.dir")).getParent()); if (!(new java.io.File(path.argvalue)).isDirectory()) { if (createNew) { try { (new java.io.File(path.argvalue)).mkdir(); return true; } catch (Exception e) { throw e; } } return false; } return true; } public static boolean SearchGlobalDirectory(String directoryName, boolean createNew, String path) throws Exception { String ncacheInstallDirectory = AppUtil.getInstallDir(); path = ""; if 
(ncacheInstallDirectory == null) { return false; } path = new File(ncacheInstallDirectory, directoryName).getPath();//Path.Combine(ncacheInstallDirectory, directoryName); if (!(new java.io.File(path)).isDirectory()) { if (createNew) { try { (new java.io.File(path)).mkdir(); return true; } catch (Exception e2) { throw e2; } } return false; } return true; } public static String getConfigPath(String filename) throws Exception { String separator = System.getProperty("file.separator"); String path = ""; //Making SetConfigPath property available on Windows. path = TayzGrid.getConfigPath(); if (path != null && path.equalsIgnoreCase("") == false) { return path.concat(separator + filename); } if (System.getProperty("os.name").toLowerCase().startsWith("win")) { //<editor-fold defaultstate="collapsed" desc=" Library Path for Windows "> //get current execution path path = System.getProperty("user.dir"); if (path != null) { if (!path.endsWith(separator)) { path = path.concat(separator); } path = path.concat(filename); if (fileExists(path)) { return path; } } //get ncache installation path path = getInstallDir(); if (path != null) { if (path != null) { if (!path.endsWith(separator)) { path = path.concat(separator); } path = path.concat("config" + separator + filename); if (fileExists(path)) { return path; } } } //</editor-fold> } else { //<editor-fold defaultstate="collapsed" desc=" Library Path for linux "> path = TayzGrid.getConfigPath(); if (path != null && path.equalsIgnoreCase("") == false) { return path.concat(separator + filename); } else { path = ServicePropValues.getTGHome(); if (path != null && path.equalsIgnoreCase("") == false) { path = path.concat(separator + "config"); } else { path = System.getenv("NCACHE_MEMCACHED_ROOT"); if (path != null && path.equalsIgnoreCase("") == false) { path = path.concat(separator + "config"); } //</editor-fold> } if (path == null || path.equalsIgnoreCase("") == true) { path = "/opt/tayzgrid/config/" + filename; return path; } } } if (path == null) { throw new Exception("Unable to find " + filename + "; please reset Enviorment variables"); } return path.concat(separator + filename); } /** * Determine whether file exists at specified path * * @param path File path to be checked * @return True if file exists, false otherwise */ public static boolean fileExists(String path) { File check = new File(path); try { return check.exists(); } catch (SecurityException se) { } return false; } /** * Read windows registry and return ncache installation directory * * @return Ncache installation directory path */ public static String getInstallDir() { if (_installDir == null) { _installDir = ServicePropValues.getTGHome(); } return _installDir; } public static String GetBaseFilePath(String fileName) { //if file present in local directory String path = System.getProperty("user.dir") + fileName; File f = new File(path); if(f.exists()) { return path; } //check global directory of ncache to find file. String directoryPath = ""; String filePath = ""; try { if (!SearchGlobalDirectory("config", false, directoryPath)) { return null; } } catch(Exception e){return null;} filePath = new File(directoryPath, fileName).getPath(); File f1 = new File(path); if(f1.exists()) { return filePath; } return null; } }
[ "\"NCACHE_MEMCACHED_ROOT\"" ]
[]
[ "NCACHE_MEMCACHED_ROOT" ]
[]
["NCACHE_MEMCACHED_ROOT"]
java
1
0
pkg/msp/matchers_test.go
/* Copyright SecureKey Technologies Inc. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package msp import ( "path/filepath" "testing" "github.com/studyzy/fabric-sdk-go/pkg/common/providers/core" "github.com/studyzy/fabric-sdk-go/pkg/core/config" "github.com/studyzy/fabric-sdk-go/third_party/github.com/stretchr/testify/assert" ) const ( sampleMatchersOverrideAll = "matchers_sample1.yaml" sampleMatchersRegexReplace = "matchers_sample3.yaml" sampleMatchersIgnoreEndpoint = "matchers_sample6.yaml" sampleMatchersDir = "matcher-samples" actualCAURL = "https://ca.org1.example.com:7054" overridedCAURL = "https://ca.org1.example.com:8888" actualTargetServerName = "ca.org1.example.com" overridedTargetServerName = "ca.override.example.com" ) //TestCAURLOverride //Scenario: Using entity mather to override CA URL func TestCAURLOverride(t *testing.T) { //Test basic entity matcher matcherPath := filepath.Join(getConfigPath(), sampleMatchersDir, sampleMatchersOverrideAll) testCAEntityMatcher(t, matcherPath) //Test entity matcher with regex replace feature '$' matcherPath = filepath.Join(getConfigPath(), sampleMatchersDir, sampleMatchersRegexReplace) testCAEntityMatcher(t, matcherPath) } func testCAEntityMatcher(t *testing.T, configPath string) { //Without entity matcher configTestPath := filepath.Join(getConfigPath(), configTestFile) backends, err := getBackendsFromFiles(configTestPath) assert.Nil(t, err, "not supposed to get error") assert.Equal(t, 1, len(backends)) config, err := ConfigFromBackend(backends...) assert.Nil(t, err, "not supposed to get error") assert.NotNil(t, config) caConfig, ok := config.CAConfig("ca.org1.example.com") assert.True(t, ok, "supposed to find caconfig") assert.NotNil(t, caConfig) assert.Equal(t, actualCAURL, caConfig.URL) assert.Equal(t, actualTargetServerName, caConfig.GRPCOptions["ssl-target-name-override"]) //Using entity matcher to override CA URL backends, err = getBackendsFromFiles(configPath, configTestPath) assert.Nil(t, err, "not supposed to get error") assert.Equal(t, 2, len(backends)) config, err = ConfigFromBackend(backends...) assert.Nil(t, err, "not supposed to get error") assert.NotNil(t, config) caConfig, ok = config.CAConfig("ca.org1.example.com") assert.True(t, ok, "supposed to find caconfig") assert.NotNil(t, caConfig) assert.Equal(t, overridedCAURL, caConfig.URL) assert.Equal(t, overridedTargetServerName, caConfig.GRPCOptions["ssl-target-name-override"]) } //TestCAEntityMatcherIgnoreEndpoint tests CA entity matcher 'IgnoreEndpoint' option // If marked 'IgnoreEndpoint: true' then corresponding CA will be ignored func TestCAEntityMatcherIgnoreEndpoint(t *testing.T) { //Without entity matcher configTestPath := filepath.Join(getConfigPath(), configTestFile) matcherPath := filepath.Join(getConfigPath(), sampleMatchersDir, sampleMatchersIgnoreEndpoint) backends, err := getBackendsFromFiles(matcherPath, configTestPath) assert.Nil(t, err, "not supposed to get error") assert.Equal(t, 2, len(backends)) config, err := ConfigFromBackend(backends...) 
assert.Nil(t, err, "not supposed to get error") assert.NotNil(t, config) caConfig, ok := config.CAConfig("ca.org1.example.com") assert.True(t, ok) assert.NotNil(t, caConfig) caConfig, ok = config.CAConfig("ca.org2.example.com") assert.False(t, ok) assert.Nil(t, caConfig) configImpl := config.(*IdentityConfig) assert.Equal(t, 1, len(configImpl.caConfigs)) _, ok = configImpl.caConfigs["ca.org1.example.com"] assert.True(t, ok) _, ok = configImpl.caConfigs["ca.org2.example.com"] assert.False(t, ok) } func getBackendsFromFiles(files ...string) ([]core.ConfigBackend, error) { backends := make([]core.ConfigBackend, len(files)) for i, file := range files { backend, err := config.FromFile(file)() if err != nil { return nil, err } backends[i] = backend[0] } return backends, nil }
[]
[]
[]
[]
[]
go
null
null
null
cli/cli.go
package cli import ( "flag" "fmt" "log" "net/http" "os" "runtime" "strconv" "github.com/gopherchain/gopherchain/blockchain" "github.com/gopherchain/gopherchain/network" "github.com/gopherchain/gopherchain/wallet" ) type CommandLine struct{} func (cli *CommandLine) printUsage() { fmt.Println("Usage:") fmt.Println(" getbalance -address ADDRESS - get the balance for an address") fmt.Println(" createblockchain -address ADDRESS creates a blockchain and sends genesis reward to address") fmt.Println(" printchain - Prints the blocks in the chain") fmt.Println(" send -from FROM -to TO -amount AMOUNT -mine - Send amount of coins. Then -mine flag is set, mine off of this node") fmt.Println(" createwallet - Creates a new Wallet") fmt.Println(" listaddresses - Lists the addresses in our wallet file") fmt.Println(" reindexutxo - Rebuilds the UTXO set") fmt.Println(" startnode -miner ADDRESS - Start a node with ID specified in NODE_ID env. var. -miner enables mining") } func (cli *CommandLine) validateArgs() { if len(os.Args) < 2 { cli.printUsage() runtime.Goexit() } } func (cli *CommandLine) StartNode(nodeID, minerAddress string) { fmt.Printf("Starting Node %s\n", nodeID) go func() { fs := http.FileServer(http.Dir("./blocks")) log.Fatal(http.ListenAndServe(":3002", fs)) }() if len(minerAddress) > 0 { if wallet.ValidateAddress(minerAddress) { fmt.Println("Mining is on. Address to receive rewards: ", minerAddress) } else { log.Panic("Wrong miner address!") } } network.StartServer(nodeID, minerAddress) } func (cli *CommandLine) reindexUTXO(nodeID string) { chain := blockchain.ContinueBlockChain(network.KnownNodesDir[0]) defer chain.Database.Close() UTXOSet := blockchain.UTXOSet{chain} UTXOSet.Reindex() count := UTXOSet.CountTransactions() fmt.Printf("Done! There are %d transactions in the UTXO set.\n", count) } func (cli *CommandLine) listAddresses() { wallets, _ := wallet.CreateWallets() addresses := wallets.GetAllAddresses() for _, address := range addresses { fmt.Println(address) } } func (cli *CommandLine) createWallet() { wallets, _ := wallet.CreateWallets() address := wallets.AddWallet() wallets.SaveFile() fmt.Printf("New address is: %s\n", address) } func (cli *CommandLine) printChain(nodeID string) { chain := blockchain.ContinueBlockChain(network.KnownNodesDir[0]) defer chain.Database.Close() iter := chain.Iterator() for { block := iter.Next() fmt.Printf("Hash: %x\n", block.Hash) fmt.Printf("Prev. 
hash: %x\n", block.PrevHash) pow := blockchain.NewProof(block) fmt.Printf("PoW: %s\n", strconv.FormatBool(pow.Validate())) for _, tx := range block.Transactions { fmt.Println(tx) } fmt.Println() if len(block.PrevHash) == 0 { break } } } func (cli *CommandLine) createBlockChain(address, nodeID string) { if !wallet.ValidateAddress(address) { log.Panic("Address is not Valid") } chain := blockchain.InitBlockChain(address) defer chain.Database.Close() UTXOSet := blockchain.UTXOSet{chain} UTXOSet.Reindex() fmt.Println("Finished!") } func (cli *CommandLine) getBalance(address, nodeID string) { if !wallet.ValidateAddress(address) { log.Panic("Address is not Valid") } chain := blockchain.ContinueBlockChain(network.KnownNodesDir[0]) UTXOSet := blockchain.UTXOSet{chain} defer chain.Database.Close() balance := 0 pubKeyHash := wallet.Base58Decode([]byte(address)) pubKeyHash = pubKeyHash[1 : len(pubKeyHash)-4] UTXOs := UTXOSet.FindUnspentTransactions(pubKeyHash) for _, out := range UTXOs { balance += out.Value } fmt.Printf("Balance of %s: %d\n", address, balance) } func (cli *CommandLine) send(from, to string, amount int, nodeID string, mineNow bool) { if !wallet.ValidateAddress(to) { log.Panic("Address is not Valid") } if !wallet.ValidateAddress(from) { log.Panic("Address is not Valid") } chain := blockchain.ContinueBlockChain(network.KnownNodesDir[0]) UTXOSet := blockchain.UTXOSet{chain} defer chain.Database.Close() wallets, err := wallet.CreateWallets() if err != nil { log.Panic(err) } wallet := wallets.GetWallet(from) tx := blockchain.NewTransaction(&wallet, to, amount, &UTXOSet) if mineNow { cbTx := blockchain.CoinbaseTx(from, "") txs := []*blockchain.Transaction{cbTx, tx} block := chain.MineBlock(txs) UTXOSet.Update(block) } else { network.SendTx(network.KnownNodes[0], tx) fmt.Println("send tx") } fmt.Println("Success!") } func (cli *CommandLine) Run() { cli.validateArgs() nodeID := os.Getenv("NODE_ID") if nodeID == "" { fmt.Println("NODE_ID env is not set!") runtime.Goexit() } getBalanceCmd := flag.NewFlagSet("getbalance", flag.ExitOnError) createBlockchainCmd := flag.NewFlagSet("createblockchain", flag.ExitOnError) sendCmd := flag.NewFlagSet("send", flag.ExitOnError) printChainCmd := flag.NewFlagSet("printchain", flag.ExitOnError) createWalletCmd := flag.NewFlagSet("createwallet", flag.ExitOnError) listAddressesCmd := flag.NewFlagSet("listaddresses", flag.ExitOnError) reindexUTXOCmd := flag.NewFlagSet("reindexutxo", flag.ExitOnError) startNodeCmd := flag.NewFlagSet("startnode", flag.ExitOnError) getBalanceAddress := getBalanceCmd.String("address", "", "The address to get balance for") createBlockchainAddress := createBlockchainCmd.String("address", "", "The address to send genesis block reward to") sendFrom := sendCmd.String("from", "", "Source wallet address") sendTo := sendCmd.String("to", "", "Destination wallet address") sendAmount := sendCmd.Int("amount", 0, "Amount to send") sendMine := sendCmd.Bool("mine", false, "Mine immediately on the same node") startNodeMiner := startNodeCmd.String("miner", "", "Enable mining mode and send reward to ADDRESS") switch os.Args[1] { case "reindexutxo": err := reindexUTXOCmd.Parse(os.Args[2:]) if err != nil { log.Panic(err) } case "getbalance": err := getBalanceCmd.Parse(os.Args[2:]) if err != nil { log.Panic(err) } case "createblockchain": err := createBlockchainCmd.Parse(os.Args[2:]) if err != nil { log.Panic(err) } case "startnode": err := startNodeCmd.Parse(os.Args[2:]) if err != nil { log.Panic(err) } case "listaddresses": err := 
listAddressesCmd.Parse(os.Args[2:]) if err != nil { log.Panic(err) } case "createwallet": err := createWalletCmd.Parse(os.Args[2:]) if err != nil { log.Panic(err) } case "printchain": err := printChainCmd.Parse(os.Args[2:]) if err != nil { log.Panic(err) } case "send": err := sendCmd.Parse(os.Args[2:]) if err != nil { log.Panic(err) } default: cli.printUsage() runtime.Goexit() } if getBalanceCmd.Parsed() { if *getBalanceAddress == "" { getBalanceCmd.Usage() runtime.Goexit() } cli.getBalance(*getBalanceAddress, nodeID) } if createBlockchainCmd.Parsed() { if *createBlockchainAddress == "" { createBlockchainCmd.Usage() runtime.Goexit() } cli.createBlockChain(*createBlockchainAddress, nodeID) } if printChainCmd.Parsed() { cli.printChain(nodeID) } if createWalletCmd.Parsed() { cli.createWallet() } if listAddressesCmd.Parsed() { cli.listAddresses() } if reindexUTXOCmd.Parsed() { cli.reindexUTXO(nodeID) } if sendCmd.Parsed() { if *sendFrom == "" || *sendTo == "" || *sendAmount <= 0 { sendCmd.Usage() runtime.Goexit() } cli.send(*sendFrom, *sendTo, *sendAmount, nodeID, *sendMine) } if startNodeCmd.Parsed() { nodeID := os.Getenv("NODE_ID") if nodeID == "" { startNodeCmd.Usage() runtime.Goexit() } cli.StartNode(nodeID, *startNodeMiner) } }
[ "\"NODE_ID\"", "\"NODE_ID\"" ]
[]
[ "NODE_ID" ]
[]
["NODE_ID"]
go
1
0
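CommandLine.Run in the record above dispatches on os.Args[1], parses a dedicated flag.FlagSet per subcommand, and refuses to run unless the NODE_ID environment variable is set. A stripped-down sketch of that dispatch pattern using only the standard library (the single "send" subcommand and its flags are illustrative):

package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	// Every subcommand needs NODE_ID, exactly as CommandLine.Run requires above.
	nodeID := os.Getenv("NODE_ID")
	if nodeID == "" {
		fmt.Println("NODE_ID env is not set!")
		os.Exit(1)
	}
	if len(os.Args) < 2 {
		fmt.Println("expected a subcommand, e.g. send")
		os.Exit(1)
	}

	// One FlagSet per subcommand keeps flags scoped to the command they belong to.
	sendCmd := flag.NewFlagSet("send", flag.ExitOnError)
	sendFrom := sendCmd.String("from", "", "Source wallet address")
	sendTo := sendCmd.String("to", "", "Destination wallet address")
	sendAmount := sendCmd.Int("amount", 0, "Amount to send")

	switch os.Args[1] {
	case "send":
		// With flag.ExitOnError, Parse exits on bad input, so no error handling is needed here.
		sendCmd.Parse(os.Args[2:])
		if *sendFrom == "" || *sendTo == "" || *sendAmount <= 0 {
			sendCmd.Usage()
			os.Exit(1)
		}
		fmt.Printf("node %s: send %d from %s to %s\n", nodeID, *sendAmount, *sendFrom, *sendTo)
	default:
		fmt.Println("unknown subcommand:", os.Args[1])
		os.Exit(1)
	}
}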
internal/yamlconfig/dnsprovider.go
package yamlconfig import ( "fmt" "github.com/joeig/dyndns-pdns/pkg/dnsprovider" ) // DNSProviderPowerDNS sets the DNS provider to PowerDNS. // // This setting uses a PowerDNS backend. const DNSProviderPowerDNS dnsprovider.Type = "powerDNS" // ActiveDNSProvider contains the currently activated DNS provider var ActiveDNSProvider dnsprovider.DNSProvider // SetDNSProvider configures the DNS provider set by the configuration func SetDNSProvider(d *dnsprovider.DNSProvider) { switch C.DNSProviderType { case DNSProviderPowerDNS: *d = &C.PowerDNS default: panic(fmt.Errorf("invalid dnsProviderType \"%s\"", C.DNSProviderType)) } }
[]
[]
[]
[]
[]
go
null
null
null
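SetDNSProvider in the record above selects a concrete backend by switching on the configured provider type and assigning it to an interface-typed variable, panicking on unknown values. A generic sketch of that select-implementation-by-config-string pattern (the interface and types below are illustrative, not the dyndns-pdns API):

package main

import "fmt"

// Provider is a minimal stand-in for dnsprovider.DNSProvider.
type Provider interface {
	Name() string
}

type powerDNS struct{}

func (powerDNS) Name() string { return "powerDNS" }

// selectProvider maps a configuration string to a concrete implementation.
func selectProvider(providerType string) (Provider, error) {
	switch providerType {
	case "powerDNS":
		return powerDNS{}, nil
	default:
		return nil, fmt.Errorf("invalid dnsProviderType %q", providerType)
	}
}

func main() {
	p, err := selectProvider("powerDNS")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.Name()) // powerDNS
}

Returning an error instead of panicking is only to keep the sketch testable; the original panics because the selection runs once at startup with no caller to recover.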
tests/superset_test_config.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # type: ignore from copy import copy from superset.config import * AUTH_USER_REGISTRATION_ROLE = "alpha" SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(DATA_DIR, "unittests.db") DEBUG = True SUPERSET_WEBSERVER_PORT = 8081 # Allowing SQLALCHEMY_DATABASE_URI to be defined as an env var for # continuous integration if "SUPERSET__SQLALCHEMY_DATABASE_URI" in os.environ: SQLALCHEMY_DATABASE_URI = os.environ["SUPERSET__SQLALCHEMY_DATABASE_URI"] SQL_MAX_ROW = 666 SQLLAB_CTAS_NO_LIMIT = True # SQL_MAX_ROW will not take affect for the CTA queries FEATURE_FLAGS = {"foo": "bar", "KV_STORE": True, "SHARE_QUERIES_VIA_KV_STORE": True} def GET_FEATURE_FLAGS_FUNC(ff): ff_copy = copy(ff) ff_copy["super"] = "set" return ff_copy TESTING = True WTF_CSRF_ENABLED = False PUBLIC_ROLE_LIKE_GAMMA = True AUTH_ROLE_PUBLIC = "Public" EMAIL_NOTIFICATIONS = False CACHE_CONFIG = {"CACHE_TYPE": "simple"} class CeleryConfig(object): BROKER_URL = "redis://localhost" CELERY_IMPORTS = ("superset.sql_lab",) CELERY_ANNOTATIONS = {"sql_lab.add": {"rate_limit": "10/s"}} CONCURRENCY = 1 CELERY_CONFIG = CeleryConfig
[]
[]
[ "SUPERSET__SQLALCHEMY_DATABASE_URI" ]
[]
["SUPERSET__SQLALCHEMY_DATABASE_URI"]
python
1
0
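The Superset test config above keeps a SQLite default and replaces SQLALCHEMY_DATABASE_URI only when SUPERSET__SQLALCHEMY_DATABASE_URI exists in the environment, so CI can point tests at another database without changing local runs. A small Go sketch of the same override-only-if-set pattern using os.LookupEnv (the variable name comes from the record; everything else is illustrative):

package main

import (
	"fmt"
	"os"
)

func main() {
	// Default used for local test runs.
	databaseURI := "sqlite:///unittests.db"

	// Override only when CI explicitly provides a value;
	// LookupEnv distinguishes "unset" from "set to an empty string".
	if v, ok := os.LookupEnv("SUPERSET__SQLALCHEMY_DATABASE_URI"); ok {
		databaseURI = v
	}

	fmt.Println("using database:", databaseURI)
}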
test/functional/p2p_unrequested_blocks.py
#!/usr/bin/env python3 # Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test processing of unrequested blocks. Setup: two nodes, node0+node1, not connected to each other. Node1 will have nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks. We have one P2PInterface connection to node0 called test_node, and one to node1 called min_work_node. The test: 1. Generate one block on each node, to leave IBD. 2. Mine a new block on each tip, and deliver to each node from node's peer. The tip should advance for node0, but node1 should skip processing due to nMinimumChainWork. Node1 is unused in tests 3-7: 3. Mine a block that forks from the genesis block, and deliver to test_node. Node0 should not process this block (just accept the header), because it is unrequested and doesn't have more or equal work to the tip. 4a,b. Send another two blocks that build on the forking block. Node0 should process the second block but be stuck on the shorter chain, because it's missing an intermediate block. 4c.Send 288 more blocks on the longer chain (the number of blocks ahead we currently store). Node0 should process all but the last block (too far ahead in height). 5. Send a duplicate of the block in #3 to Node0. Node0 should not process the block because it is unrequested, and stay on the shorter chain. 6. Send Node0 an inv for the height 3 block produced in #4 above. Node0 should figure out that Node0 has the missing height 2 block and send a getdata. 7. Send Node0 the missing block again. Node0 should process and the tip should advance. 8. Create a fork which is invalid at a height longer than the current chain (ie to which the node will try to reorg) but which has headers built on top of the invalid block. Check that we get disconnected if we send more headers on the chain the node now knows to be invalid. 9. Test Node1 is able to sync when connected to node0 (which should have sufficient work on its chain). """ from test_framework.mininode import * from test_framework.test_framework import GenesisTestFramework from test_framework.util import * import time from test_framework.blocktools import create_block, create_coinbase, create_transaction class AcceptBlockTest(GenesisTestFramework): def add_options(self, parser): parser.add_option("--testbinary", dest="testbinary", default=os.getenv("GENESISD", "genesisd"), help="genesisd binary to test") def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [[], ["-minimumchainwork=0x10"]] def setup_network(self): # Node0 will be used to test behavior of processing unrequested blocks # from peers which are not whitelisted, while Node1 will be used for # the whitelisted case. # Node2 will be used for non-whitelisted peers to test the interaction # with nMinimumChainWork. self.setup_nodes() def run_test(self): # Setup the p2p connections and start up the network thread. # test_node connects to node0 (not whitelisted) test_node = self.nodes[0].add_p2p_connection(P2PInterface()) # min_work_node connects to node1 (whitelisted) min_work_node = self.nodes[1].add_p2p_connection(P2PInterface()) network_thread_start() # Test logic begins here test_node.wait_for_verack() min_work_node.wait_for_verack() # 1. Have nodes mine a block (leave IBD) [ n.generate(1) for n in self.nodes ] tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ] # 2. 
Send one block that builds on each tip. # This should be accepted by node0 blocks_h2 = [] # the height 2 blocks on each node's chain block_time = int(time.time()) + 1 for i in range(2): blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time)) blocks_h2[i].solve() block_time += 1 test_node.send_message(msg_block(blocks_h2[0])) min_work_node.send_message(msg_block(blocks_h2[1])) for x in [test_node, min_work_node]: x.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 2) assert_equal(self.nodes[1].getblockcount(), 1) self.log.info("First height 2 block accepted by node0; correctly rejected by node1") # 3. Send another block that builds on genesis. block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time) block_time += 1 block_h1f.solve() test_node.send_message(msg_block(block_h1f)) test_node.sync_with_ping() tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_h1f.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert(tip_entry_found) assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash) # 4. Send another two block that build on the fork. block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time) block_time += 1 block_h2f.solve() test_node.send_message(msg_block(block_h2f)) test_node.sync_with_ping() # Since the earlier block was not processed by node, the new block # can't be fully validated. tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_h2f.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert(tip_entry_found) # But this block should be accepted by node since it has equal work. self.nodes[0].getblock(block_h2f.hash) self.log.info("Second height 2 block accepted, but not reorg'ed to") # 4b. Now send another block that builds on the forking chain. block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1) block_h3.solve() test_node.send_message(msg_block(block_h3)) test_node.sync_with_ping() # Since the earlier block was not processed by node, the new block # can't be fully validated. tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_h3.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert(tip_entry_found) self.nodes[0].getblock(block_h3.hash) # But this block should be accepted by node since it has more work. self.nodes[0].getblock(block_h3.hash) self.log.info("Unrequested more-work block accepted") # 4c. 
Now mine 288 more blocks and deliver; all should be processed but # the last (height-too-high) on node (as long as its not missing any headers) tip = block_h3 all_blocks = [] for i in range(288): next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1) next_block.solve() all_blocks.append(next_block) tip = next_block # Now send the block at height 5 and check that it wasn't accepted (missing header) test_node.send_message(msg_block(all_blocks[1])) test_node.sync_with_ping() assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash) assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash) # The block at height 5 should be accepted if we provide the missing header, though headers_message = msg_headers() headers_message.headers.append(CBlockHeader(all_blocks[0])) test_node.send_message(headers_message) test_node.send_message(msg_block(all_blocks[1])) test_node.sync_with_ping() self.nodes[0].getblock(all_blocks[1].hash) # Now send the blocks in all_blocks for i in range(288): test_node.send_message(msg_block(all_blocks[i])) test_node.sync_with_ping() # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead for x in all_blocks[:-1]: self.nodes[0].getblock(x.hash) assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash) # 5. Test handling of unrequested block on the node that didn't process # Should still not be processed (even though it has a child that has more # work). # The node should have requested the blocks at some point, so # disconnect/reconnect first self.nodes[0].disconnect_p2ps() self.nodes[1].disconnect_p2ps() network_thread_join() test_node = self.nodes[0].add_p2p_connection(P2PInterface()) network_thread_start() test_node.wait_for_verack() test_node.send_message(msg_block(block_h1f)) test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 2) self.log.info("Unrequested block that would complete more-work chain was ignored") # 6. Try to get node to request the missing block. # Poke the node with an inv for block at height 3 and see if that # triggers a getdata on block 2 (it should if block 2 is missing). with mininode_lock: # Clear state so we can check the getdata request test_node.last_message.pop("getdata", None) test_node.send_message(msg_inv([CInv(2, block_h3.sha256)])) test_node.sync_with_ping() with mininode_lock: getdata = test_node.last_message["getdata"] # Check that the getdata includes the right block assert_equal(getdata.inv[0].hash, block_h1f.sha256) self.log.info("Inv at tip triggered getdata for unprocessed block") # 7. Send the missing block for the third time (now it is requested) test_node.send_message(msg_block(block_h1f)) test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 290) self.nodes[0].getblock(all_blocks[286].hash) assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash) self.log.info("Successfully reorged to longer chain from non-whitelisted peer") # 8. 
Create a chain which is invalid at a height longer than the # current chain, but which has more blocks on top of that block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1) block_289f.solve() block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1) block_290f.solve() block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1) # block_291 spends a coinbase below maturity! block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1)) block_291.hashMerkleRoot = block_291.calc_merkle_root() block_291.solve() block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1) block_292.solve() # Now send all the headers on the chain and enough blocks to trigger reorg headers_message = msg_headers() headers_message.headers.append(CBlockHeader(block_289f)) headers_message.headers.append(CBlockHeader(block_290f)) headers_message.headers.append(CBlockHeader(block_291)) headers_message.headers.append(CBlockHeader(block_292)) test_node.send_message(headers_message) test_node.sync_with_ping() tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_292.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert(tip_entry_found) assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash) test_node.send_message(msg_block(block_289f)) test_node.send_message(msg_block(block_290f)) test_node.sync_with_ping() self.nodes[0].getblock(block_289f.hash) self.nodes[0].getblock(block_290f.hash) test_node.send_message(msg_block(block_291)) # At this point we've sent an obviously-bogus block, wait for full processing # without assuming whether we will be disconnected or not try: # Only wait a short while so the test doesn't take forever if we do get # disconnected test_node.sync_with_ping(timeout=1) except AssertionError: test_node.wait_for_disconnect() self.nodes[0].disconnect_p2ps() test_node = self.nodes[0].add_p2p_connection(P2PInterface()) network_thread_start() test_node.wait_for_verack() # We should have failed reorg and switched back to 290 (but have block 291) assert_equal(self.nodes[0].getblockcount(), 290) assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1) # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1) block_293.solve() headers_message = msg_headers() headers_message.headers.append(CBlockHeader(block_293)) test_node.send_message(headers_message) test_node.wait_for_disconnect() # 9. Connect node1 to node0 and ensure it is able to sync connect_nodes(self.nodes[0], 1) sync_blocks([self.nodes[0], self.nodes[1]]) self.log.info("Successfully synced nodes 1 and 0") if __name__ == '__main__': AcceptBlockTest().main()
[]
[]
[ "GENESISD" ]
[]
["GENESISD"]
python
1
0
provider/rcode0.go
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package provider import ( "context" "fmt" "net/url" "os" "strings" rc0 "github.com/nic-at/rc0go" log "github.com/sirupsen/logrus" "sigs.k8s.io/external-dns/endpoint" "sigs.k8s.io/external-dns/plan" ) // RcodeZeroProvider implements the DNS provider for RcodeZero Anycast DNS. type RcodeZeroProvider struct { Client *rc0.Client DomainFilter DomainFilter DryRun bool TXTEncrypt bool Key []byte } // NewRcodeZeroProvider creates a new RcodeZero Anycast DNS provider. // // Returns the provider or an error if a provider could not be created. func NewRcodeZeroProvider(domainFilter DomainFilter, dryRun bool, txtEnc bool) (*RcodeZeroProvider, error) { client, err := rc0.NewClient(os.Getenv("RC0_API_KEY")) if err != nil { return nil, err } value := os.Getenv("RC0_BASE_URL") if len(value) != 0 { client.BaseURL, err = url.Parse(os.Getenv("RC0_BASE_URL")) } if err != nil { return nil, fmt.Errorf("failed to initialize rcodezero provider: %v", err) } provider := &RcodeZeroProvider{ Client: client, DomainFilter: domainFilter, DryRun: dryRun, TXTEncrypt: txtEnc, } if txtEnc { provider.Key = []byte(os.Getenv("RC0_ENC_KEY")) } return provider, nil } // Zones returns filtered zones if filter is set func (p *RcodeZeroProvider) Zones() ([]*rc0.Zone, error) { var result []*rc0.Zone zones, err := p.fetchZones() if err != nil { return nil, err } for _, zone := range zones { if p.DomainFilter.Match(zone.Domain) { result = append(result, zone) } } return result, nil } // Records returns resource records // // Decrypts TXT records if TXT-Encrypt flag is set and key is provided func (p *RcodeZeroProvider) Records(ctx context.Context) ([]*endpoint.Endpoint, error) { zones, err := p.Zones() if err != nil { return nil, err } var endpoints []*endpoint.Endpoint for _, zone := range zones { rrset, err := p.fetchRecords(zone.Domain) if err != nil { return nil, err } for _, r := range rrset { if supportedRecordType(r.Type) { if p.TXTEncrypt && (p.Key != nil) && strings.EqualFold(r.Type, "TXT") { p.Client.RRSet.DecryptTXT(p.Key, r) } if len(r.Records) > 1 { for _, _r := range r.Records { if !_r.Disabled { endpoints = append(endpoints, endpoint.NewEndpointWithTTL(r.Name, r.Type, endpoint.TTL(r.TTL), _r.Content)) } } } else if !r.Records[0].Disabled { endpoints = append(endpoints, endpoint.NewEndpointWithTTL(r.Name, r.Type, endpoint.TTL(r.TTL), r.Records[0].Content)) } } } } return endpoints, nil } // ApplyChanges applies a given set of changes in a given zone. func (p *RcodeZeroProvider) ApplyChanges(ctx context.Context, changes *plan.Changes) error { combinedChanges := make([]*rc0.RRSetChange, 0, len(changes.Create)+len(changes.UpdateNew)+len(changes.Delete)) combinedChanges = append(combinedChanges, p.NewRcodezeroChanges(rc0.ChangeTypeADD, changes.Create)...) combinedChanges = append(combinedChanges, p.NewRcodezeroChanges(rc0.ChangeTypeUPDATE, changes.UpdateNew)...) combinedChanges = append(combinedChanges, p.NewRcodezeroChanges(rc0.ChangeTypeDELETE, changes.Delete)...) 
return p.submitChanges(combinedChanges) } // Helper function func rcodezeroChangesByZone(zones []*rc0.Zone, changeSet []*rc0.RRSetChange) map[string][]*rc0.RRSetChange { changes := make(map[string][]*rc0.RRSetChange) zoneNameIDMapper := zoneIDName{} for _, z := range zones { zoneNameIDMapper.Add(z.Domain, z.Domain) changes[z.Domain] = []*rc0.RRSetChange{} } for _, c := range changeSet { zone, _ := zoneNameIDMapper.FindZone(c.Name) if zone == "" { log.Debugf("Skipping record %s because no hosted zone matching record DNS Name was detected", c.Name) continue } changes[zone] = append(changes[zone], c) } return changes } // Helper function func (p *RcodeZeroProvider) fetchRecords(zoneName string) ([]*rc0.RRType, error) { var allRecords []*rc0.RRType listOptions := rc0.NewListOptions() for { records, page, err := p.Client.RRSet.List(zoneName, listOptions) if err != nil { return nil, err } allRecords = append(allRecords, records...) if page == nil || (page.CurrentPage == page.LastPage) { break } listOptions.SetPageNumber(page.CurrentPage + 1) } return allRecords, nil } // Helper function func (p *RcodeZeroProvider) fetchZones() ([]*rc0.Zone, error) { var allZones []*rc0.Zone listOptions := rc0.NewListOptions() for { zones, page, err := p.Client.Zones.List(listOptions) if err != nil { return nil, err } allZones = append(allZones, zones...) if page == nil || page.IsLastPage() { break } listOptions.SetPageNumber(page.CurrentPage + 1) } return allZones, nil } // Helper function to submit changes. // // Changes are submitted by change type. func (p *RcodeZeroProvider) submitChanges(changes []*rc0.RRSetChange) error { if len(changes) == 0 { return nil } zones, err := p.Zones() if err != nil { return err } // separate into per-zone change sets to be passed to the API. changesByZone := rcodezeroChangesByZone(zones, changes) for zoneName, changes := range changesByZone { for _, change := range changes { logFields := log.Fields{ "record": change.Name, "content": change.Records[0].Content, "type": change.Type, "action": change.ChangeType, "zone": zoneName, } log.WithFields(logFields).Info("Changing record.") if p.DryRun { continue } // to avoid accidentally adding extra dot if already present change.Name = strings.TrimSuffix(change.Name, ".") + "." switch change.ChangeType { case rc0.ChangeTypeADD: sr, err := p.Client.RRSet.Create(zoneName, []*rc0.RRSetChange{change}) if err != nil { return err } if sr.HasError() { return fmt.Errorf("adding new RR resulted in an error: %v", sr.Message) } case rc0.ChangeTypeUPDATE: sr, err := p.Client.RRSet.Edit(zoneName, []*rc0.RRSetChange{change}) if err != nil { return err } if sr.HasError() { return fmt.Errorf("updating existing RR resulted in an error: %v", sr.Message) } case rc0.ChangeTypeDELETE: sr, err := p.Client.RRSet.Delete(zoneName, []*rc0.RRSetChange{change}) if err != nil { return err } if sr.HasError() { return fmt.Errorf("deleting existing RR resulted in an error: %v", sr.Message) } default: return fmt.Errorf("unsupported changeType submitted: %v", change.ChangeType) } } } return nil } // NewRcodezeroChanges returns a RcodeZero specific array with rrset change objects. func (p *RcodeZeroProvider) NewRcodezeroChanges(action string, endpoints []*endpoint.Endpoint) []*rc0.RRSetChange { changes := make([]*rc0.RRSetChange, 0, len(endpoints)) for _, _endpoint := range endpoints { changes = append(changes, p.NewRcodezeroChange(action, _endpoint)) } return changes } // NewRcodezeroChange returns a RcodeZero specific rrset change object. 
func (p *RcodeZeroProvider) NewRcodezeroChange(action string, endpoint *endpoint.Endpoint) *rc0.RRSetChange { change := &rc0.RRSetChange{ Type: endpoint.RecordType, ChangeType: action, Name: endpoint.DNSName, Records: []*rc0.Record{{ Disabled: false, Content: endpoint.Targets[0], }}, } if p.TXTEncrypt && (p.Key != nil) && strings.EqualFold(endpoint.RecordType, "TXT") { p.Client.RRSet.EncryptTXT(p.Key, change) } return change }
[ "\"RC0_API_KEY\"", "\"RC0_BASE_URL\"", "\"RC0_BASE_URL\"", "\"RC0_ENC_KEY\"" ]
[]
[ "RC0_API_KEY", "RC0_BASE_URL", "RC0_ENC_KEY" ]
[]
["RC0_API_KEY", "RC0_BASE_URL", "RC0_ENC_KEY"]
go
3
0
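NewRcodeZeroProvider in the record above reads RC0_API_KEY unconditionally, parses RC0_BASE_URL only when it is non-empty, and optionally loads RC0_ENC_KEY for TXT encryption. A minimal sketch of that optional-base-URL configuration step with only the standard library (the client struct is a stand-in, not the rc0go API):

package main

import (
	"fmt"
	"net/url"
	"os"
)

// client is a stand-in for the DNS API client the provider configures.
type client struct {
	APIKey  string
	BaseURL *url.URL
}

func newClient() (*client, error) {
	// The provider passes RC0_API_KEY straight to the client constructor.
	c := &client{APIKey: os.Getenv("RC0_API_KEY")}

	// Only override the base URL when RC0_BASE_URL is set, failing fast on a bad value.
	if raw := os.Getenv("RC0_BASE_URL"); raw != "" {
		u, err := url.Parse(raw)
		if err != nil {
			return nil, fmt.Errorf("failed to initialize rcodezero provider: %v", err)
		}
		c.BaseURL = u
	}
	return c, nil
}

func main() {
	c, err := newClient()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("base URL override:", c.BaseURL)
}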
CPythonLib/os.py
r"""OS routines for Mac, DOS, NT, or Posix depending on what system we're on. This exports: - all functions from posix, nt, dos, os2, mac, or ce, e.g. unlink, stat, etc. - os.path is one of the modules posixpath, ntpath, macpath, or dospath - os.name is 'posix', 'nt', 'dos', 'os2', 'mac', 'ce' or 'riscos' - os.curdir is a string representing the current directory ('.' or ':') - os.pardir is a string representing the parent directory ('..' or '::') - os.sep is the (or a most common) pathname separator ('/' or ':' or '\\') - os.extsep is the extension separator ('.' or '/') - os.altsep is the alternate pathname separator (None or '/') - os.pathsep is the component separator used in $PATH etc - os.linesep is the line separator in text files ('\r' or '\n' or '\r\n') - os.defpath is the default search path for executables Programs that import and use 'os' stand a better chance of being portable between different platforms. Of course, they must then only use functions that are defined by all platforms (e.g., unlink and opendir), and leave all pathname manipulation to os.path (e.g., split and join). """ #' import sys _names = sys.builtin_module_names altsep = None __all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep", "defpath", "name"] def _get_exports_list(module): try: return list(module.__all__) except AttributeError: return [n for n in dir(module) if n[0] != '_'] if 'posix' in _names: name = 'posix' linesep = '\n' curdir = '.'; pardir = '..'; sep = '/'; pathsep = ':' defpath = ':/bin:/usr/bin' from posix import * try: from posix import _exit except ImportError: pass import posixpath path = posixpath del posixpath import posix __all__.extend(_get_exports_list(posix)) del posix elif 'nt' in _names: name = 'nt' linesep = '\r\n' curdir = '.'; pardir = '..'; sep = '\\'; pathsep = ';' defpath = '.;C:\\bin' altsep = '/' from nt import * for i in ['_exit']: try: exec "from nt import " + i except ImportError: pass import ntpath path = ntpath del ntpath import nt __all__.extend(_get_exports_list(nt)) del nt elif 'dos' in _names: name = 'dos' linesep = '\r\n' curdir = '.'; pardir = '..'; sep = '\\'; pathsep = ';' defpath = '.;C:\\bin' from dos import * try: from dos import _exit except ImportError: pass import dospath path = dospath del dospath import dos __all__.extend(_get_exports_list(dos)) del dos elif 'os2' in _names: name = 'os2' linesep = '\r\n' curdir = '.'; pardir = '..'; sep = '\\'; pathsep = ';' defpath = '.;C:\\bin' from os2 import * try: from os2 import _exit except ImportError: pass import ntpath path = ntpath del ntpath import os2 __all__.extend(_get_exports_list(os2)) del os2 elif 'mac' in _names: name = 'mac' linesep = '\r' curdir = ':'; pardir = '::'; sep = ':'; pathsep = '\n' defpath = ':' from mac import * try: from mac import _exit except ImportError: pass import macpath path = macpath del macpath import mac __all__.extend(_get_exports_list(mac)) del mac elif 'ce' in _names: name = 'ce' linesep = '\r\n' curdir = '.'; pardir = '..'; sep = '\\'; pathsep = ';' defpath = '\\Windows' from ce import * for i in ['_exit']: try: exec "from ce import " + i except ImportError: pass # We can use the standard Windows path. 
import ntpath path = ntpath del ntpath import ce __all__.extend(_get_exports_list(ce)) del ce elif 'riscos' in _names: name = 'riscos' linesep = '\n' curdir = '@'; pardir = '^'; sep = '.'; pathsep = ',' defpath = '<Run$Dir>' from riscos import * try: from riscos import _exit except ImportError: pass import riscospath path = riscospath del riscospath import riscos __all__.extend(_get_exports_list(riscos)) del riscos else: raise ImportError, 'no os specific module found' if sep=='.': extsep = '/' else: extsep = '.' __all__.append("path") del _names sys.modules['os.path'] = path #' # Super directory utilities. # (Inspired by Eric Raymond; the doc strings are mostly his) def makedirs(name, mode=0777): """makedirs(path [, mode=0777]) -> None Super-mkdir; create a leaf directory and all intermediate ones. Works like mkdir, except that any intermediate path segment (not just the rightmost) will be created if it does not exist. This is recursive. """ head, tail = path.split(name) if not tail: head, tail = path.split(head) if head and tail and not path.exists(head): makedirs(head, mode) mkdir(name, mode) def removedirs(name): """removedirs(path) -> None Super-rmdir; remove a leaf directory and empty all intermediate ones. Works like rmdir except that, if the leaf directory is successfully removed, directories corresponding to rightmost path segments will be pruned way until either the whole path is consumed or an error occurs. Errors during this latter phase are ignored -- they generally mean that a directory was not empty. """ rmdir(name) head, tail = path.split(name) if not tail: head, tail = path.split(head) while head and tail: try: rmdir(head) except error: break head, tail = path.split(head) def renames(old, new): """renames(old, new) -> None Super-rename; create directories as necessary and delete any left empty. Works like rename, except creation of any intermediate directories needed to make the new pathname good is attempted first. After the rename, directories corresponding to rightmost path segments of the old name will be pruned way until either the whole path is consumed or a nonempty directory is found. Note: this function can fail with the new directory structure made if you lack permissions needed to unlink the leaf directory or file. """ head, tail = path.split(new) if head and tail and not path.exists(head): makedirs(head) rename(old, new) head, tail = path.split(old) if head and tail: try: removedirs(head) except error: pass __all__.extend(["makedirs", "removedirs", "renames"]) # Make sure os.environ exists, at least try: environ except NameError: environ = {} def execl(file, *args): """execl(file, *args) Execute the executable file with argument list args, replacing the current process. """ execv(file, args) def execle(file, *args): """execle(file, *args, env) Execute the executable file with argument list args and environment env, replacing the current process. """ env = args[-1] execve(file, args[:-1], env) def execlp(file, *args): """execlp(file, *args) Execute the executable file (which is searched for along $PATH) with argument list args, replacing the current process. """ execvp(file, args) def execlpe(file, *args): """execlpe(file, *args, env) Execute the executable file (which is searched for along $PATH) with argument list args and environment env, replacing the current process. 
""" env = args[-1] execvpe(file, args[:-1], env) def execvp(file, args): """execp(file, args) Execute the executable file (which is searched for along $PATH) with argument list args, replacing the current process. args may be a list or tuple of strings. """ _execvpe(file, args) def execvpe(file, args, env): """execvpe(file, args, env) Execute the executable file (which is searched for along $PATH) with argument list args and environment env , replacing the current process. args may be a list or tuple of strings. """ _execvpe(file, args, env) __all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"]) def _execvpe(file, args, env=None): from errno import ENOENT, ENOTDIR if env is not None: func = execve argrest = (args, env) else: func = execv argrest = (args,) env = environ head, tail = path.split(file) if head: apply(func, (file,) + argrest) return if env.has_key('PATH'): envpath = env['PATH'] else: envpath = defpath PATH = envpath.split(pathsep) saved_exc = None saved_tb = None for dir in PATH: fullname = path.join(dir, file) try: apply(func, (fullname,) + argrest) except error, e: tb = sys.exc_info()[2] if (e.errno != ENOENT and e.errno != ENOTDIR and saved_exc is None): saved_exc = e saved_tb = tb if saved_exc: raise error, saved_exc, saved_tb raise error, e, tb # Change environ to automatically call putenv() if it exists try: # This will fail if there's no putenv putenv except NameError: pass else: import UserDict # Fake unsetenv() for Windows # not sure about os2 and dos here but # I'm guessing they are the same. if name in ('os2', 'nt', 'dos'): def unsetenv(key): putenv(key, "") if name == "riscos": # On RISC OS, all env access goes through getenv and putenv from riscosenviron import _Environ elif name in ('os2', 'nt', 'dos'): # Where Env Var Names Must Be UPPERCASE # But we store them as upper case class _Environ(UserDict.IterableUserDict): def __init__(self, environ): UserDict.UserDict.__init__(self) data = self.data for k, v in environ.items(): data[k.upper()] = v def __setitem__(self, key, item): putenv(key, item) self.data[key.upper()] = item def __getitem__(self, key): return self.data[key.upper()] try: unsetenv except NameError: def __delitem__(self, key): del self.data[key.upper()] else: def __delitem__(self, key): unsetenv(key) del self.data[key.upper()] def has_key(self, key): return self.data.has_key(key.upper()) def get(self, key, failobj=None): return self.data.get(key.upper(), failobj) def update(self, dict): for k, v in dict.items(): self[k] = v else: # Where Env Var Names Can Be Mixed Case class _Environ(UserDict.IterableUserDict): def __init__(self, environ): UserDict.UserDict.__init__(self) self.data = environ def __setitem__(self, key, item): putenv(key, item) self.data[key] = item def update(self, dict): for k, v in dict.items(): self[k] = v try: unsetenv except NameError: pass else: def __delitem__(self, key): unsetenv(key) del self.data[key] environ = _Environ(environ) def getenv(key, default=None): """Get an environment variable, return None if it doesn't exist. The optional second argument can specify an alternate default.""" return environ.get(key, default) __all__.append("getenv") def _exists(name): try: eval(name) return 1 except NameError: return 0 # Supply spawn*() (probably only for Unix) if _exists("fork") and not _exists("spawnv") and _exists("execv"): P_WAIT = 0 P_NOWAIT = P_NOWAITO = 1 # XXX Should we support P_DETACH? I suppose it could fork()**2 # and close the std I/O streams. Also, P_OVERLAY is the same # as execv*()? 
def _spawnvef(mode, file, args, env, func): # Internal helper; func is the exec*() function to use pid = fork() if not pid: # Child try: if env is None: func(file, args) else: func(file, args, env) except: _exit(127) else: # Parent if mode == P_NOWAIT: return pid # Caller is responsible for waiting! while 1: wpid, sts = waitpid(pid, 0) if WIFSTOPPED(sts): continue elif WIFSIGNALED(sts): return -WTERMSIG(sts) elif WIFEXITED(sts): return WEXITSTATUS(sts) else: raise error, "Not stopped, signaled or exited???" def spawnv(mode, file, args): """spawnv(mode, file, args) -> integer Execute file with arguments from args in a subprocess. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. """ return _spawnvef(mode, file, args, None, execv) def spawnve(mode, file, args, env): """spawnve(mode, file, args, env) -> integer Execute file with arguments from args in a subprocess with the specified environment. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. """ return _spawnvef(mode, file, args, env, execve) # Note: spawnvp[e] is't currently supported on Windows def spawnvp(mode, file, args): """spawnvp(mode, file, args) -> integer Execute file (which is looked for along $PATH) with arguments from args in a subprocess. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. """ return _spawnvef(mode, file, args, None, execvp) def spawnvpe(mode, file, args, env): """spawnvpe(mode, file, args, env) -> integer Execute file (which is looked for along $PATH) with arguments from args in a subprocess with the supplied environment. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. """ return _spawnvef(mode, file, args, env, execvpe) if _exists("spawnv"): # These aren't supplied by the basic Windows code # but can be easily implemented in Python def spawnl(mode, file, *args): """spawnl(mode, file, *args) -> integer Execute file with arguments from args in a subprocess. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. """ return spawnv(mode, file, args) def spawnle(mode, file, *args): """spawnle(mode, file, *args, env) -> integer Execute file with arguments from args in a subprocess with the supplied environment. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. """ env = args[-1] return spawnve(mode, file, args[:-1], env) if _exists("spawnvp"): # At the moment, Windows doesn't implement spawnvp[e], # so it won't have spawnlp[e] either. def spawnlp(mode, file, *args): """spawnlp(mode, file, *args) -> integer Execute file (which is looked for along $PATH) with arguments from args in a subprocess with the supplied environment. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. 
""" return spawnvp(mode, file, args) def spawnlpe(mode, file, *args): """spawnlpe(mode, file, *args, env) -> integer Execute file (which is looked for along $PATH) with arguments from args in a subprocess with the supplied environment. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. """ env = args[-1] return spawnvpe(mode, file, args[:-1], env) __all__.extend(["spawnlp","spawnlpe","spawnv", "spawnve","spawnvp", "spawnvpe","spawnl","spawnle",]) # Supply popen2 etc. (for Unix) if _exists("fork"): if not _exists("popen2"): def popen2(cmd, mode="t", bufsize=-1): import popen2 stdout, stdin = popen2.popen2(cmd, bufsize) return stdin, stdout __all__.append("popen2") if not _exists("popen3"): def popen3(cmd, mode="t", bufsize=-1): import popen2 stdout, stdin, stderr = popen2.popen3(cmd, bufsize) return stdin, stdout, stderr __all__.append("popen3") if not _exists("popen4"): def popen4(cmd, mode="t", bufsize=-1): import popen2 stdout, stdin = popen2.popen4(cmd, bufsize) return stdin, stdout __all__.append("popen4") import copy_reg as _copy_reg def _make_stat_result(tup, dict): return stat_result(tup, dict) def _pickle_stat_result(sr): (type, args) = sr.__reduce__() return (_make_stat_result, args) try: _copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result) except NameError: # stat_result may not exist pass def _make_statvfs_result(tup, dict): return statvfs_result(tup, dict) def _pickle_statvfs_result(sr): (type, args) = sr.__reduce__() return (_make_statvfs_result, args) try: _copy_reg.pickle(statvfs_result, _pickle_statvfs_result, _make_statvfs_result) except NameError: # statvfs_result may not exist pass
[]
[]
[]
[]
[]
python
0
0
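The spawnv family in the os.py record above is built on fork plus one of the exec* functions: with P_NOWAIT the parent returns the child pid immediately, with P_WAIT it blocks and translates the exit status. A rough Go analogue of those two modes using os/exec (illustrative only, and assuming a Unix-like system where "true" and "sleep" exist; Go does not expose the fork/exec split directly here):

package main

import (
	"fmt"
	"os/exec"
)

// spawnWait mirrors P_WAIT: start the child, block until it exits, return its exit code.
func spawnWait(name string, args ...string) (int, error) {
	cmd := exec.Command(name, args...)
	if err := cmd.Run(); err != nil {
		if exitErr, ok := err.(*exec.ExitError); ok {
			return exitErr.ExitCode(), nil
		}
		return -1, err
	}
	return 0, nil
}

// spawnNoWait mirrors P_NOWAIT: start the child and hand back its pid;
// the caller is responsible for waiting on it.
func spawnNoWait(name string, args ...string) (*exec.Cmd, int, error) {
	cmd := exec.Command(name, args...)
	if err := cmd.Start(); err != nil {
		return nil, -1, err
	}
	return cmd, cmd.Process.Pid, nil
}

func main() {
	code, err := spawnWait("true")
	fmt.Println("P_WAIT exit code:", code, err)

	cmd, pid, err := spawnNoWait("sleep", "1")
	fmt.Println("P_NOWAIT pid:", pid, err)
	if cmd != nil {
		_ = cmd.Wait() // reap the child, as the Python caller would with waitpid
	}
}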
lambdas/handleCallback/main.go
package main import ( "context" "encoding/json" "fmt" "net/http" "os" "time" "github.com/aws/aws-lambda-go/events" "github.com/aws/aws-lambda-go/lambda" "github.com/socialement-competents/goauth/database" "github.com/socialement-competents/goauth/models" ) // GHPayload : payload sent by the GitHub API type GHPayload struct { Code string `json:"code"` } // GHToken : token given by GitHub in exchange for a Code type GHToken struct { AccessToken string `json:"access_token"` TokenType string `json:"token_type"` Scope string `json:"scope"` } const accessTokenURL = "https://github.com/login/oauth/access_token?code=%s&client_id=%s&client_secret=%s" const userURL = "https://api.github.com/user" var client = &http.Client{} var clientID string var clientSecret string func init() { clientID = os.Getenv("GH_ID") clientSecret = os.Getenv("GH_SECRET") } // HandleCallback : handles the GitHub callback when calling "Connect with GitHub" func HandleCallback(ctx context.Context, request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) { if clientID == "" { return respond(http.StatusBadRequest, "$GH_ID should be set") } if clientSecret == "" { return respond(http.StatusBadRequest, "$GH_ID should be set") } payload, err := getCode(&request) if err != nil { return respond( http.StatusBadRequest, fmt.Sprintf("error getting the code from the payload: %v", err), ) } token, err := getAccessToken(payload) if err != nil { return respond( http.StatusInternalServerError, fmt.Sprintf("error getting the access token from GH: %v", err), ) } ghuser, err := getUser(token) if err != nil { return respond( http.StatusInternalServerError, fmt.Sprintf("error getting the user from GH: %v", err), ) } dbClient, err := database.NewClient() if err != nil { return respond( http.StatusInternalServerError, fmt.Sprintf("couldn't connect to the db: %v", err.Error()), ) } user, err := dbClient.GetUserByLogin(ghuser.Login, models.GithubProvider) exists := true if err != nil { // the user doesn't exist, we will have to create it exists = false user = &models.User{Provider: models.GithubProvider} } // Update our database with the newly fetched info // (we don't query api.github.com every time, because of rate limits) user.GHUser = ghuser user.LastLogin = time.Now() var ( verb string statusCode int ) if !exists { // the user wasn't previously found, we need to create it id, err := dbClient.CreateUser(user) if err != nil { return respond( http.StatusInternalServerError, fmt.Sprintf("creating the user failed: %v", err.Error()), ) } user.ID = id verb = "created" statusCode = http.StatusCreated } else { // otherwise update it if err = dbClient.UpdateUser(user); err != nil { return respond( http.StatusInternalServerError, fmt.Sprintf("updating the user failed: %v", err.Error()), ) } verb = "updated" statusCode = http.StatusOK } jsonBytes, err := json.Marshal(user) if err != nil { text := fmt.Sprintf("user %s but could not format to JSON: %v", verb, err) return respond(http.StatusAccepted, text) } return respond(statusCode, string(jsonBytes)) } func getCode(request *events.APIGatewayProxyRequest) (*GHPayload, error) { var payload GHPayload data, err := json.Marshal(request.QueryStringParameters) if err != nil { return nil, err } err = json.Unmarshal(data, &payload) return &payload, err } func getAccessToken(payload *GHPayload) (*GHToken, error) { url := fmt.Sprintf( accessTokenURL, payload.Code, clientID, clientSecret, ) req, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { return nil, err } 
req.Header.Add("Accept", "application/json") resp, err := client.Do(req) if err != nil { return nil, err } if err = checkStatusCode(resp); err != nil { return nil, err } var token GHToken err = json.NewDecoder(resp.Body).Decode(&token) return &token, err } func getUser(token *GHToken) (*models.GHUser, error) { req, err := http.NewRequest(http.MethodGet, userURL, nil) if err != nil { return nil, err } req.Header.Add("Authorization", fmt.Sprintf("token %s", token.AccessToken)) resp, err := client.Do(req) if err != nil { return nil, err } if err = checkStatusCode(resp); err != nil { return nil, err } var user models.GHUser err = json.NewDecoder(resp.Body).Decode(&user) return &user, err } func checkStatusCode(resp *http.Response) error { if resp.StatusCode >= 400 { return fmt.Errorf("bad GitHub response: %s", resp.Status) } else if resp.StatusCode >= 300 { return fmt.Errorf("unexpected return code: %s", resp.Status) } return nil } func respond(code int, payload interface{}) (events.APIGatewayProxyResponse, error) { return events.APIGatewayProxyResponse{ StatusCode: code, Body: fmt.Sprint(payload), }, nil } func main() { lambda.Start(HandleCallback) }
[ "\"GH_ID\"", "\"GH_SECRET\"" ]
[]
[ "GH_SECRET", "GH_ID" ]
[]
["GH_SECRET", "GH_ID"]
go
2
0
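getAccessToken in the record above builds the GitHub access_token URL from the callback code and the GH_ID/GH_SECRET credentials, and sends Accept: application/json so GitHub answers with JSON rather than its default form-encoded body. A self-contained sketch of that exchange step with only the standard library (struct and variable names mirror the record but are otherwise illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

// ghToken matches the JSON GitHub returns when Accept: application/json is sent.
type ghToken struct {
	AccessToken string `json:"access_token"`
	TokenType   string `json:"token_type"`
	Scope       string `json:"scope"`
}

func exchangeCode(code string) (*ghToken, error) {
	url := fmt.Sprintf(
		"https://github.com/login/oauth/access_token?code=%s&client_id=%s&client_secret=%s",
		code, os.Getenv("GH_ID"), os.Getenv("GH_SECRET"),
	)
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	// Without this header the endpoint replies with access_token=...&token_type=... form encoding.
	req.Header.Add("Accept", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return nil, fmt.Errorf("bad GitHub response: %s", resp.Status)
	}

	var token ghToken
	err = json.NewDecoder(resp.Body).Decode(&token)
	return &token, err
}

func main() {
	token, err := exchangeCode("example-code")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("token type:", token.TokenType)
}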
quickstart/gcp-cloudshell-quickstart.py
#!/usr/bin/env python3 # Copyright (c) 2019 Teradici Corporation # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import base64 import datetime import importlib import json import os import re import shutil import site import subprocess import sys import textwrap import time import casmgr import interactive REQUIRED_PACKAGES = { 'google-api-python-client': None, 'grpc-google-iam-v1': None, 'google-cloud-kms': "2.0.0" } # Service Account ID of the service account to create SA_ID = 'cas-manager' SA_ROLES = [ 'roles/editor', 'roles/cloudkms.cryptoKeyEncrypterDecrypter' ] PROJECT_ID = os.environ['GOOGLE_CLOUD_PROJECT'] REQUIRED_APIS = [ 'deploymentmanager.googleapis.com', 'cloudkms.googleapis.com', 'cloudresourcemanager.googleapis.com', 'compute.googleapis.com', 'dns.googleapis.com', 'iam.googleapis.com', ] iso_time = datetime.datetime.utcnow().isoformat(timespec='seconds').replace(':','').replace('-','') + 'Z' DEPLOYMENT_NAME = 'quickstart_deployment_' + iso_time CONNECTOR_NAME = 'quickstart_cac_' + iso_time # User entitled to workstations ENTITLE_USER = 'Administrator' HOME = os.path.expanduser('~') TERRAFORM_BIN_DIR = f'{HOME}/bin' TERRAFORM_BIN_PATH = TERRAFORM_BIN_DIR + '/terraform' TERRAFORM_VER_PATH = '../deployments/gcp/single-connector/versions.tf' CFG_FILE_PATH = 'gcp-cloudshell-quickstart.cfg' DEPLOYMENT_PATH = 'deployments/gcp/single-connector' # All of the following paths are relative to the deployment directory, DEPLOYMENT_PATH TF_VARS_REF_PATH = 'terraform.tfvars.sample' TF_VARS_PATH = 'terraform.tfvars' SECRETS_DIR = 'secrets' GCP_SA_KEY_PATH = SECRETS_DIR + '/gcp_service_account_key.json' SSH_KEY_PATH = SECRETS_DIR + '/cas_mgr_admin_id_rsa' CAS_MGR_DEPLOYMENT_SA_KEY_PATH = SECRETS_DIR + '/cas_mgr_deployment_sa_key.json.encrypted' # Types of workstations WS_TYPES = ['scent', 'gcent', 'swin', 'gwin'] def ensure_requirements(): if not PROJECT_ID: print('The PROJECT property has not been set.') print('Please run "gcloud config set project [PROJECT_ID]" to set the project.') print('See: https://cloud.google.com/sdk/gcloud/reference/config/set') print('') sys.exit(1) ensure_required_packages() import_modules() ensure_terraform() def ensure_required_packages(): """A function that ensures the correct version of Python packages are installed. The function first checks if the required packages are installed. If a package is installed, the required version number will then be checked. It will next prompt the user to update or install the required packages. 
""" packages_to_install_list = [] for package, required_version in REQUIRED_PACKAGES.items(): check_cmd = f'{sys.executable} -m pip show {package}' output = subprocess.run(check_cmd.split(' '), stdout=subprocess.PIPE).stdout.decode('utf-8') # If a package is not found, skip version checking and simply install the latest package if not output: packages_to_install_list.append(package) elif required_version is not None: # Second line outputs the version of the specified package current_version = output.splitlines()[1].split(' ')[-1] # Convert the string into a tuple of numbers for comparison current_version_tuple = tuple( map(int, current_version.split('.')) ) required_version_tuple = tuple( map(int, required_version.split('.')) ) if current_version_tuple < required_version_tuple: packages_to_install_list.append(package) if packages_to_install_list: # Convert the list to a string of packages delimited by a space packages_to_install = " ".join(packages_to_install_list) install_cmd = f'{sys.executable} -m pip install --upgrade {packages_to_install} --user' install_permission = input( 'One or more of the following Python packages are outdated or missing:\n' f' {packages_to_install}\n\n' 'The script can install these packages in the user\'s home directory using the following command:\n' f' {install_cmd}\n' 'Proceed? (y/n)? ').strip().lower() if install_permission not in ('y', 'yes'): print('Python packages are required for deployment. Exiting...') sys.exit(1) subprocess.check_call(install_cmd.split(' ')) # Refresh sys.path to detect new modules in user's home directory. importlib.reload(site) def import_modules(): """A function that dynamically imports required Python packages. """ # Global calls for import statements are required to avoid module not found error import_required_packages = '''\ import googleapiclient.discovery from google.cloud import kms from google.api_core import exceptions as google_exc ''' # Recommended to clear cache after installing python packages for dynamic imports importlib.invalidate_caches() exec(textwrap.dedent(import_required_packages), globals()) print('Successfully imported required Python packages.') def ensure_terraform(): """A function that ensures the required Terraform version is installed. The function first checks if the required Terraform version is installed in the user's system. If Terraform is not installed, it will prompt the user to install Terraform in the user's home directory. """ global TERRAFORM_BIN_PATH path = shutil.which('terraform') # Reference versions.tf file for the required version with open(TERRAFORM_VER_PATH,"r") as f: data = f.read() required_version = re.search(r'\">=\s([\d.]+)\"', data).group(1) if path: cmd = 'terraform -v' # Run the command 'terraform -v' and use the first line as the Terraform version terraform_version = subprocess.run(cmd.split(' '), stdout=subprocess.PIPE).stdout.decode('utf-8').splitlines()[0] print(f'Found {terraform_version} in {path}.') # Use regex to parse the version number from string (i.e. 
0.12.18) current_version = re.search(r'Terraform\s*v([\d.]+)', terraform_version).group(1) # Convert the string into a tuple of numbers for comparison current_version_tuple = tuple( map(int, current_version.split('.')) ) required_version_tuple = tuple( map(int, required_version.split('.')) ) if current_version_tuple >= required_version_tuple: TERRAFORM_BIN_PATH = path return install_permission = input( f'This system is missing Terraform version >= {required_version}.\n' f'Proceed to download and install Terraform in {TERRAFORM_BIN_DIR} (y/n)? ').strip().lower() if install_permission not in ('y', 'yes'): print('Terraform is required for deployment. Exiting...') sys.exit(1) install_cmd = f'{sys.executable} install-terraform.py {TERRAFORM_BIN_DIR}' subprocess.run(install_cmd.split(' '), check=True) def quickstart_config_read(cfg_file): cfg_data = {} with open(cfg_file, 'r') as f: for line in f: if line[0] in ('#', '\n'): continue key, value = map(str.strip, line.split(':')) cfg_data[key] = value return cfg_data def service_account_find(email): service_accounts = iam_service.projects().serviceAccounts().list( name = f'projects/{PROJECT_ID}', ).execute() if not service_accounts: return for account in service_accounts['accounts']: if account['email'] == email: return account def service_account_create(email): print('Creating Service Account...') service_account = service_account_find(email) if service_account: print(f' Service account {email} already exists.') # The service account limit check is placed here so that the script doesn't # unfortunately exit after the user enters their configurations if error, but # the key will be created later to avoid reaching the limit, in case # something goes wrong and the script exits before the key is used. service_account_create_key_limit_check(service_account) return service_account service_account = iam_service.projects().serviceAccounts().create( name = 'projects/' + PROJECT_ID, body = { 'accountId': SA_ID, 'serviceAccount': { 'displayName': SA_ID, 'description': 'Account used by CAS Manager to manage PCoIP workstations.', } } ).execute() print(' Created service account: ' + service_account['email']) return service_account def service_account_create_key(service_account, filepath): print(f'Created key for {service_account["email"]}...') key = iam_service.projects().serviceAccounts().keys().create( name = 'projects/-/serviceAccounts/' + service_account['email'], body = {}, ).execute() key_data = base64.b64decode(key['privateKeyData']) with open(filepath, 'wb') as keyfile: keyfile.write(key_data) print(' Key written to ' + filepath) return json.loads(key_data.decode('utf-8')) def service_account_create_key_limit_check(service_account): print(f' Checking number of keys owned by {service_account["email"]}... 
', end='') keys = iam_service.projects().serviceAccounts().keys().list( name='projects/-/serviceAccounts/' + service_account['email'] ).execute()['keys'] user_managed_keys = list(filter(lambda k: (k['keyType'] == 'USER_MANAGED'), keys)) print(f'{len(user_managed_keys)}/10') if len(user_managed_keys) >= 10: print(f' ERROR: The service account has reached the limit of the number of keys it can create.', ' Please see: https://cloud.google.com/iam/docs/creating-managing-service-account-keys', 'Exiting script...', sep='\n') sys.exit(1) def iam_policy_update(service_account, roles): policy = crm_service.projects().getIamPolicy( resource = PROJECT_ID, body = {}, ).execute() print('Adding roles:') for role in roles: print(f' {role}...') binding = { 'role': role, 'members': [f'serviceAccount:{service_account["email"]}'], } policy['bindings'].append(binding) policy = crm_service.projects().setIamPolicy( resource = PROJECT_ID, body = { 'policy': policy } ).execute() return policy def apis_enable(apis): print('Enabling APIs:') # Using shell command, no Python Google Cloud Client library support for api in apis: print(f' {api}...') subprocess.run(['gcloud', 'services', 'enable', api], check=True) def ssh_key_create(path): print('Creating SSH key...') # note the space after '-N' is required ssh_cmd = f'ssh-keygen -f {path} -t rsa -q -N ' subprocess.run(ssh_cmd.split(' '), check=True) # Creates a new .tfvar based on the .tfvar.sample file def tf_vars_create(ref_file_path, tfvar_file_path, settings): if os.path.exists(tfvar_file_path): overwrite = input("Found an existing .tfvar file, overwrite (y/n)? ").strip().lower() if overwrite not in ('y', 'yes'): print(f'{tfvar_file_path} already exists. Exiting...') sys.exit(1) with open(ref_file_path, 'r') as ref_file, open(tfvar_file_path, 'w') as out_file: for line in ref_file: if line[0] == '#': # Check if it's an optional variable and uncomment if so for k in settings.keys(): # Building string using + because can't use f"{k}" with regex pattern = "^#\s*(" + k + ")\s*=" if re.search(pattern, line.strip()): line = f'{k} = "{settings[k]}"\n' elif line[0] != '\n': key = line.split('=')[0].strip() line = f'{key} = "{settings[key]}"\n' out_file.write(line) if __name__ == '__main__': ensure_requirements() print('Setting GCP project...') sa_email = f'{SA_ID}@{PROJECT_ID}.iam.gserviceaccount.com' iam_service = googleapiclient.discovery.build('iam', 'v1') crm_service = googleapiclient.discovery.build('cloudresourcemanager', 'v1') apis_enable(REQUIRED_APIS) sa = service_account_create(sa_email) iam_policy_update(sa, SA_ROLES) print('GCP project setup complete.\n') cfg_data = interactive.configurations_get(PROJECT_ID, WS_TYPES, ENTITLE_USER) print('Preparing local requirements...') os.chdir('../') os.chdir(DEPLOYMENT_PATH) # Paths passed into terraform.tfvars should be absolute paths cwd = os.getcwd() + '/' try: print(f'Creating directory {SECRETS_DIR} to store secrets...') os.mkdir(SECRETS_DIR, 0o700) except FileExistsError: print(f'Directory {SECRETS_DIR} already exists.') ssh_key_create(SSH_KEY_PATH) print('Local requirements setup complete.\n') print('Setting CAS Manager...') mycasmgr = casmgr.CASManager(cfg_data.get('api_token')) # TODO: Add a proper clean up of GCP IAM resources so we don't have to move the # service account creation to here after the rest of the GCP setup sa_key = service_account_create_key(sa, GCP_SA_KEY_PATH) print(f'Creating deployment {DEPLOYMENT_NAME}...') deployment = mycasmgr.deployment_create(DEPLOYMENT_NAME, cfg_data.get('reg_code')) 
mycasmgr.deployment_add_gcp_account(sa_key, deployment) print('Creating CAS Manager API key...') cas_mgr_deployment_key = mycasmgr.deployment_key_create(deployment) print('CAS Manager setup complete.\n') print('Encrypting secrets...') days90 = 7776000 kms_client = kms.KeyManagementServiceClient() parent = f"projects/{PROJECT_ID}/locations/{cfg_data.get('gcp_region')}" key_ring_id = 'cloud_deployment_scripts' key_ring_init = {} try: key_ring = kms_client.create_key_ring(request={'parent': parent, 'key_ring_id': key_ring_id, 'key_ring': key_ring_init}) print(f'Created Key Ring {key_ring.name}') except google_exc.AlreadyExists: print(f'Key Ring {key_ring_id} already exists. Using it...') parent = kms_client.key_ring_path(PROJECT_ID, cfg_data.get('gcp_region'), key_ring_id) crypto_key_id = 'quickstart_key' crypto_key_init = { 'purpose': kms.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT, 'rotation_period': {'seconds': days90}, 'next_rotation_time': {'seconds': int(time.time()) + days90}, } try: crypto_key = kms_client.create_crypto_key(request={'parent': parent, 'crypto_key_id': crypto_key_id, 'crypto_key': crypto_key_init}) print(f'Created Crypto Key {crypto_key.name}') except google_exc.AlreadyExists: print(f'Crypto Key {crypto_key_id} already exists. Using it...') key_name = kms_client.crypto_key_path(PROJECT_ID, cfg_data.get('gcp_region'), key_ring_id, crypto_key_id) def kms_encode(key, text, base64_encoded=False): encrypted = kms_client.encrypt(request={'name': key, 'plaintext': text.encode('utf-8')}) if base64_encoded: return base64.b64encode(encrypted.ciphertext).decode('utf-8') return encrypted.ciphertext cfg_data['ad_password'] = kms_encode(key_name, cfg_data.get('ad_password'), True) cfg_data['reg_code'] = kms_encode(key_name, cfg_data.get('reg_code'), True) cas_mgr_deployment_key_encrypted = kms_encode(key_name, json.dumps(cas_mgr_deployment_key)) print('Done encrypting secrets.') print('Creating CAS Manager Deployment Service Account Key...') with open(CAS_MGR_DEPLOYMENT_SA_KEY_PATH, 'wb+') as keyfile: keyfile.write(cas_mgr_deployment_key_encrypted) print(' Key written to ' + CAS_MGR_DEPLOYMENT_SA_KEY_PATH) print('Deploying with Terraform...') #TODO: refactor this to work with more types of deployments settings = { 'gcp_credentials_file': cwd + GCP_SA_KEY_PATH, 'gcp_region': cfg_data.get('gcp_region'), 'gcp_zone': cfg_data.get('gcp_zone'), 'kms_cryptokey_id': key_name, 'dc_admin_password': cfg_data.get('ad_password'), 'safe_mode_admin_password': cfg_data.get('ad_password'), 'ad_service_account_password': cfg_data.get('ad_password'), 'cac_admin_ssh_pub_key_file': cwd + SSH_KEY_PATH + '.pub', 'win_gfx_instance_count': cfg_data.get('gwin'), 'win_std_instance_count': cfg_data.get('swin'), 'centos_gfx_instance_count': cfg_data.get('gcent'), 'centos_std_instance_count': cfg_data.get('scent'), 'centos_admin_ssh_pub_key_file': cwd + SSH_KEY_PATH + '.pub', 'pcoip_registration_code': cfg_data.get('reg_code'), 'cas_mgr_deployment_sa_file': cwd + CAS_MGR_DEPLOYMENT_SA_KEY_PATH } # update tfvar tf_vars_create(TF_VARS_REF_PATH, TF_VARS_PATH, settings) tf_cmd = f'{TERRAFORM_BIN_PATH} init' subprocess.run(tf_cmd.split(' '), check=True) tf_cmd = f'{TERRAFORM_BIN_PATH} apply -auto-approve' subprocess.run(tf_cmd.split(' '), check=True) comp_proc = subprocess.run([TERRAFORM_BIN_PATH,'output','cac-public-ip'], check=True, stdout=subprocess.PIPE) cac_public_ip = comp_proc.stdout.decode().split('"')[1] print('Terraform deployment complete.\n') # To update the auth_token used by the session header for the API 
call # with the one from the deployment key in case the API Token expires mycasmgr.deployment_signin(cas_mgr_deployment_key) # Add existing workstations for t in WS_TYPES: for i in range(int(cfg_data.get(t))): hostname = f'{t}-{i}' print(f'Adding "{hostname}" to CAS Manager...') mycasmgr.machine_add_existing( hostname, PROJECT_ID, cfg_data.get('gcp_zone'), deployment ) # Loop until Administrator user is found in CAS Manager while True: entitle_user = mycasmgr.user_get(ENTITLE_USER, deployment) if entitle_user: break print(f'Waiting for user "{ENTITLE_USER}" to be synced. Retrying in 10 seconds...') time.sleep(10) # Add entitlements for each workstation machines_list = mycasmgr.machines_get(deployment) for machine in machines_list: print(f'Assigning workstation "{machine["machineName"]}" to user "{ENTITLE_USER}"...') mycasmgr.entitlement_add(entitle_user, machine) print('\nQuickstart deployment finished.\n') print('') next_steps = f""" Next steps: - Connect to a workstation: 1. From a PCoIP client, connect to the Cloud Access Connector at {cac_public_ip} 2. Sign in with the "{ENTITLE_USER}" user credentials 3. When connecting to a workstation immediately after this script completes, the workstation (especially graphics ones) may still be setting up. You may see "Remote Desktop is restarting..." in the client. Please wait a few minutes or reconnect if it times out. - Add additional workstations: 1. Log in to https://cas.teradici.com 2. Click on "Workstations" in the left panel, select "Create new remote workstation" from the "+" button 3. Select connector "quickstart_cac_<timestamp>" 4. Fill in the form according to your preferences. Note that the following values must be used for their respective fields: Region: "{cfg_data.get('gcp_region')}" Zone: "{cfg_data.get('gcp_zone')}" Network: "vpc-cas" Subnetwork: "subnet-ws" Domain name: "example.com" Domain service account: "cas_ad_admin" Service account password: <set by you at start of script> 5. Click **Create** - Clean up: 1. Using GCP console, delete all workstations created by CAS Manager web interface and manually created workstations. Resources not created by the Terraform scripts must be manually removed before Terraform can properly destroy resources it created. 2. In GCP cloudshell, change directory using the command "cd ~/cloudshell_open/cloud_deployment_scripts/{DEPLOYMENT_PATH}" 3. Remove resources deployed by Terraform using the following command. Enter "yes" when prompted. "{'terraform' if TERRAFORM_BIN_PATH == shutil.which('terraform') else TERRAFORM_BIN_PATH} destroy" 4. Log in to https://cas.teradici.com and delete the deployment named "quickstart_deployment_<timestamp>" """ print(next_steps) print('')
[]
[]
[ "GOOGLE_CLOUD_PROJECT" ]
[]
["GOOGLE_CLOUD_PROJECT"]
python
1
0
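The quickstart script above decides whether a local Terraform install is usable by extracting the dotted version from `terraform -version` output and comparing integer tuples against a required minimum. Below is a minimal, self-contained sketch of that comparison; the sample output string is hypothetical and the required minimum simply mirrors the 0.12.18 example mentioned in the script.

```python
import re

def parse_version(version_output):
    """Pull the dotted version out of `terraform -version` output and return it as an int tuple."""
    match = re.search(r'Terraform\s*v([\d.]+)', version_output)
    if match is None:
        raise ValueError('could not find a Terraform version string')
    return tuple(map(int, match.group(1).split('.')))

# Hypothetical output string; the required minimum mirrors the script's example (0.12.18).
current = parse_version('Terraform v0.13.5')
required = (0, 12, 18)
print(current >= required)  # True: tuples compare element by element
```

Comparing integer tuples avoids the trap of comparing version strings lexicographically, where "0.9.0" would incorrectly sort above "0.12.18".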
contrib/gitian-build.py
#!/usr/bin/env python3 import argparse import os import subprocess import sys def setup(): global args, workdir programs = ['ruby', 'git', 'apt-cacher-ng', 'make', 'wget'] if args.kvm: programs += ['python-vm-builder', 'qemu-kvm', 'qemu-utils'] elif args.docker: dockers = ['docker.io', 'docker-ce'] for i in dockers: return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i]) if return_code == 0: break if return_code != 0: print('Cannot find any way to install docker', file=sys.stderr) exit(1) else: programs += ['lxc', 'debootstrap'] subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs) if not os.path.isdir('gitian.sigs'): subprocess.check_call(['git', 'clone', 'https://github.com/idealgoldv2chain/gitian.sigs.git']) if not os.path.isdir('bitcoin-detached-sigs'): subprocess.check_call(['git', 'clone', 'https://github.com/idealgoldv2chain/idealgoldv2-detached-sigs.git']) if not os.path.isdir('gitian-builder'): subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git']) if not os.path.isdir('bitcoin'): subprocess.check_call(['git', 'clone', 'https://github.com/idealgoldv2chain/idealgoldv2.git']) os.chdir('gitian-builder') make_image_prog = ['bin/make-base-vm', '--suite', 'trusty', '--arch', 'amd64'] if args.docker: make_image_prog += ['--docker'] elif not args.kvm: make_image_prog += ['--lxc'] subprocess.check_call(make_image_prog) os.chdir(workdir) if args.is_bionic and not args.kvm and not args.docker: subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net']) print('Reboot is required') exit(0) def build(): global args, workdir os.makedirs('idealgoldv2-binaries/' + args.version, exist_ok=True) print('\nBuilding Dependencies\n') os.chdir('gitian-builder') os.makedirs('inputs', exist_ok=True) subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz']) subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch']) subprocess.check_call(["echo 'a8c4e9cafba922f89de0df1f2152e7be286aba73f78505169bc351a7938dd911 inputs/osslsigncode-Backports-to-1.7.1.patch' | sha256sum -c"], shell=True) subprocess.check_call(["echo 'f9a8cdb38b9c309326764ebc937cba1523a3a751a7ab05df3ecc99d18ae466c9 inputs/osslsigncode-1.7.1.tar.gz' | sha256sum -c"], shell=True) subprocess.check_call(['make', '-C', '../idealgoldv2/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common']) if args.linux: print('\nCompiling ' + args.version + ' Linux') subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'idealgoldv2='+args.commit, '--url', 'idealgoldv2='+args.url, '../idealgoldv2/contrib/gitian-descriptors/gitian-linux.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../idealgoldv2/contrib/gitian-descriptors/gitian-linux.yml']) subprocess.check_call('mv build/out/idealgoldv2-*.tar.gz build/out/src/idealgoldv2-*.tar.gz ../idealgoldv2-binaries/'+args.version, shell=True) if args.windows: print('\nCompiling ' + args.version + ' Windows') subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'idealgoldv2='+args.commit, '--url', 'idealgoldv2='+args.url, '../idealgoldv2/contrib/gitian-descriptors/gitian-win.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', 
args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../idealgoldv2/contrib/gitian-descriptors/gitian-win.yml']) subprocess.check_call('mv build/out/idealgoldv2-*-win-unsigned.tar.gz inputs/', shell=True) subprocess.check_call('mv build/out/idealgoldv2-*.zip build/out/idealgoldv2-*.exe ../idealgoldv2-binaries/'+args.version, shell=True) if args.macos: print('\nCompiling ' + args.version + ' MacOS') subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'idealgoldv2='+args.commit, '--url', 'idealgoldv2='+args.url, '../idealgoldv2/contrib/gitian-descriptors/gitian-osx.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../idealgoldv2/contrib/gitian-descriptors/gitian-osx.yml']) subprocess.check_call('mv build/out/idealgoldv2-*-osx-unsigned.tar.gz inputs/', shell=True) subprocess.check_call('mv build/out/idealgoldv2-*.tar.gz build/out/idealgoldv2-*.dmg ../idealgoldv2-binaries/'+args.version, shell=True) os.chdir(workdir) if args.commit_files: print('\nCommitting '+args.version+' Unsigned Sigs\n') os.chdir('gitian.sigs') subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer]) subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer]) subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer]) subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer]) os.chdir(workdir) def sign(): global args, workdir os.chdir('gitian-builder') if args.windows: print('\nSigning ' + args.version + ' Windows') subprocess.check_call('cp inputs/bitcoin-' + args.version + '-win-unsigned.tar.gz inputs/bitcoin-win-unsigned.tar.gz', shell=True) subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../idealgoldv2/contrib/gitian-descriptors/gitian-win-signer.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../idealgoldv2/contrib/gitian-descriptors/gitian-win-signer.yml']) subprocess.check_call('mv build/out/idealgoldv2-*win64-setup.exe ../idealgoldv2-binaries/'+args.version, shell=True) subprocess.check_call('mv build/out/idealgoldv2-*win32-setup.exe ../idealgoldv2-binaries/'+args.version, shell=True) if args.macos: print('\nSigning ' + args.version + ' MacOS') subprocess.check_call('cp inputs/bitcoin-' + args.version + '-osx-unsigned.tar.gz inputs/bitcoin-osx-unsigned.tar.gz', shell=True) subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../idealgoldv2/contrib/gitian-descriptors/gitian-osx-signer.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../idealgoldv2/contrib/gitian-descriptors/gitian-osx-signer.yml']) subprocess.check_call('mv build/out/idealgoldv2-osx-signed.dmg ../idealgoldv2-binaries/'+args.version+'/bitcoin-'+args.version+'-osx.dmg', shell=True) os.chdir(workdir) if args.commit_files: print('\nCommitting '+args.version+' Signed Sigs\n') os.chdir('gitian.sigs') subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer]) subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer]) subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer]) os.chdir(workdir) def 
verify(): global args, workdir os.chdir('gitian-builder') print('\nVerifying v'+args.version+' Linux\n') subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../idealgoldv2/contrib/gitian-descriptors/gitian-linux.yml']) print('\nVerifying v'+args.version+' Windows\n') subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../idealgoldv2/contrib/gitian-descriptors/gitian-win.yml']) print('\nVerifying v'+args.version+' MacOS\n') subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../idealgoldv2/contrib/gitian-descriptors/gitian-osx.yml']) print('\nVerifying v'+args.version+' Signed Windows\n') subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../idealgoldv2/contrib/gitian-descriptors/gitian-win-signer.yml']) print('\nVerifying v'+args.version+' Signed MacOS\n') subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../idealgoldv2/contrib/gitian-descriptors/gitian-osx-signer.yml']) os.chdir(workdir) def main(): global args, workdir parser = argparse.ArgumentParser(usage='%(prog)s [options] signer version') parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch') parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request') parser.add_argument('-u', '--url', dest='url', default='https://github.com/idealgoldv2chain/idealgoldv2', help='Specify the URL of the repository. Default is %(default)s') parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build') parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build') parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS') parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries') parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS') parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s') parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s') parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC') parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC') parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Uses LXC. If you want to use KVM, use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)') parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.') parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git') parser.add_argument('signer', help='GPG signer to sign each build assert file') parser.add_argument('version', help='Version number, commit, or branch to build. 
If building a commit or branch, the -c option must be specified') args = parser.parse_args() workdir = os.getcwd() args.linux = 'l' in args.os args.windows = 'w' in args.os args.macos = 'm' in args.os args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs']) if args.buildsign: args.build=True args.sign=True if args.kvm and args.docker: raise Exception('Error: cannot have both kvm and docker') args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign' # Set environment variable USE_LXC or USE_DOCKER, let gitian-builder know that we use lxc or docker if args.docker: os.environ['USE_DOCKER'] = '1' elif not args.kvm: os.environ['USE_LXC'] = '1' if not 'GITIAN_HOST_IP' in os.environ.keys(): os.environ['GITIAN_HOST_IP'] = '10.0.3.1' if not 'LXC_GUEST_IP' in os.environ.keys(): os.environ['LXC_GUEST_IP'] = '10.0.3.5' # Disable for MacOS if no SDK found if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'): print('Cannot build for MacOS, SDK does not exist. Will build for other OSes') args.macos = False script_name = os.path.basename(sys.argv[0]) # Signer and version shouldn't be empty if args.signer == '': print(script_name+': Missing signer.') print('Try '+script_name+' --help for more information') exit(1) if args.version == '': print(script_name+': Missing version.') print('Try '+script_name+' --help for more information') exit(1) # Add leading 'v' for tags if args.commit and args.pull: raise Exception('Cannot have both commit and pull') args.commit = ('' if args.commit else 'v') + args.version if args.setup: setup() os.chdir('idealgoldv2') if args.pull: subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge']) os.chdir('../gitian-builder/inputs/bitcoin') subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge']) args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip() args.version = 'pull-' + args.version print(args.commit) subprocess.check_call(['git', 'fetch']) subprocess.check_call(['git', 'checkout', args.commit]) os.chdir(workdir) if args.build: build() if args.sign: sign() if args.verify: verify() if __name__ == '__main__': main()
[]
[]
[ "USE_DOCKER", "GITIAN_HOST_IP", "USE_LXC", "LXC_GUEST_IP" ]
[]
["USE_DOCKER", "GITIAN_HOST_IP", "USE_LXC", "LXC_GUEST_IP"]
python
4
0
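gitian-build.py verifies its downloaded osslsigncode inputs by piping an expected digest into `sha256sum -c`. A rough Python-only equivalent of that check, using hashlib instead of shelling out, might look like the sketch below; the file path and digest are the ones hard-coded in the script above, and this is only an illustration of the verification step.

```python
import hashlib

def sha256_matches(path, expected_hex):
    """Hash the file in chunks and compare against the expected hex digest."""
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            h.update(chunk)
    return h.hexdigest() == expected_hex

# Digest taken from the script's `sha256sum -c` invocation.
ok = sha256_matches(
    'inputs/osslsigncode-1.7.1.tar.gz',
    'f9a8cdb38b9c309326764ebc937cba1523a3a751a7ab05df3ecc99d18ae466c9',
)
if not ok:
    raise SystemExit('checksum mismatch for osslsigncode-1.7.1.tar.gz')
```

A mismatch here means the downloaded input should not be fed into the gitian build.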
services/ws/session_v0_test.go
// Copyright (C) The Arvados Authors. All rights reserved. // // SPDX-License-Identifier: AGPL-3.0 package ws import ( "bytes" "encoding/json" "fmt" "io" "net/url" "os" "strings" "sync" "time" "git.arvados.org/arvados.git/sdk/go/arvados" "git.arvados.org/arvados.git/sdk/go/arvadostest" "git.arvados.org/arvados.git/sdk/go/ctxlog" "golang.org/x/net/websocket" check "gopkg.in/check.v1" ) func init() { if os.Getenv("ARVADOS_DEBUG") != "" { ctxlog.SetLevel("debug") } } var _ = check.Suite(&v0Suite{}) type v0Suite struct { serviceSuite serviceSuite token string toDelete []string wg sync.WaitGroup ignoreLogID uint64 } func (s *v0Suite) SetUpTest(c *check.C) { s.serviceSuite.SetUpTest(c) s.serviceSuite.start(c) s.token = arvadostest.ActiveToken s.ignoreLogID = s.lastLogID(c) } func (s *v0Suite) TearDownTest(c *check.C) { s.wg.Wait() s.serviceSuite.TearDownTest(c) } func (s *v0Suite) TearDownSuite(c *check.C) { s.deleteTestObjects(c) } func (s *v0Suite) deleteTestObjects(c *check.C) { ac := arvados.NewClientFromEnv() ac.AuthToken = arvadostest.AdminToken for _, path := range s.toDelete { err := ac.RequestAndDecode(nil, "DELETE", path, nil, nil) if err != nil { panic(err) } } s.toDelete = nil } func (s *v0Suite) TestFilters(c *check.C) { conn, r, w := s.testClient() defer conn.Close() cmd := func(method, eventType string, status int) { c.Check(w.Encode(map[string]interface{}{ "method": method, "filters": [][]interface{}{{"event_type", "in", []string{eventType}}}, }), check.IsNil) s.expectStatus(c, r, status) } cmd("subscribe", "update", 200) cmd("subscribe", "update", 200) cmd("subscribe", "create", 200) cmd("subscribe", "update", 200) cmd("unsubscribe", "blip", 400) cmd("unsubscribe", "create", 200) cmd("unsubscribe", "update", 200) go s.emitEvents(nil) lg := s.expectLog(c, r) c.Check(lg.EventType, check.Equals, "update") cmd("unsubscribe", "update", 200) cmd("unsubscribe", "update", 200) cmd("unsubscribe", "update", 400) } func (s *v0Suite) TestLastLogID(c *check.C) { lastID := s.lastLogID(c) checkLogs := func(r *json.Decoder, uuid string) { for _, etype := range []string{"create", "blip", "update"} { lg := s.expectLog(c, r) for lg.ObjectUUID != uuid { lg = s.expectLog(c, r) } c.Check(lg.EventType, check.Equals, etype) } } // Connecting connEarly (before sending the early events) lets // us confirm all of the "early" events have already passed // through the server. connEarly, rEarly, wEarly := s.testClient() defer connEarly.Close() c.Check(wEarly.Encode(map[string]interface{}{ "method": "subscribe", }), check.IsNil) s.expectStatus(c, rEarly, 200) // Send the early events. uuidChan := make(chan string, 1) s.emitEvents(uuidChan) uuidEarly := <-uuidChan // Wait for the early events to pass through. checkLogs(rEarly, uuidEarly) // Connect the client that wants to get old events via // last_log_id. 
conn, r, w := s.testClient() defer conn.Close() c.Check(w.Encode(map[string]interface{}{ "method": "subscribe", "last_log_id": lastID, }), check.IsNil) s.expectStatus(c, r, 200) checkLogs(r, uuidEarly) s.emitEvents(uuidChan) checkLogs(r, <-uuidChan) } func (s *v0Suite) TestPermission(c *check.C) { conn, r, w := s.testClient() defer conn.Close() c.Check(w.Encode(map[string]interface{}{ "method": "subscribe", }), check.IsNil) s.expectStatus(c, r, 200) uuidChan := make(chan string, 2) go func() { s.token = arvadostest.AdminToken s.emitEvents(uuidChan) s.token = arvadostest.ActiveToken s.emitEvents(uuidChan) }() wrongUUID := <-uuidChan rightUUID := <-uuidChan lg := s.expectLog(c, r) for lg.ObjectUUID != rightUUID { c.Check(lg.ObjectUUID, check.Not(check.Equals), wrongUUID) lg = s.expectLog(c, r) } } // Two users create private objects; admin deletes both objects; each // user receives a "delete" event for their own object (not for the // other user's object). func (s *v0Suite) TestEventTypeDelete(c *check.C) { clients := []struct { token string uuid string conn *websocket.Conn r *json.Decoder w *json.Encoder }{{token: arvadostest.ActiveToken}, {token: arvadostest.SpectatorToken}} for i := range clients { uuidChan := make(chan string, 1) s.token = clients[i].token s.emitEvents(uuidChan) clients[i].uuid = <-uuidChan clients[i].conn, clients[i].r, clients[i].w = s.testClient() c.Check(clients[i].w.Encode(map[string]interface{}{ "method": "subscribe", }), check.IsNil) s.expectStatus(c, clients[i].r, 200) } s.ignoreLogID = s.lastLogID(c) s.deleteTestObjects(c) for _, client := range clients { lg := s.expectLog(c, client.r) c.Check(lg.ObjectUUID, check.Equals, client.uuid) c.Check(lg.EventType, check.Equals, "delete") } } // Trashing/deleting a collection produces an "update" event with // properties["new_attributes"]["is_trashed"] == true. 
func (s *v0Suite) TestTrashedCollection(c *check.C) { ac := arvados.NewClientFromEnv() ac.AuthToken = s.token var coll arvados.Collection err := ac.RequestAndDecode(&coll, "POST", "arvados/v1/collections", s.jsonBody("collection", `{"manifest_text":""}`), map[string]interface{}{"ensure_unique_name": true}) c.Assert(err, check.IsNil) s.ignoreLogID = s.lastLogID(c) conn, r, w := s.testClient() defer conn.Close() c.Check(w.Encode(map[string]interface{}{ "method": "subscribe", }), check.IsNil) s.expectStatus(c, r, 200) err = ac.RequestAndDecode(nil, "DELETE", "arvados/v1/collections/"+coll.UUID, nil, nil) c.Assert(err, check.IsNil) lg := s.expectLog(c, r) c.Check(lg.ObjectUUID, check.Equals, coll.UUID) c.Check(lg.EventType, check.Equals, "update") c.Check(lg.Properties["old_attributes"].(map[string]interface{})["is_trashed"], check.Equals, false) c.Check(lg.Properties["new_attributes"].(map[string]interface{})["is_trashed"], check.Equals, true) } func (s *v0Suite) TestSendBadJSON(c *check.C) { conn, r, w := s.testClient() defer conn.Close() c.Check(w.Encode(map[string]interface{}{ "method": "subscribe", }), check.IsNil) s.expectStatus(c, r, 200) _, err := fmt.Fprint(conn, "^]beep\n") c.Check(err, check.IsNil) s.expectStatus(c, r, 400) c.Check(w.Encode(map[string]interface{}{ "method": "subscribe", }), check.IsNil) s.expectStatus(c, r, 200) } func (s *v0Suite) TestSubscribe(c *check.C) { conn, r, w := s.testClient() defer conn.Close() s.emitEvents(nil) err := w.Encode(map[string]interface{}{"21": 12}) c.Check(err, check.IsNil) s.expectStatus(c, r, 400) err = w.Encode(map[string]interface{}{"method": "subscribe", "filters": []string{}}) c.Check(err, check.IsNil) s.expectStatus(c, r, 200) uuidChan := make(chan string, 1) go s.emitEvents(uuidChan) uuid := <-uuidChan for _, etype := range []string{"create", "blip", "update"} { lg := s.expectLog(c, r) for lg.ObjectUUID != uuid { lg = s.expectLog(c, r) } c.Check(lg.EventType, check.Equals, etype) } } // Generate some events by creating and updating a workflow object, // and creating a custom log entry (event_type="blip") about the newly // created workflow. If uuidChan is not nil, send the new workflow // UUID to uuidChan as soon as it's known. 
func (s *v0Suite) emitEvents(uuidChan chan<- string) { s.wg.Add(1) defer s.wg.Done() ac := arvados.NewClientFromEnv() ac.AuthToken = s.token wf := &arvados.Workflow{ Name: "ws_test", } err := ac.RequestAndDecode(wf, "POST", "arvados/v1/workflows", s.jsonBody("workflow", `{"name":"ws_test"}`), map[string]interface{}{"ensure_unique_name": true}) if err != nil { panic(err) } if uuidChan != nil { uuidChan <- wf.UUID } lg := &arvados.Log{} err = ac.RequestAndDecode(lg, "POST", "arvados/v1/logs", s.jsonBody("log", map[string]interface{}{ "object_uuid": wf.UUID, "event_type": "blip", "properties": map[string]interface{}{ "beep": "boop", }, }), nil) if err != nil { panic(err) } err = ac.RequestAndDecode(wf, "PUT", "arvados/v1/workflows/"+wf.UUID, s.jsonBody("workflow", `{"name":"ws_test"}`), nil) if err != nil { panic(err) } s.toDelete = append(s.toDelete, "arvados/v1/workflows/"+wf.UUID, "arvados/v1/logs/"+lg.UUID) } func (s *v0Suite) jsonBody(rscName string, ob interface{}) io.Reader { val, ok := ob.(string) if !ok { j, err := json.Marshal(ob) if err != nil { panic(err) } val = string(j) } v := url.Values{} v[rscName] = []string{val} return bytes.NewBufferString(v.Encode()) } func (s *v0Suite) expectStatus(c *check.C, r *json.Decoder, status int) { msg := map[string]interface{}{} c.Check(r.Decode(&msg), check.IsNil) c.Check(int(msg["status"].(float64)), check.Equals, status) } func (s *v0Suite) expectLog(c *check.C, r *json.Decoder) *arvados.Log { lg := &arvados.Log{} ok := make(chan struct{}) go func() { for lg.ID <= s.ignoreLogID { c.Check(r.Decode(lg), check.IsNil) } close(ok) }() select { case <-time.After(10 * time.Second): panic("timed out") case <-ok: return lg } } func (s *v0Suite) testClient() (*websocket.Conn, *json.Decoder, *json.Encoder) { srv := s.serviceSuite.srv conn, err := websocket.Dial(strings.Replace(srv.URL, "http", "ws", 1)+"/websocket?api_token="+s.token, "", srv.URL) if err != nil { panic(err) } w := json.NewEncoder(conn) r := json.NewDecoder(conn) return conn, r, w } func (s *v0Suite) lastLogID(c *check.C) uint64 { var lastID uint64 c.Assert(testDB().QueryRow(`SELECT MAX(id) FROM logs`).Scan(&lastID), check.IsNil) return lastID }
[ "\"ARVADOS_DEBUG\"" ]
[]
[ "ARVADOS_DEBUG" ]
[]
["ARVADOS_DEBUG"]
go
1
0
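The Go tests above exercise the v0 websocket protocol: a client sends a JSON "subscribe" message (optionally with "filters" and "last_log_id") and first reads back a status reply before log events start arriving. A rough Python sketch of that handshake follows; it assumes the third-party `websocket-client` package, and the endpoint URL, token, and last_log_id value are placeholders rather than anything taken from the test suite.

```python
import json
from websocket import create_connection  # third-party `websocket-client` package

# Placeholder endpoint and token; the real tests derive these from their test server.
ws = create_connection('wss://ws.example.arvadosapi.com/websocket?api_token=TOKEN')

ws.send(json.dumps({
    'method': 'subscribe',
    'filters': [['event_type', 'in', ['update']]],
    'last_log_id': 123,          # replay events newer than this log row
}))
print(json.loads(ws.recv()))     # expect something like {"status": 200, ...}

ws.close()
```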
qsubm.py
#!/usr/bin/python3 """qsubm -- generic queue submission for task-oriented batch scripts Environment variables: MCSCRIPT_DIR should specify the directory in which the mcscript package is installed, i.e., the directory where the file qsubm.py is found. (Note that qsubm uses this information to locate certain auxiliary script files used as part of the job submission process.) MCSCRIPT_RUN_HOME must specify the directory in which job files are found. MCSCRIPT_WORK_HOME should specify the parent directory in which run scratch directories should be made. MCSCRIPT_INSTALL_HOME must specify the directory in which executables are found. MCSCRIPT_LAUNCH_HOME (optional) should specify the parent directory in which run subdirectories for qsub invocation and output logging should be made. Otherwise, this will default to MCSCRIPT_WORK_HOME. MCSCRIPT_PYTHON should give the full qualified filename (i.e., including path) to the Python 3 executable for running run script files. A typical value will simply be "python3", assuming the Python 3 executable is in the shell's command search PATH. However, see note on "Availability of Python" in INSTALL.md. MCSCRIPT_RUN_PREFIX should specify the prefix for run names, e.g., set to "run" if your scripts are to be named run<XXXX>.py. Requires local definitions file config.py to translate options into arguments for local batch server. See directions in readme.txt. Your local definitions might not make use of or support all the parallel environment options. Language: Python 3 M. A. Caprio University of Notre Dame + 3/6/13 (mac): Based on earlier qsubm csh script. + 7/4/13 (mac): Support for multiple cluster flavors via qsubm_local. + 1/22/14 (mac): Python 3 update. + 10/27/14 (mac): Updates to --archive handling. + 5/14/15 (mac): - Insert "future" statements for Python 2 legacy support. - Add --noredirect switch. - Mandatory environment variable QSUBM_PYTHON. + 8/4/15 (mac): Make user environment variable definitions into option. + 6/13/16 (mac): Rename environment variables to MCSCRIPT_*. + 6/22/16 (mac): Update to use config.py for local configuration. + 12/14/16 (mac): Add --here option. + 12/29/16 (mac): - Add --spread option. - Remove --pernode option. - Make --opt option repeatable. + 1/16/17 (mac): Add --serialthreads option. + 2/23/17 (mac): Switch from os.mkdir to mcscript.utils.mkdir. + 3/16/17 (mac): - Add --setup option. - Change environment interface to pass MCSCRIPT_TASK_MODE. + 3/18/17 (mac): - Revise to support updated hybrid run parameters. - Rename option --setup to --prerun. + 5/22/17 (mac): Fix processing of boolean option --redirect. + 10/11/17 (pjf): Add --switchwaittime option. + 01/05/18 (pjf): Sort arguments into groups. + 02/11/18 (pjf): - Pass through MCSCRIPT_INSTALL_HOME. - Use job_environ for submission. + 07/06/18 (pjf): - Pass queue via MCSCRIPT_RUN_QUEUE. - Remove MCSCRIPT_HYBRID_NODESIZE. + 06/04/19 (pjf): - Add hook for individual configurations to add command-line arguments. - Move --switchwaittime option into config-slurm-nersc.py. + 09/11/19 (pjf): Add expert mode argument. 
""" import argparse import os import shutil import subprocess import sys import mcscript.config # local configuration (usually symlink) import mcscript.utils ################################################################ # argument parsing ################################################################ parser = argparse.ArgumentParser( description="Queue submission for numbered run.", usage= "%(prog)s [option] run queue|RUN wall [var1=val1, ...]\n", formatter_class=argparse.ArgumentDefaultsHelpFormatter, epilog= """Simply omit the queue name and leave off the wall time for a local interactive run. Environment variables for qsubm are described in INSTALL.md. Note that qsubm relies upon code in the local `config.py` configuration file for the system or cluster you are running on, in order to interpret the following arguments and translate them into arguments for your local batch system. Your local configuration file might not make use of or support all the parallel environment options listed below. """ ) # general arguments parser.add_argument("run", help="Run number (e.g., 0000 for run0000)") # latter arguments are made optional to simplify bare-bones syntax for --toc, etc., calls parser.add_argument("queue", nargs='?', help="Submission queue, or RUN for direct interactive run", default="RUN") parser.add_argument("wall", type=int, nargs='?', help="Wall time (minutes)", default=60) ##parser.add_argument("vars", nargs="?", help="Environment variables to pass to script, with optional values, comma delimited (e.g., METHOD2, PARAM=1.0)") parser.add_argument("--here", action="store_true", help="Force run in current working directory") parser.add_argument("--vars", help="Environment variables to pass to script, with optional values, comma delimited (e.g., --vars=METHOD2, PARAM=1.0)") ## parser.add_argument("--stat", action="store_true", help="Display queue status information") parser.add_argument("--num", type=int, default=1, help="Number of repetitions") parser.add_argument("--opt", action="append", help="Additional option arguments to be passed to job submission command (e.g., --opt=\"-m ae\" or --opt=\"--mail-type=END,FAIL\"), may be repeated (e.g., --opt=\"-A acct\" --opt=\"-a 1200\"); beware the spaces may be important to the job submission command") parser.add_argument("--expert", action="store_true", help="Run mcscript in expert mode") # serial run parallelization parameters serial_group = parser.add_argument_group("serial run options (single-node, non-MPI)") serial_group.add_argument("--serialthreads", type=int, default=1, help="OMP threads") # hybrid run parallelization parameters # # Not all local configuration files need necessarily require or # respect all of the following parameters. 
hybrid_group = parser.add_argument_group("hybrid run options") hybrid_group.add_argument("--nodes", type=int, default=1, help="number of nodes") hybrid_group.add_argument("--ranks", type=int, default=1, help="number of MPI ranks") hybrid_group.add_argument("--threads", type=int, default=1, help="OMP threads per rank") hybrid_group.add_argument("--nodesize", type=int, default=0, help="logical threads available per node" " (might instead be interpreted as physical CPUs depending on local config file)") ##hybrid_group.add_argument("--undersubscription", type=int, default=1, help="undersubscription factor (e.g., spread=2 requests twice the cores needed)") # multi-task interface: invocation modes task_mode_group = parser.add_mutually_exclusive_group() task_mode_group.add_argument("--toc", action="store_true", help="Invoke run script to generate task table of contents") task_mode_group.add_argument("--unlock", action="store_true", help="Delete any .lock or .fail flags for tasks") task_mode_group.add_argument("--archive", action="store_true", help="Invoke archive-generation run") task_mode_group.add_argument("--prerun", action="store_true", help="Invoke prerun mode, for argument validation and file staging only") task_mode_group.add_argument("--offline", action="store_true", help="Invoke offline mode, to create batch scripts for later submission instead of running compute codes") # multi-task interface: task selection task_selection_group = parser.add_argument_group("multi-task run options") task_selection_group.add_argument("--pool", help="Set task pool (or ALL) for task selection") task_selection_group.add_argument("--phase", type=int, default=0, help="Set task phase for task selection") task_selection_group.add_argument("--start", type=int, help="Set starting task number for task selection") task_selection_group.add_argument("--limit", type=int, help="Set task count limit for task selection") task_selection_group.add_argument("--redirect", default="True", choices=["True", "False"], help="Allow redirection of standard" " output/error to file (may want to disable for interactive debugging)") # some special options (deprecated?) 
##parser.add_argument("--epar", type=int, default=None, help="Width for embarassingly parallel job") ##parser.add_argument("--nopar", action="store_true", help="Disable parallel resource requests (for use on special serial queues)") # site-local options try: mcscript.config.qsubm_arguments(parser) except AttributeError: # local config doesn't provide arguments, ignore gracefully pass ##parser.print_help() ##print args = parser.parse_args() ##printargs ################################################################ # special mode: status display ################################################################ # TODO # will have to modify argument processing to allow no arguments, local # customization for qstat # @ i = 0 # while (($i == 0) || ($loop)) # @ i++ # clear # echo "****************************************************************" # qstat -u $user # if ($loop) sleep 5 # end ## if (args.stat): ## pass ################################################################ # environment processing ################################################################ if (args.here): run_home = os.environ["PWD"] elif ("MCSCRIPT_RUN_HOME" in os.environ): run_home = os.environ["MCSCRIPT_RUN_HOME"] else: print("MCSCRIPT_RUN_HOME not found in environment") exit(1) if (args.here): work_home = os.environ["PWD"] elif ("MCSCRIPT_WORK_HOME" in os.environ): work_home = os.environ["MCSCRIPT_WORK_HOME"] else: print("MCSCRIPT_WORK_HOME not found in environment") exit(1) if (args.here): launch_home = os.environ["PWD"] elif ("MCSCRIPT_LAUNCH_HOME" in os.environ): launch_home = os.environ["MCSCRIPT_LAUNCH_HOME"] else: launch_home = work_home if ("MCSCRIPT_RUN_PREFIX" in os.environ): run_prefix = os.environ["MCSCRIPT_RUN_PREFIX"] else: print("MCSCRIPT_RUN_PREFIX not found in environment") exit(1) if ("MCSCRIPT_PYTHON" in os.environ): python_executable = os.environ["MCSCRIPT_PYTHON"] else: print("MCSCRIPT_PYTHON not found in environment") exit(1) if ("MCSCRIPT_DIR" in os.environ): qsubm_path = os.environ["MCSCRIPT_DIR"] else: print("MCSCRIPT_DIR not found in environment") exit(1) ################################################################ # argument processing ################################################################ # set run name run = run_prefix + args.run print("Run:", run) # ...and process run file script_extensions = [".py", ".csh"] job_file = None for extension in script_extensions: filename = os.path.join(run_home, run+extension) if (filename): job_file = filename job_extension = extension break print(" Run home:", run_home) # useful to report now, in case job file missing if (job_file is None): print("No job file %s.* found with an extension in the set %s." 
% (run, script_extensions)) exit(1) print(" Job file:", job_file) # set queue and flag batch or local mode # force local run for task.py toc mode if ((args.queue == "RUN") or args.toc or args.unlock): run_mode = "local" run_queue = "local" print(" Mode:", run_mode) else: run_mode = "batch" run_queue = args.queue print(" Mode:", run_mode, "(%s)" % args.queue) # set wall time wall_time_min = args.wall print(" Wall time (min): {:d}".format(wall_time_min)) wall_time_sec = wall_time_min*60 # environment definitions: general run parameters environment_definitions = [ "MCSCRIPT_RUN={:s}".format(run), "MCSCRIPT_JOB_FILE={:s}".format(job_file), "MCSCRIPT_RUN_MODE={:s}".format(run_mode), "MCSCRIPT_RUN_QUEUE={:s}".format(run_queue), "MCSCRIPT_WALL_SEC={:d}".format(wall_time_sec) ] # environment definitions: serial run parameters environment_definitions += [ "MCSCRIPT_SERIAL_THREADS={:d}".format(args.serialthreads) ] # environment definitions: hybrid run parameters environment_definitions += [ "MCSCRIPT_HYBRID_NODES={:d}".format(args.nodes), "MCSCRIPT_HYBRID_RANKS={:d}".format(args.ranks), "MCSCRIPT_HYBRID_THREADS={:d}".format(args.threads), ] # set multi-task run parameters if (args.toc): task_mode = mcscript.task.TaskMode.kTOC elif (args.unlock): task_mode = mcscript.task.TaskMode.kUnlock elif (args.archive): task_mode = mcscript.task.TaskMode.kArchive elif (args.prerun): task_mode = mcscript.task.TaskMode.kPrerun elif (args.offline): task_mode = mcscript.task.TaskMode.kOffline else: task_mode = mcscript.task.TaskMode.kRun # TODO (mac): neaten up so that these arguments are always provided # (and simplify this code to a simple list += as above) environment_definitions.append("MCSCRIPT_TASK_MODE={:d}".format(task_mode.value)) if (args.pool is not None): environment_definitions.append("MCSCRIPT_TASK_POOL={:s}".format(args.pool)) if (args.phase is not None): environment_definitions.append("MCSCRIPT_TASK_PHASE={:d}".format(args.phase)) if (args.start is not None): environment_definitions.append("MCSCRIPT_TASK_START_INDEX={:d}".format(args.start)) if (args.limit is not None): environment_definitions.append("MCSCRIPT_TASK_COUNT_LIMIT={:d}".format(args.limit)) environment_definitions.append("MCSCRIPT_TASK_REDIRECT={:s}".format(args.redirect)) # pass through install directory if os.environ.get("MCSCRIPT_INSTALL_HOME"): environment_definitions += [ "MCSCRIPT_INSTALL_HOME={:s}".format(os.environ["MCSCRIPT_INSTALL_HOME"]) ] elif os.environ.get("MCSCRIPT_INSTALL_DIR"): # TODO remove deprecated environment variable print("****************************************************************") print("MCSCRIPT_INSTALL_DIR is now MCSCRIPT_INSTALL_HOME.") print("Please update your environment variables.") print("****************************************************************") environment_definitions += [ "MCSCRIPT_INSTALL_HOME={:s}".format(os.environ["MCSCRIPT_INSTALL_DIR"]) ] else: print("MCSCRIPT_INSTALL_HOME not found in environment") exit(1) # include additional environment setup if defined if os.environ.get("MCSCRIPT_SOURCE"): environment_definitions += [ "MCSCRIPT_SOURCE={:s}".format(os.environ["MCSCRIPT_SOURCE"]) ] # set user-specified variable definitions # Note conditional is required since "".split(", ") is [""] rather than []. 
if (args.vars is None): user_environment_definitions = [] else: user_environment_definitions = args.vars.split(",") print(" User environment definitions:", user_environment_definitions) environment_definitions += user_environment_definitions ################################################################ # directory setup ################################################################ # set up scratch directory (for batch job work) # name is defined here, but creation is left up to job script, # in case scratch is local to the compute node work_dir = os.path.join(work_home, run) ## if ( not os.path.exists(work_dir)): ## mcscript.utils.mkdir(work_dir) environment_definitions.append("MCSCRIPT_WORK_DIR=%s" % work_dir) # set up run launch directory (for batch job output logging) launch_dir_parent = os.path.join(launch_home, run) if ( not os.path.exists(launch_home)): mcscript.utils.mkdir(launch_home) if ( not os.path.exists(launch_dir_parent)): mcscript.utils.mkdir(launch_dir_parent) if (args.archive): # archive mode # launch in archive directory rather than usual batch job output directory # (important since if batch job server directs output to the # regular output directory while tar is archiving that directory, # tar will return with an error code, torpedoing the archive task) launch_dir = os.path.join(launch_home, run, "archive") else: # standard run mode launch_dir = os.path.join(launch_home, run, "batch") if ( not os.path.exists(launch_dir)): mcscript.utils.mkdir(launch_dir) environment_definitions.append("MCSCRIPT_LAUNCH_DIR=%s" % launch_dir) ################################################################ # job environment setup ################################################################ # construct job name job_name = "%s" % run ##job_name += "-w%d" % args.width if (args.pool is not None): job_name += "-%s" % args.pool job_name += "-%s" % args.phase print(" Job name:", job_name) # process environment definitions # regularize environment definitions # Convert all plain variable name definitions "VAR" into definition # as null string "VAR=". Note that "VAR" would be an environment # variable pass-through request to qsub, but it causes trouble with # defining an environment for local execution. So doing this # regularization simplifies further processing and ensures # uniformity of the environment between batch and local runs. 
for i in range(len(environment_definitions)): if (not "=" in environment_definitions[i]): environment_definitions[i] += "=" print() print("Vars:", ",".join(environment_definitions)) # for local run job_environ=os.environ environment_keyvalues = [ entry.split("=") for entry in environment_definitions ] job_environ.update(dict(environment_keyvalues)) ################################################################ # run invocation ################################################################ # flush script output before invoking job print() sys.stdout.flush() # handle batch run if (run_mode == "batch"): # set local qsub arguments (submission_args, submission_input_string, repetitions) = mcscript.config.submission(job_name, job_file, qsubm_path, environment_definitions, args) # notes: options must come before command on some platforms (e.g., Univa) print(" ".join(submission_args)) print(submission_input_string) print() print("-"*64) for i in range(repetitions): process = subprocess.Popen( submission_args, stdin=subprocess.PIPE, # to take input from communicate stdout=subprocess.PIPE, # to send output to communicate -- default merged stderr env=job_environ, cwd=launch_dir ) stdout_bytes = process.communicate(input=submission_input_string)[0] stdout_string = stdout_bytes.decode("utf-8") print(stdout_string) # handle interactive run # Note: We call interpreter rather than trying to directly execute # job file since this saves us from bothering with execute permissions. # But, beware the interpreter enforced by the script's shebang line might # be different from the version of the interpreter found in the below invocation, # especially in a "module" environment. elif (run_mode == "local"): if (extension == ".py"): popen_args = [python_executable, job_file] elif (extension == ".csh"): popen_args = ["csh", job_file] print() print("-"*64) process = subprocess.Popen(popen_args, cwd=launch_dir, env=job_environ) process.wait()
[]
[]
[ "MCSCRIPT_SOURCE", "MCSCRIPT_INSTALL_HOME", "MCSCRIPT_PYTHON", "MCSCRIPT_LAUNCH_HOME", "MCSCRIPT_DIR", "MCSCRIPT_RUN_PREFIX", "MCSCRIPT_RUN_HOME", "PWD", "MCSCRIPT_INSTALL_DIR", "MCSCRIPT_WORK_HOME" ]
[]
["MCSCRIPT_SOURCE", "MCSCRIPT_INSTALL_HOME", "MCSCRIPT_PYTHON", "MCSCRIPT_LAUNCH_HOME", "MCSCRIPT_DIR", "MCSCRIPT_RUN_PREFIX", "MCSCRIPT_RUN_HOME", "PWD", "MCSCRIPT_INSTALL_DIR", "MCSCRIPT_WORK_HOME"]
python
10
0
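qsubm regularizes its environment definitions (a bare "VAR" becomes "VAR=") and then folds them into the environment handed to the submission command. A small stand-alone sketch of that step is shown below; the definition list is illustrative only, and unlike the script, this version works on a copy of the environment rather than updating os.environ in place.

```python
import os

def build_job_environ(definitions):
    """Regularize ['A=1', 'B'] to ['A=1', 'B='] and merge into a copy of the environment."""
    regularized = [d if '=' in d else d + '=' for d in definitions]
    env = dict(os.environ)  # copy, so the parent process environment is left untouched
    env.update(entry.split('=', 1) for entry in regularized)
    return env

env = build_job_environ(['MCSCRIPT_RUN=run0000', 'MCSCRIPT_TASK_POOL=ALL', 'DEBUG'])
print(env['MCSCRIPT_RUN'], repr(env['DEBUG']))  # run0000 ''
```

Splitting on the first "=" only means values may themselves contain "=" signs, which a plain split on every "=" would mangle.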
etc/generate_post_train_dna_seq/merge_input.py
import numpy as np import os import sys def load_data(File): seqs = np.load(File, allow_pickle = True) seqs = seqs.tolist() return seqs def merge_input100(dirPrefix, pmode, samples_num): maxLen = int(sys.argv[1]) #seqsNum = 32194 #samples_num = [6375003,6375000,6375000,6375000,6374212,6361770,2790768] X_pre = np.memmap(dirPrefix+"/" + pmode + "_x_pre.npy",mode='w+', shape=(sum(samples_num),maxLen - 1)) X_post = np.memmap(dirPrefix+"/" + pmode + "_x_post.npy",mode='w+',shape=(sum(samples_num),maxLen - 1)) Y_all = np.memmap(dirPrefix+"/" + pmode + "_y.npy",mode='w+',shape=(sum(samples_num))) index = 0 start = 0 fileName = dirPrefix+"/" + pmode + "_x_" + str(index) + ".npy" while os.path.isfile(fileName): #" + pmode + "X = np.lib.format.open_memmap(dirPrefix+"_X_"+str(index*batchNum)+'.npy') X = np.load(dirPrefix+"/" + pmode + "_x_" + str(index) + ".npy") #" + pmode + "Y = np.lib.format.open_memmap(dirPrefix+"_y_"+str(index*batchNum)+'.npy') Y = np.load(dirPrefix+"/" + pmode + "_y_" + str(index) + ".npy") if samples_num[index] != X.shape[1]: print("error: " + pmode + "" + str(index)) exit(-1) #print(start,start+samples_num[index],samples_num[index]) X_pre[start:start+samples_num[index]] = X[0][:] X_post[start:start+samples_num[index]] = X[1][:] Y_all[start:start+samples_num[index]] = Y[:] del X, Y start += samples_num[index] index+=1 fileName = dirPrefix+"/" + pmode + "_x_" + str(index) + ".npy" X_pre.flush() X_post.flush() Y_all.flush() return [X_pre,X_post],Y_all if __name__ == '__main__': root_path = os.environ['d'] samples_num = load_data(root_path + '/data/post_train/input' + sys.argv[1] + '/train_valid_num.npy') X,Y = merge_input100(root_path + '/data/post_train/input' + sys.argv[1], 'train', samples_num[0: len(samples_num) - 1]) print("total: %d samples for train, " % Y.shape[0], end = '') X,Y = merge_input100(root_path + '/data/post_train/input' + sys.argv[1], 'valid', samples_num[len(samples_num) - 1: len(samples_num)]) # print(X[0].shape[0]) print("%d samples for valid" % Y.shape[0]) #print('merge ok') #print(X.shape) #print(X[0]) #print(X[1]) #print(Y)
[]
[]
[ "d" ]
[]
["d"]
python
1
0
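merge_input.py concatenates many per-shard .npy arrays into large on-disk arrays via numpy memmaps, so the merged result never has to fit in RAM. A toy version of the same pattern, with made-up shapes and file names, is sketched below.

```python
import numpy as np

# Toy shards standing in for the per-index train_x_<i>.npy files.
shards = [np.arange(6, dtype=np.float32).reshape(3, 2),
          np.arange(6, 14, dtype=np.float32).reshape(4, 2)]
counts = [s.shape[0] for s in shards]

# Preallocate the merged array on disk, then copy each shard into its slice.
merged = np.memmap("merged_x.npy", dtype=np.float32, mode="w+",
                   shape=(sum(counts), 2))
start = 0
for shard, n in zip(shards, counts):
    merged[start:start + n] = shard
    start += n
merged.flush()  # make sure everything is written back to the file

print(merged.shape)  # (7, 2)
```

Note that np.memmap writes raw bytes: despite the .npy suffix used here and in the script, the result is not a self-describing .npy file (np.lib.format.open_memmap would produce one).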
cmd/deployer/deployer.go
// This Source Code Form is subject to the terms of the MIT License. // If a copy of the MIT License was not distributed with this // file, you can obtain one at https://opensource.org/licenses/MIT. // // Copyright (c) DUSK NETWORK. All rights reserved. package main import ( "context" "fmt" "os" "os/signal" "syscall" "time" "github.com/dusk-network/dusk-blockchain/pkg/config" log "github.com/sirupsen/logrus" "github.com/spf13/viper" ) var ( duskPath = os.Getenv("DUSK_BLOCKCHAIN_PATH") ruskPath = os.Getenv("RUSK_PATH") ) const ( pingTimeout = 10 * time.Second checkInterval = 5 * time.Second maxRetryLimit = 5 unixSocket = "unix" ) // Deployer is an application that simplifies the procedure of (re)starting a // dusk-blockchain node (both dusk-blockchain and rusk services). It should also // facilitate automatic diagnostic of runtime issues. type Deployer struct { ConfigPath string services []Service interrupt chan os.Signal Context context.Context cancel context.CancelFunc } // Run should start all services and facilitate automatic faults diagnostic. func (d *Deployer) Run() { d.interrupt = make(chan os.Signal, 1) signal.Notify(d.interrupt, os.Interrupt, syscall.SIGTERM) if err := d.startAll(); err != nil { log.WithError(err).Fatal("failed to start") return } d.Context, d.cancel = context.WithCancel(context.Background()) defer d.cancel() var next bool for { s := &d.services[0] if next { s = &d.services[1] } if !d.checkService(s) { log.Info("node terminated") break } next = !next } } func (d *Deployer) cleanup(conf *config.Registry) { // Clean up procedure if conf.RPC.Network == unixSocket { path := conf.RPC.Address if _, err := os.Stat(path); !os.IsNotExist(err) { if err := os.Remove(path); err != nil { log.WithError(err).WithField("path", path). Warn("couldn't delete unix socket") } } } if conf.RPC.Rusk.Network == unixSocket { path := conf.RPC.Rusk.Address if _, err := os.Stat(path); !os.IsNotExist(err) { if err := os.Remove(path); err != nil { log.WithError(err).WithField("path", path). Warn("couldn't delete unix socket") } } } } // validate validates both config and the environment. func (d *Deployer) validate() bool { return true } func (d *Deployer) startAll() error { log.Info("run a node") // Load and validate configuration file if _, err := os.Stat(duskPath); os.IsNotExist(err) { return fmt.Errorf("DUSK_BLOCKCHAIN_PATH %s does not exist: %w", duskPath, err) } if _, err := os.Stat(ruskPath); os.IsNotExist(err) { return fmt.Errorf("RUSK_PATH %s does not exist: %w", ruskPath, err) } viper.SetConfigFile(d.ConfigPath) if err := viper.ReadInConfig(); err != nil { log.WithError(err). WithField("path", d.ConfigPath). Fatal("couldn't read config file") } var conf config.Registry if err := viper.Unmarshal(&conf); err != nil { _, _ = fmt.Fprintln(os.Stdout, "Could not decode config file "+err.Error()) os.Exit(1) } ruskAddr := conf.RPC.Rusk.Address if conf.RPC.Rusk.Network == unixSocket { ruskAddr = "unix://" + conf.RPC.Rusk.Address } d.validate() // Clean up should remove any drawbacks for a normal system (re)start // E.g remove closed unix socket files, release busy sockets etc.. d.cleanup(&conf) var err error ps := make([]*os.Process, 2) // Start dusk service ps[0], err = startProcess(duskPath, "--config", d.ConfigPath) if err != nil { log.WithError(err). WithField("process", duskPath). Fatal("couldn't start process") return nil } // Start rusk service // Optionally, a flag here will switch between mockrusk and native rusk service. 
ps[1], err = startProcess(ruskPath, "mockrusk", "--rusknetwork", conf.RPC.Rusk.Network, "--ruskaddress", conf.RPC.Rusk.Address, "--walletstore", conf.Wallet.Store, "--walletfile", conf.Wallet.File, "--configfile", d.ConfigPath) if err != nil { log.WithError(err). WithField("process", ruskPath). Fatal("couldn't start process") return err } d.services = make([]Service, 2) d.services[0] = Service{ Name: "dusk", Process: ps[0], Addr: conf.Gql.Address, PingFunc: PingDusk, } d.services[1] = Service{ Name: "rusk", Process: ps[1], Addr: ruskAddr, PingFunc: PingRusk, } return nil } // stopAll sends system (interrupt) signal to all processes. func (d *Deployer) stopAll(s os.Signal) { for _, serv := range d.services { log := log.WithField("signal", s.String()). WithField("process", serv.Process.Pid). WithField("service", serv.Name) log.Info("send signal") if err := serv.Process.Signal(s); err != nil { log.WithError(err).Error("failed to run process") } time.Sleep(2 * time.Second) } } func (d *Deployer) checkService(s *Service) bool { timer := time.NewTimer(checkInterval) select { case s := <-d.interrupt: d.stopAll(s) return false case <-timer.C: } return d.controlRun(s) } func (d *Deployer) controlRun(s *Service) bool { ctx, cancel := context.WithTimeout(d.Context, pingTimeout) go func(ctx context.Context, cancel context.CancelFunc, s *Service) { defer cancel() if err := d.control(ctx, s); err != nil { log.WithError(err). WithField("pid", s.Process.Pid). WithField("name", s.Name). WithField("retries", s.retry). Warn("check service failed") } else { log.WithField("pid", s.Process.Pid). WithField("name", s.Name).Info("check service done") } }(ctx, cancel, s) select { case s := <-d.interrupt: d.stopAll(s) return false case <-ctx.Done(): } return true } func (d *Deployer) control(ctx context.Context, s *Service) error { var err error if err = s.PingFunc(ctx, s.Addr); err != nil { s.retry++ if s.retry == maxRetryLimit { s.retry = 0 // We reach max retry limit. // Collect core-dump and try to restart all. // Optionally, logger level could be increased as well. d.stopAll(syscall.SIGABRT) return d.startAll() } } return err }
[ "\"DUSK_BLOCKCHAIN_PATH\"", "\"RUSK_PATH\"" ]
[]
[ "RUSK_PATH", "DUSK_BLOCKCHAIN_PATH" ]
[]
["RUSK_PATH", "DUSK_BLOCKCHAIN_PATH"]
go
2
0
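The deployer's control step increments a per-service retry counter each time a ping fails and, once the counter reaches the retry limit, resets it and restarts everything. The same bookkeeping reduced to a few lines of Python is sketched below; `ping` and `restart_all` are placeholder callables, not part of the repository.

```python
MAX_RETRY_LIMIT = 5

def control(service, ping, restart_all):
    """Mirror the Go logic: count consecutive ping failures, restart everything at the limit."""
    try:
        ping(service['addr'])
    except Exception as err:
        service['retries'] = service.get('retries', 0) + 1
        if service['retries'] >= MAX_RETRY_LIMIT:
            service['retries'] = 0
            restart_all()  # the Go code additionally sends SIGABRT first to collect a core dump
        return err
    return None
```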
main.go
package main import ( "context" "encoding/json" "flag" "html/template" "log" "net/http" "os" "github.com/joho/godotenv" "github.com/workos-inc/workos-go/pkg/sso" ) func main() { err := godotenv.Load() if err != nil { log.Fatal("Error loading .env file") } var conf struct { Addr string APIKey string ClientID string RedirectURI string Domain string Provider string } type Profile struct { First_name string Last_name string Raw_profile string } flag.StringVar(&conf.Addr, "addr", ":3042", "The server addr.") flag.StringVar(&conf.APIKey, "api-key", os.Getenv("WORKOS_API_KEY"), "The WorkOS API key.") flag.StringVar(&conf.ClientID, "client-id", os.Getenv("WORKOS_CLIENT_ID"), "The WorkOS client id.") flag.StringVar(&conf.RedirectURI, "redirect-uri", os.Getenv("WORKOS_REDIRECT_URI"), "The redirect uri.") flag.StringVar(&conf.Domain, "domain", os.Getenv("WORKOS_DOMAIN"), "The domain used to register a WorkOS SSO connection.") flag.StringVar(&conf.Provider, "provider", "", "The OAuth provider used for the SSO connection.") flag.Parse() log.Printf("launching sso demo with configuration: %+v", conf) http.Handle("/", http.FileServer(http.Dir("./static"))) // Configure the WorkOS SSO SDK: sso.Configure(conf.APIKey, conf.ClientID) // Handle login http.Handle("/login", sso.Login(sso.GetAuthorizationURLOptions{ //Instead of domain, you can now use connection ID to associate a user to the appropriate connection. Domain: conf.Domain, RedirectURI: conf.RedirectURI, })) // Handle login redirect: tmpl := template.Must(template.ParseFiles("./static/logged_in.html")) http.HandleFunc("/callback", func(w http.ResponseWriter, r *http.Request) { log.Printf("callback is called with %s", r.URL) // Retrieving user profile: profile, err := sso.GetProfileAndToken(context.Background(), sso.GetProfileAndTokenOptions{ Code: r.URL.Query().Get("code"), }) if err != nil { log.Printf("get profile failed: %s", err) w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) return } // Display user profile: b, err := json.MarshalIndent(profile, "", " ") if err != nil { log.Printf("encoding profile failed: %s", err) w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) } // define variable to hold data var data map[string]interface{} // decode the json and set pointer to the data variable if err := json.Unmarshal(b, &data); err != nil { panic(err) } // Unnest the profile first_name := data["profile"].(map[string]interface{})["first_name"] last_name := data["profile"].(map[string]interface{})["last_name"] // Convert to strings first_name_string := first_name.(string) last_name_string := last_name.(string) raw_profile := string(b) // Create instance of Profile struct including profile values this_profile := Profile{first_name_string, last_name_string, raw_profile} // Render the template tmpl.Execute(w, this_profile) }) if err := http.ListenAndServe(conf.Addr, nil); err != nil { log.Panic(err) } }
[ "\"WORKOS_API_KEY\"", "\"WORKOS_CLIENT_ID\"", "\"WORKOS_REDIRECT_URI\"", "\"WORKOS_DOMAIN\"" ]
[]
[ "WORKOS_DOMAIN", "WORKOS_API_KEY", "WORKOS_CLIENT_ID", "WORKOS_REDIRECT_URI" ]
[]
["WORKOS_DOMAIN", "WORKOS_API_KEY", "WORKOS_CLIENT_ID", "WORKOS_REDIRECT_URI"]
go
4
0
gowebdev/emailHTML/main.go
package main import ( "fmt" "log" "net/smtp" "os" ) func main() { // sender data from := os.Getenv("FromEmailAddr") //ex: "[email protected]" password := os.Getenv("SMTPpwd") // ex: "ieiemcjdkejspqz" // receiver address toEmail := os.Getenv("ToEmailAddr") // ex: "[email protected]" to := []string{toEmail} // smtp - Simple Mail Transfer Protocol host := "smtp.gmail.com" port := "587" address := host + ":" + port // Set up authentication information. auth := smtp.PlainAuth("", from, password, host) msg := []byte( "From: Grow Adept <" + from + ">\r\n" + "To: " + toEmail + "\r\n" + "Subject: Now with HTML!\r\n" + "MIME: MIME-version: 1.0\r\n" + "Content-Type: text/html; charset=\"UTF-8\";\r\n" + "\r\n" + // "<html><h1>Golang Gophers</h1><ul><li>Robert Griesemer</li><li>Rob Pike</li><li>Ken Thompson</li></ul></html>") `<html> <h1>Designers of Golang</h1> <ul> <li>Robert Griesemer</li> <li>Rob Pike</li> <li>Ken Thompson</li> </ul> </html>`) err := smtp.SendMail(address, auth, from, to, msg) if err != nil { log.Fatal(err) } fmt.Println("Check for sent email!") }
[ "\"FromEmailAddr\"", "\"SMTPpwd\"", "\"ToEmailAddr\"" ]
[]
[ "SMTPpwd", "FromEmailAddr", "ToEmailAddr" ]
[]
["SMTPpwd", "FromEmailAddr", "ToEmailAddr"]
go
3
0
CongressionalRecord/wsgi.py
""" WSGI config for CongressionalRecord project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks # if running multiple sites in the same mod_wsgi process. To fix this, use # mod_wsgi daemon mode with each site in its own daemon process, or use # os.environ["DJANGO_SETTINGS_MODULE"] = "CongressionalRecord.settings" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CongressionalRecord.settings") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
[]
[]
[ "DJANGO_SETTINGS_MODULE" ]
[]
["DJANGO_SETTINGS_MODULE"]
python
1
0
exercises/rational-numbers/rational_numbers_test.py
from __future__ import division import unittest from rational_numbers import Rational # Tests adapted from `problem-specifications//canonical-data.json` @ v1.0.0 class RationalNumbersTest(unittest.TestCase): # Test addition def test_add_two_positive(self): self.assertEqual(Rational(1, 2) + Rational(2, 3), Rational(7, 6)) def test_add_positive_and_negative(self): self.assertEqual(Rational(1, 2) + Rational(-2, 3), Rational(-1, 6)) def test_add_two_negative(self): self.assertEqual(Rational(-1, 2) + Rational(-2, 3), Rational(-7, 6)) def test_add_opposite(self): self.assertEqual(Rational(1, 2) + Rational(-1, 2), Rational(0, 1)) # Test subtraction def test_subtract_two_positive(self): self.assertEqual(Rational(1, 2) - Rational(2, 3), Rational(-1, 6)) def test_subtract_positive_and_negative(self): self.assertEqual(Rational(1, 2) - Rational(-2, 3), Rational(7, 6)) def test_subtract_two_negative(self): self.assertEqual(Rational(-1, 2) - Rational(-2, 3), Rational(1, 6)) def test_subtract_from_self(self): self.assertEqual(Rational(1, 2) - Rational(1, 2), Rational(0, 1)) # Test multiplication def test_multiply_two_positive(self): self.assertEqual(Rational(1, 2) * Rational(2, 3), Rational(1, 3)) def test_multiply_negative_by_positive(self): self.assertEqual(Rational(-1, 2) * Rational(2, 3), Rational(-1, 3)) def test_multiply_two_negative(self): self.assertEqual(Rational(-1, 2) * Rational(-2, 3), Rational(1, 3)) def test_multiply_reciprocal(self): self.assertEqual(Rational(1, 2) * Rational(2, 1), Rational(1, 1)) def test_multiply_by_one(self): self.assertEqual(Rational(1, 2) * Rational(1, 1), Rational(1, 2)) def test_multiply_by_zero(self): self.assertEqual(Rational(1, 2) * Rational(0, 1), Rational(0, 1)) # Test division def test_divide_two_positive(self): self.assertEqual(Rational(1, 2) / Rational(2, 3), Rational(3, 4)) def test_divide_positive_by_negative(self): self.assertEqual(Rational(1, 2) / Rational(-2, 3), Rational(-3, 4)) def test_divide_two_negative(self): self.assertEqual(Rational(-1, 2) / Rational(-2, 3), Rational(3, 4)) def test_divide_by_one(self): self.assertEqual(Rational(1, 2) / Rational(1, 1), Rational(1, 2)) # Test absolute value def test_absolute_value_of_positive(self): self.assertEqual(abs(Rational(1, 2)), Rational(1, 2)) def test_absolute_value_of_negative(self): self.assertEqual(abs(Rational(-1, 2)), Rational(1, 2)) def test_absolute_value_of_zero(self): self.assertEqual(abs(Rational(0, 1)), Rational(0, 1)) # Test exponentiation of a rational number def test_raise_a_positive_rational_to_a_positive_integer_power(self): self.assertEqual(Rational(1, 2) ** 3, Rational(1, 8)) def test_raise_a_negative_rational_to_a_positive_integer_power(self): self.assertEqual(Rational(-1, 2) ** 3, Rational(-1, 8)) def test_raise_zero_to_an_integer_power(self): self.assertEqual(Rational(0, 1) ** 5, Rational(0, 1)) def test_raise_one_to_an_integer_power(self): self.assertEqual(Rational(1, 1) ** 4, Rational(1, 1)) def test_raise_a_positive_rational_to_the_power_of_zero(self): self.assertEqual(Rational(1, 2) ** 0, Rational(1, 1)) def test_raise_a_negative_rational_to_the_power_of_zero(self): self.assertEqual(Rational(-1, 2) ** 0, Rational(1, 1)) # Test exponentiation of a real number to a rational number def test_raise_a_real_number_to_a_positive_rational(self): self.assertAlmostEqual(8 ** Rational(4, 3), 16.0, places=8) def test_raise_a_real_number_to_a_negative_rational(self): self.assertAlmostEqual( 9 ** Rational(-1, 2), 0.3333333333333333, places=8 ) def 
test_raise_a_real_number_to_a_zero_rational(self): self.assertAlmostEqual(2 ** Rational(0, 1), 1.0, places=8) # Test reduction to lowest terms def test_reduce_positive(self): self.assertEqual(Rational(2, 4), Rational(1, 2)) def test_reduce_negative(self): self.assertEqual(Rational(-4, 6), Rational(-2, 3)) def test_reduce_rational_with_negative_denominator(self): self.assertEqual(Rational(3, -9), Rational(-1, 3)) def test_reduce_zero(self): self.assertEqual(Rational(0, 6), Rational(0, 1)) def test_reduce_integer(self): self.assertEqual(Rational(-14, 7), Rational(-2, 1)) def test_reduce_one(self): self.assertEqual(Rational(13, 13), Rational(1, 1)) if __name__ == '__main__': unittest.main()
[]
[]
[]
[]
[]
python
null
null
null
services/pages_api/src/app.py
""" Page API endpoints """ import pymongo from pymongo import MongoClient from bson.objectid import ObjectId from flask import Flask, request, abort from flask.json import JSONEncoder import os import logging logging.basicConfig(format='%(levelname)s :: %(asctime)s :: %(message)s', level=logging.DEBUG) from bson import json_util import base64 import json from flask import jsonify app = Flask(__name__) PAGE_SIZE = 200 @app.route('/api/v1/pages') def pages(): ''' 200 Pages per pagination iteration, pass page # ''' client = MongoClient(os.environ["DBCONNECT"]) db = client.pdfs page_num = int(request.args.get('pageNumber', '0')) curs = db.postprocess_pages.find().sort('_id').skip(PAGE_SIZE * page_num).limit(PAGE_SIZE) result_list = [] for result in curs: result['_id'] = str(result['_id']) result['pdf_id'] = str(result['pdf_id']) del result['bytes'] encoded = base64.encodebytes(result['resize_bytes']) result['resize_bytes'] = encoded.decode('ascii') del result['ocr_df'] result_list.append(result) results_obj = {'results': result_list} return jsonify(results_obj)
[]
[]
[ "DBCONNECT" ]
[]
["DBCONNECT"]
python
1
0
cmd/grpc.go
/* Copyright © 2021 NAME HERE <EMAIL ADDRESS> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cmd import ( "os" "github.com/Drinnn/kool-pix/application/grpc" "github.com/Drinnn/kool-pix/infrastructure/db" "github.com/spf13/cobra" ) var portNumber int // grpcCmd represents the grpc command var grpcCmd = &cobra.Command{ Use: "grpc", Short: "Start gRPC server", Run: func(cmd *cobra.Command, args []string) { database := db.ConnectDB(os.Getenv("env")) grpc.StartGrpcServer(database, portNumber) }, } func init() { rootCmd.AddCommand(grpcCmd) grpcCmd.Flags().IntVarP(&portNumber, "port", "p", 50051, "gRPC Server port") // Here you will define your flags and configuration settings. // Cobra supports Persistent Flags which will work for this command // and all subcommands, e.g.: // grpcCmd.PersistentFlags().String("foo", "", "A help for foo") // Cobra supports local flags which will only run when this command // is called directly, e.g.: // grpcCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") }
[ "\"env\"" ]
[]
[ "env" ]
[]
["env"]
go
1
0
kongoauth/wsgi.py
""" WSGI config for kongoauth project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kongoauth.settings") application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
main.go
package main import ( html "html/template" "io/ioutil" "os" "strconv" text "text/template" log "github.com/Sirupsen/logrus" "github.com/hyperboloide/dispatch" ) var ( // MainMailer connect to the queue and handle messages MainMailer *Mailer ) const defaultBody = ` <!doctype html> <html> <body>{{ . }}</body> </html>` // Configure the application from environement func Configure() { log.SetFormatter(&log.TextFormatter{ DisableColors: true, }) queue, err := dispatch.NewAMQPQueue( os.Getenv("QUEUE_NAME"), os.Getenv("QUEUE_HOST")) if err != nil { log.Fatal(err) } smtpPort, err := strconv.Atoi(os.Getenv("SMTP_PORT")) if err != nil { log.Fatal(err) } log.WithField("path", os.Getenv("TEMPLATES")).Info("Loading templates") tmpls, err := text.ParseGlob(os.Getenv("TEMPLATES")) if err != nil { log.Fatal(err) } MainMailer = &Mailer{ SMTP: SMTPConf{ Host: os.Getenv("SMTP_HOST"), Port: smtpPort, User: os.Getenv("SMTP_USER"), Password: os.Getenv("SMTP_PASSWORD"), }, Queue: queue, Sender: os.Getenv("SENDER"), Templates: tmpls, } if os.Getenv("HTML_BODY") == "" { log.Info("No html body set, using default body") if MainMailer.Body, err = html.New("body").Parse(defaultBody); err != nil { log.Fatal(err) } } else if content, err := ioutil.ReadFile(os.Getenv("HTML_BODY")); err != nil { log.Fatal(err) } else if MainMailer.Body, err = html.New("body").Parse(string(content[:])); err != nil { log.Fatal(err) } else { log.WithField("path", os.Getenv("HTML_BODY")).Info("HTML body provided") } } func main() { Configure() log.WithField("queue", os.Getenv("QUEUE_NAME")).Info("qmail started") if err := MainMailer.Queue.ListenBytes(MainMailer.Listenner); err != nil { log.Fatal("Program failed, exiting.") } }
[ "\"QUEUE_NAME\"", "\"QUEUE_HOST\"", "\"SMTP_PORT\"", "\"TEMPLATES\"", "\"TEMPLATES\"", "\"SMTP_HOST\"", "\"SMTP_USER\"", "\"SMTP_PASSWORD\"", "\"SENDER\"", "\"HTML_BODY\"", "\"HTML_BODY\"", "\"HTML_BODY\"", "\"QUEUE_NAME\"" ]
[]
[ "SMTP_PORT", "QUEUE_HOST", "HTML_BODY", "SENDER", "SMTP_USER", "SMTP_PASSWORD", "SMTP_HOST", "TEMPLATES", "QUEUE_NAME" ]
[]
["SMTP_PORT", "QUEUE_HOST", "HTML_BODY", "SENDER", "SMTP_USER", "SMTP_PASSWORD", "SMTP_HOST", "TEMPLATES", "QUEUE_NAME"]
go
9
0
cmd/server.go
package main import ( "fmt" "log" "net/http" "os" "strconv" "github.com/xhoms/xdrgateway" "github.com/xhoms/xdrgateway/xdrclient" ) var ( build string ) func main() { port := "8080" if envport, exists := os.LookupEnv("PORT"); exists { port = envport } offset := 0 if envOffset, exists := os.LookupEnv("OFFSET"); exists { if intval, err := strconv.Atoi(envOffset); err == nil { offset = intval } } debug := false if _, exists := os.LookupEnv("DEBUG"); exists { debug = true } parser := xdrgateway.NewBasicParser(offset, debug) fmt.Println("PAN-OS to Cortex XDR alert ingestion Gateway") fmt.Println("--------------------------------------------") fmt.Println("version:", xdrgateway.Version, build) fmt.Println(" - Send PAN_OS alerts to /in using HTTP POST") fmt.Println(" - The endpoint /stats provides runtime statistics") fmt.Println(" - Use the following payload in the HTTP Log Forwarding feature") fmt.Println(string(parser.DumpPayloadLayout())) client := xdrclient.NewClientFromEnv() pipeOps := xdrgateway.NewPipeOpsFromEnv() api := xdrgateway.NewAPI(parser, client, os.Getenv("PSK"), debug, pipeOps) http.HandleFunc("/stats", api.HandlerStats) http.HandleFunc("/dump", api.HandlerHint) http.HandleFunc("/in", api.HandlerIngestion) log.Println("starting http service on port", port) log.Fatal(http.ListenAndServe(":"+port, nil)) }
[ "\"PSK\"" ]
[]
[ "PSK" ]
[]
["PSK"]
go
1
0
pyngboard/credentials.py
import os class FileCredentials: def __init__(self, credentials_file): if credentials_file is None: credentials_file = os.path.expanduser("~") + "/.pingboard" self.credentials_file = credentials_file self.client_id = None self.client_secret = None def load(self): try: credentials = dict(line.strip().split('=') for line in open(self.credentials_file)) self.client_id = credentials['client_id'] self.client_secret = credentials['client_secret'] return True except Exception: return False class ArgsCredentials: def __init__(self, id_key, secret_key, **kwargs): self.client_id = None self.client_secret = None try: self.client_id = kwargs[id_key] self.client_secret = kwargs[secret_key] except KeyError: pass def load(self): return self.client_id is not None and self.client_secret is not None class Credentials: def __init__(self, **kwargs): self.chain = [ ArgsCredentials('client_id', 'client_secret', **kwargs), ArgsCredentials('PINGBOARD_CLIENT_ID', 'PINGBOARD_CLIENT_SECRET', **os.environ), FileCredentials(kwargs.get('credentials_file')) ] def load(self): loaded_credentials = None for credentials in self.chain: if credentials.load(): loaded_credentials = credentials break if not loaded_credentials: return False self.client_id = loaded_credentials.client_id self.client_secret = loaded_credentials.client_secret return True
[]
[]
[]
[]
[]
python
0
0
datastore_to_gcs/test/test_cloud_storage.py
# -*- coding: utf-8 -*- # # test_cloud_storage.py # # Copyright 2016 Socos LLC # import datetime import itertools import os import random import unittest from pprint import pprint import dateutil.parser import gae_stubs gae_stubs.init() import cloudstorage import datastore_to_gcs.cloud_storage as gcs from datastore_to_gcs import util QUICK = os.getenv('QUICK') == '1' class FixtureModel(util.CommonEqualityMixin): def __init__(self, id, email, foofloat, dt): self.id = id self.email = email self.foofloat = foofloat self.dt = dt class TestGCSClient(unittest.TestCase): bucket = 'test-bucket' debug = False dict_fixtures = [] model_fixtures = [] def setUp(self): for i in xrange(5): self.dict_fixtures.append({u'id': unicode(i), u'email': u'%[email protected]' % i, u'foofloat': random.random(), u'nested': {u'inner': i}, u'dingo': 1000 + i % 50}) self.model_fixtures.append(FixtureModel(id=unicode(i), email='%[email protected]' % i, foofloat=random.random(), dt=datetime.date(year=2015, month=1, day=1))) def tearDown(self): for item in gcs.list_objects(self.bucket): cloudstorage.delete(util.parse_cloud_storage_path(self.bucket, item)) def upload_json(self): filename = 'test-2015-08-14T18:46:04.json' directory = 'sms_logs/' path_base = os.path.join(self.bucket, directory) print path_base bucket, object_path = gcs.upload_data(self.dict_fixtures, path_base, filename) return object_path def upload_model_json(self): filename = 'test-2015-09-14T20:46:05.json' directory = 'sms_logs/' path_base = os.path.join(self.bucket, directory) print path_base bucket, object_path = gcs.upload_data(self.model_fixtures, path_base, filename) return object_path def test_list_empty_bucket(self): self.assertEqual(len(gcs.list_objects(self.bucket)), 0) def test_list_dir(self): # Add two files in dir, one not in dir test_dir = 'td/' gcs.upload_data(self.dict_fixtures[0:10], self.bucket, test_dir + 'foo.json') gcs.upload_data(self.dict_fixtures[10:20], self.bucket, test_dir + 'bar.json') gcs.upload_data(self.dict_fixtures[10:20], self.bucket, 'baz.json') # Check that the two files are listed listed = gcs.list_objects(self.bucket, test_dir) self.assertEqual(len(listed), 2) self.assertIn('foo.json', listed) self.assertIn('bar.json', listed) @unittest.skipIf(QUICK, 'QUICK is set') def test_list_over_1000(self): test_dir = 'td/' for i in range(1010): gcs.upload_data(self.dict_fixtures[0:10], self.bucket, test_dir + str(i) + '.json') listed = gcs.list_objects(self.bucket, test_dir) self.assertEqual(len(listed), 1010) def test_upload(self): json_name = self.upload_json() objects = gcs.list_objects(self.bucket) self.assertIn(json_name, objects) def test_model_upload(self): json_name = self.upload_model_json() objects = gcs.list_objects(self.bucket) self.assertIn(json_name, objects) def test_download_object(self): filename = self.upload_json() downloaded_object = gcs.download_object(self.bucket, filename) # Check that it's a DataDict print "DOWNLOADED" pprint(downloaded_object) self.assertIsInstance(downloaded_object[0], util.DataDict) print "\nFIXTURE" data_dict_fixture = [util.DataDict(fixture) for fixture in self.dict_fixtures] pprint(data_dict_fixture) self.assertTrue(downloaded_object == data_dict_fixture) def test_download_object_model(self): filename = self.upload_model_json() downloaded_object = gcs.download_object(self.bucket, filename, object_class=FixtureModel) # Check that it's a FixtureModel self.assertIsInstance(downloaded_object[0], FixtureModel) # Fudge the dates for d in downloaded_object: d.dt = 
dateutil.parser.parse(d.dt).date() print "DOWNLOADED" pprint([d.__dict__ for d in downloaded_object]) print "\nFIXTURE" pprint([f.__dict__ for f in self.model_fixtures]) self.assertTrue(all([d.__dict__ == f.__dict__ for (d, f) in itertools.izip(downloaded_object, self.model_fixtures)])) def test_download_object_filter_fields(self): filename = self.upload_json() fields = ['id', 'nested.inner'] downloaded_object = gcs.download_object(self.bucket, filename, fields) self.assertEqual(len(downloaded_object), len(self.dict_fixtures)) self.assertIsInstance(downloaded_object[0]['id'], unicode) self.assertIsInstance(downloaded_object[0]['nested']['inner'], int) first_fixture_filtered = util.DataDict( {'id': self.dict_fixtures[0]['id'], 'nested': {'inner': self.dict_fixtures[0]['nested']['inner']}}) self.assertDictEqual(downloaded_object[0], first_fixture_filtered) def test_empty_json_upload(self): item = [] name = 'empty.json' try: gcs.upload_data(item, self.bucket, name) except Exception, e: self.fail('Should not throw exception {}'.format(e)) if __name__ == '__main__': unittest.main()
[]
[]
[ "QUICK" ]
[]
["QUICK"]
python
1
0
scripts/MPI/3_billboard_metrics_test.py
#! /opt/sharcnet/python/2.7.3/bin/python # ####################################### # SUBMISSION # ####################################### # sqsub -r 10m -f xeon -q mpi -o extracting_features_for_billboard -n 2 python ./4_SCRIPTS/MPI/3_billboard_metrics_test.py 0 (8 songs) # sqsub -r 90m -f xeon -q mpi -o extracting_features_for_billboard -n 192 python ./4_SCRIPTS/MPI/3_billboard_metrics_test.py 0 (32 songs) # ################################################################################ # Script for extracting metric features for the amount of scrobbles on each # defined time-zone according to the Billboard Top100 charts. # # E.g., # 14032 Big Girls Don't Cry Fergie 5/5/2007 41 3/29/2008 45 9/8/2007 1 48 18 40c8c738- ... # 8117177 3 1200781349,1208145269,1209626674 # 40933029 6 1328120272,1332187536,1342536119,1342536768,1350714616,1365226575 # ... # ################################################################################ from mpi4py import MPI import sys, os import tarfile, gzip import GVM_classes import Features import tempfile import h5py from optparse import OptionParser rank = MPI.COMM_WORLD.Get_rank() size = MPI.COMM_WORLD.Get_size() name = MPI.Get_processor_name() # print "\nHelloworld! I am process {0} of {1} on {2}".format(rank, size, name) rank_lead_zero = "%02d" % (rank + 1,) TEMP_FOLDER = tempfile.mkdtemp(dir='/scratch/vigliens/scratch') PBS_O_WORKDIR = os.environ["PBS_O_WORKDIR"] SQ_JOBID = os.environ["SQ_JOBID"] if __name__ == "__main__": usage = "usage: %prog [options] factor" opts = OptionParser(usage = usage) opts.add_option('-f', '--hdf5', dest='h5') options, args = opts.parse_args() # Factor to overcome the 256 CPUs limitation. # Each run must have a different factor # E.g., to run the 583 TAR files, it would be # possible to do 256, 256, and 71. 
However # it is more balanced to do 192, 192, and 199; as in # sqsub -r 12m -q mpi -o test_mpi -n 192 python ./4_SCRIPTS/MPI/helloworld.py [0-2] factor = args[0] number_of_songs = 33 # # ############ # # OPTIONS # # ############ # if options.h5 == 'user': # # output_filepath = os.path.join(args[1], 'consolidated_daily_freqs.txt') # pass # elif options.h5 == 'song': # # output_filepath = os.path.join(args[1], 'consolidated_weekly_freqs.txt') # pass # elif options.h5 == 'billboard': # # h5 = h5py.File('/scratch/vigliens/GV/1_LASTFM_DATA/7_HDF5/BILLBOARD/billboard.h5') # pass # else: # 'You must provide a destination for the hdf5 file' # sys.exit() # Extracting song_data from the billboard file: billboard_file = open(PBS_O_WORKDIR + '/4_SCRIPTS/BILLBOARD/top_200_billboard_2005_2011_mbid_2_no_feat.tsv') billboard_lines = billboard_file.readlines() songs_data = [x.strip().split('\t') for x in billboard_lines[1:number_of_songs] if x.strip().split('\t')[11] is not ''] # Printing first line with song metadata first for song_data in songs_data: if rank == 0: # Catches userlogs with no data try: song_mbid = song_data[11] out_file = open(PBS_O_WORKDIR + '/BILLBOARD_DATA_FEATURES/' + song_mbid + '.dat', 'a') out_file.write('\t'.join(song_data)+'\n') print '{0}'.format('\t'.join(song_data)) out_file.close() except Exception, e: print 'EXC1', e, '\n' # Init parameteres for where userfiles are # Creates TAR object and extracts its members to TMP folder input_dir = '/scratch/vigliens/GV/1_LASTFM_DATA/2_ALL_607_GZIP_TAR_2GB' file_list = [] # List of all files in input_dir for root, subFolders, files in os.walk(input_dir): for f in files: file_list.append('/'.join([root,f])) tar_object = tarfile.open('/'.join([file_list[size * int(factor) + rank]])) tar_object.extractall(TEMP_FOLDER) # ########################################################## # Iterate over all files in a TAR, searching for all songs # ########################################################## for file_in_tar in GVM_classes.folder_iterator(TEMP_FOLDER): user_features = Features.ListeningFeatures(file_in_tar) # Initializes object for feature extraction for song_data in songs_data: # Iterate over all songs in the list utc_times_per_song = [] # Catches userlogs with no data try: song_mbid = song_data[11] billboard_dates = [song_data[3], song_data[7], song_data[5]] #debut, peak, exit out_file = open(PBS_O_WORKDIR + '/BILLBOARD_DATA_FEATURES/' + song_mbid + '.dat', 'a') number_scrobbles_per_ranking_zone = user_features.feature_metric_per_ranking_zone(song_mbid, billboard_dates) except Exception, e: print 'EXC2', file_list[size * int(factor) + rank], file_in_tar, e # Print only if results were returned if sum(number_scrobbles_per_ranking_zone) != 0: number_scrobbles_per_ranking_zone_string = [str(l) for l in number_scrobbles_per_ranking_zone] #making a string from list out_file.write('\t'.join([str(user_features.lfid), ','.join(number_scrobbles_per_ranking_zone_string), '\n'])) out_file.close() MPI.Finalize()
[]
[]
[ "PBS_O_WORKDIR", "SQ_JOBID" ]
[]
["PBS_O_WORKDIR", "SQ_JOBID"]
python
2
0
app/interface/main/mcn/dao/bfs/dao_test.go
package bfs import ( "flag" "os" "strings" "testing" "go-common/app/interface/main/mcn/conf" "gopkg.in/h2non/gock.v1" ) var ( d *Dao ) func TestMain(m *testing.M) { if os.Getenv("DEPLOY_ENV") != "" { flag.Set("app_id", "main.archive.mcn-interface") flag.Set("conf_token", "49e4671bafbf93059aeb602685052ca0") flag.Set("tree_id", "58909") flag.Set("conf_version", "docker-1") flag.Set("deploy_env", "uat") flag.Set("conf_host", "config.bilibili.co") flag.Set("conf_path", "/tmp") flag.Set("region", "sh") flag.Set("zone", "sh001") } else { flag.Set("conf", "../../cmd/mcn-interface.toml") } if os.Getenv("UT_LOCAL_TEST") != "" { flag.Set("conf", "../../cmd/mcn-interface.toml") } flag.Parse() if err := conf.Init(); err != nil { panic(err) } d = New(conf.Conf) os.Exit(m.Run()) } func httpMock(method, url string) *gock.Request { r := gock.New(url) r.Method = strings.ToUpper(method) return r }
[ "\"DEPLOY_ENV\"", "\"UT_LOCAL_TEST\"" ]
[]
[ "DEPLOY_ENV", "UT_LOCAL_TEST" ]
[]
["DEPLOY_ENV", "UT_LOCAL_TEST"]
go
2
0
development/tools/pkg/pjtester/pjtester.go
package pjtester import ( "bytes" "flag" "fmt" "io/ioutil" "os" "strconv" "strings" "k8s.io/test-infra/prow/pod-utils/downwardapi" "k8s.io/test-infra/prow/config/secret" "github.com/go-yaml/yaml" "github.com/kyma-project/test-infra/development/tools/pkg/prtagbuilder" "github.com/sirupsen/logrus" "github.com/tidwall/gjson" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/clientcmd" prowapi "k8s.io/test-infra/prow/apis/prowjobs/v1" prowclient "k8s.io/test-infra/prow/client/clientset/versioned" "k8s.io/test-infra/prow/config" prowflagutil "k8s.io/test-infra/prow/flagutil" "k8s.io/test-infra/prow/github" "k8s.io/test-infra/prow/pjutil" ) var ( testCfgFile = fmt.Sprintf("%s/test-infra/vpath/pjtester.yaml", os.Getenv("KYMA_PROJECT_DIR")) envVarsList = []string{"KUBECONFIG_PATH", "KYMA_PROJECT_DIR", "PULL_BASE_REF", "PULL_BASE_SHA", "PULL_NUMBER", "PULL_PULL_SHA", "JOB_SPEC", "REPO_OWNER", "REPO_NAME"} log = logrus.New() ) // Default values for kyma-project/test-infra const ( defaultPjPath = "test-infra/prow/jobs/" defaultConfigPath = "test-infra/prow/config.yaml" defaultMainBranch = "main" ) // pjCfg holds prowjob to test name and path to it's definition. type pjCfg struct { PjName string `yaml:"pjName"` PjPath string `yaml:"pjPath,omitempty"` Report bool `yaml:"report,omitempty"` } // pjCfg holds number of PR to download and fetched details. type prCfg struct { PrNumber int `yaml:"prNumber"` pullRequest github.PullRequest } // prOrg holds pr configs per repository. type prOrg map[string]prCfg // testCfg holds prow config to test path, prowjobs to test names and paths to it's definitions. type testCfg struct { PjNames []pjCfg `yaml:"pjNames"` ConfigPath string `yaml:"configPath,omitempty"` PrConfigs map[string]prOrg `yaml:"prConfigs,omitempty"` } // options holds data about prowjob and pull request to test. type options struct { jobName string configPath string jobConfigPath string baseRef string baseSha string pullNumber int pullSha string pullAuthor string org string repo string github prowflagutil.GitHubOptions githubClient githubClient prFinder *prtagbuilder.GitHubClient pullRequests map[string]prOrg } type githubClient interface { GetPullRequest(org, repo string, number int) (*github.PullRequest, error) GetRef(org, repo, ref string) (string, error) } // checkEnvVars validate if required env variables are set func checkEnvVars(varsList []string) error { for _, evar := range varsList { val, present := os.LookupEnv(evar) if present { if len(val) == 0 { return fmt.Errorf("variable %s is empty", evar) } } else { return fmt.Errorf("variable %s is not set", evar) } } return nil } // newProwK8sClientset is building Prow client for kubeconfig location provided as env variable func newProwK8sClientset() *prowclient.Clientset { k8sConfig, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG_PATH")) if err != nil { log.WithError(err).Fatalf("Failed create config for prow k8s clientset.") } clientset, err := prowclient.NewForConfig(k8sConfig) if err != nil { log.WithError(err).Fatalf("Failed create prow k8s clientset.") } return clientset } // readTestCfg read and validate data from pjtester.yaml file. // It will set default path for prowjobs and config files if not provided in a file. 
func readTestCfg(testCfgFile string) testCfg { var t testCfg yamlFile, err := ioutil.ReadFile(testCfgFile) if err != nil { log.Fatal("Failed read test config file from virtual path KYMA_PROJECT_DIR/test-infra/vpath/pjtester.yaml") } err = yaml.Unmarshal(yamlFile, &t) if err != nil { log.Fatal("Failed unmarshal test config yaml.") } if len(t.PjNames) == 0 { log.WithError(err).Fatalf("PjNames were not provided.") } for index, pj := range t.PjNames { if pj.PjName == "" { log.WithError(err).Fatalf("jobName to test was not provided.") } if pj.PjPath == "" { t.PjNames[index].PjPath = defaultPjPath } } if t.ConfigPath == "" { t.ConfigPath = defaultConfigPath } if len(t.PrConfigs) > 0 { for _, repo := range t.PrConfigs { if len(repo) > 0 { for _, pr := range repo { if pr.PrNumber == 0 { log.WithError(err).Fatalf("Pull request number for repo was not provided.") } } } else { log.WithError(err).Fatalf("Pull request number for repo was not provided.") } } } return t } // getPjCfg is adding prowjob details to the options for triggering prowjob test. func getPjCfg(pjCfg pjCfg, o options) options { // jobName is a name of prowjob to test. It was read from pjtester.yaml file. o.jobName = pjCfg.PjName // jobConfigPath is a location of prow jobs config files to test. It was read from pjtester.yaml file or set to default. o.jobConfigPath = fmt.Sprintf("%s/%s", os.Getenv("KYMA_PROJECT_DIR"), pjCfg.PjPath) return o } // gatherOptions is building common options for all tests. // Options are build from PR env variables and prowjob config read from pjtester.yaml file. func gatherOptions(configPath string, ghOptions prowflagutil.GitHubOptions) options { var o options var err error o.github = ghOptions // configPath is a location of prow config file to test. It was read from pjtester.yaml file or set to default. o.configPath = fmt.Sprintf("%s/%s", os.Getenv("KYMA_PROJECT_DIR"), configPath) // baseRef is a base branch name for github pull request under test. o.baseRef = os.Getenv("PULL_BASE_REF") // baseSha is a git SHA of a base branch for github pull request under test o.baseSha = os.Getenv("PULL_BASE_SHA") // org is a name of organisation of pull request base branch o.org = os.Getenv("REPO_OWNER") // repo is a name of repository of pull request base branch o.repo = os.Getenv("REPO_NAME") // pullNumber is a number of github pull request under test o.pullNumber, err = strconv.Atoi(os.Getenv("PULL_NUMBER")) if err != nil { logrus.WithError(err).Fatalf("could not get pull number from env var PULL_NUMBER") } // pullSha is a SHA of github pull request head under test o.pullSha = os.Getenv("PULL_PULL_SHA") // pullAuthor is an author of github pull request under test o.pullAuthor = gjson.Get(os.Getenv("JOB_SPEC"), "refs.pulls.0.author").String() return o } // withGithubClientOptions will add default flags and values for github client. func (o options) withGithubClientOptions() options { fs := flag.NewFlagSet(os.Args[0], flag.ExitOnError) o.github.AddFlagsWithoutDefaultGitHubTokenPath(fs) _ = fs.Parse(os.Args[1:]) if err := o.github.Validate(false); err != nil { logrus.WithError(err).Fatalf("github options validation failed") } return o } // getPullRequests will download pull requests details from github. 
func (o *options) getPullRequests(t testCfg) { if o.pullRequests == nil { o.pullRequests = make(map[string]prOrg) } for org, repos := range t.PrConfigs { if _, ok := o.pullRequests[org]; !ok { o.pullRequests[org] = prOrg{} } for repo, prcfg := range repos { pr, err := o.githubClient.GetPullRequest(org, repo, prcfg.PrNumber) if err != nil { logrus.WithError(err).Fatalf("failed to fetch PullRequest from GitHub") } prcfg.pullRequest = *pr o.pullRequests[org][repo] = prcfg } } } // genJobSpec will generate job specifications for prowjob to test // For presubmits it will find and download PR details for prowjob Refs, if the PR number for that repo was not provided in pjtester.yaml // All test-infra refs will be set to pull request head SHA for which pjtester is triggered for. func (o *options) genJobSpec(conf *config.Config, name string) (config.JobBase, prowapi.ProwJobSpec) { for fullRepoName, ps := range conf.PresubmitsStatic { org, repo, err := splitRepoName(fullRepoName) if err != nil { logrus.WithError(err).Warnf("Invalid repo name %s.", fullRepoName) continue } for _, p := range ps { if p.Name == o.jobName { pjs := pjutil.PresubmitSpec(p, prowapi.Refs{ Org: org, Repo: repo, }) pjs, err = presubmitRefs(pjs, *o) if err != nil { logrus.WithError(err).Fatalf("failed generate presubmit refs or extrarefs") } return p.JobBase, pjs } } } for fullRepoName, ps := range conf.PostsubmitsStatic { org, repo, err := splitRepoName(fullRepoName) if err != nil { logrus.WithError(err).Warnf("invalid repo name %s", fullRepoName) continue } for _, p := range ps { if p.Name == o.jobName { pjs := pjutil.PostsubmitSpec(p, prowapi.Refs{ Org: org, Repo: repo, }) pjs, err = postsubmitRefs(pjs, *o) if err != nil { logrus.WithError(err).Fatalf("failed generate postsubmit refs and extrarefs") } return p.JobBase, pjs } } } for _, p := range conf.Periodics { if p.Name == o.jobName { var err error pjs := pjutil.PeriodicSpec(p) pjs, err = periodicRefs(pjs, *o) if err != nil { logrus.WithError(err).Fatalf("failed generate periodic extrarefs") } return p.JobBase, pjs } } return config.JobBase{}, prowapi.ProwJobSpec{} } // splitRepoName will extract org and repo names. func splitRepoName(repo string) (string, string, error) { s := strings.SplitN(repo, "/", 2) if len(s) != 2 { return "", "", fmt.Errorf("repo %s cannot be split into org/repo", repo) } return s[0], s[1], nil } // setPrHeadSHA set pull request head details for provided refs. func setPrHeadSHA(refs *prowapi.Refs, o options) { refs.BaseSHA = o.baseSha refs.BaseRef = o.baseRef refs.Pulls = []prowapi.Pull{{ Author: o.pullAuthor, Number: o.pullNumber, SHA: o.pullSha, }} } // matchRefPR will add pull request details to ExtraRefs. func (o *options) matchRefPR(ref *prowapi.Refs) bool { if pr, present := o.pullRequests[ref.Org][ref.Repo]; present { ref.Pulls = []prowapi.Pull{{ Author: pr.pullRequest.User.Login, Number: pr.PrNumber, SHA: pr.pullRequest.Head.SHA, }} ref.BaseSHA = pr.pullRequest.Base.SHA ref.BaseRef = pr.pullRequest.Base.Ref return true } return false } // submitRefs build prowjob refs and extrarefs according. // It ensure, refs for test-infra is set to details of pull request fro which pjtester was triggered. // It ensures refs contains pull requests details for presubmit jobs. // It ensures details of pull request numbers provided in pjtester.yaml are set for respecting refs or extra refs. 
func presubmitRefs(pjs prowapi.ProwJobSpec, opt options) (prowapi.ProwJobSpec, error) { // If prowjob specification refs point to test infra repo, add test-infra PR refs because we are going to test code from this PR. if pjs.Refs.Org == opt.org && pjs.Refs.Repo == opt.repo { // set refs with details of tested PR setPrHeadSHA(pjs.Refs, opt) //Add PR details to ExtraRefs if PR number was provided in pjtester.yaml for index, ref := range pjs.ExtraRefs { matched := opt.matchRefPR(&ref) if matched { pjs.ExtraRefs[index] = ref } } return pjs, nil } // If prowjob specification refs point to another repo. if pjs.Refs.Org != opt.org || pjs.Refs.Repo != opt.repo { //Check if PR number for prowjob specification refs was provided in pjtester.yaml. if !opt.matchRefPR(pjs.Refs) { // If PR number not provided set BaseRef to main pjs.Refs.BaseRef = defaultMainBranch // get latest PR number for BaseRef branch and use it to set extra refs jobSpec := &downwardapi.JobSpec{Refs: pjs.Refs} branchPrAsString, err := prtagbuilder.BuildPrTag(jobSpec, true, true, opt.prFinder) if err != nil { fmt.Printf("level=info msg=failed get pr number for main branch head, using master\n") jobSpec.Refs.BaseRef = "master" branchPrAsString, err = prtagbuilder.BuildPrTag(jobSpec, true, true, opt.prFinder) if err != nil { return pjs, fmt.Errorf("could not get pr number for branch head, got error: %w", err) } } branchPR, err := strconv.Atoi(branchPrAsString) if err != nil { return pjs, fmt.Errorf("failed converting pr number string to integer, got error: %w", err) } opt.getPullRequests(testCfg{PrConfigs: map[string]prOrg{pjs.Refs.Org: {pjs.Refs.Repo: prCfg{PrNumber: branchPR}}}}) opt.matchRefPR(pjs.Refs) } // Set PR refs for prowjob ExtraRefs if PR number provided in pjtester.yaml. for index, ref := range pjs.ExtraRefs { // If ExtraRefs ref points to test-infra, use refs from tested PR. if ref.Org == opt.org && ref.Repo == opt.repo { setPrHeadSHA(&ref, opt) // If for prowjob specification refs was provided PR number in pjtester.yaml, keep test-infra refs in ExtraRefs. Otherwise swap with current prowjob refs. pjs.ExtraRefs[index] = ref } else { matchedExtraRef := opt.matchRefPR(&ref) if matchedExtraRef { pjs.ExtraRefs[index] = ref } } } } return pjs, nil } func postsubmitRefs(pjs prowapi.ProwJobSpec, opt options) (prowapi.ProwJobSpec, error) { // If prowjob specification refs point to test infra repo, add test-infra PR refs because we are going to test code from this PR. if pjs.Refs.Org == opt.org && pjs.Refs.Repo == opt.repo { setPrHeadSHA(pjs.Refs, opt) //Add PR details to ExtraRefs if PR number was provided in pjtester.yaml for index, ref := range pjs.ExtraRefs { if opt.matchRefPR(&ref) { pjs.ExtraRefs[index] = ref } } return pjs, nil } // If prowjob specification refs point to another repo. if pjs.Refs.Org != opt.org || pjs.Refs.Repo != opt.repo { //Check if PR number for prowjob specification refs was provided in pjtester.yaml. matched := opt.matchRefPR(pjs.Refs) if !matched { // If PR number not provided set BaseRef to main pjs.Refs.BaseRef = defaultMainBranch fakeJobSpec := &downwardapi.JobSpec{Refs: pjs.Refs} _, err := prtagbuilder.BuildPrTag(fakeJobSpec, true, true, opt.prFinder) if err != nil { fmt.Printf("level=info msg=failed get pr number for main branch head, using master\n") pjs.Refs.BaseRef = "master" } } // Set PR refs for prowjob ExtraRefs if PR number provided in pjtester.yaml. for index, ref := range pjs.ExtraRefs { // If ExtraRefs ref points to test-infra, use refs from tested PR. 
if ref.Org == opt.org && ref.Repo == opt.repo { setPrHeadSHA(&ref, opt) // If for prowjob specification refs was provided PR number in pjtester.yaml, keep test-infra refs in ExtraRefs. Otherwise swap with current prowjob refs. pjs.ExtraRefs[index] = ref } else { matchedExtraRef := opt.matchRefPR(&ref) if matchedExtraRef { pjs.ExtraRefs[index] = ref } } } } return pjs, nil } // periodicRefs set pull request head SHA for test-infra extra refs. // Periodics are not bound to any repo so there is no prowjob refs. func periodicRefs(pjs prowapi.ProwJobSpec, opt options) (prowapi.ProwJobSpec, error) { for index, ref := range pjs.ExtraRefs { if ref.Org == opt.org && ref.Repo == opt.repo { setPrHeadSHA(&ref, opt) pjs.ExtraRefs[index] = ref } else { matched := opt.matchRefPR(&ref) if matched { pjs.ExtraRefs[index] = ref } } } return pjs, nil } // formatPjName builds and formats testing prowjobname to match gcp cluster labels restrictions. func formatPjName(pullAuthor, pjName string) string { fullName := fmt.Sprintf("%s_test_of_prowjob_%s", pullAuthor, pjName) formated := strings.ToLower(fullName) // Cut prowjob name to not exceed 63 bytes. if len(formated) > 63 { runes := bytes.Runes([]byte(formated)) for i := len(runes); i > 2; i-- { if len(string(runes[:i])) <= 63 { return string(runes[:i]) } } } return formated } // newTestPJ is building a prowjob definition for test func newTestPJ(pjCfg pjCfg, opt options) prowapi.ProwJob { o := getPjCfg(pjCfg, opt) conf, err := config.Load(o.configPath, o.jobConfigPath) if err != nil { logrus.WithError(err).Fatal("Error loading prow config") } job, pjs := o.genJobSpec(conf, o.jobName) if job.Name == "" { logrus.Fatalf("Job %s not found.", o.jobName) } // Building prowjob based on generated job specifications. pj := pjutil.NewProwJob(pjs, job.Labels, job.Annotations) // Add prefix to prowjob to test name. pj.Spec.Job = formatPjName(opt.pullAuthor, pj.Spec.Job) // Make sure prowjob to test will run on untrusted-workload cluster. pj.Spec.Cluster = "untrusted-workload" if pjCfg.Report { pj.Spec.Report = true } else { pj.Spec.ReporterConfig = &prowapi.ReporterConfig{Slack: &prowapi.SlackReporterConfig{Channel: "kyma-prow-dev-null"}} } return pj } // SchedulePJ will generate prowjob for testing and schedule it on prow for execution. 
func SchedulePJ(ghOptions prowflagutil.GitHubOptions) { log.SetOutput(os.Stdout) log.SetLevel(logrus.InfoLevel) var err error if err := checkEnvVars(envVarsList); err != nil { logrus.WithError(err).Fatalf("Required environment variable not set.") } testCfg := readTestCfg(testCfgFile) o := gatherOptions(testCfg.ConfigPath, ghOptions) prowClient := newProwK8sClientset() pjsClient := prowClient.ProwV1() var secretAgent *secret.Agent if o.github.TokenPath != "" { secretAgent = &secret.Agent{} if err := secretAgent.Start([]string{o.github.TokenPath}); err != nil { logrus.WithError(err).Fatal("Failed to start secret agent") } } o.githubClient, err = o.github.GitHubClient(secretAgent, false) if err != nil { logrus.WithError(err).Fatal("Failed to get GitHub client") } o.prFinder = prtagbuilder.NewGitHubClient(nil) var testPrCfg *map[string]prOrg //if testPrCfg = &testCfg.PrConfigs; testPrCfg != nil && !o.prFetched { if testPrCfg = &testCfg.PrConfigs; testPrCfg != nil { o.getPullRequests(testCfg) } for _, pjCfg := range testCfg.PjNames { pj := newTestPJ(pjCfg, o) result, err := pjsClient.ProwJobs(metav1.NamespaceDefault).Create(&pj) if err != nil { log.WithError(err).Fatalf("Failed schedule test of prowjob") } fmt.Printf("##########\nProwjob %s is %s\n##########\n", pj.Spec.Job, result.Status.State) } }
[ "\"KYMA_PROJECT_DIR\"", "\"KUBECONFIG_PATH\"", "\"KYMA_PROJECT_DIR\"", "\"KYMA_PROJECT_DIR\"", "\"PULL_BASE_REF\"", "\"PULL_BASE_SHA\"", "\"REPO_OWNER\"", "\"REPO_NAME\"", "\"PULL_NUMBER\"", "\"PULL_PULL_SHA\"", "\"JOB_SPEC\"" ]
[]
[ "PULL_BASE_SHA", "PULL_BASE_REF", "PULL_NUMBER", "PULL_PULL_SHA", "KYMA_PROJECT_DIR", "KUBECONFIG_PATH", "REPO_NAME", "REPO_OWNER", "JOB_SPEC" ]
[]
["PULL_BASE_SHA", "PULL_BASE_REF", "PULL_NUMBER", "PULL_PULL_SHA", "KYMA_PROJECT_DIR", "KUBECONFIG_PATH", "REPO_NAME", "REPO_OWNER", "JOB_SPEC"]
go
9
0
yapgBot.go
package main import ( "crypto/rand" "log" "os" "strings" tgbotapi "gopkg.in/telegram-bot-api.v4" ) const ( numbers string = `23456789` uCaseLetter string = `ABCDEFGHJKLMNPRSTUVWXYZ` lCaseLetter string = `abcdefghikmnopqrstuvwxyz` dictionary string = numbers + uCaseLetter + lCaseLetter dicLen int = len(dictionary) ) func main() { bot, err := tgbotapi.NewBotAPI(os.Getenv("BOTTOKEN")) if err != nil { log.Fatal(err) } // bot.Debug = true log.Printf("Authorized on account %s", bot.Self.UserName) update := tgbotapi.NewUpdate(0) update.Timeout = 60 err = botman(bot, update) if err != nil { log.Fatal(err) } } func botman(b *tgbotapi.BotAPI, u tgbotapi.UpdateConfig) error { const defaultMessage = `Choose your destiny!` keyboard := tgbotapi.NewKeyboardButtonRow( tgbotapi.NewKeyboardButton(`/easy`), tgbotapi.NewKeyboardButton(`/hard`)) updates, err := b.GetUpdatesChan(u) if err != nil { return err } for update := range updates { chatID := update.Message.Chat.ID text := update.Message.Text if b.Debug == true { log.Printf("[%s] %s", update.Message.From.UserName, text) } switch text { case "/start": { msg := tgbotapi.NewMessage(chatID, defaultMessage) msg.ReplyMarkup = tgbotapi.NewReplyKeyboard(keyboard) b.Send(msg) } case "/stop": { msg := tgbotapi.NewMessage(chatID, `SeeYa!`) msg.ReplyMarkup = tgbotapi.ReplyKeyboardHide{HideKeyboard: true} b.Send(msg) } case "/easy": { msg := tgbotapi.NewMessage(chatID, genPass(10)) b.Send(msg) } case "/hard": { msg := tgbotapi.NewMessage(chatID, genPass(20)) b.Send(msg) } default: { msg := tgbotapi.NewMessage(chatID, defaultMessage) b.Send(msg) } } } return nil } func genPass(level byte) string { bytes := make([]byte, level) rand.Read(bytes) var result string for checkPass(result) == false { for i, b := range bytes { bytes[i] = dictionary[b%byte(dicLen)] log.Printf("b = %v, bytes[i] = %v or %s", b, bytes[i], string(bytes[i])) } result = string(bytes) } return result } func checkPass(result string) (b bool) { b = strings.ContainsAny(result, numbers) b = b && strings.ContainsAny(result, uCaseLetter) b = b && strings.ContainsAny(result, lCaseLetter) return }
[ "\"BOTTOKEN\"" ]
[]
[ "BOTTOKEN" ]
[]
["BOTTOKEN"]
go
1
0
providers/fitbit/fitbit_test.go
package fitbit_test import ( "os" "testing" "github.com/roscopecoltran/goth" "github.com/roscopecoltran/goth/providers/fitbit" "github.com/stretchr/testify/assert" ) func provider() *fitbit.Provider { return fitbit.New(os.Getenv("FITBIT_KEY"), os.Getenv("FITBIT_SECRET"), "/foo", "user") } func Test_New(t *testing.T) { t.Parallel() a := assert.New(t) p := provider() a.Equal(p.ClientKey, os.Getenv("FITBIT_KEY")) a.Equal(p.Secret, os.Getenv("FITBIT_SECRET")) a.Equal(p.CallbackURL, "/foo") } func Test_ImplementsProvider(t *testing.T) { t.Parallel() a := assert.New(t) a.Implements((*goth.Provider)(nil), provider()) } func Test_BeginAuth(t *testing.T) { t.Parallel() a := assert.New(t) p := provider() session, err := p.BeginAuth("test_state") s := session.(*fitbit.Session) a.NoError(err) a.Contains(s.AuthURL, "www.fitbit.com/oauth2/authorize") } func Test_SessionFromJSON(t *testing.T) { t.Parallel() a := assert.New(t) p := provider() session, err := p.UnmarshalSession(`{"AuthURL":"https://www.fitbit.com/oauth2/authorize","AccessToken":"1234567890","UserID":"abc"}`) a.NoError(err) s := session.(*fitbit.Session) a.Equal(s.AuthURL, "https://www.fitbit.com/oauth2/authorize") a.Equal(s.AccessToken, "1234567890") a.Equal(s.UserID, "abc") }
[ "\"FITBIT_KEY\"", "\"FITBIT_SECRET\"", "\"FITBIT_KEY\"", "\"FITBIT_SECRET\"" ]
[]
[ "FITBIT_SECRET", "FITBIT_KEY" ]
[]
["FITBIT_SECRET", "FITBIT_KEY"]
go
2
0
salt/modules/cmdmod.py
# -*- coding: utf-8 -*- ''' A module for shelling out Keep in mind that this module is insecure, in that it can give whomever has access to the master root execution access to all salt minions. ''' # Import python libs import time import functools import glob import logging import os import shutil import subprocess import sys import traceback from salt.utils import vt # Import salt libs import salt.utils import salt.utils.timed_subprocess import salt.grains.extra from salt._compat import string_types from salt.exceptions import CommandExecutionError, TimedProcTimeoutError from salt.log import LOG_LEVELS # Only available on POSIX systems, nonfatal on windows try: import pwd except ImportError: pass # Define the module's virtual name __virtualname__ = 'cmd' # Set up logging log = logging.getLogger(__name__) DEFAULT_SHELL = salt.grains.extra.shell()['shell'] def __virtual__(): ''' Overwriting the cmd python module makes debugging modules with pdb a bit harder so lets do it this way instead. ''' return __virtualname__ def _chroot_pids(chroot): pids = [] for root in glob.glob('/proc/[0-9]*/root'): try: link = os.path.realpath(root) if link.startswith(chroot): pids.append(int(os.path.basename( os.path.dirname(root) ))) except OSError: pass return pids def _render_cmd(cmd, cwd, template, saltenv='base'): ''' If template is a valid template engine, process the cmd and cwd through that engine. ''' if not template: return (cmd, cwd) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( 'Attempted to render file paths with unavailable engine ' '{0}'.format(template) ) kwargs = {} kwargs['salt'] = __salt__ kwargs['pillar'] = __pillar__ kwargs['grains'] = __grains__ kwargs['opts'] = __opts__ kwargs['saltenv'] = saltenv def _render(contents): # write out path to temp file tmp_path_fn = salt.utils.mkstemp() with salt.utils.fopen(tmp_path_fn, 'w+') as fp_: fp_.write(contents) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( tmp_path_fn, to_str=True, **kwargs ) salt.utils.safe_rm(tmp_path_fn) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to cmd with error: {0}'.format( data['data'] ) ) else: return data['data'] cmd = _render(cmd) cwd = _render(cwd) return (cmd, cwd) def _check_loglevel(level='info', quiet=False): ''' Retrieve the level code for use in logging.Logger.log(). ''' def _bad_level(level): log.error( 'Invalid output_loglevel {0!r}. Valid levels are: {1}. Falling ' 'back to \'info\'.' 
.format( level, ', '.join( sorted(LOG_LEVELS, key=LOG_LEVELS.get, reverse=True) ) ) ) return LOG_LEVELS['info'] try: level = level.lower() if level not in LOG_LEVELS: return _bad_level(level) except AttributeError: return _bad_level(level) if salt.utils.is_true(quiet) or level == 'quiet': return None return LOG_LEVELS[level] def _parse_env(env): if not env: env = {} if isinstance(env, list): env = salt.utils.repack_dictlist(env) if not isinstance(env, dict): env = {} return env def _run(cmd, cwd=None, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, output_loglevel='debug', quiet=False, runas=None, shell=DEFAULT_SHELL, python_shell=True, env=None, clean_env=False, rstrip=True, template=None, umask=None, timeout=None, with_communicate=True, reset_system_locale=True, saltenv='base', use_vt=False): ''' Do the DRY thing and only call subprocess.Popen() once ''' if salt.utils.is_true(quiet): salt.utils.warn_until( 'Lithium', 'The \'quiet\' option is deprecated and will be removed in the ' '\'Lithium\' Salt release. Please use output_loglevel=quiet ' 'instead.' ) if _is_valid_shell(shell) is False: log.warning( 'Attempt to run a shell command with what may be an invalid shell! ' 'Check to ensure that the shell <{0}> is valid for this user.' .format(shell)) # Set the default working directory to the home directory of the user # salt-minion is running as. Defaults to home directory of user under which # the minion is running. if not cwd: cwd = os.path.expanduser('~{0}'.format('' if not runas else runas)) # make sure we can access the cwd # when run from sudo or another environment where the euid is # changed ~ will expand to the home of the original uid and # the euid might not have access to it. See issue #1844 if not os.access(cwd, os.R_OK): cwd = '/' if salt.utils.is_windows(): cwd = os.tempnam()[:3] else: # Handle edge cases where numeric/other input is entered, and would be # yaml-ified into non-string types cwd = str(cwd) if not salt.utils.is_windows(): if not os.path.isfile(shell) or not os.access(shell, os.X_OK): msg = 'The shell {0} is not available'.format(shell) raise CommandExecutionError(msg) if shell.lower().strip() == 'powershell': # If we were called by script(), then fakeout the Windows # shell to run a Powershell script. # Else just run a Powershell command. stack = traceback.extract_stack(limit=2) # extract_stack() returns a list of tuples. # The last item in the list [-1] is the current method. # The third item[2] in each tuple is the name of that method. if stack[-2][2] == 'script': cmd = 'Powershell -executionpolicy bypass -File ' + cmd else: cmd = 'Powershell "{0}"'.format(cmd.replace('"', '\\"')) # munge the cmd and cwd through the template (cmd, cwd) = _render_cmd(cmd, cwd, template, saltenv) ret = {} env = _parse_env(env) for bad_env_key in (x for x, y in env.iteritems() if y is None): log.error('Environment variable {0!r} passed without a value. ' 'Setting value to an empty string'.format(bad_env_key)) env[bad_env_key] = '' if runas and salt.utils.is_windows(): # TODO: Figure out the proper way to do this in windows msg = 'Sorry, {0} does not support runas functionality' raise CommandExecutionError(msg.format(__grains__['os'])) if runas: # Save the original command before munging it try: pwd.getpwnam(runas) except KeyError: raise CommandExecutionError( 'User {0!r} is not available'.format(runas) ) try: # Getting the environment for the runas user # There must be a better way to do this. 
py_code = '''import os, itertools; print \"\\0\".join(itertools.chain(*os.environ.items()))''' if __grains__['os'] in ['MacOS', 'Darwin']: env_cmd = ('sudo -i -u {0} -- "{1}"' ).format(runas, sys.executable) elif __grains__['os'] in ['FreeBSD']: env_cmd = ('su - {1} -c "{0} -c \'{2}\'"' ).format(shell, runas, sys.executable) else: env_cmd = ('su -s {0} - {1} -c "{2}"' ).format(shell, runas, sys.executable) env_encoded = subprocess.Popen( env_cmd, shell=python_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE ).communicate(py_code)[0] import itertools env_runas = dict(itertools.izip(*[iter(env_encoded.split(b'\0'))]*2)) env_runas.update(env) env = env_runas # Encode unicode kwargs to filesystem encoding to avoid a # UnicodeEncodeError when the subprocess is invoked. fse = sys.getfilesystemencoding() for key, val in env.iteritems(): if isinstance(val, unicode): env[key] = val.encode(fse) except ValueError: raise CommandExecutionError( 'Environment could not be retrieved for User {0!r}'.format( runas ) ) if _check_loglevel(output_loglevel, quiet) is not None: # Always log the shell commands at INFO unless quiet logging is # requested. The command output is what will be controlled by the # 'loglevel' parameter. log.info( 'Executing command {0!r} {1}in directory {2!r}'.format( cmd, 'as user {0!r} '.format(runas) if runas else '', cwd ) ) if reset_system_locale is True: if not salt.utils.is_windows(): # Default to C! # Salt only knows how to parse English words # Don't override if the user has passed LC_ALL env.setdefault('LC_ALL', 'C') else: # On Windows set the codepage to US English. cmd = 'chcp 437 > nul & ' + cmd if clean_env: run_env = env else: run_env = os.environ.copy() run_env.update(env) kwargs = {'cwd': cwd, 'shell': python_shell, 'env': run_env, 'stdin': str(stdin) if stdin is not None else stdin, 'stdout': stdout, 'stderr': stderr, 'with_communicate': with_communicate} if umask: try: _umask = int(str(umask).lstrip('0'), 8) if not _umask: raise ValueError('Zero umask not allowed.') except ValueError: msg = 'Invalid umask: \'{0}\''.format(umask) raise CommandExecutionError(msg) else: _umask = None if runas or umask: kwargs['preexec_fn'] = functools.partial( salt.utils.chugid_and_umask, runas, _umask) if not salt.utils.is_windows(): # close_fds is not supported on Windows platforms if you redirect # stdin/stdout/stderr if kwargs['shell'] is True: kwargs['executable'] = shell kwargs['close_fds'] = True if not os.path.isabs(cwd) or not os.path.isdir(cwd): raise CommandExecutionError( 'Specified cwd {0!r} either not absolute or does not exist' .format(cwd) ) if not use_vt: # This is where the magic happens try: proc = salt.utils.timed_subprocess.TimedProc(cmd, **kwargs) except (OSError, IOError) as exc: raise CommandExecutionError( 'Unable to run command {0!r} with the context {1!r}, reason: {2}' .format(cmd, kwargs, exc) ) try: proc.wait(timeout) except TimedProcTimeoutError as exc: ret['stdout'] = str(exc) ret['stderr'] = '' ret['retcode'] = None ret['pid'] = proc.process.pid # ok return code for timeouts? 
ret['retcode'] = 1 return ret out, err = proc.stdout, proc.stderr if rstrip: if out is not None: out = out.rstrip() if err is not None: err = err.rstrip() ret['pid'] = proc.process.pid ret['retcode'] = proc.process.returncode ret['stdout'] = out ret['stderr'] = err else: to = '' if timeout: to = ' (timeout: {0}s)'.format(timeout) log.debug('Running {0} in VT{1}'.format(cmd, to)) stdout, stderr = '', '' now = time.time() if timeout: will_timeout = now + timeout else: will_timeout = -1 try: proc = vt.Terminal(cmd, shell=True, log_stdout=True, log_stderr=True, cwd=cwd, user=runas, umask=umask, env=env, log_stdin_level=output_loglevel, log_stdout_level=output_loglevel, log_stderr_level=output_loglevel, stream_stdout=True, stream_stderr=True) # consume output finished = False ret['pid'] = proc.pid while not finished: try: try: time.sleep(0.5) try: cstdout, cstderr = proc.recv() except IOError: cstdout, cstderr = '', '' if cstdout: stdout += cstdout else: cstdout = '' if cstderr: stderr += cstderr else: cstderr = '' if not cstdout and not cstderr and not proc.isalive(): finished = True if timeout and (time.time() > will_timeout): ret['stderr'] = ( 'SALT: Timeout after {0}s\n{1}').format( timeout, stderr) ret['retcode'] = None break except KeyboardInterrupt: ret['stderr'] = 'SALT: User break\n{0}'.format(stderr) ret['retcode'] = 1 break except vt.TerminalException as exc: log.error( 'VT: {0}'.format(exc), exc_info_on_loglevel=logging.DEBUG) ret = {'retcode': 1, 'pid': '2'} break # only set stdout on sucess as we already mangled in other # cases ret['stdout'] = stdout if finished: ret['stderr'] = stderr ret['retcode'] = proc.exitstatus ret['pid'] = proc.pid finally: proc.close(terminate=True, kill=True) try: __context__['retcode'] = ret['retcode'] except NameError: # Ignore the context error during grain generation pass return ret def _run_quiet(cmd, cwd=None, stdin=None, runas=None, shell=DEFAULT_SHELL, python_shell=True, env=None, template=None, umask=None, timeout=None, reset_system_locale=True, saltenv='base'): ''' Helper for running commands quietly for minion startup ''' return _run(cmd, runas=runas, cwd=cwd, stdin=stdin, stderr=subprocess.STDOUT, output_loglevel='quiet', shell=shell, python_shell=python_shell, env=env, template=template, umask=umask, timeout=timeout, reset_system_locale=reset_system_locale, saltenv=saltenv)['stdout'] def _run_all_quiet(cmd, cwd=None, stdin=None, runas=None, shell=DEFAULT_SHELL, python_shell=True, env=None, template=None, umask=None, timeout=None, reset_system_locale=True, saltenv='base'): ''' Helper for running commands quietly for minion startup. Returns a dict of return data ''' return _run(cmd, runas=runas, cwd=cwd, stdin=stdin, shell=shell, python_shell=python_shell, env=env, output_loglevel='quiet', template=template, umask=umask, timeout=timeout, reset_system_locale=reset_system_locale, saltenv=saltenv) def run(cmd, cwd=None, stdin=None, runas=None, shell=DEFAULT_SHELL, python_shell=True, env=None, clean_env=False, template=None, rstrip=True, umask=None, output_loglevel='debug', quiet=False, timeout=None, reset_system_locale=True, ignore_retcode=False, saltenv='base', use_vt=False, **kwargs): ''' Execute the passed command and return the output as a string Note that ``env`` represents the environment variables for the command, and should be formatted as a dict, or a YAML string which resolves to a dict. CLI Example: .. 
code-block:: bash salt '*' cmd.run "ls -l | awk '/foo/{print \\$2}'" The template arg can be set to 'jinja' or another supported template engine to render the command arguments before execution. For example: .. code-block:: bash salt '*' cmd.run template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'" Specify an alternate shell with the shell parameter: .. code-block:: bash salt '*' cmd.run "Get-ChildItem C:\\ " shell='powershell' A string of standard input can be specified for the command to be run using the ``stdin`` parameter. This can be useful in cases where sensitive information must be read from standard input.: .. code-block:: bash salt '*' cmd.run "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' If an equal sign (``=``) appears in an argument to a Salt command it is interpreted as a keyword argument in the format ``key=val``. That processing can be bypassed in order to pass an equal sign through to the remote shell command by manually specifying the kwarg: .. code-block:: bash salt '*' cmd.run cmd='sed -e s/=/:/g' ''' ret = _run(cmd, runas=runas, shell=shell, python_shell=python_shell, cwd=cwd, stdin=stdin, stderr=subprocess.STDOUT, env=env, clean_env=clean_env, template=template, rstrip=rstrip, umask=umask, output_loglevel=output_loglevel, quiet=quiet, timeout=timeout, reset_system_locale=reset_system_locale, saltenv=saltenv, use_vt=use_vt) if 'pid' in ret and '__pub_jid' in kwargs: # Stuff the child pid in the JID file proc_dir = os.path.join(__opts__['cachedir'], 'proc') jid_file = os.path.join(proc_dir, kwargs['__pub_jid']) if os.path.isfile(jid_file): serial = salt.payload.Serial(__opts__) with salt.utils.fopen(jid_file, 'rb') as fn_: jid_dict = serial.load(fn_) if 'child_pids' in jid_dict: jid_dict['child_pids'].append(ret['pid']) else: jid_dict['child_pids'] = [ret['pid']] # Rewrite file with salt.utils.fopen(jid_file, 'w+b') as fn_: fn_.write(serial.dumps(jid_dict)) lvl = _check_loglevel(output_loglevel, quiet) if lvl is not None: if not ignore_retcode and ret['retcode'] != 0: if lvl < LOG_LEVELS['error']: lvl = LOG_LEVELS['error'] log.error( 'Command {0!r} failed with return code: {1}' .format(cmd, ret['retcode']) ) log.log(lvl, 'output: {0}'.format(ret['stdout'])) return ret['stdout'] def run_stdout(cmd, cwd=None, stdin=None, runas=None, shell=DEFAULT_SHELL, python_shell=True, env=None, clean_env=False, template=None, rstrip=True, umask=None, output_loglevel='debug', quiet=False, timeout=None, reset_system_locale=True, ignore_retcode=False, saltenv='base', use_vt=False, **kwargs): ''' Execute a command, and only return the standard out Note that ``env`` represents the environment variables for the command, and should be formatted as a dict, or a YAML string which resolves to a dict. CLI Example: .. code-block:: bash salt '*' cmd.run_stdout "ls -l | awk '/foo/{print \\$2}'" The template arg can be set to 'jinja' or another supported template engine to render the command arguments before execution. For example: .. code-block:: bash salt '*' cmd.run_stdout template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'" A string of standard input can be specified for the command to be run using the ``stdin`` parameter. This can be useful in cases where sensitive information must be read from standard input.: .. 
code-block:: bash salt '*' cmd.run_stdout "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' ''' ret = _run(cmd, runas=runas, cwd=cwd, stdin=stdin, shell=shell, python_shell=python_shell, env=env, clean_env=clean_env, template=template, rstrip=rstrip, umask=umask, output_loglevel=output_loglevel, quiet=quiet, timeout=timeout, reset_system_locale=reset_system_locale, saltenv=saltenv, use_vt=use_vt) lvl = _check_loglevel(output_loglevel, quiet) if lvl is not None: if not ignore_retcode and ret['retcode'] != 0: if lvl < LOG_LEVELS['error']: lvl = LOG_LEVELS['error'] log.error( 'Command {0!r} failed with return code: {1}' .format(cmd, ret['retcode']) ) if ret['stdout']: log.log(lvl, 'stdout: {0}'.format(ret['stdout'])) if ret['stderr']: log.log(lvl, 'stderr: {0}'.format(ret['stderr'])) if ret['retcode']: log.log(lvl, 'retcode: {0}'.format(ret['retcode'])) return ret['stdout'] def run_stderr(cmd, cwd=None, stdin=None, runas=None, shell=DEFAULT_SHELL, python_shell=True, env=None, clean_env=False, template=None, rstrip=True, umask=None, output_loglevel='debug', quiet=False, timeout=None, reset_system_locale=True, ignore_retcode=False, saltenv='base', use_vt=False, **kwargs): ''' Execute a command and only return the standard error Note that ``env`` represents the environment variables for the command, and should be formatted as a dict, or a YAML string which resolves to a dict. CLI Example: .. code-block:: bash salt '*' cmd.run_stderr "ls -l | awk '/foo/{print \\$2}'" The template arg can be set to 'jinja' or another supported template engine to render the command arguments before execution. For example: .. code-block:: bash salt '*' cmd.run_stderr template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'" A string of standard input can be specified for the command to be run using the ``stdin`` parameter. This can be useful in cases where sensitive information must be read from standard input.: .. code-block:: bash salt '*' cmd.run_stderr "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' ''' ret = _run(cmd, runas=runas, cwd=cwd, stdin=stdin, shell=shell, python_shell=python_shell, env=env, clean_env=clean_env, template=template, rstrip=rstrip, umask=umask, output_loglevel=output_loglevel, quiet=quiet, timeout=timeout, reset_system_locale=reset_system_locale, use_vt=use_vt, saltenv=saltenv) lvl = _check_loglevel(output_loglevel, quiet) if lvl is not None: if not ignore_retcode and ret['retcode'] != 0: if lvl < LOG_LEVELS['error']: lvl = LOG_LEVELS['error'] log.error( 'Command {0!r} failed with return code: {1}' .format(cmd, ret['retcode']) ) if ret['stdout']: log.log(lvl, 'stdout: {0}'.format(ret['stdout'])) if ret['stderr']: log.log(lvl, 'stderr: {0}'.format(ret['stderr'])) if ret['retcode']: log.log(lvl, 'retcode: {0}'.format(ret['retcode'])) return ret['stderr'] def run_all(cmd, cwd=None, stdin=None, runas=None, shell=DEFAULT_SHELL, python_shell=True, env=None, clean_env=False, template=None, rstrip=True, umask=None, output_loglevel='debug', quiet=False, timeout=None, reset_system_locale=True, ignore_retcode=False, saltenv='base', use_vt=False, **kwargs): ''' Execute the passed command and return a dict of return data Note that ``env`` represents the environment variables for the command, and should be formatted as a dict, or a YAML string which resolves to a dict. CLI Example: .. code-block:: bash salt '*' cmd.run_all "ls -l | awk '/foo/{print \\$2}'" The template arg can be set to 'jinja' or another supported template engine to render the command arguments before execution. 
For example: .. code-block:: bash salt '*' cmd.run_all template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'" A string of standard input can be specified for the command to be run using the ``stdin`` parameter. This can be useful in cases where sensitive information must be read from standard input.: .. code-block:: bash salt '*' cmd.run_all "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' ''' ret = _run(cmd, runas=runas, cwd=cwd, stdin=stdin, shell=shell, python_shell=python_shell, env=env, clean_env=clean_env, template=template, rstrip=rstrip, umask=umask, output_loglevel=output_loglevel, quiet=quiet, timeout=timeout, reset_system_locale=reset_system_locale, saltenv=saltenv, use_vt=use_vt) lvl = _check_loglevel(output_loglevel, quiet) if lvl is not None: if not ignore_retcode and ret['retcode'] != 0: if lvl < LOG_LEVELS['error']: lvl = LOG_LEVELS['error'] log.error( 'Command {0!r} failed with return code: {1}' .format(cmd, ret['retcode']) ) if ret['stdout']: log.log(lvl, 'stdout: {0}'.format(ret['stdout'])) if ret['stderr']: log.log(lvl, 'stderr: {0}'.format(ret['stderr'])) if ret['retcode']: log.log(lvl, 'retcode: {0}'.format(ret['retcode'])) return ret def retcode(cmd, cwd=None, stdin=None, runas=None, shell=DEFAULT_SHELL, python_shell=True, env=None, clean_env=False, template=None, umask=None, output_loglevel='debug', quiet=False, timeout=None, reset_system_locale=True, ignore_retcode=False, saltenv='base', use_vt=False, **kwargs): ''' Execute a shell command and return the command's return code. Note that ``env`` represents the environment variables for the command, and should be formatted as a dict, or a YAML string which resolves to a dict. CLI Example: .. code-block:: bash salt '*' cmd.retcode "file /bin/bash" The template arg can be set to 'jinja' or another supported template engine to render the command arguments before execution. For example: .. code-block:: bash salt '*' cmd.retcode template=jinja "file {{grains.pythonpath[0]}}/python" A string of standard input can be specified for the command to be run using the ``stdin`` parameter. This can be useful in cases where sensitive information must be read from standard input.: .. code-block:: bash salt '*' cmd.retcode "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' ''' ret = _run(cmd, runas=runas, cwd=cwd, stdin=stdin, stderr=subprocess.STDOUT, shell=shell, python_shell=python_shell, env=env, clean_env=clean_env, template=template, umask=umask, output_loglevel=output_loglevel, quiet=quiet, timeout=timeout, reset_system_locale=reset_system_locale, saltenv=saltenv, use_vt=use_vt) lvl = _check_loglevel(output_loglevel, quiet) if lvl is not None: if not ignore_retcode and ret['retcode'] != 0: if lvl < LOG_LEVELS['error']: lvl = LOG_LEVELS['error'] log.error( 'Command {0!r} failed with return code: {1}' .format(cmd, ret['retcode']) ) log.log(lvl, 'output: {0}'.format(ret['stdout'])) return ret['retcode'] def _retcode_quiet(cmd, cwd=None, stdin=None, runas=None, shell=DEFAULT_SHELL, python_shell=True, env=None, clean_env=False, template=None, umask=None, output_loglevel='quiet', quiet=True, timeout=None, reset_system_locale=True, ignore_retcode=False, saltenv='base', use_vt=False, **kwargs): ''' Helper for running commands quietly for minion startup. 
Returns same as retcode ''' return retcode(cmd, cwd=cwd, stdin=stdin, runas=runas, shell=shell, python_shell=python_shell, env=env, clean_env=clean_env, template=template, umask=umask, output_loglevel=output_loglevel, timeout=timeout, reset_system_locale=reset_system_locale, ignore_retcode=ignore_retcode, saltenv=saltenv, use_vt=use_vt, **kwargs) def script(source, args=None, cwd=None, stdin=None, runas=None, shell=DEFAULT_SHELL, python_shell=True, env=None, template=None, umask=None, output_loglevel='debug', quiet=False, timeout=None, reset_system_locale=True, __env__=None, saltenv='base', use_vt=False, **kwargs): ''' Download a script from a remote location and execute the script locally. The script can be located on the salt master file server or on an HTTP/FTP server. The script will be executed directly, so it can be written in any available programming language. The script can also be formatted as a template, the default is jinja. Arguments for the script can be specified as well. CLI Example: .. code-block:: bash salt '*' cmd.script salt://scripts/runme.sh salt '*' cmd.script salt://scripts/runme.sh 'arg1 arg2 "arg 3"' salt '*' cmd.script salt://scripts/windows_task.ps1 args=' -Input c:\\tmp\\infile.txt' shell='powershell' A string of standard input can be specified for the command to be run using the ``stdin`` parameter. This can be useful in cases where sensitive information must be read from standard input.: .. code-block:: bash salt '*' cmd.script salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' ''' def _cleanup_tempfile(path): try: os.remove(path) except (IOError, OSError) as exc: log.error('cmd.script: Unable to clean tempfile {0!r}: {1}' .format(path, exc)) if isinstance(__env__, string_types): salt.utils.warn_until( 'Boron', 'Passing a salt environment should be done using \'saltenv\' not ' '\'__env__\'. This functionality will be removed in Salt Boron.' ) # Backwards compatibility saltenv = __env__ path = salt.utils.mkstemp(dir=cwd, suffix=os.path.splitext(source)[1]) if template: fn_ = __salt__['cp.get_template'](source, path, template, saltenv, **kwargs) if not fn_: _cleanup_tempfile(path) return {'pid': 0, 'retcode': 1, 'stdout': '', 'stderr': '', 'cache_error': True} else: fn_ = __salt__['cp.cache_file'](source, saltenv) if not fn_: _cleanup_tempfile(path) return {'pid': 0, 'retcode': 1, 'stdout': '', 'stderr': '', 'cache_error': True} shutil.copyfile(fn_, path) if not salt.utils.is_windows(): os.chmod(path, 320) os.chown(path, __salt__['file.user_to_uid'](runas), -1) ret = _run(path + ' ' + str(args) if args else path, cwd=cwd, stdin=stdin, output_loglevel=output_loglevel, quiet=quiet, runas=runas, shell=shell, python_shell=python_shell, env=env, umask=umask, timeout=timeout, reset_system_locale=reset_system_locale, saltenv=saltenv, use_vt=use_vt) _cleanup_tempfile(path) return ret def script_retcode(source, cwd=None, stdin=None, runas=None, shell=DEFAULT_SHELL, python_shell=True, env=None, template='jinja', umask=None, timeout=None, reset_system_locale=True, __env__=None, saltenv='base', output_loglevel='debug', use_vt=False, **kwargs): ''' Download a script from a remote location and execute the script locally. The script can be located on the salt master file server or on an HTTP/FTP server. The script will be executed directly, so it can be written in any available programming language. The script can also be formatted as a template, the default is jinja. Only evaluate the script return code and do not block for terminal output CLI Example: .. 
code-block:: bash salt '*' cmd.script_retcode salt://scripts/runme.sh A string of standard input can be specified for the command to be run using the ``stdin`` parameter. This can be useful in cases where sensitive information must be read from standard input.: .. code-block:: bash salt '*' cmd.script_retcode salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' ''' if isinstance(__env__, string_types): salt.utils.warn_until( 'Boron', 'Passing a salt environment should be done using \'saltenv\' not ' '\'env\'. This functionality will be removed in Salt Boron.' ) # Backwards compatibility saltenv = __env__ return script(source=source, cwd=cwd, stdin=stdin, runas=runas, shell=shell, python_shell=python_shell, env=env, template=template, umask=umask, timeout=timeout, reset_system_locale=reset_system_locale, saltenv=saltenv, output_loglevel=output_loglevel, use_vt=use_vt, **kwargs)['retcode'] def which(cmd): ''' Returns the path of an executable available on the minion, None otherwise CLI Example: .. code-block:: bash salt '*' cmd.which cat ''' return salt.utils.which(cmd) def which_bin(cmds): ''' Returns the first command found in a list of commands CLI Example: .. code-block:: bash salt '*' cmd.which_bin '[pip2, pip, pip-python]' ''' return salt.utils.which_bin(cmds) def has_exec(cmd): ''' Returns true if the executable is available on the minion, false otherwise CLI Example: .. code-block:: bash salt '*' cmd.has_exec cat ''' return which(cmd) is not None def exec_code(lang, code, cwd=None): ''' Pass in two strings, the first naming the executable language, aka - python2, python3, ruby, perl, lua, etc. the second string containing the code you wish to execute. The stdout and stderr will be returned CLI Example: .. code-block:: bash salt '*' cmd.exec_code ruby 'puts "cheese"' ''' codefile = salt.utils.mkstemp() with salt.utils.fopen(codefile, 'w+t') as fp_: fp_.write(code) cmd = [lang, codefile] ret = run(cmd, cwd=cwd, python_shell=False) os.remove(codefile) return ret def tty(device, echo=None): ''' Echo a string to a specific tty CLI Example: .. code-block:: bash salt '*' cmd.tty tty0 'This is a test' salt '*' cmd.tty pts3 'This is a test' ''' if device.startswith('tty'): teletype = '/dev/{0}'.format(device) elif device.startswith('pts'): teletype = '/dev/{0}'.format(device.replace('pts', 'pts/')) else: return {'Error': 'The specified device is not a valid TTY'} try: with salt.utils.fopen(teletype, 'wb') as tty_device: tty_device.write(echo) return { 'Success': 'Message was successfully echoed to {0}'.format(teletype) } except IOError: return { 'Error': 'Echoing to {0} returned error'.format(teletype) } def run_chroot(root, cmd): ''' .. versionadded:: 2014.7.0 This function runs :mod:`cmd.run_all <salt.modules.cmdmod.run_all>` wrapped within a chroot, with dev and proc mounted in the chroot CLI Example: .. 
code-block:: bash salt '*' cmd.run_chroot /var/lib/lxc/container_name/rootfs 'sh /tmp/bootstrap.sh' ''' __salt__['mount.mount']( os.path.join(root, 'dev'), 'udev', fstype='devtmpfs') __salt__['mount.mount']( os.path.join(root, 'proc'), 'proc', fstype='proc') # Execute chroot routine sh_ = '/bin/sh' if os.path.isfile(os.path.join(root, 'bin/bash')): sh_ = '/bin/bash' cmd = 'chroot {0} {1} -c {2!r}'.format( root, sh_, cmd) res = run_all(cmd, output_loglevel='quiet') # Kill processes running in the chroot for i in range(6): pids = _chroot_pids(root) if not pids: break for pid in pids: # use sig 15 (TERM) for first 3 attempts, then 9 (KILL) sig = 15 if i < 3 else 9 os.kill(pid, sig) if _chroot_pids(root): log.error('Processes running in chroot could not be killed, ' 'filesystem will remain mounted') __salt__['mount.umount'](os.path.join(root, 'proc')) __salt__['mount.umount'](os.path.join(root, 'dev')) log.info(res) return res def _is_valid_shell(shell): ''' Attempts to search for valid shells on a system and see if a given shell is in the list ''' if salt.utils.is_windows(): return True # Don't even try this for Windows shells = '/etc/shells' available_shells = [] if os.path.exists(shells): try: with salt.utils.fopen(shells, 'r') as shell_fp: lines = shell_fp.read().splitlines() for line in lines: if line.startswith('#'): continue else: available_shells.append(line) except OSError: return True else: # No known method of determining available shells return None if shell in available_shells: return True else: return False
[]
[]
[]
[]
[]
python
0
0
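Note on the cmdmod.py record above: the part of `_run` that the environment fields care about is how it recovers a target user's login environment, by having a child Python process print NUL-separated NAME/VALUE pairs and then pairing them back into a dict. Below is a minimal Python 3 sketch of that round trip, run against the current interpreter instead of through `su`/`sudo` so it stays self-contained; the helper name `snapshot_environment` is invented for illustration and is not part of the Salt module.

import subprocess
import sys

# Child program: emit every NAME and VALUE from os.environ separated by NUL
# bytes, so values containing spaces or newlines survive the round trip.
PY_CODE = ("import os, sys, itertools;"
           "sys.stdout.write('\\0'.join(itertools.chain(*os.environ.items())))")

def snapshot_environment():
    """Return the environment seen by a child interpreter as a dict (sketch only)."""
    encoded = subprocess.run([sys.executable, "-c", PY_CODE],
                             capture_output=True, check=True).stdout
    fields = encoded.decode(errors="replace").split("\0")
    pairs = iter(fields)
    # zip(pairs, pairs) couples consecutive NAME, VALUE entries, mirroring the
    # izip(*[iter(...)]*2) construct in the Python 2 code above.
    return dict(zip(pairs, pairs))

if __name__ == "__main__":
    env = snapshot_environment()
    print(len(env), "variables captured, e.g.", sorted(env)[:3])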
main.go
package main import ( "fmt" "github.com/THUNDERGROOVE/SDETool/sde" "github.com/gorilla/mux" "log" "net/http" "os" ) var ( SDE *sde.SDE Dev = false ) func init() { if _, err := os.Stat(".git"); err == nil { log.Println("Git data found. Running in development mode") Dev = true } if !Dev { os.Chdir(os.Getenv("OPENSHIFT_DATA_DIR")) } if Dev == false { log.Printf("In non-development environment. Unpacking assets") UnpackAssets() } log.Println("Parsing templates") ParseTemplates() log.Printf("SDE Search %v@%v", Version, Branch) log.Println("Loading SDE related things") var err error SDE, err = sde.Load("dust.sde") if err != nil { log.Fatalf("Couldn't open SDE file:( %v", err.Error()) } } func main() { m := mux.NewRouter() m.HandleFunc("/", HandlerIndex) m.HandleFunc("/info", HandlerInfo) m.HandleFunc("/search", HandlerSearch) m.HandleFunc("/type/{TypeID:[0-9]+}", HandlerType) m.HandleFunc("/store", HandlerStoreView) m.HandleFunc("/error", HandlerTestPassError) // Devel stuff m.HandleFunc("/dev/reload", HandlerReload) m.PathPrefix("/public/").Handler(http.StripPrefix("/public/", http.FileServer(http.Dir("public/")))) log.Println("Starting http server.") var host string var port string if !Dev { host = os.Getenv("HOST") port = os.Getenv("PORT") } http.ListenAndServe(fmt.Sprintf("%v:%v", host, port), m) }
[ "\"OPENSHIFT_DATA_DIR\"", "\"HOST\"", "\"PORT\"" ]
[]
[ "PORT", "HOST", "OPENSHIFT_DATA_DIR" ]
[]
["PORT", "HOST", "OPENSHIFT_DATA_DIR"]
go
3
0
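The main.go record above pairs the source with metadata derived from its os.Getenv calls (environment, constarg, constargjson, constargcount). How those fields were actually computed is not documented in this dump; purely as an illustration, the Python sketch below pulls the constant string arguments out of os.Getenv with a regex and rebuilds similar fields. The helper name, the first-seen ordering, and the JSON shape are assumptions and may not match the real extraction pipeline.

import json
import re

# String literals passed directly to os.Getenv("...") in Go source.
GETENV_CONST = re.compile(r'os\.Getenv\(\s*"([^"]+)"\s*\)')

def extract_const_env_args(go_source):
    """Rebuild constarg-style fields from Go source (illustrative only)."""
    seen = []
    for name in GETENV_CONST.findall(go_source):
        if name not in seen:          # dedupe, keep first-seen order
            seen.append(name)
    return {
        "constarg": seen,
        "constargcount": len(seen),
        "constargjson": json.dumps(seen),
    }

if __name__ == "__main__":
    sample = 'host := os.Getenv("HOST")\nport := os.Getenv("PORT")'
    print(extract_const_env_args(sample))
    # {'constarg': ['HOST', 'PORT'], 'constargcount': 2, 'constargjson': '["HOST", "PORT"]'}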
poky-dunfell/meta/lib/oeqa/selftest/cases/wic.py
# # Copyright (c) 2015, Intel Corporation. # # SPDX-License-Identifier: GPL-2.0-only # # AUTHORS # Ed Bartosh <[email protected]> """Test cases for wic.""" import os import sys import unittest from glob import glob from shutil import rmtree, copy from functools import wraps, lru_cache from tempfile import NamedTemporaryFile from oeqa.selftest.case import OESelftestTestCase from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars, runqemu @lru_cache(maxsize=32) def get_host_arch(recipe): """A cached call to get_bb_var('HOST_ARCH', <recipe>)""" return get_bb_var('HOST_ARCH', recipe) def only_for_arch(archs, image='core-image-minimal'): """Decorator for wrapping test cases that can be run only for specific target architectures. A list of compatible architectures is passed in `archs`. Current architecture will be determined by parsing bitbake output for `image` recipe. """ def wrapper(func): @wraps(func) def wrapped_f(*args, **kwargs): arch = get_host_arch(image) if archs and arch not in archs: raise unittest.SkipTest("Testcase arch dependency not met: %s" % arch) return func(*args, **kwargs) wrapped_f.__name__ = func.__name__ return wrapped_f return wrapper def extract_files(debugfs_output): """ extract file names from the output of debugfs -R 'ls -p', which looks like this: /2/040755/0/0/.//\n /2/040755/0/0/..//\n /11/040700/0/0/lost+found^M//\n /12/040755/1002/1002/run//\n /13/040755/1002/1002/sys//\n /14/040755/1002/1002/bin//\n /80/040755/1002/1002/var//\n /92/040755/1002/1002/tmp//\n """ # NOTE the occasional ^M in file names return [line.split('/')[5].strip() for line in \ debugfs_output.strip().split('/\n')] def files_own_by_root(debugfs_output): for line in debugfs_output.strip().split('/\n'): if line.split('/')[3:5] != ['0', '0']: print(debugfs_output) return False return True class WicTestCase(OESelftestTestCase): """Wic test class.""" image_is_ready = False wicenv_cache = {} def setUpLocal(self): """This code is executed before each test method.""" self.resultdir = self.builddir + "/wic-tmp/" super(WicTestCase, self).setUpLocal() # Do this here instead of in setUpClass as the base setUp does some # clean up which can result in the native tools built earlier in # setUpClass being unavailable. 
if not WicTestCase.image_is_ready: if get_bb_var('USE_NLS') == 'yes': bitbake('wic-tools') else: self.skipTest('wic-tools cannot be built due its (intltool|gettext)-native dependency and NLS disable') bitbake('core-image-minimal') bitbake('core-image-minimal-mtdutils') WicTestCase.image_is_ready = True rmtree(self.resultdir, ignore_errors=True) def tearDownLocal(self): """Remove resultdir as it may contain images.""" rmtree(self.resultdir, ignore_errors=True) super(WicTestCase, self).tearDownLocal() def _get_image_env_path(self, image): """Generate and obtain the path to <image>.env""" if image not in WicTestCase.wicenv_cache: self.assertEqual(0, bitbake('%s -c do_rootfs_wicenv' % image).status) bb_vars = get_bb_vars(['STAGING_DIR', 'MACHINE'], image) stdir = bb_vars['STAGING_DIR'] machine = bb_vars['MACHINE'] WicTestCase.wicenv_cache[image] = os.path.join(stdir, machine, 'imgdata') return WicTestCase.wicenv_cache[image] class Wic(WicTestCase): def test_version(self): """Test wic --version""" runCmd('wic --version') def test_help(self): """Test wic --help and wic -h""" runCmd('wic --help') runCmd('wic -h') def test_createhelp(self): """Test wic create --help""" runCmd('wic create --help') def test_listhelp(self): """Test wic list --help""" runCmd('wic list --help') def test_help_create(self): """Test wic help create""" runCmd('wic help create') def test_help_list(self): """Test wic help list""" runCmd('wic help list') def test_help_overview(self): """Test wic help overview""" runCmd('wic help overview') def test_help_plugins(self): """Test wic help plugins""" runCmd('wic help plugins') def test_help_kickstart(self): """Test wic help kickstart""" runCmd('wic help kickstart') def test_list_images(self): """Test wic list images""" runCmd('wic list images') def test_list_source_plugins(self): """Test wic list source-plugins""" runCmd('wic list source-plugins') def test_listed_images_help(self): """Test wic listed images help""" output = runCmd('wic list images').output imagelist = [line.split()[0] for line in output.splitlines()] for image in imagelist: runCmd('wic list %s help' % image) def test_unsupported_subcommand(self): """Test unsupported subcommand""" self.assertNotEqual(0, runCmd('wic unsupported', ignore_status=True).status) def test_no_command(self): """Test wic without command""" self.assertEqual(1, runCmd('wic', ignore_status=True).status) def test_build_image_name(self): """Test wic create wictestdisk --image-name=core-image-minimal""" cmd = "wic create wictestdisk --image-name=core-image-minimal -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct"))) @only_for_arch(['i586', 'i686', 'x86_64']) def test_gpt_image(self): """Test creation of core-image-minimal with gpt table and UUID boot""" cmd = "wic create directdisk-gpt --image-name core-image-minimal -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct"))) @only_for_arch(['i586', 'i686', 'x86_64']) def test_iso_image(self): """Test creation of hybrid iso image with legacy and EFI boot""" config = 'INITRAMFS_IMAGE = "core-image-minimal-initramfs"\n'\ 'MACHINE_FEATURES_append = " efi"\n'\ 'DEPENDS_pn-core-image-minimal += "syslinux"\n' self.append_config(config) bitbake('core-image-minimal core-image-minimal-initramfs') self.remove_config(config) cmd = "wic create mkhybridiso --image-name core-image-minimal -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "HYBRID_ISO_IMG-*.direct"))) 
self.assertEqual(1, len(glob(self.resultdir + "HYBRID_ISO_IMG-*.iso"))) @only_for_arch(['i586', 'i686', 'x86_64']) def test_qemux86_directdisk(self): """Test creation of qemux-86-directdisk image""" cmd = "wic create qemux86-directdisk -e core-image-minimal -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "qemux86-directdisk-*direct"))) @only_for_arch(['i586', 'i686', 'x86_64']) def test_mkefidisk(self): """Test creation of mkefidisk image""" cmd = "wic create mkefidisk -e core-image-minimal -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "mkefidisk-*direct"))) @only_for_arch(['i586', 'i686', 'x86_64']) def test_bootloader_config(self): """Test creation of directdisk-bootloader-config image""" config = 'DEPENDS_pn-core-image-minimal += "syslinux"\n' self.append_config(config) bitbake('core-image-minimal') self.remove_config(config) cmd = "wic create directdisk-bootloader-config -e core-image-minimal -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "directdisk-bootloader-config-*direct"))) @only_for_arch(['i586', 'i686', 'x86_64']) def test_systemd_bootdisk(self): """Test creation of systemd-bootdisk image""" config = 'MACHINE_FEATURES_append = " efi"\n' self.append_config(config) bitbake('core-image-minimal') self.remove_config(config) cmd = "wic create systemd-bootdisk -e core-image-minimal -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "systemd-bootdisk-*direct"))) def test_sdimage_bootpart(self): """Test creation of sdimage-bootpart image""" cmd = "wic create sdimage-bootpart -e core-image-minimal -o %s" % self.resultdir kimgtype = get_bb_var('KERNEL_IMAGETYPE', 'core-image-minimal') self.write_config('IMAGE_BOOT_FILES = "%s"\n' % kimgtype) runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "sdimage-bootpart-*direct"))) @only_for_arch(['i586', 'i686', 'x86_64']) def test_default_output_dir(self): """Test default output location""" for fname in glob("directdisk-*.direct"): os.remove(fname) config = 'DEPENDS_pn-core-image-minimal += "syslinux"\n' self.append_config(config) bitbake('core-image-minimal') self.remove_config(config) cmd = "wic create directdisk -e core-image-minimal" runCmd(cmd) self.assertEqual(1, len(glob("directdisk-*.direct"))) @only_for_arch(['i586', 'i686', 'x86_64']) def test_build_artifacts(self): """Test wic create directdisk providing all artifacts.""" bb_vars = get_bb_vars(['STAGING_DATADIR', 'RECIPE_SYSROOT_NATIVE'], 'wic-tools') bb_vars.update(get_bb_vars(['DEPLOY_DIR_IMAGE', 'IMAGE_ROOTFS'], 'core-image-minimal')) bbvars = {key.lower(): value for key, value in bb_vars.items()} bbvars['resultdir'] = self.resultdir runCmd("wic create directdisk " "-b %(staging_datadir)s " "-k %(deploy_dir_image)s " "-n %(recipe_sysroot_native)s " "-r %(image_rootfs)s " "-o %(resultdir)s" % bbvars) self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct"))) def test_compress_gzip(self): """Test compressing an image with gzip""" runCmd("wic create wictestdisk " "--image-name core-image-minimal " "-c gzip -o %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct.gz"))) def test_compress_bzip2(self): """Test compressing an image with bzip2""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-c bzip2 -o %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct.bz2"))) def test_compress_xz(self): """Test compressing an image with xz""" runCmd("wic 
create wictestdisk " "--image-name=core-image-minimal " "--compress-with=xz -o %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct.xz"))) def test_wrong_compressor(self): """Test how wic breaks if wrong compressor is provided""" self.assertEqual(2, runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-c wrong -o %s" % self.resultdir, ignore_status=True).status) def test_debug_short(self): """Test -D option""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-D -o %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct"))) def test_debug_long(self): """Test --debug option""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "--debug -o %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct"))) def test_skip_build_check_short(self): """Test -s option""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-s -o %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct"))) def test_skip_build_check_long(self): """Test --skip-build-check option""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "--skip-build-check " "--outdir %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct"))) def test_build_rootfs_short(self): """Test -f option""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-f -o %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct"))) def test_build_rootfs_long(self): """Test --build-rootfs option""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "--build-rootfs " "--outdir %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct"))) @only_for_arch(['i586', 'i686', 'x86_64']) def test_rootfs_indirect_recipes(self): """Test usage of rootfs plugin with rootfs recipes""" runCmd("wic create directdisk-multi-rootfs " "--image-name=core-image-minimal " "--rootfs rootfs1=core-image-minimal " "--rootfs rootfs2=core-image-minimal " "--outdir %s" % self.resultdir) self.assertEqual(1, len(glob(self.resultdir + "directdisk-multi-rootfs*.direct"))) @only_for_arch(['i586', 'i686', 'x86_64']) def test_rootfs_artifacts(self): """Test usage of rootfs plugin with rootfs paths""" bb_vars = get_bb_vars(['STAGING_DATADIR', 'RECIPE_SYSROOT_NATIVE'], 'wic-tools') bb_vars.update(get_bb_vars(['DEPLOY_DIR_IMAGE', 'IMAGE_ROOTFS'], 'core-image-minimal')) bbvars = {key.lower(): value for key, value in bb_vars.items()} bbvars['wks'] = "directdisk-multi-rootfs" bbvars['resultdir'] = self.resultdir runCmd("wic create %(wks)s " "--bootimg-dir=%(staging_datadir)s " "--kernel-dir=%(deploy_dir_image)s " "--native-sysroot=%(recipe_sysroot_native)s " "--rootfs-dir rootfs1=%(image_rootfs)s " "--rootfs-dir rootfs2=%(image_rootfs)s " "--outdir %(resultdir)s" % bbvars) self.assertEqual(1, len(glob(self.resultdir + "%(wks)s-*.direct" % bbvars))) def test_exclude_path(self): """Test --exclude-path wks option.""" oldpath = os.environ['PATH'] os.environ['PATH'] = get_bb_var("PATH", "wic-tools") try: wks_file = 'temp.wks' with open(wks_file, 'w') as wks: rootfs_dir = get_bb_var('IMAGE_ROOTFS', 'core-image-minimal') wks.write(""" part / --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path usr part /usr --source rootfs --ondisk mmcblk0 --fstype=ext4 --rootfs-dir %s/usr part /etc --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path bin/ 
--rootfs-dir %s/usr""" % (rootfs_dir, rootfs_dir)) runCmd("wic create %s -e core-image-minimal -o %s" \ % (wks_file, self.resultdir)) os.remove(wks_file) wicout = glob(self.resultdir + "%s-*direct" % 'temp') self.assertEqual(1, len(wicout)) wicimg = wicout[0] # verify partition size with wic res = runCmd("parted -m %s unit b p 2>/dev/null" % wicimg) # parse parted output which looks like this: # BYT;\n # /var/tmp/wic/build/tmpfwvjjkf_-201611101222-hda.direct:200MiB:file:512:512:msdos::;\n # 1:0.00MiB:200MiB:200MiB:ext4::;\n partlns = res.output.splitlines()[2:] self.assertEqual(3, len(partlns)) for part in [1, 2, 3]: part_file = os.path.join(self.resultdir, "selftest_img.part%d" % part) partln = partlns[part-1].split(":") self.assertEqual(7, len(partln)) start = int(partln[1].rstrip("B")) / 512 length = int(partln[3].rstrip("B")) / 512 runCmd("dd if=%s of=%s skip=%d count=%d" % (wicimg, part_file, start, length)) # Test partition 1, should contain the normal root directories, except # /usr. res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % \ os.path.join(self.resultdir, "selftest_img.part1")) files = extract_files(res.output) self.assertIn("etc", files) self.assertNotIn("usr", files) # Partition 2, should contain common directories for /usr, not root # directories. res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % \ os.path.join(self.resultdir, "selftest_img.part2")) files = extract_files(res.output) self.assertNotIn("etc", files) self.assertNotIn("usr", files) self.assertIn("share", files) # Partition 3, should contain the same as partition 2, including the bin # directory, but not the files inside it. res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % \ os.path.join(self.resultdir, "selftest_img.part3")) files = extract_files(res.output) self.assertNotIn("etc", files) self.assertNotIn("usr", files) self.assertIn("share", files) self.assertIn("bin", files) res = runCmd("debugfs -R 'ls -p bin' %s 2>/dev/null" % \ os.path.join(self.resultdir, "selftest_img.part3")) files = extract_files(res.output) self.assertIn(".", files) self.assertIn("..", files) self.assertEqual(2, len(files)) for part in [1, 2, 3]: part_file = os.path.join(self.resultdir, "selftest_img.part%d" % part) os.remove(part_file) finally: os.environ['PATH'] = oldpath def test_include_path(self): """Test --include-path wks option.""" oldpath = os.environ['PATH'] os.environ['PATH'] = get_bb_var("PATH", "wic-tools") try: include_path = os.path.join(self.resultdir, 'test-include') os.makedirs(include_path) with open(os.path.join(include_path, 'test-file'), 'w') as t: t.write("test\n") wks_file = os.path.join(include_path, 'temp.wks') with open(wks_file, 'w') as wks: rootfs_dir = get_bb_var('IMAGE_ROOTFS', 'core-image-minimal') wks.write(""" part /part1 --source rootfs --ondisk mmcblk0 --fstype=ext4 part /part2 --source rootfs --ondisk mmcblk0 --fstype=ext4 --include-path %s""" % (include_path)) runCmd("wic create %s -e core-image-minimal -o %s" \ % (wks_file, self.resultdir)) part1 = glob(os.path.join(self.resultdir, 'temp-*.direct.p1'))[0] part2 = glob(os.path.join(self.resultdir, 'temp-*.direct.p2'))[0] # Test partition 1, should not contain 'test-file' res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % (part1)) files = extract_files(res.output) self.assertNotIn('test-file', files) # Test partition 2, should not contain 'test-file' res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % (part2)) files = extract_files(res.output) self.assertIn('test-file', files) finally: os.environ['PATH'] = oldpath def 
test_exclude_path_errors(self): """Test --exclude-path wks option error handling.""" wks_file = 'temp.wks' # Absolute argument. with open(wks_file, 'w') as wks: wks.write("part / --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path /usr") self.assertNotEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \ % (wks_file, self.resultdir), ignore_status=True).status) os.remove(wks_file) # Argument pointing to parent directory. with open(wks_file, 'w') as wks: wks.write("part / --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path ././..") self.assertNotEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \ % (wks_file, self.resultdir), ignore_status=True).status) os.remove(wks_file) def test_permissions(self): """Test permissions are respected""" # prepare wicenv and rootfs bitbake('core-image-minimal core-image-minimal-mtdutils -c do_rootfs_wicenv') oldpath = os.environ['PATH'] os.environ['PATH'] = get_bb_var("PATH", "wic-tools") t_normal = """ part / --source rootfs --fstype=ext4 """ t_exclude = """ part / --source rootfs --fstype=ext4 --exclude-path=home """ t_multi = """ part / --source rootfs --ondisk sda --fstype=ext4 part /export --source rootfs --rootfs=core-image-minimal-mtdutils --fstype=ext4 """ t_change = """ part / --source rootfs --ondisk sda --fstype=ext4 --exclude-path=etc/    part /etc --source rootfs --fstype=ext4 --change-directory=etc """ tests = [t_normal, t_exclude, t_multi, t_change] try: for test in tests: include_path = os.path.join(self.resultdir, 'test-include') os.makedirs(include_path) wks_file = os.path.join(include_path, 'temp.wks') with open(wks_file, 'w') as wks: wks.write(test) runCmd("wic create %s -e core-image-minimal -o %s" \ % (wks_file, self.resultdir)) for part in glob(os.path.join(self.resultdir, 'temp-*.direct.p*')): res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % (part)) self.assertEqual(True, files_own_by_root(res.output)) config = 'IMAGE_FSTYPES += "wic"\nWKS_FILE = "%s"\n' % wks_file self.append_config(config) bitbake('core-image-minimal') tmpdir = os.path.join(get_bb_var('WORKDIR', 'core-image-minimal'),'build-wic') # check each partition for permission for part in glob(os.path.join(tmpdir, 'temp-*.direct.p*')): res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % (part)) self.assertTrue(files_own_by_root(res.output) ,msg='Files permission incorrect using wks set "%s"' % test) # clean config and result directory for next cases self.remove_config(config) rmtree(self.resultdir, ignore_errors=True) finally: os.environ['PATH'] = oldpath def test_change_directory(self): """Test --change-directory wks option.""" oldpath = os.environ['PATH'] os.environ['PATH'] = get_bb_var("PATH", "wic-tools") try: include_path = os.path.join(self.resultdir, 'test-include') os.makedirs(include_path) wks_file = os.path.join(include_path, 'temp.wks') with open(wks_file, 'w') as wks: wks.write("part /etc --source rootfs --fstype=ext4 --change-directory=etc") runCmd("wic create %s -e core-image-minimal -o %s" \ % (wks_file, self.resultdir)) part1 = glob(os.path.join(self.resultdir, 'temp-*.direct.p1'))[0] res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % (part1)) files = extract_files(res.output) self.assertIn('passwd', files) finally: os.environ['PATH'] = oldpath def test_change_directory_errors(self): """Test --change-directory wks option error handling.""" wks_file = 'temp.wks' # Absolute argument. 
with open(wks_file, 'w') as wks: wks.write("part / --source rootfs --fstype=ext4 --change-directory /usr") self.assertNotEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \ % (wks_file, self.resultdir), ignore_status=True).status) os.remove(wks_file) # Argument pointing to parent directory. with open(wks_file, 'w') as wks: wks.write("part / --source rootfs --fstype=ext4 --change-directory ././..") self.assertNotEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \ % (wks_file, self.resultdir), ignore_status=True).status) os.remove(wks_file) class Wic2(WicTestCase): def test_bmap_short(self): """Test generation of .bmap file -m option""" cmd = "wic create wictestdisk -e core-image-minimal -m -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct"))) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct.bmap"))) def test_bmap_long(self): """Test generation of .bmap file --bmap option""" cmd = "wic create wictestdisk -e core-image-minimal --bmap -o %s" % self.resultdir runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct"))) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct.bmap"))) def test_image_env(self): """Test generation of <image>.env files.""" image = 'core-image-minimal' imgdatadir = self._get_image_env_path(image) bb_vars = get_bb_vars(['IMAGE_BASENAME', 'WICVARS'], image) basename = bb_vars['IMAGE_BASENAME'] self.assertEqual(basename, image) path = os.path.join(imgdatadir, basename) + '.env' self.assertTrue(os.path.isfile(path)) wicvars = set(bb_vars['WICVARS'].split()) # filter out optional variables wicvars = wicvars.difference(('DEPLOY_DIR_IMAGE', 'IMAGE_BOOT_FILES', 'INITRD', 'INITRD_LIVE', 'ISODIR','INITRAMFS_IMAGE', 'INITRAMFS_IMAGE_BUNDLE', 'INITRAMFS_LINK_NAME', 'APPEND')) with open(path) as envfile: content = dict(line.split("=", 1) for line in envfile) # test if variables used by wic present in the .env file for var in wicvars: self.assertTrue(var in content, "%s is not in .env file" % var) self.assertTrue(content[var]) def test_image_vars_dir_short(self): """Test image vars directory selection -v option""" image = 'core-image-minimal' imgenvdir = self._get_image_env_path(image) native_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "wic-tools") runCmd("wic create wictestdisk " "--image-name=%s -v %s -n %s -o %s" % (image, imgenvdir, native_sysroot, self.resultdir)) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct"))) def test_image_vars_dir_long(self): """Test image vars directory selection --vars option""" image = 'core-image-minimal' imgenvdir = self._get_image_env_path(image) native_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "wic-tools") runCmd("wic create wictestdisk " "--image-name=%s " "--vars %s " "--native-sysroot %s " "--outdir %s" % (image, imgenvdir, native_sysroot, self.resultdir)) self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct"))) @only_for_arch(['i586', 'i686', 'x86_64']) def test_wic_image_type(self): """Test building wic images by bitbake""" config = 'IMAGE_FSTYPES += "wic"\nWKS_FILE = "wic-image-minimal"\n'\ 'MACHINE_FEATURES_append = " efi"\n' self.append_config(config) self.assertEqual(0, bitbake('wic-image-minimal').status) self.remove_config(config) bb_vars = get_bb_vars(['DEPLOY_DIR_IMAGE', 'MACHINE']) deploy_dir = bb_vars['DEPLOY_DIR_IMAGE'] machine = bb_vars['MACHINE'] prefix = os.path.join(deploy_dir, 'wic-image-minimal-%s.' 
% machine) # check if we have result image and manifests symlinks # pointing to existing files for suffix in ('wic', 'manifest'): path = prefix + suffix self.assertTrue(os.path.islink(path)) self.assertTrue(os.path.isfile(os.path.realpath(path))) @only_for_arch(['i586', 'i686', 'x86_64']) def test_qemu(self): """Test wic-image-minimal under qemu""" config = 'IMAGE_FSTYPES += "wic"\nWKS_FILE = "wic-image-minimal"\n'\ 'MACHINE_FEATURES_append = " efi"\n' self.append_config(config) self.assertEqual(0, bitbake('wic-image-minimal').status) self.remove_config(config) with runqemu('wic-image-minimal', ssh=False) as qemu: cmd = "mount | grep '^/dev/' | cut -f1,3 -d ' ' | egrep -c -e '/dev/sda1 /boot' " \ "-e '/dev/root /|/dev/sda2 /' -e '/dev/sda3 /media' -e '/dev/sda4 /mnt'" status, output = qemu.run_serial(cmd) self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output)) self.assertEqual(output, '4') cmd = "grep UUID= /etc/fstab" status, output = qemu.run_serial(cmd) self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output)) self.assertEqual(output, 'UUID=2c71ef06-a81d-4735-9d3a-379b69c6bdba\t/media\text4\tdefaults\t0\t0') @only_for_arch(['i586', 'i686', 'x86_64']) def test_qemu_efi(self): """Test core-image-minimal efi image under qemu""" config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "mkefidisk.wks"\n' self.append_config(config) self.assertEqual(0, bitbake('core-image-minimal ovmf').status) self.remove_config(config) with runqemu('core-image-minimal', ssh=False, runqemuparams='ovmf', image_fstype='wic') as qemu: cmd = "grep sda. /proc/partitions |wc -l" status, output = qemu.run_serial(cmd) self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output)) self.assertEqual(output, '3') @staticmethod def _make_fixed_size_wks(size): """ Create a wks of an image with a single partition. Size of the partition is set using --fixed-size flag. Returns a tuple: (path to wks file, wks image name) """ with NamedTemporaryFile("w", suffix=".wks", delete=False) as tempf: wkspath = tempf.name tempf.write("part " \ "--source rootfs --ondisk hda --align 4 --fixed-size %d " "--fstype=ext4\n" % size) return wkspath def _get_wic_partitions(self, wkspath, native_sysroot=None, ignore_status=False): p = runCmd("wic create %s -e core-image-minimal -o %s" % (wkspath, self.resultdir), ignore_status=ignore_status) if p.status: return (p, None) wksname = os.path.splitext(os.path.basename(wkspath))[0] wicout = glob(self.resultdir + "%s-*direct" % wksname) if not wicout: return (p, None) wicimg = wicout[0] if not native_sysroot: native_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "wic-tools") # verify partition size with wic res = runCmd("parted -m %s unit kib p 2>/dev/null" % wicimg, native_sysroot=native_sysroot) # parse parted output which looks like this: # BYT;\n # /var/tmp/wic/build/tmpfwvjjkf_-201611101222-hda.direct:200MiB:file:512:512:msdos::;\n # 1:0.00MiB:200MiB:200MiB:ext4::;\n return (p, res.output.splitlines()[2:]) def test_fixed_size(self): """ Test creation of a simple image with partition size controlled through --fixed-size flag """ wkspath = Wic2._make_fixed_size_wks(200) _, partlns = self._get_wic_partitions(wkspath) os.remove(wkspath) self.assertEqual(partlns, [ "1:4.00kiB:204804kiB:204800kiB:ext4::;", ]) def test_fixed_size_error(self): """ Test creation of a simple image with partition size controlled through --fixed-size flag. The size of partition is intentionally set to 1MiB in order to trigger an error in wic. 
""" wkspath = Wic2._make_fixed_size_wks(1) p, _ = self._get_wic_partitions(wkspath, ignore_status=True) os.remove(wkspath) self.assertNotEqual(p.status, 0, "wic exited successfully when an error was expected:\n%s" % p.output) def test_offset(self): native_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "wic-tools") with NamedTemporaryFile("w", suffix=".wks") as tempf: # Test that partitions are placed at the correct offsets, default KB tempf.write("bootloader --ptable gpt\n" \ "part / --source rootfs --ondisk hda --offset 32 --fixed-size 100M --fstype=ext4\n" \ "part /bar --ondisk hda --offset 102432 --fixed-size 100M --fstype=ext4\n") tempf.flush() _, partlns = self._get_wic_partitions(tempf.name, native_sysroot) self.assertEqual(partlns, [ "1:32.0kiB:102432kiB:102400kiB:ext4:primary:;", "2:102432kiB:204832kiB:102400kiB:ext4:primary:;", ]) with NamedTemporaryFile("w", suffix=".wks") as tempf: # Test that partitions are placed at the correct offsets, same with explicit KB tempf.write("bootloader --ptable gpt\n" \ "part / --source rootfs --ondisk hda --offset 32K --fixed-size 100M --fstype=ext4\n" \ "part /bar --ondisk hda --offset 102432K --fixed-size 100M --fstype=ext4\n") tempf.flush() _, partlns = self._get_wic_partitions(tempf.name, native_sysroot) self.assertEqual(partlns, [ "1:32.0kiB:102432kiB:102400kiB:ext4:primary:;", "2:102432kiB:204832kiB:102400kiB:ext4:primary:;", ]) with NamedTemporaryFile("w", suffix=".wks") as tempf: # Test that partitions are placed at the correct offsets using MB tempf.write("bootloader --ptable gpt\n" \ "part / --source rootfs --ondisk hda --offset 32K --fixed-size 100M --fstype=ext4\n" \ "part /bar --ondisk hda --offset 101M --fixed-size 100M --fstype=ext4\n") tempf.flush() _, partlns = self._get_wic_partitions(tempf.name, native_sysroot) self.assertEqual(partlns, [ "1:32.0kiB:102432kiB:102400kiB:ext4:primary:;", "2:103424kiB:205824kiB:102400kiB:ext4:primary:;", ]) with NamedTemporaryFile("w", suffix=".wks") as tempf: # Test that partitions can be placed on a 512 byte sector boundary tempf.write("bootloader --ptable gpt\n" \ "part / --source rootfs --ondisk hda --offset 65s --fixed-size 99M --fstype=ext4\n" \ "part /bar --ondisk hda --offset 102432 --fixed-size 100M --fstype=ext4\n") tempf.flush() _, partlns = self._get_wic_partitions(tempf.name, native_sysroot) self.assertEqual(partlns, [ "1:32.5kiB:101408kiB:101376kiB:ext4:primary:;", "2:102432kiB:204832kiB:102400kiB:ext4:primary:;", ]) with NamedTemporaryFile("w", suffix=".wks") as tempf: # Test that a partition can be placed immediately after a MSDOS partition table tempf.write("bootloader --ptable msdos\n" \ "part / --source rootfs --ondisk hda --offset 1s --fixed-size 100M --fstype=ext4\n") tempf.flush() _, partlns = self._get_wic_partitions(tempf.name, native_sysroot) self.assertEqual(partlns, [ "1:0.50kiB:102400kiB:102400kiB:ext4::;", ]) with NamedTemporaryFile("w", suffix=".wks") as tempf: # Test that image creation fails if the partitions would overlap tempf.write("bootloader --ptable gpt\n" \ "part / --source rootfs --ondisk hda --offset 32 --fixed-size 100M --fstype=ext4\n" \ "part /bar --ondisk hda --offset 102431 --fixed-size 100M --fstype=ext4\n") tempf.flush() p, _ = self._get_wic_partitions(tempf.name, ignore_status=True) self.assertNotEqual(p.status, 0, "wic exited successfully when an error was expected:\n%s" % p.output) with NamedTemporaryFile("w", suffix=".wks") as tempf: # Test that partitions are not allowed to overlap with the booloader tempf.write("bootloader --ptable gpt\n" \ 
"part / --source rootfs --ondisk hda --offset 8 --fixed-size 100M --fstype=ext4\n") tempf.flush() p, _ = self._get_wic_partitions(tempf.name, ignore_status=True) self.assertNotEqual(p.status, 0, "wic exited successfully when an error was expected:\n%s" % p.output) def test_extra_space(self): native_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "wic-tools") with NamedTemporaryFile("w", suffix=".wks") as tempf: tempf.write("bootloader --ptable gpt\n" \ "part / --source rootfs --ondisk hda --extra-space 200M --fstype=ext4\n") tempf.flush() _, partlns = self._get_wic_partitions(tempf.name, native_sysroot) self.assertEqual(len(partlns), 1) size = partlns[0].split(':')[3] self.assertRegex(size, r'^[0-9]+kiB$') size = int(size[:-3]) self.assertGreaterEqual(size, 204800) @only_for_arch(['i586', 'i686', 'x86_64']) def test_rawcopy_plugin_qemu(self): """Test rawcopy plugin in qemu""" # build ext4 and then use it for a wic image config = 'IMAGE_FSTYPES = "ext4"\n' self.append_config(config) self.assertEqual(0, bitbake('core-image-minimal').status) self.remove_config(config) config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "test_rawcopy_plugin.wks.in"\n' self.append_config(config) self.assertEqual(0, bitbake('core-image-minimal-mtdutils').status) self.remove_config(config) with runqemu('core-image-minimal-mtdutils', ssh=False, image_fstype='wic') as qemu: cmd = "grep sda. /proc/partitions |wc -l" status, output = qemu.run_serial(cmd) self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output)) self.assertEqual(output, '2') def test_rawcopy_plugin(self): """Test rawcopy plugin""" img = 'core-image-minimal' machine = get_bb_var('MACHINE', img) with NamedTemporaryFile("w", suffix=".wks") as wks: wks.writelines(['part /boot --active --source bootimg-pcbios\n', 'part / --source rawcopy --sourceparams="file=%s-%s.ext4" --use-uuid\n'\ % (img, machine), 'bootloader --timeout=0 --append="console=ttyS0,115200n8"\n']) wks.flush() cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir) runCmd(cmd) wksname = os.path.splitext(os.path.basename(wks.name))[0] out = glob(self.resultdir + "%s-*direct" % wksname) self.assertEqual(1, len(out)) @only_for_arch(['i586', 'i686', 'x86_64']) def test_biosplusefi_plugin_qemu(self): """Test biosplusefi plugin in qemu""" config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "test_biosplusefi_plugin.wks"\nMACHINE_FEATURES_append = " efi"\n' self.append_config(config) self.assertEqual(0, bitbake('core-image-minimal').status) self.remove_config(config) with runqemu('core-image-minimal', ssh=False, image_fstype='wic') as qemu: # Check that we have ONLY two /dev/sda* partitions (/boot and /) cmd = "grep sda. 
/proc/partitions | wc -l" status, output = qemu.run_serial(cmd) self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output)) self.assertEqual(output, '2') # Check that /dev/sda1 is /boot and that either /dev/root OR /dev/sda2 is / cmd = "mount | grep '^/dev/' | cut -f1,3 -d ' ' | egrep -c -e '/dev/sda1 /boot' -e '/dev/root /|/dev/sda2 /'" status, output = qemu.run_serial(cmd) self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output)) self.assertEqual(output, '2') # Check that /boot has EFI bootx64.efi (required for EFI) cmd = "ls /boot/EFI/BOOT/bootx64.efi | wc -l" status, output = qemu.run_serial(cmd) self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output)) self.assertEqual(output, '1') # Check that "BOOTABLE" flag is set on boot partition (required for PC-Bios) # Trailing "cat" seems to be required; otherwise run_serial() sends back echo of the input command cmd = "fdisk -l /dev/sda | grep /dev/sda1 | awk {print'$2'} | cat" status, output = qemu.run_serial(cmd) self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output)) self.assertEqual(output, '*') @only_for_arch(['i586', 'i686', 'x86_64']) def test_biosplusefi_plugin(self): """Test biosplusefi plugin""" # Wic generation below may fail depending on the order of the unittests # This is because bootimg-pcbios (that bootimg-biosplusefi uses) generate its MBR inside STAGING_DATADIR directory # which may or may not exists depending on what was built already # If an image hasn't been built yet, directory ${STAGING_DATADIR}/syslinux won't exists and _get_bootimg_dir() # will raise with "Couldn't find correct bootimg_dir" # The easiest way to work-around this issue is to make sure we already built an image here, hence the bitbake call config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "test_biosplusefi_plugin.wks"\nMACHINE_FEATURES_append = " efi"\n' self.append_config(config) self.assertEqual(0, bitbake('core-image-minimal').status) self.remove_config(config) img = 'core-image-minimal' with NamedTemporaryFile("w", suffix=".wks") as wks: wks.writelines(['part /boot --active --source bootimg-biosplusefi --sourceparams="loader=grub-efi"\n', 'part / --source rootfs --fstype=ext4 --align 1024 --use-uuid\n'\ 'bootloader --timeout=0 --append="console=ttyS0,115200n8"\n']) wks.flush() cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir) runCmd(cmd) wksname = os.path.splitext(os.path.basename(wks.name))[0] out = glob(self.resultdir + "%s-*.direct" % wksname) self.assertEqual(1, len(out)) def test_fs_types(self): """Test filesystem types for empty and not empty partitions""" img = 'core-image-minimal' with NamedTemporaryFile("w", suffix=".wks") as wks: wks.writelines(['part ext2 --fstype ext2 --source rootfs\n', 'part btrfs --fstype btrfs --source rootfs --size 40M\n', 'part squash --fstype squashfs --source rootfs\n', 'part swap --fstype swap --size 1M\n', 'part emptyvfat --fstype vfat --size 1M\n', 'part emptymsdos --fstype msdos --size 1M\n', 'part emptyext2 --fstype ext2 --size 1M\n', 'part emptybtrfs --fstype btrfs --size 150M\n']) wks.flush() cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir) runCmd(cmd) wksname = os.path.splitext(os.path.basename(wks.name))[0] out = glob(self.resultdir + "%s-*direct" % wksname) self.assertEqual(1, len(out)) def test_kickstart_parser(self): """Test wks parser options""" with NamedTemporaryFile("w", suffix=".wks") as wks: wks.writelines(['part / --fstype ext3 --source rootfs --system-id 0xFF '\ '--overhead-factor 1.2 
--size 100k\n']) wks.flush() cmd = "wic create %s -e core-image-minimal -o %s" % (wks.name, self.resultdir) runCmd(cmd) wksname = os.path.splitext(os.path.basename(wks.name))[0] out = glob(self.resultdir + "%s-*direct" % wksname) self.assertEqual(1, len(out)) def test_image_bootpart_globbed(self): """Test globbed sources with image-bootpart plugin""" img = "core-image-minimal" cmd = "wic create sdimage-bootpart -e %s -o %s" % (img, self.resultdir) config = 'IMAGE_BOOT_FILES = "%s*"' % get_bb_var('KERNEL_IMAGETYPE', img) self.append_config(config) runCmd(cmd) self.remove_config(config) self.assertEqual(1, len(glob(self.resultdir + "sdimage-bootpart-*direct"))) def test_sparse_copy(self): """Test sparse_copy with FIEMAP and SEEK_HOLE filemap APIs""" libpath = os.path.join(get_bb_var('COREBASE'), 'scripts', 'lib', 'wic') sys.path.insert(0, libpath) from filemap import FilemapFiemap, FilemapSeek, sparse_copy, ErrorNotSupp with NamedTemporaryFile("w", suffix=".wic-sparse") as sparse: src_name = sparse.name src_size = 1024 * 10 sparse.truncate(src_size) # write one byte to the file with open(src_name, 'r+b') as sfile: sfile.seek(1024 * 4) sfile.write(b'\x00') dest = sparse.name + '.out' # copy src file to dest using different filemap APIs for api in (FilemapFiemap, FilemapSeek, None): if os.path.exists(dest): os.unlink(dest) try: sparse_copy(sparse.name, dest, api=api) except ErrorNotSupp: continue # skip unsupported API dest_stat = os.stat(dest) self.assertEqual(dest_stat.st_size, src_size) # 8 blocks is 4K (physical sector size) self.assertEqual(dest_stat.st_blocks, 8) os.unlink(dest) def test_wic_ls(self): """Test listing image content using 'wic ls'""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-D -o %s" % self.resultdir) images = glob(self.resultdir + "wictestdisk-*.direct") self.assertEqual(1, len(images)) sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools') # list partitions result = runCmd("wic ls %s -n %s" % (images[0], sysroot)) self.assertEqual(3, len(result.output.split('\n'))) # list directory content of the first partition result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot)) self.assertEqual(6, len(result.output.split('\n'))) def test_wic_cp(self): """Test copy files and directories to the the wic image.""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-D -o %s" % self.resultdir) images = glob(self.resultdir + "wictestdisk-*.direct") self.assertEqual(1, len(images)) sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools') # list directory content of the first partition result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot)) self.assertEqual(6, len(result.output.split('\n'))) with NamedTemporaryFile("w", suffix=".wic-cp") as testfile: testfile.write("test") # copy file to the partition runCmd("wic cp %s %s:1/ -n %s" % (testfile.name, images[0], sysroot)) # check if file is there result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot)) self.assertEqual(7, len(result.output.split('\n'))) self.assertTrue(os.path.basename(testfile.name) in result.output) # prepare directory testdir = os.path.join(self.resultdir, 'wic-test-cp-dir') testsubdir = os.path.join(testdir, 'subdir') os.makedirs(os.path.join(testsubdir)) copy(testfile.name, testdir) # copy directory to the partition runCmd("wic cp %s %s:1/ -n %s" % (testdir, images[0], sysroot)) # check if directory is there result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot)) self.assertEqual(8, len(result.output.split('\n'))) 
self.assertTrue(os.path.basename(testdir) in result.output) # copy the file from the partition and check if it success dest = '%s-cp' % testfile.name runCmd("wic cp %s:1/%s %s -n %s" % (images[0], os.path.basename(testfile.name), dest, sysroot)) self.assertTrue(os.path.exists(dest)) def test_wic_rm(self): """Test removing files and directories from the the wic image.""" runCmd("wic create mkefidisk " "--image-name=core-image-minimal " "-D -o %s" % self.resultdir) images = glob(self.resultdir + "mkefidisk-*.direct") self.assertEqual(1, len(images)) sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools') # list directory content of the first partition result = runCmd("wic ls %s:1 -n %s" % (images[0], sysroot)) self.assertIn('\nBZIMAGE ', result.output) self.assertIn('\nEFI <DIR> ', result.output) # remove file runCmd("wic rm %s:1/bzimage -n %s" % (images[0], sysroot)) # remove directory runCmd("wic rm %s:1/efi -n %s" % (images[0], sysroot)) # check if they're removed result = runCmd("wic ls %s:1 -n %s" % (images[0], sysroot)) self.assertNotIn('\nBZIMAGE ', result.output) self.assertNotIn('\nEFI <DIR> ', result.output) def test_mkfs_extraopts(self): """Test wks option --mkfs-extraopts for empty and not empty partitions""" img = 'core-image-minimal' with NamedTemporaryFile("w", suffix=".wks") as wks: wks.writelines( ['part ext2 --fstype ext2 --source rootfs --mkfs-extraopts "-D -F -i 8192"\n', "part btrfs --fstype btrfs --source rootfs --size 40M --mkfs-extraopts='--quiet'\n", 'part squash --fstype squashfs --source rootfs --mkfs-extraopts "-no-sparse -b 4096"\n', 'part emptyvfat --fstype vfat --size 1M --mkfs-extraopts "-S 1024 -s 64"\n', 'part emptymsdos --fstype msdos --size 1M --mkfs-extraopts "-S 1024 -s 64"\n', 'part emptyext2 --fstype ext2 --size 1M --mkfs-extraopts "-D -F -i 8192"\n', 'part emptybtrfs --fstype btrfs --size 100M --mkfs-extraopts "--mixed -K"\n']) wks.flush() cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir) runCmd(cmd) wksname = os.path.splitext(os.path.basename(wks.name))[0] out = glob(self.resultdir + "%s-*direct" % wksname) self.assertEqual(1, len(out)) def test_expand_mbr_image(self): """Test wic write --expand command for mbr image""" # build an image config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "directdisk.wks"\n' self.append_config(config) self.assertEqual(0, bitbake('core-image-minimal').status) # get path to the image bb_vars = get_bb_vars(['DEPLOY_DIR_IMAGE', 'MACHINE']) deploy_dir = bb_vars['DEPLOY_DIR_IMAGE'] machine = bb_vars['MACHINE'] image_path = os.path.join(deploy_dir, 'core-image-minimal-%s.wic' % machine) self.remove_config(config) try: # expand image to 1G new_image_path = None with NamedTemporaryFile(mode='wb', suffix='.wic.exp', dir=deploy_dir, delete=False) as sparse: sparse.truncate(1024 ** 3) new_image_path = sparse.name sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools') cmd = "wic write -n %s --expand 1:0 %s %s" % (sysroot, image_path, new_image_path) runCmd(cmd) # check if partitions are expanded orig = runCmd("wic ls %s -n %s" % (image_path, sysroot)) exp = runCmd("wic ls %s -n %s" % (new_image_path, sysroot)) orig_sizes = [int(line.split()[3]) for line in orig.output.split('\n')[1:]] exp_sizes = [int(line.split()[3]) for line in exp.output.split('\n')[1:]] self.assertEqual(orig_sizes[0], exp_sizes[0]) # first partition is not resized self.assertTrue(orig_sizes[1] < exp_sizes[1]) # Check if all free space is partitioned result = runCmd("%s/usr/sbin/sfdisk -F %s" % (sysroot, new_image_path)) self.assertTrue("0 B, 0 
bytes, 0 sectors" in result.output) os.rename(image_path, image_path + '.bak') os.rename(new_image_path, image_path) # Check if it boots in qemu with runqemu('core-image-minimal', ssh=False) as qemu: cmd = "ls /etc/" status, output = qemu.run_serial('true') self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output)) finally: if os.path.exists(new_image_path): os.unlink(new_image_path) if os.path.exists(image_path + '.bak'): os.rename(image_path + '.bak', image_path) def test_wic_ls_ext(self): """Test listing content of the ext partition using 'wic ls'""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-D -o %s" % self.resultdir) images = glob(self.resultdir + "wictestdisk-*.direct") self.assertEqual(1, len(images)) sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools') # list directory content of the second ext4 partition result = runCmd("wic ls %s:2/ -n %s" % (images[0], sysroot)) self.assertTrue(set(['bin', 'home', 'proc', 'usr', 'var', 'dev', 'lib', 'sbin']).issubset( set(line.split()[-1] for line in result.output.split('\n') if line))) def test_wic_cp_ext(self): """Test copy files and directories to the ext partition.""" runCmd("wic create wictestdisk " "--image-name=core-image-minimal " "-D -o %s" % self.resultdir) images = glob(self.resultdir + "wictestdisk-*.direct") self.assertEqual(1, len(images)) sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools') # list directory content of the ext4 partition result = runCmd("wic ls %s:2/ -n %s" % (images[0], sysroot)) dirs = set(line.split()[-1] for line in result.output.split('\n') if line) self.assertTrue(set(['bin', 'home', 'proc', 'usr', 'var', 'dev', 'lib', 'sbin']).issubset(dirs)) with NamedTemporaryFile("w", suffix=".wic-cp") as testfile: testfile.write("test") # copy file to the partition runCmd("wic cp %s %s:2/ -n %s" % (testfile.name, images[0], sysroot)) # check if file is there result = runCmd("wic ls %s:2/ -n %s" % (images[0], sysroot)) newdirs = set(line.split()[-1] for line in result.output.split('\n') if line) self.assertEqual(newdirs.difference(dirs), set([os.path.basename(testfile.name)])) # check if the file to copy is in the partition result = runCmd("wic ls %s:2/etc/ -n %s" % (images[0], sysroot)) self.assertTrue('fstab' in [line.split()[-1] for line in result.output.split('\n') if line]) # copy file from the partition, replace the temporary file content with it and # check for the file size to validate the copy runCmd("wic cp %s:2/etc/fstab %s -n %s" % (images[0], testfile.name, sysroot)) self.assertTrue(os.stat(testfile.name).st_size > 0) def test_wic_rm_ext(self): """Test removing files from the ext partition.""" runCmd("wic create mkefidisk " "--image-name=core-image-minimal " "-D -o %s" % self.resultdir) images = glob(self.resultdir + "mkefidisk-*.direct") self.assertEqual(1, len(images)) sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools') # list directory content of the /etc directory on ext4 partition result = runCmd("wic ls %s:2/etc/ -n %s" % (images[0], sysroot)) self.assertTrue('fstab' in [line.split()[-1] for line in result.output.split('\n') if line]) # remove file runCmd("wic rm %s:2/etc/fstab -n %s" % (images[0], sysroot)) # check if it's removed result = runCmd("wic ls %s:2/etc/ -n %s" % (images[0], sysroot)) self.assertTrue('fstab' not in [line.split()[-1] for line in result.output.split('\n') if line]) # remove non-empty directory runCmd("wic rm -r %s:2/etc/ -n %s" % (images[0], sysroot)) # check if it's removed result = runCmd("wic ls %s:2/ -n %s" % 
(images[0], sysroot)) self.assertTrue('etc' not in [line.split()[-1] for line in result.output.split('\n') if line])
[]
[]
[ "PATH" ]
[]
["PATH"]
python
1
0
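The test_sparse_copy case in the record above exercises wic's filemap helpers (FilemapFiemap, FilemapSeek) by copying a 10 KiB sparse file with one written byte and checking that the copy keeps the same size while occupying only a single 4 KiB block. As a rough illustration of the SEEK_DATA/SEEK_HOLE approach that FilemapSeek relies on, the sketch below copies only the data extents of a sparse file. It is not the scripts/lib/wic implementation; the function name is ours, and it assumes a Linux filesystem that supports these lseek flags.

import os

def sparse_copy_seek(src, dest, block=64 * 1024):
    # Copy only the data extents; holes in the source stay holes in the copy.
    with open(src, 'rb') as fsrc, open(dest, 'wb') as fdst:
        size = os.fstat(fsrc.fileno()).st_size
        fdst.truncate(size)  # pre-size the destination so trailing holes survive
        offset = 0
        while offset < size:
            try:
                data = os.lseek(fsrc.fileno(), offset, os.SEEK_DATA)
            except OSError:  # ENXIO: no data extent at or after offset
                break
            hole = os.lseek(fsrc.fileno(), data, os.SEEK_HOLE)
            fsrc.seek(data)
            fdst.seek(data)
            remaining = hole - data
            while remaining > 0:
                chunk = fsrc.read(min(block, remaining))
                if not chunk:
                    break
                fdst.write(chunk)
                remaining -= len(chunk)
            offset = hole

Applied to the test's source file, a copy like this reproduces the one written block and leaves the rest as holes, which is why the test can assert st_size matches while st_blocks stays at 8 (one 4 KiB block).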
pkg/cmd/client/kubecfg.go
package client import ( "fmt" "io" "io/ioutil" "net/http" "net/url" "os" "reflect" "sort" "strconv" "strings" "time" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" klatest "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" kmeta "github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta" kclient "github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubecfg" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/version" "github.com/golang/glog" "github.com/openshift/origin/pkg/api/latest" buildapi "github.com/openshift/origin/pkg/build/api" buildutil "github.com/openshift/origin/pkg/build/util" osclient "github.com/openshift/origin/pkg/client" . "github.com/openshift/origin/pkg/cmd/client/api" "github.com/openshift/origin/pkg/cmd/client/build" "github.com/openshift/origin/pkg/cmd/client/image" "github.com/openshift/origin/pkg/cmd/client/project" "github.com/openshift/origin/pkg/cmd/client/route" "github.com/openshift/origin/pkg/config" configapi "github.com/openshift/origin/pkg/config/api" deployapi "github.com/openshift/origin/pkg/deploy/api" deployclient "github.com/openshift/origin/pkg/deploy/client" imageapi "github.com/openshift/origin/pkg/image/api" projectapi "github.com/openshift/origin/pkg/project/api" routeapi "github.com/openshift/origin/pkg/route/api" ) type KubeConfig struct { ClientConfig kclient.Config ServerVersion bool PreventSkew bool Config string TemplateConfig string Selector string UpdatePeriod time.Duration PortSpec string ServicePort int AuthConfig string JSON bool YAML bool Verbose bool Proxy bool WWW string TemplateFile string TemplateStr string ID string Namespace string BuildConfigID string ImageName string APIVersion string OSAPIVersion string Args []string ns string nsFile string } func (c *KubeConfig) Arg(index int) string { if index >= len(c.Args) { return "" } return c.Args[index] } func usage(name string) string { return fmt.Sprintf(` Kubernetes REST API: %[1]s [OPTIONS] get|list|create|delete|update <%[2]s>[/<id>] Manage replication controllers: %[1]s [OPTIONS] stop|rm <controller> %[1]s [OPTIONS] [-u <time>] [-image <image>] rollingupdate <controller> %[1]s [OPTIONS] resize <controller> <replicas> Launch a simple ReplicationController with a single container based on the given image: %[1]s [OPTIONS] [-p <port spec>] run <image> <replicas> <controller> Manage namespace: %[1]s [OPTIONS] ns [<namespace>] Perform bulk operations on groups of Kubernetes resources: %[1]s [OPTIONS] apply -c config.json Process template into config: %[1]s [OPTIONS] process -c template.json Retrieve build logs: %[1]s [OPTIONS] buildLogs --id="buildID" `, name, prettyWireStorage()) } var parser = kubecfg.NewParser(map[string]runtime.Object{ "pods": &api.Pod{}, "services": &api.Service{}, "replicationControllers": &api.ReplicationController{}, "minions": &api.Node{}, "nodes": &api.Node{}, "builds": &buildapi.Build{}, "buildConfigs": &buildapi.BuildConfig{}, "images": &imageapi.Image{}, "imageRepositories": &imageapi.ImageRepository{}, "imageRepositoryMappings": &imageapi.ImageRepositoryMapping{}, "config": &configapi.Config{}, "deployments": &deployapi.Deployment{}, "deploymentConfigs": &deployapi.DeploymentConfig{}, "routes": &routeapi.Route{}, "projects": &projectapi.Project{}, }) func prettyWireStorage() string { types := parser.SupportedWireStorage() 
sort.Strings(types) return strings.Join(types, "|") } // readConfigData reads the bytes from the specified filesytem or network location associated with the *config flag func (c *KubeConfig) readConfigData() []byte { // read from STDIN if c.Config == "-" { data, err := ioutil.ReadAll(os.Stdin) if err != nil { glog.Fatalf("Unable to read from STDIN: %v\n", err) } return data } // we look for http:// or https:// to determine if valid URL, otherwise do normal file IO if url, err := url.Parse(c.Config); err == nil && (url.Scheme == "http" || url.Scheme == "https") { resp, err := http.Get(url.String()) if err != nil { glog.Fatalf("Unable to access URL %v: %v\n", c.Config, err) } defer resp.Body.Close() if resp.StatusCode != 200 { glog.Fatalf("Unable to read URL, server reported %d %s", resp.StatusCode, resp.Status) } data, err := ioutil.ReadAll(resp.Body) if err != nil { glog.Fatalf("Unable to read URL %v: %v\n", c.Config, err) } return data } data, err := ioutil.ReadFile(c.Config) if err != nil { glog.Fatalf("Unable to read %v: %v\n", c.Config, err) } return data } // readConfig reads and parses pod, replicationController, and service // configuration files. If any errors log and exit non-zero. func (c *KubeConfig) readConfig(storage string, serverCodec runtime.Codec) []byte { if len(c.Config) == 0 { glog.Fatal("Need config file (-c)") } data, err := parser.ToWireFormat(c.readConfigData(), storage, latest.Codec, serverCodec) if err != nil { glog.Fatalf("Error parsing %v as an object for %v: %v", c.Config, storage, err) } if c.Verbose { glog.Infof("Parsed config file successfully; sending:\n%v", string(data)) } return data } // getNamespace returns the effective namespace for this invocation based on the first of: // 1. The --ns argument // 2. The contents of the nsFile // 3. 
Uses the default namespace func (c *KubeConfig) getNamespace() string { // Load namespace information for requests nsInfo, err := kubecfg.LoadNamespaceInfo(c.nsFile) if err != nil { glog.Fatalf("Error loading current namespace: %v", err) } ret := nsInfo.Namespace // Check if the namespace was overriden by the -ns argument if len(c.ns) > 0 { ret = c.ns } return ret } func (c *KubeConfig) Run() { util.InitLogs() defer util.FlushLogs() clientConfig := &c.ClientConfig // Initialize the client if clientConfig.Host == "" { clientConfig.Host = os.Getenv("KUBERNETES_MASTER") } if clientConfig.Host == "" { // TODO: eventually apiserver should start on 443 and be secure by default clientConfig.Host = "http://localhost:8080" } hosts := strings.SplitN(clientConfig.Host, ",", 2) for i := range hosts { hosts[i] = strings.TrimRight(hosts[i], "/") } clientConfig.Host = hosts[0] if kclient.IsConfigTransportTLS(clientConfig) { auth, err := kubecfg.LoadClientAuthInfoOrPrompt(c.AuthConfig, os.Stdin) if err != nil { glog.Fatalf("Error loading auth: %v", err) } clientConfig.Username = auth.User clientConfig.Password = auth.Password if auth.CAFile != "" { clientConfig.CAFile = auth.CAFile } if auth.CertFile != "" { clientConfig.CertFile = auth.CertFile } if auth.KeyFile != "" { clientConfig.KeyFile = auth.KeyFile } if len(clientConfig.BearerToken) == 0 && auth.BearerToken != "" { clientConfig.BearerToken = auth.BearerToken } if auth.Insecure != nil { clientConfig.Insecure = *auth.Insecure } } clientConfig.Version = c.APIVersion kubeClient, err := kclient.New(clientConfig) if err != nil { glog.Fatalf("Unable to set up the Kubernetes API client: %v", err) } if len(hosts) > 1 { clientConfig.Host = hosts[1] } clientConfig.Version = c.OSAPIVersion client, err := osclient.New(clientConfig) if err != nil { glog.Fatalf("Unable to set up the OpenShift API client: %v", err) } // check the kubernetes server version if c.ServerVersion { got, err := kubeClient.ServerVersion() if err != nil { fmt.Printf("Couldn't read version from server: %v", err) os.Exit(1) } fmt.Printf("Server Version: %#v", got) os.Exit(0) } if c.PreventSkew { got, err := kubeClient.ServerVersion() if err != nil { fmt.Printf("Couldn't read version from server: %v", err) os.Exit(1) } if c, s := version.Get(), *got; !reflect.DeepEqual(c, s) { fmt.Printf("Server version (%#v) differs from client version (%#v)!", s, c) os.Exit(1) } } if c.Proxy { glog.Info("Starting to serve on localhost:8001") server, err := kubecfg.NewProxyServer(c.WWW, clientConfig) if err != nil { glog.Fatalf("Unable to initialize proxy server %v", err) } glog.Fatal(server.Serve()) } method := c.Arg(0) clients := ClientMappings{ "minions": {"Minion", kubeClient.RESTClient, klatest.Codec}, "pods": {"Pod", kubeClient.RESTClient, klatest.Codec}, "services": {"Service", kubeClient.RESTClient, klatest.Codec}, "replicationControllers": {"ReplicationController", kubeClient.RESTClient, klatest.Codec}, "builds": {"Build", client.RESTClient, latest.Codec}, "buildConfigs": {"BuildConfig", client.RESTClient, latest.Codec}, "images": {"Image", client.RESTClient, latest.Codec}, "imageRepositories": {"ImageRepository", client.RESTClient, latest.Codec}, "imageRepositoryMappings": {"ImageRepositoryMapping", client.RESTClient, latest.Codec}, "deployments": {"Deployment", client.RESTClient, latest.Codec}, "deploymentConfigs": {"DeploymentConfig", client.RESTClient, latest.Codec}, "routes": {"Route", client.RESTClient, latest.Codec}, "projects": {"Project", client.RESTClient, latest.Codec}, } matchFound := 
c.executeConfigRequest(method, clients) || c.executeBuildRequest(method, client) || c.executeTemplateRequest(method, client) || c.executeBuildLogRequest(method, client) || c.executeControllerRequest(method, kubeClient) || c.executeNamespaceRequest(method) || c.executeAPIRequest(method, clients) if matchFound == false { glog.Fatalf("Unknown command %s", method) } } // storagePathFromArg normalizes a path and breaks out the first segment if available func storagePathFromArg(arg string) (storage, path string, hasSuffix bool) { path = strings.Trim(arg, "/") segments := strings.SplitN(path, "/", 2) storage = segments[0] if len(segments) > 1 && segments[1] != "" { hasSuffix = true } return storage, path, hasSuffix } //checkStorage returns true if the provided storage is valid func checkStorage(storage string) bool { for _, allowed := range parser.SupportedWireStorage() { if allowed == storage { return true } } return false } func (c *KubeConfig) executeAPIRequest(method string, clients ClientMappings) bool { storage, path, hasSuffix := storagePathFromArg(c.Arg(1)) validStorage := checkStorage(storage) client, ok := clients[storage] if !ok { glog.Fatalf("Unsupported storage type %s", storage) } verb := "" setBody := false var version string switch method { case "get": verb = "GET" if !validStorage || !hasSuffix { glog.Fatalf("usage: kubecfg [OPTIONS] %s <%s>[/<id>]", method, prettyWireStorage()) } case "list": verb = "GET" if !validStorage || hasSuffix { glog.Fatalf("usage: kubecfg [OPTIONS] %s <%s>", method, prettyWireStorage()) } case "delete": verb = "DELETE" if !validStorage || !hasSuffix { glog.Fatalf("usage: kubecfg [OPTIONS] %s <%s>/<id>", method, prettyWireStorage()) } case "create": verb = "POST" setBody = true if !validStorage || hasSuffix { glog.Fatalf("usage: kubecfg [OPTIONS] %s <%s>", method, prettyWireStorage()) } case "update": obj, err := client.Client.Verb("GET").Path(path).Do().Get() if err != nil { glog.Fatalf("error obtaining resource version for update: %v", err) } typeMeta, err := kmeta.Accessor(obj) if err != nil { glog.Fatalf("error finding json base for update: %v", err) } version = typeMeta.ResourceVersion() verb = "PUT" setBody = true if !validStorage || !hasSuffix { glog.Fatalf("usage: kubecfg [OPTIONS] %s <%s>/<id>", method, prettyWireStorage()) } default: return false } r := client.Client.Verb(verb). Namespace(c.getNamespace()). Path(path). 
ParseSelectorParam("labels", c.Selector) if setBody { if len(version) != 0 { data := c.readConfig(storage, client.Codec) obj, err := latest.Codec.Decode(data) if err != nil { glog.Fatalf("error setting resource version: %v", err) } typeMeta, err := kmeta.Accessor(obj) if err != nil { glog.Fatalf("error setting resource version: %v", err) } typeMeta.SetResourceVersion(version) data, err = client.Codec.Encode(obj) if err != nil { glog.Fatalf("error setting resource version: %v", err) } r.Body(data) } else { r.Body(c.readConfig(storage, client.Codec)) } } result := r.Do() obj, err := result.Get() if err != nil { glog.Fatalf("Got request error: %v", err) return false } var printer kubecfg.ResourcePrinter switch { case c.JSON: printer = &kubecfg.IdentityPrinter{} case c.YAML: printer = &kubecfg.YAMLPrinter{} case len(c.TemplateFile) > 0 || len(c.TemplateStr) > 0: var data []byte if len(c.TemplateFile) > 0 { var err error data, err = ioutil.ReadFile(c.TemplateFile) if err != nil { glog.Fatalf("Error reading template %s, %v", c.TemplateFile, err) return false } } else { data = []byte(c.TemplateStr) } if printer, err = kubecfg.NewTemplatePrinter(data); err != nil { glog.Fatalf("Failed to create printer %v", err) } default: printer = humanReadablePrinter() } if err = printer.PrintObj(obj, os.Stdout); err != nil { body, _ := result.Raw() glog.Fatalf("Failed to print: %v\nRaw received object:\n%#v\n\nBody received: %v", err, obj, string(body)) } fmt.Print("\n") return true } func (c *KubeConfig) executeControllerRequest(method string, client *kclient.Client) bool { parseController := func() string { if len(c.Args) != 2 { glog.Fatal("usage: kubecfg [OPTIONS] stop|rm|rollingupdate|run|resize <controller>") } return c.Arg(1) } ctx := api.WithNamespace(api.NewContext(), c.getNamespace()) var err error switch method { case "stop": err = kubecfg.StopController(ctx, parseController(), client) case "rm": err = kubecfg.DeleteController(ctx, parseController(), client) case "rollingupdate": err = kubecfg.Update(ctx, parseController(), client, c.UpdatePeriod, c.ImageName) case "run": if len(c.Args) != 4 { glog.Fatal("usage: kubecfg [OPTIONS] run <image> <replicas> <controller>") } image := c.Arg(1) replicas, err := strconv.Atoi(c.Arg(2)) name := c.Arg(3) if err != nil { glog.Fatalf("Error parsing replicas: %v", err) } err = kubecfg.RunController(ctx, image, name, replicas, client, c.PortSpec, c.ServicePort) case "resize": args := c.Args if len(args) < 3 { glog.Fatal("usage: kubecfg resize <controller> <replicas>") } name := args[1] replicas, err := strconv.Atoi(args[2]) if err != nil { glog.Fatalf("Error parsing replicas: %v", err) } err = kubecfg.ResizeController(ctx, name, replicas, client) default: return false } if err != nil { glog.Fatalf("Error: %v", err) } return true } // executeBuildRequest will re-ran specified build or create a new one from specified buildConfig. // To re-run the build specify the buildID, from which the build parameters will be extracted. // E.g: openshift kube create builds --id="buildID" // To create a build from a buildConfig specify it's ID. 
// E.g: openshift kube create builds --from-build-cfg="buildConfigID" func (c *KubeConfig) executeBuildRequest(method string, client *osclient.Client) bool { if method != "create" || c.Arg(1) != "builds" || (len(c.ID) == 0 && len(c.BuildConfigID) == 0) { return false } build := &buildapi.Build{} if len(c.ID) != 0 { oldBuild := &buildapi.Build{} request := client.Get().Namespace(c.getNamespace()).Path("/builds").Path(c.ID) err := request.Do().Into(oldBuild) if err != nil { glog.Fatalf("failed to trigger build manually: %v", err) } build = buildutil.GenerateBuildFromBuild(oldBuild) } else { buildConfig := &buildapi.BuildConfig{} request := client.Get().Namespace(c.getNamespace()).Path("/buildConfigs").Path(c.BuildConfigID) err := request.Do().Into(buildConfig) if err != nil { glog.Fatalf("failed to trigger build manually: %v", err) } build = buildutil.GenerateBuildFromConfig(buildConfig, buildConfig.Parameters.Revision) } request := client.Post().Namespace(c.getNamespace()).Path("/builds").Body(build) if err := request.Do().Error(); err != nil { glog.Fatalf("failed to trigger build manually: %v", err) } return true } // executeBuildLogRequest retrieves the logs from builder container func (c *KubeConfig) executeBuildLogRequest(method string, client *osclient.Client) bool { if method != "buildLogs" { return false } if len(c.ID) == 0 { glog.Fatal("Build ID required") } request := client.Verb("GET").Namespace(c.getNamespace()).Path("redirect").Path("buildLogs").Path(c.ID) readCloser, err := request.Stream() if err != nil { glog.Fatalf("Error: %v", err) } defer readCloser.Close() if _, err := io.Copy(os.Stdout, readCloser); err != nil { glog.Fatalf("Error: %v", err) } return true } // executeTemplateRequest transform the JSON file with Config template into a // valid Config JSON. // // TODO: Print the output for each resource on success, as "create" method // does in the executeAPIRequest(). 
func (c *KubeConfig) executeTemplateRequest(method string, client *osclient.Client) bool { if method != "process" { return false } if len(c.Config) == 0 { glog.Fatal("Need template file (-c)") } data, err := ioutil.ReadFile(c.Config) if err != nil { glog.Fatalf("error reading template file: %v", err) } request := client.Verb("POST").Namespace(c.getNamespace()).Path("/templateConfigs").Body(data) result := request.Do() body, err := result.Raw() if err != nil { glog.Fatalf("failed to process template: %v", err) } printer := JSONPrinter{} if err := printer.Print(body, os.Stdout); err != nil { glog.Fatalf("unable to pretty print config JSON: %v [%s]", err, string(body)) } return true } func (c *KubeConfig) executeConfigRequest(method string, clients ClientMappings) bool { if method != "apply" { return false } if len(c.Config) == 0 { glog.Fatal("Need to pass valid configuration file (-c config.json)") } clientFunc := func(m *kmeta.RESTMapping) (*kubectl.RESTHelper, error) { mapping, ok := clients[m.Resource] if !ok { return nil, fmt.Errorf("Unable to provide REST client for %v", m.Resource) } return kubectl.NewRESTHelper(mapping.Client, m), nil } result, err := config.Apply(c.getNamespace(), c.readConfigData(), clientFunc) if err != nil { glog.Fatalf("Error applying the config: %v", err) } for _, itemResult := range result { if len(itemResult.Errors) == 0 { glog.Infof(itemResult.Message) continue } for _, itemError := range itemResult.Errors { glog.Errorf("%v", itemError) } } return true } func humanReadablePrinter() *kubecfg.HumanReadablePrinter { printer := kubecfg.NewHumanReadablePrinter() // Add Handler calls here to support additional types build.RegisterPrintHandlers(printer) image.RegisterPrintHandlers(printer) deployclient.RegisterPrintHandlers(printer) route.RegisterPrintHandlers(printer) project.RegisterPrintHandlers(printer) return printer } func (c *KubeConfig) executeNamespaceRequest(method string) bool { var err error var ns *kubecfg.NamespaceInfo switch method { case "ns": switch len(c.Args) { case 1: ns, err = kubecfg.LoadNamespaceInfo(c.nsFile) case 2: ns = &kubecfg.NamespaceInfo{Namespace: c.Args[1]} err = kubecfg.SaveNamespaceInfo(c.nsFile, ns) default: glog.Fatalf("usage: kubecfg ns [<namespace>]") } default: return false } if err != nil { glog.Fatalf("Error: %v", err) } fmt.Printf("Using namespace %s\n", ns.Namespace) return true }
[ "\"KUBERNETES_MASTER\"" ]
[]
[ "KUBERNETES_MASTER" ]
[]
["KUBERNETES_MASTER"]
go
1
0
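kubecfg's readConfigData in the record above resolves the -c argument in a fixed order: "-" reads from STDIN, an http:// or https:// URL is fetched over the network (and a non-200 response is fatal), and anything else is treated as a local file. The sketch below shows that same precedence in Python (Python rather than Go, to keep the added examples in one language); the function name and error text are illustrative, not part of the OpenShift client.

import sys
import urllib.request
from urllib.parse import urlparse

def read_config_data(config):
    # "-" means read the config from standard input.
    if config == "-":
        return sys.stdin.buffer.read()
    # http:// or https:// means fetch the config over the network.
    if urlparse(config).scheme in ("http", "https"):
        with urllib.request.urlopen(config) as resp:
            if resp.status != 200:
                raise RuntimeError("Unable to read URL, server reported %d" % resp.status)
            return resp.read()
    # Anything else is treated as a local file path.
    with open(config, "rb") as handle:
        return handle.read()

The same ordering matters in the Go code: the URL check is attempted before file I/O precisely so that "apply -c https://..." never falls through to a misleading "file not found" error.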
integration/integration_suite_test.go
package integration_test import ( "encoding/json" "io/ioutil" "os" "github.com/steve-sienk/gcs-resource" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/onsi/gomega/gexec" "testing" ) func TestIntegration(t *testing.T) { RegisterFailHandler(Fail) } var jsonKey = os.Getenv("GCS_RESOURCE_JSON_KEY") var bucketName = os.Getenv("GCS_RESOURCE_BUCKET_NAME") var versionedBucketName = os.Getenv("GCS_RESOURCE_VERSIONED_BUCKET_NAME") var gcsClient gcsresource.GCSClient var checkPath string var inPath string var outPath string type suiteData struct { CheckPath string InPath string OutPath string } var _ = SynchronizedBeforeSuite(func() []byte { cp, err := gexec.Build("github.com/steve-sienk/gcs-resource/cmd/check") Expect(err).ToNot(HaveOccurred()) ip, err := gexec.Build("github.com/steve-sienk/gcs-resource/cmd/in") Expect(err).ToNot(HaveOccurred()) op, err := gexec.Build("github.com/steve-sienk/gcs-resource/cmd/out") Expect(err).ToNot(HaveOccurred()) data, err := json.Marshal(suiteData{ CheckPath: cp, InPath: ip, OutPath: op, }) Expect(err).ToNot(HaveOccurred()) return data }, func(data []byte) { var sd suiteData err := json.Unmarshal(data, &sd) Expect(err).ToNot(HaveOccurred()) checkPath = sd.CheckPath inPath = sd.InPath outPath = sd.OutPath Expect(bucketName).ToNot(BeEmpty(), "must specify $GCS_RESOURCE_BUCKET_NAME") Expect(versionedBucketName).ToNot(BeEmpty(), "must specify $GCS_RESOURCE_VERSIONED_BUCKET_NAME") gcsClient, err = gcsresource.NewGCSClient( ioutil.Discard, jsonKey, ) Expect(err).ToNot(HaveOccurred()) }) var _ = SynchronizedAfterSuite(func() {}, func() { gexec.CleanupBuildArtifacts() }) func TestIn(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Integration Suite") }
[ "\"GCS_RESOURCE_JSON_KEY\"", "\"GCS_RESOURCE_BUCKET_NAME\"", "\"GCS_RESOURCE_VERSIONED_BUCKET_NAME\"" ]
[]
[ "GCS_RESOURCE_BUCKET_NAME", "GCS_RESOURCE_VERSIONED_BUCKET_NAME", "GCS_RESOURCE_JSON_KEY" ]
[]
["GCS_RESOURCE_BUCKET_NAME", "GCS_RESOURCE_VERSIONED_BUCKET_NAME", "GCS_RESOURCE_JSON_KEY"]
go
3
0
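The Ginkgo suite in the record above builds the check/in/out binaries once in SynchronizedBeforeSuite, shares their paths between parallel nodes as JSON, and refuses to run unless GCS_RESOURCE_BUCKET_NAME and GCS_RESOURCE_VERSIONED_BUCKET_NAME are set (the JSON key is read but only the two bucket names are asserted non-empty). A Python analogue of that fail-fast environment check is sketched below; the helper name and return shape are made up for illustration, while the variable names match the suite.

import os

REQUIRED = ("GCS_RESOURCE_BUCKET_NAME", "GCS_RESOURCE_VERSIONED_BUCKET_NAME")

def load_suite_config():
    # Fail fast with a clear message when a required bucket name is missing.
    missing = [name for name in REQUIRED if not os.getenv(name)]
    if missing:
        raise RuntimeError("must specify " + " and ".join("$" + name for name in missing))
    return {
        "json_key": os.getenv("GCS_RESOURCE_JSON_KEY", ""),  # optional here: empty falls back to default credentials
        "bucket": os.environ["GCS_RESOURCE_BUCKET_NAME"],
        "versioned_bucket": os.environ["GCS_RESOURCE_VERSIONED_BUCKET_NAME"],
    }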
tests/system_tests_sasl_plain.py
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # from __future__ import unicode_literals from __future__ import division from __future__ import absolute_import from __future__ import print_function from time import sleep import os, json from subprocess import PIPE, STDOUT, Popen from system_test import TestCase, Qdrouterd, main_module, DIR, TIMEOUT, SkipIfNeeded, Process from system_test import unittest, QdManager from qpid_dispatch.management.client import Node from proton import SASL class RouterTestPlainSaslCommon(TestCase): @classmethod def router(cls, name, connection): config = Qdrouterd.Config(connection) cls.routers.append(cls.tester.qdrouterd(name, config, wait=False)) @classmethod def createSaslFiles(cls): # Create a sasl database. p = Popen(['saslpasswd2', '-c', '-p', '-f', 'qdrouterd.sasldb', '-u', 'domain.com', 'test'], stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True) result = p.communicate('password') assert p.returncode == 0, \ "saslpasswd2 exit status %s, output:\n%s" % (p.returncode, result) # Create a SASL configuration file. with open('tests-mech-PLAIN.conf', 'w') as sasl_conf: sasl_conf.write(""" pwcheck_method: auxprop auxprop_plugin: sasldb sasldb_path: qdrouterd.sasldb mech_list: ANONYMOUS DIGEST-MD5 EXTERNAL PLAIN # The following line stops spurious 'sql_select option missing' errors when cyrus-sql-sasl plugin is installed sql_select: dummy select """) class RouterTestPlainSaslFailure(RouterTestPlainSaslCommon): @staticmethod def sasl_file(name): return os.path.join(DIR, 'sasl_files', name) @classmethod def setUpClass(cls): """ Tests the sasl_username, sasl_password property of the dispatch router. Creates two routers (QDR.X and QDR.Y) and sets up PLAIN authentication on QDR.X. QDR.Y connects to QDR.X by providing a sasl_username and a bad sasl_password as a non-existent file. """ super(RouterTestPlainSaslFailure, cls).setUpClass() if not SASL.extended(): return super(RouterTestPlainSaslFailure, cls).createSaslFiles() cls.routers = [] x_listener_port = cls.tester.get_port() y_listener_port = cls.tester.get_port() super(RouterTestPlainSaslFailure, cls).router('X', [ ('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port, 'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}), # This unauthenticated listener is for qdstat to connect to it. 
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(), 'authenticatePeer': 'no'}), ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(), 'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}), ('router', {'workerThreads': 1, 'id': 'QDR.X', 'mode': 'interior', 'saslConfigName': 'tests-mech-PLAIN', # Leave as saslConfigPath for testing backward compatibility 'saslConfigPath': os.getcwd()}), ]) super(RouterTestPlainSaslFailure, cls).router('Y', [ ('connector', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port, # Provide a sasl user name and password to connect to QDR.X 'saslMechanisms': 'PLAIN', 'saslUsername': '[email protected]', # Provide a non-existen file. 'saslPassword': 'file:' + cls.sasl_file('non-existent-password-file.txt')}), ('router', {'workerThreads': 1, 'mode': 'interior', 'id': 'QDR.Y'}), ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': y_listener_port}), ]) cls.routers[0].wait_ports() cls.routers[1].wait_ports() try: # This will time out in 5 seconds because there is no inter-router connection cls.routers[1].wait_connectors(timeout=5) except: pass # Give some time for connector failures to be written to the log. sleep(3) @SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test") def test_inter_router_sasl_fail(self): passed = False long_type = 'org.apache.qpid.dispatch.connection' qd_manager = QdManager(self, address=self.routers[1].addresses[0]) connections = qd_manager.query(long_type) for connection in connections: if connection['role'] == 'inter-router': passed = True break # There was no inter-router connection established. self.assertFalse(passed) qd_manager = QdManager(self, address=self.routers[1].addresses[0]) logs = qd_manager.get_log() sasl_failed = False file_open_failed = False for log in logs: if log[0] == 'SERVER' and log[1] == "info" and "amqp:unauthorized-access Authentication failed [mech=PLAIN]" in log[2]: sasl_failed = True if log[0] == "CONN_MGR" and log[1] == "error" and "Unable to open password file" in log[2] and "error: No such file or directory" in log[2]: file_open_failed = True self.assertTrue(sasl_failed) self.assertTrue(file_open_failed) class RouterTestPlainSaslFailureUsingLiteral(RouterTestPlainSaslCommon): @staticmethod def sasl_file(name): return os.path.join(DIR, 'sasl_files', name) @classmethod def setUpClass(cls): """ Tests the sasl_username, sasl_password property of the dispatch router. Creates two routers (QDR.X and QDR.Y) and sets up PLAIN authentication on QDR.X. QDR.Y connects to QDR.X by providing a sasl_username and a bad sasl_password using the literal: prefix. """ super(RouterTestPlainSaslFailureUsingLiteral, cls).setUpClass() if not SASL.extended(): return super(RouterTestPlainSaslFailureUsingLiteral, cls).createSaslFiles() cls.routers = [] x_listener_port = cls.tester.get_port() y_listener_port = cls.tester.get_port() super(RouterTestPlainSaslFailureUsingLiteral, cls).router('X', [ ('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port, 'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}), # This unauthenticated listener is for qdstat to connect to it. 
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(), 'authenticatePeer': 'no'}), ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(), 'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}), ('router', {'workerThreads': 1, 'id': 'QDR.X', 'mode': 'interior', 'saslConfigName': 'tests-mech-PLAIN', # Leave as saslConfigPath for testing backward compatibility 'saslConfigPath': os.getcwd()}), ]) super(RouterTestPlainSaslFailureUsingLiteral, cls).router('Y', [ ('connector', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port, # Provide a sasl user name and password to connect to QDR.X 'saslMechanisms': 'PLAIN', 'saslUsername': '[email protected]', # Provide the password with a prefix of literal. This should fail.. 'saslPassword': 'literal:password'}), ('router', {'workerThreads': 1, 'mode': 'interior', 'id': 'QDR.Y'}), ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': y_listener_port}), ]) cls.routers[0].wait_ports() cls.routers[1].wait_ports() try: # This will time out in 5 seconds because there is no inter-router connection cls.routers[1].wait_connectors(timeout=5) except: pass # Give some time for connector failures to be written to the log. sleep(3) @SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test") def test_inter_router_sasl_fail(self): passed = False long_type = 'org.apache.qpid.dispatch.connection' qd_manager = QdManager(self, address=self.routers[1].addresses[0]) connections = qd_manager.query(long_type) for connection in connections: if connection['role'] == 'inter-router': passed = True break # There was no inter-router connection established. self.assertFalse(passed) logs = qd_manager.get_log() sasl_failed = False for log in logs: if log[0] == 'SERVER' and log[1] == "info" and "amqp:unauthorized-access Authentication failed [mech=PLAIN]" in log[2]: sasl_failed = True self.assertTrue(sasl_failed) class RouterTestPlainSasl(RouterTestPlainSaslCommon): @classmethod def setUpClass(cls): """ Tests the sasl_username, sasl_password property of the dispatch router. Creates two routers (QDR.X and QDR.Y) and sets up PLAIN authentication on QDR.X. QDR.Y connects to QDR.X by providing a sasl_username and a sasl_password. """ super(RouterTestPlainSasl, cls).setUpClass() if not SASL.extended(): return os.environ["ENV_SASL_PASSWORD"] = "password" super(RouterTestPlainSasl, cls).createSaslFiles() cls.routers = [] x_listener_port = cls.tester.get_port() y_listener_port = cls.tester.get_port() super(RouterTestPlainSasl, cls).router('X', [ ('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port, 'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}), # This unauthenticated listener is for qdstat to connect to it. 
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(), 'authenticatePeer': 'no'}), ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(), 'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}), ('router', {'workerThreads': 1, 'id': 'QDR.X', 'mode': 'interior', 'saslConfigName': 'tests-mech-PLAIN', # Leave as saslConfigPath for testing backward compatibility 'saslConfigPath': os.getcwd()}), ]) super(RouterTestPlainSasl, cls).router('Y', [ ('connector', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port, # Provide a sasl user name and password to connect to QDR.X 'saslMechanisms': 'PLAIN', 'saslUsername': '[email protected]', 'saslPassword': 'env:ENV_SASL_PASSWORD'}), ('router', {'workerThreads': 1, 'mode': 'interior', 'id': 'QDR.Y'}), ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': y_listener_port}), ]) cls.routers[1].wait_router_connected('QDR.X') @SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test") def test_inter_router_plain_exists(self): """ Check authentication of inter-router link is PLAIN. This test makes executes a qdstat -c via an unauthenticated listener to QDR.X and makes sure that the output has an "inter-router" connection to QDR.Y whose authentication is PLAIN. This ensures that QDR.Y did not somehow use SASL ANONYMOUS to connect to QDR.X """ p = self.popen( ['qdstat', '-b', str(self.routers[0].addresses[1]), '-c'], name='qdstat-'+self.id(), stdout=PIPE, expect=None, universal_newlines=True) out = p.communicate()[0] assert p.returncode == 0, \ "qdstat exit status %s, output:\n%s" % (p.returncode, out) self.assertIn("inter-router", out) self.assertIn("[email protected](PLAIN)", out) @SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test") def test_qdstat_connect_sasl(self): """ Make qdstat use sasl plain authentication. """ p = self.popen( ['qdstat', '-b', str(self.routers[0].addresses[2]), '-c', '--sasl-mechanisms=PLAIN', '[email protected]', '--sasl-password=password'], name='qdstat-'+self.id(), stdout=PIPE, expect=None, universal_newlines=True) out = p.communicate()[0] assert p.returncode == 0, \ "qdstat exit status %s, output:\n%s" % (p.returncode, out) split_list = out.split() # There will be 2 connections that have authenticated using SASL PLAIN. One inter-router connection # and the other connection that this qdstat client is making self.assertEqual(2, split_list.count("[email protected](PLAIN)")) self.assertEqual(1, split_list.count("inter-router")) self.assertEqual(1, split_list.count("normal")) @SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test") def test_qdstat_connect_sasl_password_file(self): """ Make qdstat use sasl plain authentication with client password specified in a file. """ password_file = os.getcwd() + '/sasl-client-password-file.txt' # Create a SASL configuration file. with open(password_file, 'w') as sasl_client_password_file: sasl_client_password_file.write("password") sasl_client_password_file.close() p = self.popen( ['qdstat', '-b', str(self.routers[0].addresses[2]), '-c', '--sasl-mechanisms=PLAIN', '[email protected]', '--sasl-password-file=' + password_file], name='qdstat-'+self.id(), stdout=PIPE, expect=None, universal_newlines=True) out = p.communicate()[0] assert p.returncode == 0, \ "qdstat exit status %s, output:\n%s" % (p.returncode, out) split_list = out.split() # There will be 2 connections that have authenticated using SASL PLAIN. 
One inter-router connection # and the other connection that this qdstat client is making self.assertEqual(2, split_list.count("[email protected](PLAIN)")) self.assertEqual(1, split_list.count("inter-router")) self.assertEqual(1, split_list.count("normal")) class RouterTestPlainSaslOverSsl(RouterTestPlainSaslCommon): @staticmethod def ssl_file(name): return os.path.join(DIR, 'ssl_certs', name) @staticmethod def sasl_file(name): return os.path.join(DIR, 'sasl_files', name) @classmethod def setUpClass(cls): """ Tests the sasl_username, sasl_password property of the dispatch router. Creates two routers (QDR.X and QDR.Y) and sets up PLAIN authentication on QDR.X. QDR.Y connects to QDR.X by providing a sasl_username and a sasl_password. This PLAIN authentication is done over a TLS connection. """ super(RouterTestPlainSaslOverSsl, cls).setUpClass() if not SASL.extended(): return super(RouterTestPlainSaslOverSsl, cls).createSaslFiles() cls.routers = [] x_listener_port = cls.tester.get_port() y_listener_port = cls.tester.get_port() super(RouterTestPlainSaslOverSsl, cls).router('X', [ ('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port, 'sslProfile':'server-ssl-profile', 'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}), ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(), 'authenticatePeer': 'no'}), ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(), 'sslProfile':'server-ssl-profile', 'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}), ('sslProfile', {'name': 'server-ssl-profile', 'caCertFile': cls.ssl_file('ca-certificate.pem'), 'certFile': cls.ssl_file('server-certificate.pem'), 'privateKeyFile': cls.ssl_file('server-private-key.pem'), 'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS', 'protocols': 'TLSv1.1 TLSv1.2', 'password': 'server-password'}), ('router', {'workerThreads': 1, 'id': 'QDR.X', 'mode': 'interior', 'saslConfigName': 'tests-mech-PLAIN', 'saslConfigDir': os.getcwd()}), ]) super(RouterTestPlainSaslOverSsl, cls).router('Y', [ # This router will act like a client. First an SSL connection will be established and then # we will have SASL plain authentication over SSL. ('connector', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port, 'sslProfile': 'client-ssl-profile', 'verifyHostname': 'no', # Provide a sasl user name and password to connect to QDR.X 'saslMechanisms': 'PLAIN', 'saslUsername': '[email protected]', 'saslPassword': 'file:' + cls.sasl_file('password.txt')}), ('router', {'workerThreads': 1, 'mode': 'interior', 'id': 'QDR.Y'}), ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': y_listener_port}), ('sslProfile', {'name': 'client-ssl-profile', 'caCertFile': cls.ssl_file('ca-certificate.pem'), 'certFile': cls.ssl_file('client-certificate.pem'), 'privateKeyFile': cls.ssl_file('client-private-key.pem'), 'password': 'client-password'}), ]) cls.routers[1].wait_router_connected('QDR.X') @SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test") def test_aaa_qdstat_connect_sasl_over_ssl(self): """ Make qdstat use sasl plain authentication over ssl. 
""" p = self.popen( ['qdstat', '-b', str(self.routers[0].addresses[2]), '-c', # The following are SASL args '--sasl-mechanisms=PLAIN', '[email protected]', '--sasl-password=password', # The following are SSL args '--ssl-disable-peer-name-verify', '--ssl-trustfile=' + self.ssl_file('ca-certificate.pem'), '--ssl-certificate=' + self.ssl_file('client-certificate.pem'), '--ssl-key=' + self.ssl_file('client-private-key.pem'), '--ssl-password=client-password'], name='qdstat-'+self.id(), stdout=PIPE, expect=None, universal_newlines=True) out = p.communicate()[0] assert p.returncode == 0, \ "qdstat exit status %s, output:\n%s" % (p.returncode, out) split_list = out.split() # There will be 2 connections that have authenticated using SASL PLAIN. One inter-router connection # and the other connection that this qdstat client is making self.assertEqual(2, split_list.count("[email protected](PLAIN)")) self.assertEqual(1, split_list.count("inter-router")) self.assertEqual(1, split_list.count("normal")) @SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test") def test_inter_router_plain_over_ssl_exists(self): """The setUpClass sets up two routers with SASL PLAIN enabled over TLS. This test makes executes a query for type='org.apache.qpid.dispatch.connection' over an unauthenticated listener to QDR.X and makes sure that the output has an "inter-router" connection to QDR.Y whose authentication is PLAIN. This ensures that QDR.Y did not somehow use SASL ANONYMOUS to connect to QDR.X Also makes sure that TLSv1.x was used as sslProto """ local_node = Node.connect(self.routers[0].addresses[1], timeout=TIMEOUT) results = local_node.query(type='org.apache.qpid.dispatch.connection').results # sslProto should be TLSv1.x self.assertTrue(u'TLSv1' in results[0][10]) # role should be inter-router self.assertEqual(u'inter-router', results[0][3]) # sasl must be plain self.assertEqual(u'PLAIN', results[0][6]) # user must be [email protected] self.assertEqual(u'[email protected]', results[0][8]) class RouterTestVerifyHostNameYes(RouterTestPlainSaslCommon): @staticmethod def ssl_file(name): return os.path.join(DIR, 'ssl_certs', name) @staticmethod def sasl_file(name): return os.path.join(DIR, 'sasl_files', name) @classmethod def setUpClass(cls): """ Tests the verifyHostname property of the connector. The hostname on the server certificate we use is A1.Good.Server.domain.com and the host is 0.0.0.0 on the client router initiating the SSL connection. Since the host names do not match and the verifyHostname is set to true, the client router will NOT be able make a successful SSL connection the server router. """ super(RouterTestVerifyHostNameYes, cls).setUpClass() if not SASL.extended(): return super(RouterTestVerifyHostNameYes, cls).createSaslFiles() cls.routers = [] x_listener_port = cls.tester.get_port() y_listener_port = cls.tester.get_port() super(RouterTestVerifyHostNameYes, cls).router('X', [ ('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port, 'sslProfile':'server-ssl-profile', 'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}), # This unauthenticated listener is for qdstat to connect to it. 
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(), 'authenticatePeer': 'no'}), ('sslProfile', {'name': 'server-ssl-profile', 'caCertFile': cls.ssl_file('ca-certificate.pem'), 'certFile': cls.ssl_file('server-certificate.pem'), 'privateKeyFile': cls.ssl_file('server-private-key.pem'), 'password': 'server-password'}), ('router', {'workerThreads': 1, 'id': 'QDR.X', 'mode': 'interior', 'saslConfigName': 'tests-mech-PLAIN', 'saslConfigDir': os.getcwd()}), ]) super(RouterTestVerifyHostNameYes, cls).router('Y', [ ('connector', {'host': '127.0.0.1', 'role': 'inter-router', 'port': x_listener_port, 'sslProfile': 'client-ssl-profile', # verifyHostName has been deprecated. We are using it here to test # backward compatibility. 'verifyHostName': 'yes', 'saslMechanisms': 'PLAIN', 'saslUsername': '[email protected]', 'saslPassword': 'file:' + cls.sasl_file('password.txt')}), ('router', {'workerThreads': 1, 'mode': 'interior', 'id': 'QDR.Y'}), ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': y_listener_port}), ('sslProfile', {'name': 'client-ssl-profile', 'caCertFile': cls.ssl_file('ca-certificate.pem'), 'certFile': cls.ssl_file('client-certificate.pem'), 'privateKeyFile': cls.ssl_file('client-private-key.pem'), 'password': 'client-password'}), ]) cls.routers[0].wait_ports() cls.routers[1].wait_ports() try: # This will time out because there is no inter-router connection cls.routers[1].wait_connectors(timeout=3) except: pass @SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test") def test_no_inter_router_connection(self): """ Tests to make sure that there are no 'inter-router' connections. The connection to the other router will not happen because the connection failed due to setting 'verifyHostname': 'yes' """ local_node = Node.connect(self.routers[1].addresses[0], timeout=TIMEOUT) results = local_node.query(type='org.apache.qpid.dispatch.connection').results # There should be only two connections. # There will be no inter-router connection self.assertEqual(2, len(results)) self.assertEqual('in', results[0][4]) self.assertEqual('normal', results[0][3]) self.assertEqual('anonymous', results[0][8]) self.assertEqual('normal', results[1][3]) self.assertEqual('anonymous', results[1][8]) class RouterTestVerifyHostNameNo(RouterTestPlainSaslCommon): @staticmethod def ssl_file(name): return os.path.join(DIR, 'ssl_certs', name) x_listener_port = None @classmethod def setUpClass(cls): """ Tests the verifyHostname property of the connector. The hostname on the server certificate we use is A1.Good.Server.domain.com and the host is 0.0.0.0 on the client router initiating the SSL connection. Since the host names do not match but verifyHostname is set to false, the client router will be successfully able to make an SSL connection the server router. """ super(RouterTestVerifyHostNameNo, cls).setUpClass() if not SASL.extended(): return super(RouterTestVerifyHostNameNo, cls).createSaslFiles() cls.routers = [] x_listener_port = cls.tester.get_port() RouterTestVerifyHostNameNo.x_listener_port = x_listener_port y_listener_port = cls.tester.get_port() super(RouterTestVerifyHostNameNo, cls).router('X', [ ('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port, 'sslProfile':'server-ssl-profile', 'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}), # This unauthenticated listener is for qdstat to connect to it. 
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(), 'authenticatePeer': 'no'}), ('sslProfile', {'name': 'server-ssl-profile', # certDb has been deprecated. We are using it here to test backward compatibility. 'certDb': cls.ssl_file('ca-certificate.pem'), 'certFile': cls.ssl_file('server-certificate.pem'), # keyFile has been deprecated. We are using it here to test backward compatibility. 'keyFile': cls.ssl_file('server-private-key.pem'), 'password': 'server-password'}), ('router', {'workerThreads': 1, 'id': 'QDR.X', 'mode': 'interior', 'saslConfigName': 'tests-mech-PLAIN', 'saslConfigDir': os.getcwd()}), ]) super(RouterTestVerifyHostNameNo, cls).router('Y', [ # This router will act like a client. First an SSL connection will be established and then # we will have SASL plain authentication over SSL. ('connector', {'name': 'connectorToX', 'host': '127.0.0.1', 'role': 'inter-router', 'port': x_listener_port, 'sslProfile': 'client-ssl-profile', # Provide a sasl user name and password to connect to QDR.X 'saslMechanisms': 'PLAIN', 'verifyHostname': 'no', 'saslUsername': '[email protected]', 'saslPassword': 'pass:password'}), ('router', {'workerThreads': 1, 'mode': 'interior', 'id': 'QDR.Y'}), ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': y_listener_port}), ('sslProfile', {'name': 'client-ssl-profile', 'caCertFile': cls.ssl_file('ca-certificate.pem'), 'certFile': cls.ssl_file('client-certificate.pem'), 'privateKeyFile': cls.ssl_file('client-private-key.pem'), 'password': 'client-password'}), ]) cls.routers[0].wait_ports() cls.routers[1].wait_ports() cls.routers[1].wait_router_connected('QDR.X') @staticmethod def ssl_file(name): return os.path.join(DIR, 'ssl_certs', name) def common_asserts(self, results): search = "QDR.X" found = False for N in range(0, len(results)): if results[N][5] == search: found = True break self.assertTrue(found, "Connection to %s not found" % search) # sslProto should be TLSv1.x self.assertTrue(u'TLSv1' in results[N][10]) # role should be inter-router self.assertEqual(u'inter-router', results[N][3]) # sasl must be plain self.assertEqual(u'PLAIN', results[N][6]) # user must be [email protected] self.assertEqual(u'[email protected]', results[N][8]) @SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test") def test_inter_router_plain_over_ssl_exists(self): """ Tests to make sure that an inter-router connection exists between the routers since verifyHostname is 'no'. """ local_node = Node.connect(self.routers[1].addresses[0], timeout=TIMEOUT) results = local_node.query(type='org.apache.qpid.dispatch.connection').results self.common_asserts(results) @SkipIfNeeded(not SASL.extended(), "Cyrus library not available. 
skipping test") def test_zzz_delete_create_ssl_profile(self): """ Deletes a connector and its corresponding ssl profile and recreates both """ local_node = self.routers[1].management connections = local_node.query(type='org.apache.qpid.dispatch.connection').get_entities() self.assertIn("QDR.X", [c.container for c in connections]) # We can find the connection before local_node.delete(type='connector', name='connectorToX') local_node.delete(type='sslProfile', name='client-ssl-profile') connections = local_node.query(type='org.apache.qpid.dispatch.connection').get_entities() is_qdr_x = "QDR.X" in [c.container for c in connections] self.assertFalse(is_qdr_x) # Should not be present now # re-create the ssl profile local_node.create({'type': 'sslProfile', 'name': 'client-ssl-profile', 'certFile': self.ssl_file('client-certificate.pem'), 'privateKeyFile': self.ssl_file('client-private-key.pem'), 'password': 'client-password', 'caCertFile': self.ssl_file('ca-certificate.pem')}) # re-create connector local_node.create({'type': 'connector', 'name': 'connectorToX', 'host': '127.0.0.1', 'port': self.x_listener_port, 'saslMechanisms': 'PLAIN', 'sslProfile': 'client-ssl-profile', 'role': 'inter-router', 'verifyHostname': False, 'saslUsername': '[email protected]', 'saslPassword': 'password'}) self.routers[1].wait_connectors() results = local_node.query(type='org.apache.qpid.dispatch.connection').results self.common_asserts(results) if __name__ == '__main__': unittest.main(main_module())
[]
[]
[ "ENV_SASL_PASSWORD" ]
[]
["ENV_SASL_PASSWORD"]
python
1
0
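The router tests in the record above drive the connector's saslPassword attribute through several password sources: pass: for an inline value, env: for an environment variable (ENV_SASL_PASSWORD above), file: for a password file, and a literal: spelling that the failure test expects not to be recognized, so the whole string is sent as the password and authentication fails. The resolver below is only an illustrative Python sketch of that prefix handling; qdrouterd performs this parsing itself, and the function name is ours.

import os

def resolve_sasl_password(value):
    # pass: carries the password inline.
    if value.startswith("pass:"):
        return value[len("pass:"):]
    # env: names an environment variable holding the password.
    if value.startswith("env:"):
        return os.environ[value[len("env:"):]]
    # file: points at a file whose contents are the password.
    if value.startswith("file:"):
        with open(value[len("file:"):]) as handle:
            return handle.read().strip()
    # Any other spelling (including literal:...) is used verbatim, which is
    # why the literal:password case in the tests fails to authenticate.
    return value

The file: case also explains the first failure test: pointing at a non-existent password file produces the "Unable to open password file" CONN_MGR error the test greps for in the router log.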
apps/iiif/manifests/export.py
"""Github export module""" import io import json import logging import os import re import shutil import subprocess import tempfile import zipfile from datetime import datetime from urllib.parse import urlparse # pylint: disable = unused-import, ungrouped-imports try: from yaml import CLoader as Loader, CDumper as Dumper except ImportError: from yaml import Loader, Dumper # pylint: enable = unused-import, ungrouped-imports # TODO: Can we be more efficient in how we import git? import git from git.cmd import Git from yaml import load, safe_dump from django.core.mail import send_mail from django.core.serializers import serialize from django.template.loader import get_template from apps.users.models import User import digitaledition_jekylltheme import config.settings.local as settings from .github import GithubApi, GithubAccountNotFound __version__ = "2.0.0" LOGGER = logging.getLogger(__name__) # zip file of base jekyll site with digital edition templates JEKYLL_THEME_ZIP = digitaledition_jekylltheme.ZIPFILE_PATH class ExportException(Exception): """Custom exception""" pass class IiifManifestExport: """Manifest Export :return: Return bytes containing the entire contents of the buffer. :rtype: bytes """ @classmethod def get_zip(self, manifest, version, owners=[]): """Generate zipfile of manifest. :param manifest: Manifest to be exported. :type manifest: apps.iiif.manifests.models.Manifest :param version: IIIF API version to use. :type version: str :param owners: List of annotation owners, defaults to [] :type owners: list, optional :return: Return bytes containing the entire contents of the buffer. :rtype: bytes """ # zip_subdir = manifest.label # zip_filename = "iiif_export.zip" # Open BytesIO to grab in-memory ZIP contents byte_stream = io.BytesIO() # The zip compressor zip_file = zipfile.ZipFile(byte_stream, "w") # First write basic human-readable metadata # Annotated edition from {grab site identity/version of Readux} at {grab site URL} # volume title # volume author # volume date # volume publisher # number of pages # annotator username # time of export # pylint: disable = possibly-unused-variable title = manifest.label author = manifest.author date = manifest.published_date publisher = manifest.publisher page_count = manifest.canvas_set.count() now = datetime.utcnow() readux_url = settings.HOSTNAME annotators = User.objects.filter( userannotation__canvas__manifest__id=manifest.id ).distinct() annotators_string = ', '.join([i.fullname() for i in annotators]) # pylint: enable = possibly-unused-variable # pylint: disable = line-too-long # get the owner_id for each/all annotations # dedup the list of owners (although -- how to order? alphabetical or # sby contribution count or ignore order) .distinct() # turn the list of owners into a comma separated string of formal names instead of user ids readme = "Annotation export from Readux %(version)s at %(readux_url)s\nedition type: Readux IIIF Exported Edition\nexport date: %(now)s UTC\n\n" % locals() volume_data = "volume title: %(title)s\nvolume author: %(author)s\nvolume date: %(date)s\nvolume publisher: %(publisher)s\npages: %(page_count)s \n" % locals() annotators_attribution_string = "Annotated by: " + annotators_string +"\n\n" boilerplate = "Readux is a platform developed by Emory University’s Center for Digital Scholarship for browsing, annotating, and publishing with digitized books. 
This zip file includes an International Image Interoperability Framework (IIIF) manifest for the digitized book and an annotation list for each page that includes both the encoded text of the book and annotations created by the user who created this export. This bundle can be used to archive the recognized text and annotations for preservation and future access.\n\n" explanation = "Each canvas (\"sc:Canvas\") in the manifest represents a page of the work. Each canvas includes an \"otherContent\" field-set with information identifying that page's annotation lists. This field-set includes an \"@id\" field and the label field (\"@type\") \"sc:AnnotationList\" for each annotation list. The \"@id\" field contains the URL link at which the annotation list was created and originally hosted from the Readux site. In order to host this IIIF manifest and its annotation lists again to browse the book and annotations outside of Readux, these @id fields would need to be updated to the appropriate URLs for the annotation lists on the new host. Exported annotation lists replace nonword characters (where words are made up of alphanumerics and underscores) with underscores in the filename." readme = readme + volume_data + annotators_attribution_string + boilerplate + explanation zip_file.writestr('README.txt', readme) current_user = User.objects.get(id__in=owners) # pylint: enable = line-too-long # Next write the manifest zip_file.writestr( 'manifest.json', json.dumps( json.loads( serialize( 'manifest', [manifest], version=version, annotators=current_user.name, exportdate=now, current_user=current_user ) ), indent=4 ) ) # Then write the OCR annotations for canvas in manifest.canvas_set.all(): if canvas.annotation_set.count() > 0: json_hash = json.loads( serialize( 'annotation_list', [canvas], version=version, owners=owners ) ) anno_uri = json_hash['@id'] annotation_file = re.sub(r'\W','_', anno_uri) + ".json" zip_file.writestr( annotation_file, json.dumps( json_hash, indent=4 ) ) # Then write the user annotations for canvas in manifest.canvas_set.all(): user_annotations = current_user.userannotation_set.filter(canvas=canvas) if user_annotations.count() > 0: # annotations = canvas.userannotation_set.filter(owner__in=owners).all() json_hash = json.loads( serialize( 'user_annotation_list', [canvas], version=version, is_list=False, owners=[current_user] ) ) anno_uri = json_hash['@id'] annotation_file = re.sub(r'\W', '_', anno_uri) + ".json" zip_file.writestr( annotation_file, json.dumps( json_hash, indent=4 ) ) zip_file.close() # flush zipfile to byte stream return byte_stream.getvalue() class GithubExportException(Exception): """Custom exception.""" pass class JekyllSiteExport(object): """Export Jekyllsite :param object: [description] :type object: [type] :raises ExportException: [description] :raises GithubExportException: [description] :return: [description] :rtype: [type] """ def __init__(self, manifest, version, page_one=None, include_images=False, deep_zoom='hosted', github_repo=None, owners=None, user=None): """Init JekyllSiteExport :param manifest: Manifest to be exported :type manifest: apps.iiif.manifests.models.Manifest :param version: IIIF API version eg 'v2' :type version: str :param page_one: First page for export, defaults to None :type page_one: apps.iiif.canvases.models.Canvas, optional :param include_images: Wether or not to include image files in export, defaults to False :type include_images: bool, optional :param deep_zoom: Where to look for DeepZoom, defaults to 'hosted' :type deep_zoom: 
str, optional :param github_repo: Name of GitHub repo for export, defaults to None :type github_repo: str, optional :param owners: List of annotation owners, defaults to None :type owners: list, optional :param user: Person doing the export. defaults to None :type user: apps.users.models.User, optional """ # self.volume = volume self.manifest = manifest self.version = version # self.page_one = page_one # self.include_images = include_images #self.deep_zoom = deep_zoom self.include_deep_zoom = (deep_zoom == 'include') self.no_deep_zoom = (deep_zoom == 'exclude') # self.github_repo = github_repo # # initialize github connection values to None self.github = None self.github_username = None self.github_token = None self.jekyll_site_dir = None self.owners = owners self.user = user self.github_repo = github_repo # TODO: Why? self.is_testing = False def log_status(self, msg): """Shortcut function to log status of export. :param msg: Message to log. :type msg: str """ LOGGER.info(msg) # TODO: is this ever called? # Why not just call `log_status` directly? def notify_msg(self, msg): """Log the notification. :param msg: Notification message :type msg: str """ self.log_status(msg) # TODO: is this needed? # Why not just call `website_zip` directly? def get_zip(self): """Get the zip file of the export. :return: Exported site in zip file :rtype: bytes """ return self.website_zip() def get_zip_path(filename): """Convenience function to get the path for the export zip file.""" return os.path.join(tempfile.gettempdir(), filename) def get_zip_file(self, filename): """Generate zip file""" file = open(JekyllSiteExport.get_zip_path(filename),"rb") data = file.read() file.close() return data def iiif_dir(self): """Convenience function to produce the system path for export file. :return: System path for export file. :rtype: str """ return os.path.join(self.jekyll_site_dir, 'iiif_export') def import_iiif_jekyll(self, manifest, tmpdir): """Get a fresh import of IIIF as jekyll site content :param manifest: Manifest to be exported. :type manifest: apps.iiif.manifests.models.Manifest :param tmpdir: System path for tmp directory. :type tmpdir: str :raises ExportException: [description] """ # run the script to get a fresh import of IIIF as jekyll site content self.log_status('Running jekyll import IIIF manifest script') jekyllimport_manifest_script = settings.JEKYLLIMPORT_MANIFEST_SCRIPT import_command = [ jekyllimport_manifest_script, '--local-directory', '-q', self.iiif_dir(), tmpdir ] # TODO # # if a page number is specified, pass it as a parameter to the script # if self.page_one is not None: # import_command.extend(['--page-one', unicode(self.page_one)]) # # if no deep zoom is requested, pass through so the jekyll # # config can be updated appropriately if self.no_deep_zoom: import_command.append('--no-deep-zoom') try: LOGGER.debug('Jekyll import command: %s', ' '.join(import_command)) output = subprocess.check_output( ' '.join(import_command), shell=True, stderr=subprocess.STDOUT ) LOGGER.debug('Jekyll import output:') LOGGER.debug(output.decode('utf-8')) except subprocess.CalledProcessError as error: LOGGER.debug('Jekyll import error:') LOGGER.debug(error.output) err_msg = "Error running jekyll import on IIIF manifest!\n{cmd}\n{err}".format( cmd=' '.join(import_command), err=error.output.decode('utf-8') ) LOGGER.error(err_msg) raise ExportException(err_msg) def generate_website(self): """Generate a jekyll website for a volume with annotations. 
Creates a jekyll site and imports pages and annotations from the IIIF, and then returns the directory for further use (e.g., packaging as a zipfile for download, or for creating a new GitHub repository). :return: System path for export directory. :rtype: str """ LOGGER.debug('Generating jekyll website for %s', self.manifest.id) tmpdir = tempfile.mkdtemp(prefix='tmp-rdx-export') LOGGER.debug('Building export for %s in %s', self.manifest.id, tmpdir) # unzip jekyll template site self.log_status('Extracting jekyll template site') with zipfile.ZipFile(JEKYLL_THEME_ZIP, 'r') as jekyllzip: jekyllzip.extractall(tmpdir) self.jekyll_site_dir = os.path.join(tmpdir, 'digitaledition-jekylltheme') LOGGER.debug('Jekyll site dir:') LOGGER.debug(self.jekyll_site_dir) LOGGER.debug('Exporting IIIF bundle') iiif_zip_stream = IiifManifestExport.get_zip(self.manifest, 'v2', owners=self.owners) iiif_zip = zipfile.ZipFile(io.BytesIO(iiif_zip_stream), "r") iiif_zip.extractall(self.iiif_dir()) # TODO # # save image files if requested, and update image paths # # to use local references # if self.include_images: # self.save_page_images(jekyll_site_dir) # if self.include_deep_zoom: # self.generate_deep_zoom(jekyll_site_dir) # run the script to import IIIF as jekyll site content self.import_iiif_jekyll(self.manifest, self.jekyll_site_dir) # NOTE: putting export content in a separate dir to make it easy to create # the zip file with the right contents and structure export_dir = os.path.join(tmpdir, 'export') os.mkdir(export_dir) # rename the jekyll dir and move it into the export dir shutil.move(self.jekyll_site_dir, self.edition_dir(export_dir)) return export_dir def edition_dir(self, export_dir): """Convenience function for system path to the edition directory :param export_dir: System path for export directory. :type export_dir: str :return: System path for edition directory :rtype: str """ return os.path.join( export_dir, '{m}_annotated_jekyll_site'.format(m=self.manifest.id) ) def website_zip(self): """Package up a Jekyll site created by :meth:`website` as a zip file for easy download. :return: Temporary zip file. :rtype: tempfile.NamedTemporaryFile """ export_dir = self.generate_website() # create a tempfile to hold a zip file of the site # (using tempfile for automatic cleanup after use) webzipfile = tempfile.NamedTemporaryFile( suffix='.zip', prefix='%s_annotated_site_' % self.manifest.id, delete=False) shutil.make_archive( # name of the zipfile to create without .zip os.path.splitext(webzipfile.name)[0], 'zip', # archive format; could also do tar export_dir ) LOGGER.debug('Jekyll site web export zipfile for %s is %s', self.manifest.id, webzipfile.name) # clean up temporary files shutil.rmtree(export_dir) # NOTE: method has to return the tempfile itself, or else it will # get cleaned up when the reference is destroyed return webzipfile def use_github(self, user): """Set variables for GitHub export. :param user: Person exporting :type user: apps.users.models.User """ # connect to github as the user in order to create the repository self.github = GithubApi.connect_as_user(user) self.github_username = GithubApi.github_username(user) self.github_token = GithubApi.github_token(user) self.github.session.headers['Authorization'] = f'token {self.github_token}' def github_auth_repo(self, repo_name=None, repo_url=None): """Generate a GitHub repo url with an oauth token in order to push to GitHub on the user's behalf. Takes either a repository name or repository url. 
The expected result should be formatted as follows: https://<github username>:<github token>@github.com/<github username>/<github repo>.git :return: GitHub authentication header. :rtype: str """ if repo_url: parsed_repo_url = urlparse(repo_url) return f'https://{self.github_username}:{GithubApi.github_token(self.user)}@github.com/{parsed_repo_url.path[1:]}.git' return f'https://{self.github_username}:{GithubApi.github_token(self.user)}@github.com/{self.github_username}/{repo_name}.git' def gitrepo_exists(self): """Check to see if GitHub repo already exists. :return: True if repo exists. False if it does not. :rtype: bool """ current_repos = self.github.list_repos(self.github_username) current_repo_names = [repo['name'] for repo in current_repos] LOGGER.debug( 'Checking to see if {gr} in {rns}'.format( gr=self.github_repo, rns=" ".join(current_repo_names) ) ) return self.github_repo in current_repo_names def website_gitrepo(self): """Create a new GitHub repository and populate it with content from a newly generated jekyll website export created via :meth:`website`. :return: On success, returns a tuple of public repository URL and GitHub Pages URL for the newly created repo and site :rtype: tuple """ # NOTE: github pages sites now default to https github_pages_url = 'https://{un}.github.io/{gr}/'.format( un=self.github_username, gr=self.github_repo ) # before even starting to generate the jekyll site, # check if requested repo name already exists; if so, bail out with an error LOGGER.debug( 'Checking github repo {gr} for {un}'.format( gr=self.github_repo, un=self.github_username ) ) if self.gitrepo_exists(): raise GithubExportException( 'GitHub repo {gr} already exists.'.format( gr=self.github_repo ) ) export_dir = self.generate_website() # jekyll dir is *inside* the export directory; # for the jekyll site to display correctly, we need to commit what # is in the directory, not the directory itself jekyll_dir = self.edition_dir(export_dir) # modify the jekyll config for relative url on github.io config_file_path = os.path.join(jekyll_dir, '_config.yml') with open(config_file_path, 'r') as configfile: config_data = load(configfile, Loader=Loader) # split out github pages url into the site url and path parsed_gh_url = urlparse(github_pages_url) config_data['url'] = '{s}://{n}'.format( s=parsed_gh_url.scheme, n=parsed_gh_url.netloc ) config_data['baseurl'] = parsed_gh_url.path.rstrip('/') with open(config_file_path, 'w') as configfile: safe_dump(config_data, configfile, default_flow_style=False) # using safe_dump to generate only standard yaml output # NOTE: pyyaml requires default_flow_style=false to output # nested collections in block format LOGGER.debug( 'Creating github repo {gr} for {un}'.format( gr=self.github_repo, un=self.github_username ) ) self.github.create_repo( self.github_repo, homepage=github_pages_url, user=self.user, description='An annotated digital edition created with Readux' ) # get auth repo url to use to push data repo_url = self.github_auth_repo(repo_name=self.github_repo) # add the jekyll site to github; based on these instructions: # https://help.github.com/articles/adding-an-existing-project-to-github-using-the-command-line/ # initialize export dir as a git repo, and commit the contents # NOTE: to debug git commands, print the git return to see git output gitcmd = Git(jekyll_dir) # initialize jekyll site as a git repo gitcmd.init() # add and commit all contents gitcmd.config("user.email", self.user.email) gitcmd.config("user.name", self.user.fullname()) # Use the 
token to authenticate the Git commands. # Required to do this as of June 9, 2020 # https://developer.github.com/changes/2020-02-14-deprecating-oauth-app-endpoint/ gitcmd.config("user.password", GithubApi.github_token(self.user)) gitcmd.add(['.']) gitcmd.commit([ '-m', 'Import Jekyll site generated by Readux {v}'.format( v=__version__ ), '--author="{fn} <{ue}>"'.format( fn=self.user.fullname(), ue=self.user.email ) ]) # push local master to the gh-pages branch of the newly created repo, # using the user's oauth token credentials self.log_status('Pushing new content to GitHub') if os.environ['DJANGO_ENV'] != 'test': # pragma: no cover gitcmd.push([repo_url, 'master:gh-pages']) # pragma: no cover # clean up temporary files after push to github shutil.rmtree(export_dir) # generate public repo url for display to user public_repo_url = 'https://github.com/{un}/{gr}'.format( un=self.github_username, gr=self.github_repo ) return (public_repo_url, github_pages_url) def update_gitrepo(self): '''Update an existing GitHub repository previously created by Readux export. Checks out the repository, creates a new branch, runs the iiif_to_jekyll import on that branch, pushes it to github, and creates a pull request. Returns the HTML url for the new pull request on success.''' repo_url = 'github.com/{un}/{gr}.git'.format( un=self.github_username, gr=self.github_repo ) # get auth repo url to use to create branch auth_repo_url = self.github_auth_repo(repo_name=self.github_repo) # create a tmpdir to clone the git repo into tmpdir = tempfile.mkdtemp(prefix='tmp-rdx-export-update') LOGGER.debug( 'Cloning {r} to {t}'.format( r=repo_url, t=tmpdir ) ) repo = None if os.environ['DJANGO_ENV'] == 'test': repo = git.Repo.init(tmpdir) yml_config_path = os.path.join(tmpdir, '_config.yml') open(yml_config_path, 'a').close() repo.index.commit('initial commit') repo.git.checkout('HEAD', b='gh-pages') else: repo = git.Repo.clone_from(auth_repo_url, tmpdir, branch='gh-pages') repo.remote().pull() # pragma: no cover # create and switch to a new branch and switch to it; using datetime # for uniqueness git_branch_name = 'readux-update-%s' % \ datetime.now().strftime('%Y%m%d-%H%M%S') update_branch = repo.create_head(git_branch_name) update_branch.checkout() LOGGER.debug( 'Updating export for {m} in {t}'.format( m=self.manifest.pid, t=tmpdir ) ) # remove all annotations and tag pages so that if an annotation is removed # or a tag is no longer used in readux, it will be removed in the export # (annotations and tags that are unchanged will be restored by the IIIF # jekyll import, and look unchanged to git if no different) try: repo.index.remove(['_annotations/*', 'tags/*', 'iiif_export/*']) except git.GitCommandError: # it's possible that an export has no annotations or tags # (although unlikely to occur anywhere but development & testing) # if there's an error on removal, ignore it pass # save image files if requested, and update image paths # to use local references # TODO # if self.include_images: # self.save_page_images(tmpdir) self.jekyll_site_dir = tmpdir LOGGER.debug('Exporting IIIF bundle') iiif_zip_stream = IiifManifestExport.get_zip(self.manifest, 'v2', owners=self.owners) iiif_zip = zipfile.ZipFile(io.BytesIO(iiif_zip_stream), "r") iiif_zip.extractall(self.iiif_dir()) # TODO # # save image files if requested, and update image paths # # to use local references # if self.include_images: # self.save_page_images(jekyll_site_dir) # if self.include_deep_zoom: # self.generate_deep_zoom(jekyll_site_dir) if 
os.environ['DJANGO_ENV'] != 'test': # run the script to import IIIF as jekyll site content self.import_iiif_jekyll(self.manifest, self.jekyll_site_dir) # add any files that could be updated to the git index repo.index.add([ # pragma: no cover '_config.yml', '_volume_pages/*', '_annotations/*', '_data/tags.yml', 'tags/*', 'iiif_export/*' ]) # TODO: if deep zoom is added, we must add that directory as well git_author = git.Actor( self.user.fullname(), self.user.email ) # commit all changes repo.index.commit( 'Updated Jekyll site by Readux {v}'.format( v=__version__ ), author=git_author ) if os.environ['DJANGO_ENV'] != 'test': # push the update to a new branch on github repo.remotes.origin.push( # pragma: no cover '{b}s:{b}s'.format(b=git_branch_name) ) # convert repo url to form needed to generate pull request repo = repo_url.replace('github.com/', '').replace('.git', '') pullrequest = self.github.create_pull_request( repo, 'Updated export', git_branch_name, 'gh-pages') # clean up local checkout after successful push shutil.rmtree(tmpdir) # return the html url for the new pull request return pullrequest['html_url'] # from readux/books/consumers.py in Readux 1. def github_export(self, user_email): """ Export manifest to GitHub. :param user_email: Email of exporter. :type user_email: str :return: List of export URLs :rtype: list """ LOGGER.debug('Background export started.') # user_has_github = False if self.user: # check if user has a github account linked try: GithubApi.github_account(self.user) except GithubAccountNotFound: LOGGER.info('User attempted github export with no github account.') # connect to github as the user in order to create the repository self.use_github(self.user) # check that oauth token has sufficient permission # to do needed export steps # TODO: httpretty seems to include the HEAD method, but it errors when # making the request because the Head method is not implemented. if os.environ['DJANGO_ENV'] != 'test' and 'repo' not in self.github.oauth_scopes(): LOGGER.error('TODO: bad scope message') return None # pragma: no cover repo_url = None ghpages_url = None pr_url = None if not self.gitrepo_exists(): # create a new github repository with exported jekyll site try: repo_url, ghpages_url = self.website_gitrepo() LOGGER.info('Exported %s to GitHub repo %s for user %s', self.manifest.pid, repo_url, self.user.username) except GithubExportException as err: LOGGER.info('Export failed: {e}'.format(e=err)) else: # update an existing github repository with new branch and # a pull request try: # TODO: How to highjack the request to # https://58816:[email protected]/zaphod/marx.git/ when testing. 
if os.environ['DJANGO_ENV'] != 'test': pr_url = self.update_gitrepo() # pragma: no cover else: pr_url = 'https://github.com/{u}/{r}/pull/2'.format( u=self.github_username, r=self.github_repo ) LOGGER.info('GitHub jekyll site update completed') repo_url = 'https://github.com/%s/%s' % (self.github_username, self.github_repo) ghpages_url = 'https://%s.github.io/%s/' % (self.github_username, self.github_repo) except GithubExportException as err: self.notify_msg('Export failed: {e}'.format(e=err)) context = {} context['repo_url'] = repo_url context['ghpages_url'] = ghpages_url context['pr_url'] = pr_url email_contents = get_template('jekyll_export_email.html').render(context) text_contents = get_template('jekyll_export_email.txt').render(context) send_mail( 'Your Readux site export is ready!', text_contents, settings.READUX_EMAIL_SENDER, [user_email], fail_silently=False, html_message=email_contents ) return [repo_url, ghpages_url, pr_url] def download_export(self, user_email, volume): """Download exported manifest. :param user_email: Exporter's email address. :type user_email: str :param volume: Manifest being exported. :type volume: apps.iiif.manifests.models.Manifest :return: Filename for the exported zip. :rtype: str """ LOGGER.debug( 'Background download export started. Sending email to {ue}'.format( ue=user_email ) ) zip_file = self.website_zip() context = {} context["filename"] = os.path.basename(zip_file.name) context["volume"] = volume context["hostname"] = settings.HOSTNAME email_contents = get_template('download_export_email.html').render(context) text_contents = get_template('download_export_email.txt').render(context) # TODO: Maybe break this out so we can test it? send_mail( 'Your Readux site export is ready!', text_contents, settings.READUX_EMAIL_SENDER, [user_email], fail_silently=False, html_message=email_contents ) return zip_file.name
[]
[]
[ "DJANGO_ENV" ]
[]
["DJANGO_ENV"]
python
1
0
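The export module above reads os.environ['DJANGO_ENV'] by subscript in several places to skip git pushes and network calls under test, which is also why DJANGO_ENV shows up in this record's constarg column. A direct subscript raises KeyError when the variable is unset; the sketch below keeps the same check but adds a defensive default (the "production" fallback is an assumption for illustration, not something the module defines).

import os

def is_test_env() -> bool:
    # export.py subscripts os.environ['DJANGO_ENV'] directly, which raises
    # KeyError if the variable is missing; .get() with a default keeps the
    # same behaviour where DJANGO_ENV is defined and fails soft elsewhere.
    # The "production" default is assumed for this sketch only.
    return os.environ.get("DJANGO_ENV", "production") == "test"

if __name__ == "__main__":
    os.environ["DJANGO_ENV"] = "test"
    assert is_test_env()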
command/plugin/list_plugin_repos_command.go
package plugin import ( "os" "code.cloudfoundry.org/cli/cf/cmd" "code.cloudfoundry.org/cli/command" ) type ListPluginReposCommand struct { usage interface{} `usage:"CF_NAME list-plugin-repos"` relatedCommands interface{} `related_commands:"add-plugin-repo, install-plugin"` } func (_ ListPluginReposCommand) Setup(config command.Config, ui command.UI) error { return nil } func (_ ListPluginReposCommand) Execute(args []string) error { cmd.Main(os.Getenv("CF_TRACE"), os.Args) return nil }
[ "\"CF_TRACE\"" ]
[]
[ "CF_TRACE" ]
[]
["CF_TRACE"]
go
1
0
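The constarg and constargjson columns in these records hold the literal names passed to environment lookups, such as CF_TRACE in the Go command above. As a rough illustration of how such literals could be pulled out of source text (an assumption about the idea behind the columns, not the pipeline that actually produced them), a pair of regexes covers the Go and Python lookup styles seen in these files:

import re

# Lookup styles visible in these records: Go's os.Getenv("NAME") and
# Python's os.environ["NAME"] / os.getenv("NAME").
GO_GETENV = re.compile(r'os\.Getenv\(\s*"([^"]+)"\s*\)')
PY_LOOKUP = re.compile(r'os\.(?:environ\[|getenv\()\s*[\'"]([^\'"]+)[\'"]')

def constant_env_args(source):
    """Return env-var string literals in order of first appearance, de-duplicated."""
    seen = []
    for pattern in (GO_GETENV, PY_LOOKUP):
        for name in pattern.findall(source):
            if name not in seen:
                seen.append(name)
    return seen

print(constant_env_args('cmd.Main(os.Getenv("CF_TRACE"), os.Args)'))  # ['CF_TRACE']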
python_modules/libraries/dagster-docker/dagster_docker_tests/test_launch_docker.py
# pylint doesn't know about pytest fixtures # pylint: disable=unused-argument import os import re from contextlib import contextmanager import pytest from dagster.core.storage.pipeline_run import PipelineRunStatus from dagster.core.test_utils import poll_for_finished_run, poll_for_step_start from dagster.utils.test.postgres_instance import postgres_instance_for_test from dagster.utils.yaml_utils import merge_yamls from dagster_test.test_project import ( ReOriginatedExternalPipelineForTest, find_local_test_image, get_buildkite_registry_config, get_test_project_docker_image, get_test_project_environments_path, get_test_project_external_pipeline, get_test_project_recon_pipeline, ) IS_BUILDKITE = os.getenv("BUILDKITE") is not None @contextmanager def docker_postgres_instance(overrides=None): with postgres_instance_for_test( __file__, "test-postgres-db-docker", overrides=overrides ) as instance: yield instance def test_launch_docker_image_on_pipeline_config(): # Docker image name to use for launch specified as part of the pipeline origin # rather than in the run launcher instance config docker_image = get_test_project_docker_image() launcher_config = { "env_vars": [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", ], "network": "container:test-postgres-db-docker", } if IS_BUILDKITE: launcher_config["registry"] = get_buildkite_registry_config() else: find_local_test_image(docker_image) run_config = merge_yamls( [ os.path.join(get_test_project_environments_path(), "env.yaml"), os.path.join(get_test_project_environments_path(), "env_s3.yaml"), ] ) with docker_postgres_instance( overrides={ "run_launcher": { "class": "DockerRunLauncher", "module": "dagster_docker", "config": launcher_config, } } ) as instance: recon_pipeline = get_test_project_recon_pipeline("demo_pipeline", docker_image) run = instance.create_run_for_pipeline( pipeline_def=recon_pipeline.get_definition(), run_config=run_config, ) external_pipeline = ReOriginatedExternalPipelineForTest( get_test_project_external_pipeline("demo_pipeline", container_image=docker_image), container_image=docker_image, ) instance.launch_run(run.run_id, external_pipeline) poll_for_finished_run(instance, run.run_id, timeout=60) assert instance.get_run_by_id(run.run_id).status == PipelineRunStatus.SUCCESS def _check_event_log_contains(event_log, expected_type_and_message): types_and_messages = [ (e.dagster_event.event_type_value, e.message) for e in event_log if e.is_dagster_event ] for expected_event_type, expected_message_fragment in expected_type_and_message: assert any( event_type == expected_event_type and expected_message_fragment in message for event_type, message in types_and_messages ) def test_terminate_launched_docker_run(): docker_image = get_test_project_docker_image() launcher_config = { "env_vars": [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", ], "network": "container:test-postgres-db-docker", } if IS_BUILDKITE: launcher_config["registry"] = get_buildkite_registry_config() else: find_local_test_image(docker_image) run_config = merge_yamls( [ os.path.join(get_test_project_environments_path(), "env_s3.yaml"), ] ) with docker_postgres_instance( overrides={ "run_launcher": { "class": "DockerRunLauncher", "module": "dagster_docker", "config": launcher_config, } } ) as instance: recon_pipeline = get_test_project_recon_pipeline("hanging_pipeline", docker_image) run = instance.create_run_for_pipeline( pipeline_def=recon_pipeline.get_definition(), run_config=run_config, ) run_id = run.run_id external_pipeline = ReOriginatedExternalPipelineForTest( 
get_test_project_external_pipeline("hanging_pipeline", container_image=docker_image), container_image=docker_image, ) instance.launch_run(run_id, external_pipeline) poll_for_step_start(instance, run_id) assert instance.run_launcher.can_terminate(run_id) assert instance.run_launcher.terminate(run_id) terminated_pipeline_run = poll_for_finished_run(instance, run_id, timeout=30) terminated_pipeline_run = instance.get_run_by_id(run_id) assert terminated_pipeline_run.status == PipelineRunStatus.CANCELED run_logs = instance.all_logs(run_id) _check_event_log_contains( run_logs, [ ("PIPELINE_CANCELING", "Sending pipeline termination request"), ("STEP_FAILURE", 'Execution of step "hanging_solid" failed.'), ("PIPELINE_CANCELED", 'Execution of pipeline "hanging_pipeline" canceled.'), ("ENGINE_EVENT", "Pipeline execution terminated by interrupt"), ("ENGINE_EVENT", "Process for pipeline exited"), ], ) def test_launch_docker_invalid_image(): docker_image = "_invalid_format_image" launcher_config = { "env_vars": [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", ], "network": "container:test-postgres-db-docker", "image": docker_image, } if IS_BUILDKITE: launcher_config["registry"] = get_buildkite_registry_config() run_config = merge_yamls( [ os.path.join(get_test_project_environments_path(), "env.yaml"), os.path.join(get_test_project_environments_path(), "env_s3.yaml"), ] ) with docker_postgres_instance( overrides={ "run_launcher": { "class": "DockerRunLauncher", "module": "dagster_docker", "config": launcher_config, } } ) as instance: recon_pipeline = get_test_project_recon_pipeline("demo_pipeline") run = instance.create_run_for_pipeline( pipeline_def=recon_pipeline.get_definition(), run_config=run_config, ) external_pipeline = ReOriginatedExternalPipelineForTest( get_test_project_external_pipeline("demo_pipeline") ) with pytest.raises( Exception, match=re.escape("Docker image name _invalid_format_image is not correctly formatted"), ): instance.launch_run(run.run_id, external_pipeline) def test_launch_docker_image_on_instance_config(): docker_image = get_test_project_docker_image() launcher_config = { "env_vars": [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", ], "network": "container:test-postgres-db-docker", "image": docker_image, } if IS_BUILDKITE: launcher_config["registry"] = get_buildkite_registry_config() else: find_local_test_image(docker_image) run_config = merge_yamls( [ os.path.join(get_test_project_environments_path(), "env.yaml"), os.path.join(get_test_project_environments_path(), "env_s3.yaml"), ] ) with docker_postgres_instance( overrides={ "run_launcher": { "class": "DockerRunLauncher", "module": "dagster_docker", "config": launcher_config, } } ) as instance: recon_pipeline = get_test_project_recon_pipeline("demo_pipeline") run = instance.create_run_for_pipeline( pipeline_def=recon_pipeline.get_definition(), run_config=run_config, ) external_pipeline = ReOriginatedExternalPipelineForTest( get_test_project_external_pipeline("demo_pipeline") ) instance.launch_run(run.run_id, external_pipeline) poll_for_finished_run(instance, run.run_id, timeout=60) assert instance.get_run_by_id(run.run_id).status == PipelineRunStatus.SUCCESS
[]
[]
[ "BUILDKITE" ]
[]
["BUILDKITE"]
python
1
0
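The launcher_config in the tests above forwards AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to the launched container by name, and switches registry setup on whether BUILDKITE is set. When an API wants explicit name-to-value pairs rather than names to forward (docker-py's environment argument, for example), a small filter keeps unset names out of the mapping; this is a sketch under that assumption, not something dagster_docker itself does:

import os

def forward_env(names):
    """Build a name -> value mapping for the variables that are actually set."""
    return {name: os.environ[name] for name in names if name in os.environ}

# The run launcher config above lists these by name; this is the equivalent
# explicit mapping, assuming the variables exist in the launching process.
print(forward_env(["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"]))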
vendor/mvdan.cc/sh/interp/interp.go
// Copyright (c) 2017, Daniel Martí <[email protected]> // See LICENSE for licensing information package interp import ( "bytes" "context" "fmt" "io" "io/ioutil" "math" "os" "os/user" "path/filepath" "regexp" "runtime" "strings" "sync" "time" "golang.org/x/sync/errgroup" "mvdan.cc/sh/expand" "mvdan.cc/sh/syntax" ) // New creates a new Runner, applying a number of options. If applying any of // the options results in an error, it is returned. // // Any unset options fall back to their defaults. For example, not supplying the // environment falls back to the process's environment, and not supplying the // standard output writer means that the output will be discarded. func New(opts ...func(*Runner) error) (*Runner, error) { r := &Runner{usedNew: true} for _, opt := range opts { if err := opt(r); err != nil { return nil, err } } // Set the default fallbacks, if necessary. if r.Env == nil { Env(nil)(r) } if r.Dir == "" { if err := Dir("")(r); err != nil { return nil, err } } if r.Exec == nil { Module(ModuleExec(nil))(r) } if r.Open == nil { Module(ModuleOpen(nil))(r) } if r.Stdout == nil || r.Stderr == nil { StdIO(r.Stdin, r.Stdout, r.Stderr)(r) } return r, nil } func (r *Runner) fillExpandConfig(ctx context.Context) { r.ectx = ctx r.ecfg = &expand.Config{ Env: expandEnv{r}, CmdSubst: func(w io.Writer, cs *syntax.CmdSubst) error { switch len(cs.Stmts) { case 0: // nothing to do return nil case 1: // $(<file) word := catShortcutArg(cs.Stmts[0]) if word == nil { break } path := r.literal(word) f, err := r.open(ctx, r.relPath(path), os.O_RDONLY, 0, true) if err != nil { return err } _, err = io.Copy(w, f) return err } r2 := r.sub() r2.Stdout = w r2.stmts(ctx, cs.StmtList) return r2.err }, ReadDir: ioutil.ReadDir, } r.updateExpandOpts() } // catShortcutArg checks if a statement is of the form "$(<file)". The redirect // word is returned if there's a match, and nil otherwise. func catShortcutArg(stmt *syntax.Stmt) *syntax.Word { if stmt.Cmd != nil || stmt.Negated || stmt.Background || stmt.Coprocess { return nil } if len(stmt.Redirs) != 1 { return nil } redir := stmt.Redirs[0] if redir.Op != syntax.RdrIn { return nil } return redir.Word } func (r *Runner) updateExpandOpts() { r.ecfg.NoGlob = r.opts[optNoGlob] r.ecfg.GlobStar = r.opts[optGlobStar] } func (r *Runner) expandErr(err error) { switch err := err.(type) { case nil: case expand.UnsetParameterError: r.errf("%s\n", err.Message) r.exit = 1 r.setErr(ShellExitStatus(r.exit)) default: r.setErr(err) r.exit = 1 } } func (r *Runner) arithm(expr syntax.ArithmExpr) int { n, err := expand.Arithm(r.ecfg, expr) r.expandErr(err) return n } func (r *Runner) fields(words ...*syntax.Word) []string { strs, err := expand.Fields(r.ecfg, words...) r.expandErr(err) return strs } func (r *Runner) literal(word *syntax.Word) string { str, err := expand.Literal(r.ecfg, word) r.expandErr(err) return str } func (r *Runner) document(word *syntax.Word) string { str, err := expand.Document(r.ecfg, word) r.expandErr(err) return str } func (r *Runner) pattern(word *syntax.Word) string { str, err := expand.Pattern(r.ecfg, word) r.expandErr(err) return str } // expandEnv exposes Runner's variables to the expand package. 
type expandEnv struct { r *Runner } func (e expandEnv) Get(name string) expand.Variable { return e.r.lookupVar(name) } func (e expandEnv) Set(name string, vr expand.Variable) { e.r.setVarInternal(name, vr) } func (e expandEnv) Each(fn func(name string, vr expand.Variable) bool) { e.r.Env.Each(fn) for name, vr := range e.r.Vars { if !fn(name, vr) { return } } } // Env sets the interpreter's environment. If nil, a copy of the current // process's environment is used. func Env(env expand.Environ) func(*Runner) error { return func(r *Runner) error { if env == nil { env = expand.ListEnviron(os.Environ()...) } r.Env = env return nil } } // Dir sets the interpreter's working directory. If empty, the process's current // directory is used. func Dir(path string) func(*Runner) error { return func(r *Runner) error { if path == "" { path, err := os.Getwd() if err != nil { return fmt.Errorf("could not get current dir: %v", err) } r.Dir = path return nil } path, err := filepath.Abs(path) if err != nil { return fmt.Errorf("could not get absolute dir: %v", err) } info, err := os.Stat(path) if err != nil { return fmt.Errorf("could not stat: %v", err) } if !info.IsDir() { return fmt.Errorf("%s is not a directory", path) } r.Dir = path return nil } } // Params populates the shell options and parameters. For example, Params("-e", // "--", "foo") will set the "-e" option and the parameters ["foo"]. // // This is similar to what the interpreter's "set" builtin does. func Params(args ...string) func(*Runner) error { return func(r *Runner) error { for len(args) > 0 { arg := args[0] if arg == "" || (arg[0] != '-' && arg[0] != '+') { break } if arg == "--" { args = args[1:] break } enable := arg[0] == '-' var opt *bool if flag := arg[1:]; flag == "o" { args = args[1:] if len(args) == 0 && enable { for i, opt := range &shellOptsTable { r.printOptLine(opt.name, r.opts[i]) } break } if len(args) == 0 && !enable { for i, opt := range &shellOptsTable { setFlag := "+o" if r.opts[i] { setFlag = "-o" } r.outf("set %s %s\n", setFlag, opt.name) } break } opt = r.optByName(args[0], false) } else { opt = r.optByFlag(flag) } if opt == nil { return fmt.Errorf("invalid option: %q", arg) } *opt = enable args = args[1:] } r.Params = args r.updateExpandOpts() return nil } } type ModuleFunc interface { isModule() } // Module sets an interpreter module, which can be ModuleExec or ModuleOpen. If // the value is nil, the default module implementation is used. func Module(mod ModuleFunc) func(*Runner) error { return func(r *Runner) error { switch mod := mod.(type) { case ModuleExec: if mod == nil { mod = DefaultExec } r.Exec = mod case ModuleOpen: if mod == nil { mod = DefaultOpen } r.Open = mod default: return fmt.Errorf("unknown module type: %T", mod) } return nil } } // StdIO configures an interpreter's standard input, standard output, and // standard error. If out or err are nil, they default to a writer that discards // the output. func StdIO(in io.Reader, out, err io.Writer) func(*Runner) error { return func(r *Runner) error { r.Stdin = in if out == nil { out = ioutil.Discard } r.Stdout = out if err == nil { err = ioutil.Discard } r.Stderr = err return nil } } // A Runner interprets shell programs. It can be reused, but it is not safe for // concurrent use. You should typically use New to build a new Runner. // // Note that writes to Stdout and Stderr may be concurrent if background // commands are used. 
If you plan on using an io.Writer implementation that // isn't safe for concurrent use, consider a workaround like hiding writes // behind a mutex. // // To create a Runner, use New. type Runner struct { // Env specifies the environment of the interpreter, which must be // non-nil. Env expand.Environ // Dir specifies the working directory of the command, which must be an // absolute path. Dir string // Params are the current shell parameters, e.g. from running a shell // file or calling a function. Accessible via the $@/$* family of vars. Params []string // Exec is the module responsible for executing programs. It must be // non-nil. Exec ModuleExec // Open is the module responsible for opening files. It must be non-nil. Open ModuleOpen Stdin io.Reader Stdout io.Writer Stderr io.Writer // Separate maps - note that bash allows a name to be both a var and a // func simultaneously Vars map[string]expand.Variable Funcs map[string]*syntax.Stmt ecfg *expand.Config ectx context.Context // just so that Runner.Sub can use it again // didReset remembers whether the runner has ever been reset. This is // used so that Reset is automatically called when running any program // or node for the first time on a Runner. didReset bool usedNew bool filename string // only if Node was a File // like Vars, but local to a func i.e. "local foo=bar" funcVars map[string]expand.Variable // like Vars, but local to a cmd i.e. "foo=bar prog args..." cmdVars map[string]string // >0 to break or continue out of N enclosing loops breakEnclosing, contnEnclosing int inLoop bool inFunc bool inSource bool err error // current shell exit code or fatal error exit int // current (last) exit status code bgShells errgroup.Group opts [len(shellOptsTable) + len(bashOptsTable)]bool dirStack []string optState getopts // keepRedirs is used so that "exec" can make any redirections // apply to the current shell, and not just the command. keepRedirs bool // KillTimeout holds how much time the interpreter will wait for a // program to stop after being sent an interrupt signal, after // which a kill signal will be sent. This process will happen when the // interpreter's context is cancelled. // // The zero value will default to 2 seconds. // // A negative value means that a kill signal will be sent immediately. // // On Windows, the kill signal is always sent immediately, // because Go doesn't currently support sending Interrupt on Windows. KillTimeout time.Duration } func (r *Runner) optByFlag(flag string) *bool { for i, opt := range &shellOptsTable { if opt.flag == flag { return &r.opts[i] } } return nil } func (r *Runner) optByName(name string, bash bool) *bool { if bash { for i, optName := range bashOptsTable { if optName == name { return &r.opts[len(shellOptsTable)+i] } } } for i, opt := range &shellOptsTable { if opt.name == name { return &r.opts[i] } } return nil } var shellOptsTable = [...]struct { flag, name string }{ // sorted alphabetically by name; use a space for the options // that have no flag form {"a", "allexport"}, {"e", "errexit"}, {"n", "noexec"}, {"f", "noglob"}, {"u", "nounset"}, {" ", "pipefail"}, } var bashOptsTable = [...]string{ // sorted alphabetically by name "globstar", } // To access the shell options arrays without a linear search when we // know which option we're after at compile time. First come the shell options, // then the bash options. 
const ( optAllExport = iota optErrExit optNoExec optNoGlob optNoUnset optPipeFail optGlobStar ) // Reset empties the runner state and sets any exported fields with zero values // to their default values. // // Typically, this function only needs to be called if a runner is reused to run // multiple programs non-incrementally. Not calling Reset between each run will // mean that the shell state will be kept, including variables and options. func (r *Runner) Reset() { if !r.usedNew { panic("use interp.New to construct a Runner") } // reset the internal state *r = Runner{ Env: r.Env, Dir: r.Dir, Params: r.Params, Stdin: r.Stdin, Stdout: r.Stdout, Stderr: r.Stderr, Exec: r.Exec, Open: r.Open, KillTimeout: r.KillTimeout, // emptied below, to reuse the space Vars: r.Vars, cmdVars: r.cmdVars, dirStack: r.dirStack[:0], usedNew: r.usedNew, } if r.Vars == nil { r.Vars = make(map[string]expand.Variable) } else { for k := range r.Vars { delete(r.Vars, k) } } if r.cmdVars == nil { r.cmdVars = make(map[string]string) } else { for k := range r.cmdVars { delete(r.cmdVars, k) } } if vr := r.Env.Get("HOME"); !vr.IsSet() { u, _ := user.Current() r.Vars["HOME"] = expand.Variable{Value: u.HomeDir} } r.Vars["PWD"] = expand.Variable{Value: r.Dir} r.Vars["IFS"] = expand.Variable{Value: " \t\n"} r.Vars["OPTIND"] = expand.Variable{Value: "1"} if runtime.GOOS == "windows" { // convert $PATH to a unix path list path := r.Env.Get("PATH").String() path = strings.Join(filepath.SplitList(path), ":") r.Vars["PATH"] = expand.Variable{Value: path} } r.dirStack = append(r.dirStack, r.Dir) if r.KillTimeout == 0 { r.KillTimeout = 2 * time.Second } r.didReset = true } func (r *Runner) modCtx(ctx context.Context) context.Context { mc := ModuleCtx{ Dir: r.Dir, Stdin: r.Stdin, Stdout: r.Stdout, Stderr: r.Stderr, KillTimeout: r.KillTimeout, } oenv := overlayEnviron{ parent: r.Env, values: make(map[string]expand.Variable), } for name, vr := range r.Vars { oenv.Set(name, vr) } for name, vr := range r.funcVars { oenv.Set(name, vr) } for name, value := range r.cmdVars { oenv.Set(name, expand.Variable{Exported: true, Value: value}) } mc.Env = oenv return context.WithValue(ctx, moduleCtxKey{}, mc) } // ShellExitStatus exits the shell with a status code. type ShellExitStatus uint8 func (s ShellExitStatus) Error() string { return fmt.Sprintf("exit status %d", s) } // ExitStatus is a non-zero status code resulting from running a shell node. type ExitStatus uint8 func (s ExitStatus) Error() string { return fmt.Sprintf("exit status %d", s) } func (r *Runner) setErr(err error) { if r.err == nil { r.err = err } } // Run interprets a node, which can be a *File, *Stmt, or Command. If a non-nil // error is returned, it will typically be of type ExitStatus or // ShellExitStatus. // // Run can be called multiple times synchronously to interpret programs // incrementally. To reuse a Runner without keeping the internal shell state, // call Reset. func (r *Runner) Run(ctx context.Context, node syntax.Node) error { if !r.didReset { r.Reset() } r.fillExpandConfig(ctx) r.err = nil r.filename = "" switch x := node.(type) { case *syntax.File: r.filename = x.Name r.stmts(ctx, x.StmtList) case *syntax.Stmt: r.stmt(ctx, x) case syntax.Command: r.cmd(ctx, x) default: return fmt.Errorf("node can only be File, Stmt, or Command: %T", x) } if r.exit > 0 { r.setErr(ExitStatus(r.exit)) } return r.err } func (r *Runner) out(s string) { io.WriteString(r.Stdout, s) } func (r *Runner) outf(format string, a ...interface{}) { fmt.Fprintf(r.Stdout, format, a...) 
} func (r *Runner) errf(format string, a ...interface{}) { fmt.Fprintf(r.Stderr, format, a...) } func (r *Runner) stop(ctx context.Context) bool { if r.err != nil { return true } if err := ctx.Err(); err != nil { r.err = err return true } if r.opts[optNoExec] { return true } return false } func (r *Runner) stmt(ctx context.Context, st *syntax.Stmt) { if r.stop(ctx) { return } if st.Background { r2 := r.sub() st2 := *st st2.Background = false r.bgShells.Go(func() error { return r2.Run(ctx, &st2) }) } else { r.stmtSync(ctx, st) } } func (r *Runner) stmtSync(ctx context.Context, st *syntax.Stmt) { oldIn, oldOut, oldErr := r.Stdin, r.Stdout, r.Stderr for _, rd := range st.Redirs { cls, err := r.redir(ctx, rd) if err != nil { r.exit = 1 return } if cls != nil { defer cls.Close() } } if st.Cmd == nil { r.exit = 0 } else { r.cmd(ctx, st.Cmd) } if st.Negated { r.exit = oneIf(r.exit == 0) } if r.exit != 0 && r.opts[optErrExit] { r.setErr(ShellExitStatus(r.exit)) } if !r.keepRedirs { r.Stdin, r.Stdout, r.Stderr = oldIn, oldOut, oldErr } } func (r *Runner) sub() *Runner { // Keep in sync with the Runner type. Manually copy fields, to not copy // sensitive ones like errgroup.Group, and to do deep copies of slices. r2 := &Runner{ Env: r.Env, Dir: r.Dir, Params: r.Params, Exec: r.Exec, Open: r.Open, Stdin: r.Stdin, Stdout: r.Stdout, Stderr: r.Stderr, Funcs: r.Funcs, KillTimeout: r.KillTimeout, filename: r.filename, opts: r.opts, } r2.Vars = make(map[string]expand.Variable, len(r.Vars)) for k, v := range r.Vars { r2.Vars[k] = v } r2.funcVars = make(map[string]expand.Variable, len(r.funcVars)) for k, v := range r.funcVars { r2.funcVars[k] = v } r2.cmdVars = make(map[string]string, len(r.cmdVars)) for k, v := range r.cmdVars { r2.cmdVars[k] = v } r2.dirStack = append([]string(nil), r.dirStack...) r2.fillExpandConfig(r.ectx) r2.didReset = true return r2 } func (r *Runner) cmd(ctx context.Context, cm syntax.Command) { if r.stop(ctx) { return } switch x := cm.(type) { case *syntax.Block: r.stmts(ctx, x.StmtList) case *syntax.Subshell: r2 := r.sub() r2.stmts(ctx, x.StmtList) r.exit = r2.exit r.setErr(r2.err) case *syntax.CallExpr: fields := r.fields(x.Args...) if len(fields) == 0 { for _, as := range x.Assigns { vr := r.lookupVar(as.Name.Value) vr.Value = r.assignVal(as, "") r.setVar(as.Name.Value, as.Index, vr) } break } for _, as := range x.Assigns { val := r.assignVal(as, "") // we know that inline vars must be strings r.cmdVars[as.Name.Value] = val.(string) } r.call(ctx, x.Args[0].Pos(), fields) // cmdVars can be nuked here, as they are never useful // again once we nest into further levels of inline // vars. 
for k := range r.cmdVars { delete(r.cmdVars, k) } case *syntax.BinaryCmd: switch x.Op { case syntax.AndStmt: r.stmt(ctx, x.X) if r.exit == 0 { r.stmt(ctx, x.Y) } case syntax.OrStmt: r.stmt(ctx, x.X) if r.exit != 0 { r.stmt(ctx, x.Y) } case syntax.Pipe, syntax.PipeAll: pr, pw := io.Pipe() r2 := r.sub() r2.Stdout = pw if x.Op == syntax.PipeAll { r2.Stderr = pw } else { r2.Stderr = r.Stderr } r.Stdin = pr var wg sync.WaitGroup wg.Add(1) go func() { r2.stmt(ctx, x.X) pw.Close() wg.Done() }() r.stmt(ctx, x.Y) pr.Close() wg.Wait() if r.opts[optPipeFail] && r2.exit > 0 && r.exit == 0 { r.exit = r2.exit } r.setErr(r2.err) } case *syntax.IfClause: r.stmts(ctx, x.Cond) if r.exit == 0 { r.stmts(ctx, x.Then) break } r.exit = 0 r.stmts(ctx, x.Else) case *syntax.WhileClause: for !r.stop(ctx) { r.stmts(ctx, x.Cond) stop := (r.exit == 0) == x.Until r.exit = 0 if stop || r.loopStmtsBroken(ctx, x.Do) { break } } case *syntax.ForClause: switch y := x.Loop.(type) { case *syntax.WordIter: name := y.Name.Value for _, field := range r.fields(y.Items...) { r.setVarString(name, field) if r.loopStmtsBroken(ctx, x.Do) { break } } case *syntax.CStyleLoop: r.arithm(y.Init) for r.arithm(y.Cond) != 0 { if r.loopStmtsBroken(ctx, x.Do) { break } r.arithm(y.Post) } } case *syntax.FuncDecl: r.setFunc(x.Name.Value, x.Body) case *syntax.ArithmCmd: r.exit = oneIf(r.arithm(x.X) == 0) case *syntax.LetClause: var val int for _, expr := range x.Exprs { val = r.arithm(expr) } r.exit = oneIf(val == 0) case *syntax.CaseClause: str := r.literal(x.Word) for _, ci := range x.Items { for _, word := range ci.Patterns { pattern := r.pattern(word) if match(pattern, str) { r.stmts(ctx, ci.StmtList) return } } } case *syntax.TestClause: r.exit = 0 if r.bashTest(ctx, x.X, false) == "" && r.exit == 0 { // to preserve exit status code 2 for regex errors, etc r.exit = 1 } case *syntax.DeclClause: local, global := false, false var modes []string valType := "" switch x.Variant.Value { case "declare": // When used in a function, "declare" acts as "local" // unless the "-g" option is used. local = r.inFunc case "local": if !r.inFunc { r.errf("local: can only be used in a function\n") r.exit = 1 return } local = true case "export": modes = append(modes, "-x") case "readonly": modes = append(modes, "-r") case "nameref": modes = append(modes, "-n") } for _, opt := range x.Opts { switch s := r.literal(opt); s { case "-x", "-r", "-n": modes = append(modes, s) case "-a", "-A": valType = s case "-g": global = true default: r.errf("declare: invalid option %q\n", s) r.exit = 2 return } } for _, as := range x.Assigns { for _, as := range r.flattenAssign(as) { name := as.Name.Value if !syntax.ValidName(name) { r.errf("declare: invalid name %q\n", name) r.exit = 1 return } vr := r.lookupVar(as.Name.Value) vr.Value = r.assignVal(as, valType) if global { vr.Local = false } else if local { vr.Local = true } for _, mode := range modes { switch mode { case "-x": vr.Exported = true case "-r": vr.ReadOnly = true case "-n": vr.NameRef = true } } r.setVar(name, as.Index, vr) } } case *syntax.TimeClause: start := time.Now() if x.Stmt != nil { r.stmt(ctx, x.Stmt) } format := "%s\t%s\n" if x.PosixFormat { format = "%s %s\n" } else { r.outf("\n") } real := time.Since(start) r.outf(format, "real", elapsedString(real, x.PosixFormat)) // TODO: can we do these? 
r.outf(format, "user", elapsedString(0, x.PosixFormat)) r.outf(format, "sys", elapsedString(0, x.PosixFormat)) default: panic(fmt.Sprintf("unhandled command node: %T", x)) } } func (r *Runner) flattenAssign(as *syntax.Assign) []*syntax.Assign { // Convert "declare $x" into "declare value". // Don't use syntax.Parser here, as we only want the basic // splitting by '='. if as.Name != nil { return []*syntax.Assign{as} // nothing to do } var asgns []*syntax.Assign for _, field := range r.fields(as.Value) { as := &syntax.Assign{} parts := strings.SplitN(field, "=", 2) as.Name = &syntax.Lit{Value: parts[0]} if len(parts) == 1 { as.Naked = true } else { as.Value = &syntax.Word{Parts: []syntax.WordPart{ &syntax.Lit{Value: parts[1]}, }} } asgns = append(asgns, as) } return asgns } func match(pattern, name string) bool { expr, err := syntax.TranslatePattern(pattern, true) if err != nil { return false } rx := regexp.MustCompile("^" + expr + "$") return rx.MatchString(name) } func elapsedString(d time.Duration, posix bool) string { if posix { return fmt.Sprintf("%.2f", d.Seconds()) } min := int(d.Minutes()) sec := math.Remainder(d.Seconds(), 60.0) return fmt.Sprintf("%dm%.3fs", min, sec) } func (r *Runner) stmts(ctx context.Context, sl syntax.StmtList) { for _, stmt := range sl.Stmts { r.stmt(ctx, stmt) } } func (r *Runner) hdocReader(rd *syntax.Redirect) io.Reader { if rd.Op != syntax.DashHdoc { hdoc := r.document(rd.Hdoc) return strings.NewReader(hdoc) } var buf bytes.Buffer var cur []syntax.WordPart flushLine := func() { if buf.Len() > 0 { buf.WriteByte('\n') } buf.WriteString(r.document(&syntax.Word{Parts: cur})) cur = cur[:0] } for _, wp := range rd.Hdoc.Parts { lit, ok := wp.(*syntax.Lit) if !ok { cur = append(cur, wp) continue } for i, part := range strings.Split(lit.Value, "\n") { if i > 0 { flushLine() cur = cur[:0] } part = strings.TrimLeft(part, "\t") cur = append(cur, &syntax.Lit{Value: part}) } } flushLine() return &buf } func (r *Runner) redir(ctx context.Context, rd *syntax.Redirect) (io.Closer, error) { if rd.Hdoc != nil { r.Stdin = r.hdocReader(rd) return nil, nil } orig := &r.Stdout if rd.N != nil { switch rd.N.Value { case "1": case "2": orig = &r.Stderr } } arg := r.literal(rd.Word) switch rd.Op { case syntax.WordHdoc: r.Stdin = strings.NewReader(arg + "\n") return nil, nil case syntax.DplOut: switch arg { case "1": *orig = r.Stdout case "2": *orig = r.Stderr } return nil, nil case syntax.RdrIn, syntax.RdrOut, syntax.AppOut, syntax.RdrAll, syntax.AppAll: // done further below // case syntax.DplIn: default: panic(fmt.Sprintf("unhandled redirect op: %v", rd.Op)) } mode := os.O_RDONLY switch rd.Op { case syntax.AppOut, syntax.AppAll: mode = os.O_WRONLY | os.O_CREATE | os.O_APPEND case syntax.RdrOut, syntax.RdrAll: mode = os.O_WRONLY | os.O_CREATE | os.O_TRUNC } f, err := r.open(ctx, r.relPath(arg), mode, 0644, true) if err != nil { return nil, err } switch rd.Op { case syntax.RdrIn: r.Stdin = f case syntax.RdrOut, syntax.AppOut: *orig = f case syntax.RdrAll, syntax.AppAll: r.Stdout = f r.Stderr = f default: panic(fmt.Sprintf("unhandled redirect op: %v", rd.Op)) } return f, nil } func (r *Runner) loopStmtsBroken(ctx context.Context, sl syntax.StmtList) bool { oldInLoop := r.inLoop r.inLoop = true defer func() { r.inLoop = oldInLoop }() for _, stmt := range sl.Stmts { r.stmt(ctx, stmt) if r.contnEnclosing > 0 { r.contnEnclosing-- return r.contnEnclosing > 0 } if r.breakEnclosing > 0 { r.breakEnclosing-- return true } } return false } type returnStatus uint8 func (s returnStatus) Error() 
string { return fmt.Sprintf("return status %d", s) } func (r *Runner) call(ctx context.Context, pos syntax.Pos, args []string) { if r.stop(ctx) { return } name := args[0] if body := r.Funcs[name]; body != nil { // stack them to support nested func calls oldParams := r.Params r.Params = args[1:] oldInFunc := r.inFunc oldFuncVars := r.funcVars r.funcVars = nil r.inFunc = true r.stmt(ctx, body) r.Params = oldParams r.funcVars = oldFuncVars r.inFunc = oldInFunc if code, ok := r.err.(returnStatus); ok { r.err = nil r.exit = int(code) } return } if isBuiltin(name) { r.exit = r.builtinCode(ctx, pos, name, args[1:]) return } r.exec(ctx, args) } func (r *Runner) exec(ctx context.Context, args []string) { path := r.lookPath(args[0]) err := r.Exec(r.modCtx(ctx), path, args) switch x := err.(type) { case nil: r.exit = 0 case ExitStatus: r.exit = int(x) default: // module's custom fatal error r.setErr(err) } } func (r *Runner) open(ctx context.Context, path string, flags int, mode os.FileMode, print bool) (io.ReadWriteCloser, error) { f, err := r.Open(r.modCtx(ctx), path, flags, mode) switch err.(type) { case nil: case *os.PathError: if print { r.errf("%v\n", err) } default: // module's custom fatal error r.setErr(err) } return f, err } func (r *Runner) stat(name string) (os.FileInfo, error) { return os.Stat(r.relPath(name)) } func (r *Runner) checkStat(file string) string { d, err := r.stat(file) if err != nil { return "" } m := d.Mode() if m.IsDir() { return "" } if runtime.GOOS != "windows" && m&0111 == 0 { return "" } return file } func winHasExt(file string) bool { i := strings.LastIndex(file, ".") if i < 0 { return false } return strings.LastIndexAny(file, `:\/`) < i } func (r *Runner) findExecutable(file string, exts []string) string { if len(exts) == 0 { // non-windows return r.checkStat(file) } if winHasExt(file) && r.checkStat(file) != "" { return file } for _, e := range exts { if f := file + e; r.checkStat(f) != "" { return f } } return "" } func driveLetter(c byte) bool { return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') } // splitList is like filepath.SplitList, but always using the unix path // list separator ':'. On Windows, it also makes sure not to split // [A-Z]:[/\]. func splitList(path string) []string { if path == "" { return []string{""} } list := strings.Split(path, ":") if runtime.GOOS != "windows" { return list } // join "C", "/foo" into "C:/foo" var fixed []string for i := 0; i < len(list); i++ { s := list[i] switch { case len(s) != 1, !driveLetter(s[0]): case i+1 >= len(list): // last element case strings.IndexAny(list[i+1], `/\`) != 0: // next element doesn't start with / or \ default: fixed = append(fixed, s+":"+list[i+1]) i++ continue } fixed = append(fixed, s) } return fixed } func (r *Runner) lookPath(file string) string { pathList := splitList(r.envGet("PATH")) chars := `/` if runtime.GOOS == "windows" { chars = `:\/` // so that "foo" always tries "./foo" pathList = append([]string{"."}, pathList...) } exts := r.pathExts() if strings.ContainsAny(file, chars) { return r.findExecutable(file, exts) } for _, dir := range pathList { var path string switch dir { case "", ".": // otherwise "foo" won't be "./foo" path = "." 
+ string(filepath.Separator) + file default: path = filepath.Join(dir, file) } if f := r.findExecutable(path, exts); f != "" { return f } } return "" } func (r *Runner) pathExts() []string { if runtime.GOOS != "windows" { return nil } pathext := r.envGet("PATHEXT") if pathext == "" { return []string{".com", ".exe", ".bat", ".cmd"} } var exts []string for _, e := range strings.Split(strings.ToLower(pathext), `;`) { if e == "" { continue } if e[0] != '.' { e = "." + e } exts = append(exts, e) } return exts }
[]
[]
[]
[]
[]
go
null
null
null
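When interp.go hands an environment to a module, modCtx layers cmdVars over funcVars over Vars over the parent Env, so the innermost assignment wins a lookup. The same ordering can be modelled in Python with collections.ChainMap; the variable values below are invented for the demonstration and none of the Go types are reproduced.

import os
from collections import ChainMap

# Mirrors the layering in modCtx: command-scoped vars shadow function-local
# vars, which shadow shell vars, which shadow the parent process environment.
shell_vars = {"PWD": "/tmp", "IFS": " \t\n"}
func_vars = {"opt": "from-local"}      # e.g. `local opt=from-local`
cmd_vars = {"FOO": "bar"}              # e.g. `FOO=bar prog args...`

effective = ChainMap(cmd_vars, func_vars, shell_vars, os.environ)
print(effective["FOO"], effective["PWD"], effective.get("HOME"))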
source/lambda/ingestion-youtube/test/conftest.py
#!/usr/bin/env python ###################################################################################################################### # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance # # with the License. A copy of the License is located at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES # # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions # # and limitations under the License. # ###################################################################################################################### import os import boto3 import pytest from botocore.stub import Stubber from moto import dynamodb from shared_util import custom_boto_config @pytest.fixture(autouse=True) def aws_environment_variables(): """Mocked AWS evivronment variables such as AWS credentials and region""" os.environ["AWS_ACCESS_KEY_ID"] = "mocked-aws-access-key-id" os.environ["AWS_SECRET_ACCESS_KEY"] = "mocked-aws-secret-access-key" os.environ["AWS_SESSION_TOKEN"] = "mocked-aws-session-token" os.environ["AWS_REGION"] = "us-east-1" # must be a valid region os.environ["AWS_SDK_USER_AGENT"] = '{ "user_agent_extra": "solution/fakeID/fakeVersion" }' os.environ["TARGET_DDB_TABLE"] = "mockqueryddbtable" os.environ["EVENT_BUS_NAME"] = "fakeeventbus" os.environ["INGESTION_NAMESPACE"] = "com.analyze.news.config" os.environ["STREAM_NAME"] = "fakestream" os.environ["SSM_API_KEY"] = "fakessmapikey" os.environ["QUERY"] = "fakeSearch" os.environ["VIDEO_NAMESPACE"] = "com.youtube.video" os.environ["VIDEO_SEARCH_INGESTION_WINDOW"] = "7"
[]
[]
[ "STREAM_NAME", "AWS_SESSION_TOKEN", "SSM_API_KEY", "AWS_SECRET_ACCESS_KEY", "AWS_REGION", "AWS_SDK_USER_AGENT", "TARGET_DDB_TABLE", "EVENT_BUS_NAME", "INGESTION_NAMESPACE", "AWS_ACCESS_KEY_ID", "VIDEO_NAMESPACE", "VIDEO_SEARCH_INGESTION_WINDOW", "QUERY" ]
[]
["STREAM_NAME", "AWS_SESSION_TOKEN", "SSM_API_KEY", "AWS_SECRET_ACCESS_KEY", "AWS_REGION", "AWS_SDK_USER_AGENT", "TARGET_DDB_TABLE", "EVENT_BUS_NAME", "INGESTION_NAMESPACE", "AWS_ACCESS_KEY_ID", "VIDEO_NAMESPACE", "VIDEO_SEARCH_INGESTION_WINDOW", "QUERY"]
python
13
0
usher.go
/* usher is a tiny personal url shortener. This library provides the maintenance functions for our simple database of code => url mappings (a yaml file in filepath.join(os.UserConfigDir(), "usher")). */ package usher import ( "errors" "fmt" "io/ioutil" "math/rand" "os" "path/filepath" "regexp" "sort" "strings" "time" yaml "gopkg.in/yaml.v3" ) const configfile = "usher.yml" const indexCode = "INDEX" // Random Code generation constants const minRandomCodeLen = 5 const maxRandomCodeLen = 8 const digits = "23456789" // omit 0 and 1 as easily confused with o and l const chars = "abcdefghijkmnpqrstuvwxyz" // omit o and l as easily confused with 0 and 1 // Errors var ( ErrNotFound = errors.New("not found") ErrCodeExists = errors.New("code already used") ErrNoChange = errors.New("mapping unchanged") ErrPushTypeUnconfigured = errors.New("config backend type is unconfigured") ErrPushTypeBad = errors.New("config backend type is bad") ) type DB struct { Root string // full path to usher root directory containing databases Domain string // fully-qualified domain whose mappings we want DBPath string // full path to database for Domain ConfigPath string // full path to usher config file } type Entry struct { Code string Url string } type ConfigEntry struct { Type string `yaml:"type"` AWSKey string `yaml:"aws_key,omitempty"` AWSSecret string `yaml:"aws_secret,omitempty"` AWSRegion string `yaml:"aws_region,omitempty"` } // NewDB creates a DB struct with members derived from parameters, // the environment, or defaults (in that order). It does no checking // that the values produced are sane or exist on the filesystem. func NewDB(domain string) (*DB, error) { // Get root root := os.Getenv("USHER_ROOT") if root == "" { // If USHER_ROOT is unset, check if there is an usher.yml in the cwd stat, err := os.Stat("usher.yml") if err == nil && !stat.IsDir() { cwd, err := os.Getwd() if err == nil { root = cwd } } } if root == "" { // If root is still unset, default to "os.UserConfigDir()/usher" configDir, err := os.UserConfigDir() if err != nil { return nil, err } root = filepath.Join(configDir, "usher") } // Derive domain if not set - check for USHER_DOMAIN in environment if domain == "" { domain = os.Getenv("USHER_DOMAIN") } // Else infer the domain if only one database exists if domain == "" { matches, _ := filepath.Glob(filepath.Join(root, "*.*.yml")) if len(matches) == 1 { // Exactly one match - strip .yml suffix to get domain re := regexp.MustCompile(`.yml$`) domain = re.ReplaceAllLiteralString(filepath.Base(matches[0]), "") } } // Else give up with an error if domain == "" { return nil, errors.New("Domain not passed as parameter or set in env USHER_DOMAIN") } // Set DBPath dbpath := filepath.Join(root, domain+".yml") // Set ConfigPath configpath := filepath.Join(root, configfile) return &DB{Root: root, Domain: domain, DBPath: dbpath, ConfigPath: configpath}, nil } // Init checks and creates the following, if they don't exist: // - an usher root directory // - an usher database for the db.Domain // - an entry in the user config file for db.Domain func (db *DB) Init() (dbCreated bool, err error) { dbCreated = false // Ensure root exists err = os.MkdirAll(db.Root, 0755) if err != nil { return dbCreated, err } // Ensure database exists _, err = os.Stat(db.DBPath) if err == nil { return dbCreated, nil // exists } if err != nil && !os.IsNotExist(err) { return dbCreated, err // unexpected error } // Database does not exist - create fh, err := os.Create(db.DBPath) fh.Close() if err != nil { return dbCreated, err } 
dbCreated = true // Ensure configfile exists _, err = os.Stat(db.ConfigPath) if err == nil { _, err := db.readConfig() if err != nil { if err != ErrNotFound { return dbCreated, err } } err = db.appendConfigString(db.configPlaceholder()) if err != nil { return dbCreated, err } } else { // Create a placeholder config file for domain err = db.writeConfigString(db.configPlaceholder()) if err != nil { return dbCreated, err } } return dbCreated, nil } // List returns the set of database entries whose code matches glob func (db *DB) List(glob string) ([]Entry, error) { // FIXME: first-pass - ignore glob mappings, err := db.readDB() if err != nil { return nil, err } // Extract codes and sort codes := make([]string, len(mappings)) i := 0 for code := range mappings { codes[i] = code i++ } sort.Strings(codes) // Compile entries var entries = make([]Entry, len(mappings)) i = 0 for _, code := range codes { entries[i] = Entry{Code: code, Url: mappings[code]} i++ } return entries, nil } // Add a mapping for url and code to the database. // If code is missing, a random code will be generated and returned. func (db *DB) Add(url, code string) (string, error) { mappings, err := db.readDB() if err != nil { return "", err } if code == "" { code = randomCode(mappings) } else { // Check for parameter inversion reUrl := regexp.MustCompile(`^https?://`) if !reUrl.MatchString(url) && reUrl.MatchString(code) { url, code = code, url } // Check whether code is already used dburl, exists := mappings[code] if exists { if dburl == url { // Trying to re-add the same url is not an error, just a noop return code, nil } return code, ErrCodeExists } } mappings[code] = url err = db.writeDB(mappings) if err != nil { return code, err } return code, nil } // Update an existing mapping in the database, changing the URL. 
func (db *DB) Update(url, code string) error { mappings, err := db.readDB() if err != nil { return err } // Check for parameter inversion reUrl := regexp.MustCompile(`^https?://`) if !reUrl.MatchString(url) && reUrl.MatchString(code) { url, code = code, url } // If code is missing, abort dburl, exists := mappings[code] if !exists { return ErrNotFound } // Trying to update to the same url is not an error, just a noop if dburl == url { return nil } mappings[code] = url err = db.writeDB(mappings) if err != nil { return err } return nil } // Remove the mapping with code from the database // Returns ErrNotFound if code does not exist in the database func (db *DB) Remove(code string) error { mappings, err := db.readDB() if err != nil { return err } _, exists := mappings[code] if !exists { return ErrNotFound } delete(mappings, code) err = db.writeDB(mappings) if err != nil { return err } return nil } // Push syncs all current mappings with the backend configured for db.Domain // in db.ConfigPath func (db *DB) Push() error { config, err := db.readConfig() if err != nil { return err } if config.Type == "" { return fmt.Errorf("no 'type' field found for %q in config %q\n", db.Domain, db.ConfigPath) } switch config.Type { case "s3": err = db.pushS3(config) if err != nil { return err } case "render": err = db.pushRender() if err != nil { return err } case "unconfigured": return ErrPushTypeUnconfigured default: return fmt.Errorf("invalid config backend type %q found for %q: %w", config.Type, db.Domain, ErrPushTypeBad) } return nil } // readDB is a utility function to read all mappings from db.DBPath // and return as a go map func (db *DB) readDB() (map[string]string, error) { data, err := ioutil.ReadFile(db.DBPath) if err != nil { return nil, err } var mappings map[string]string err = yaml.Unmarshal(data, &mappings) if err != nil { return nil, err } if len(mappings) == 0 { mappings = make(map[string]string) } return mappings, nil } // writeDB is a utility function to write mappings (as yaml) to db.DBPath func (db *DB) writeDB(mappings map[string]string) error { var data []byte var err error if len(mappings) > 0 { data, err = yaml.Marshal(mappings) if err != nil { return err } } tmpfile := db.DBPath + ".tmp" err = ioutil.WriteFile(tmpfile, data, 0644) if err != nil { return err } err = os.Rename(tmpfile, db.DBPath) if err != nil { return err } return nil } // readConfig is a utility function to read the config entry for // db.Domain from db.ConfigPath file func (db *DB) readConfig() (*ConfigEntry, error) { data, err := ioutil.ReadFile(db.ConfigPath) if err != nil { return nil, err } var entries map[string]ConfigEntry err = yaml.Unmarshal(data, &entries) if err != nil { return nil, err } entry, exists := entries[db.Domain] if !exists { return nil, ErrNotFound } return &entry, nil } // writeConfigString is a utility function to write data to db.ConfigPath func (db *DB) writeConfigString(data string) error { tmpfile := db.ConfigPath + ".tmp" err := ioutil.WriteFile(tmpfile, []byte(data), 0600) if err != nil { return err } err = os.Rename(tmpfile, db.ConfigPath) if err != nil { return err } return nil } // appendConfigString is a utility function to write data to db.ConfigPath func (db *DB) appendConfigString(data string) error { config, err := ioutil.ReadFile(db.ConfigPath) if err != nil { return err } config = append(config, []byte(data)...) 
tmpfile := db.ConfigPath + ".tmp" err = ioutil.WriteFile(tmpfile, config, 0600) if err != nil { return err } err = os.Rename(tmpfile, db.ConfigPath) if err != nil { return err } return nil } // randomCode is a utility function to generate a random code // and check that it doesn't exist in mappings. // Random codes use the following pattern: 1 digit, then 4-7 // lowercase ascii characters. This usually allows them to be // relatively easily distinguished from explicit codes, while // still being easy to communicate orally. func randomCode(mappings map[string]string) string { rand.Seed(time.Now().UnixNano()) var b strings.Builder b.WriteByte(digits[rand.Intn(len(digits))]) for i := 1; i < maxRandomCodeLen; i++ { b.WriteByte(chars[rand.Intn(len(chars))]) // If long enough, check if exists in mappings, and return if not if i+1 >= minRandomCodeLen { s := b.String() if _, exists := mappings[s]; !exists { return s } } } // Failed to find an unused code? Just retry? return randomCode(mappings) } func (db *DB) configPlaceholder() string { return db.Domain + `: type: unconfigured # Replace the line above with one of the 'type' sections below for the backend # you wish to use. # 'render' uses render.com as a backend, and needs no additional config here. # See https://github.com/gavincarr/usher/blob/master/Render.md for render configuration details. # type: render # 's3' uses Amazon S3 as a backend, and requires the 3 'aws_*' parameters below. # See https://github.com/gavincarr/usher/blob/master/S3.md for full S3 configuration details. # type: s3 # aws_key: foo # aws_secret: bar # aws_region: us-east-1 ` }
[ "\"USHER_ROOT\"", "\"USHER_DOMAIN\"" ]
[]
[ "USHER_DOMAIN", "USHER_ROOT" ]
[]
["USHER_DOMAIN", "USHER_ROOT"]
go
2
0
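A rough usage sketch of the usher library above: it assumes the module imports as github.com/gavincarr/usher (the repository referenced in the placeholder config), and the domain and URL values are placeholders. It only exercises the exported NewDB, Init, Add and List calls shown in the file.

package main

import (
	"fmt"
	"log"

	"github.com/gavincarr/usher" // assumption: module path taken from the README links above
)

func main() {
	// Domain can also come from USHER_DOMAIN; root from USHER_ROOT.
	db, err := usher.NewDB("example.me")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := db.Init(); err != nil {
		log.Fatal(err)
	}

	// An empty code asks Add to generate a random one.
	code, err := db.Add("https://github.com/gavincarr/usher", "")
	if err != nil && err != usher.ErrCodeExists {
		log.Fatal(err)
	}
	fmt.Println("shortened to", code)

	entries, err := db.List("")
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Printf("%s => %s\n", e.Code, e.Url)
	}
}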
tests/test_length_sequence.py
# -*- coding: utf-8 -*- """Tests for Terminal methods that account for sequences in strings""" # std imports import os import sys import struct import platform import itertools # 3rd party import six import pytest # local from .accessories import TestTerminal, as_subprocess from .conftest import IS_WINDOWS if platform.system() != 'Windows': import fcntl import termios def test_length_cjk(): """Test length of East Asian characters""" @as_subprocess def child(): term = TestTerminal() # given, given = term.bold_red(u'コンニチハ, セカイ!') expected = sum((2, 2, 2, 2, 2, 1, 1, 2, 2, 2, 1,)) # exercise, assert term.length(given) == expected child() def test_length_ansiart(): """Test length of ANSI art""" @as_subprocess def child(kind): import codecs term = TestTerminal(kind=kind) # this 'ansi' art contributed by xzip!impure for another project, # unlike most CP-437 DOS ansi art, this is actually utf-8 encoded. fname = os.path.join(os.path.dirname(__file__), 'wall.ans') with codecs.open(fname, 'r', 'utf-8') as ansiart: lines = ansiart.readlines() assert term.length(lines[0]) == 67 # ^[[64C^[[34m▄▓▄ assert term.length(lines[1]) == 75 assert term.length(lines[2]) == 78 assert term.length(lines[3]) == 78 assert term.length(lines[4]) == 78 assert term.length(lines[5]) == 78 assert term.length(lines[6]) == 77 kind = 'vtwin10' if IS_WINDOWS else 'xterm-256color' child(kind) def test_sequence_length(all_terms): """Ensure T.length(string containing sequence) is correcterm.""" # pylint: disable=too-complex,too-many-statements @as_subprocess def child(kind): term = TestTerminal(kind=kind, force_styling=True) # Make sure to test with 24-bit color on at least one terminal if kind == 'xterm': term.number_of_colors = 1 << 24 # Create a list of ascii characters, to be separated # by word, to be zipped up with a cycling list of # terminal sequences. Then, compare the length of # each, the basic plain_texterm.__len__ vs. the Terminal # method length. They should be equal. 
plain_text = (u'The softest things of the world ' u'Override the hardest things of the world ' u'That which has no substance ' u'Enters into that which has no openings') if term.bold: assert (term.length(term.bold) == 0) assert (term.length(term.bold(u'x')) == 1) assert (term.length(term.bold_red) == 0) assert (term.length(term.bold_red(u'x')) == 1) assert (term.length(term.bold_on_red) == 0) assert (term.length(term.bold_on_red(u'x')) == 1) assert (term.length(term.bold_olivedrab4) == 0) assert (term.length(term.bold_olivedrab4(u'x')) == 1) assert (term.length(term.bold_on_olivedrab4) == 0) assert (term.length(term.bold_on_olivedrab4(u'x')) == 1) assert (term.strip(term.bold) == u'') assert (term.rstrip(term.bold) == u'') assert (term.lstrip(term.bold) == u'') assert (term.strip(term.bold(u' x ')) == u'x') assert (term.strip(term.bold(u'z x q'), 'zq') == u' x ') assert (term.rstrip(term.bold(u' x ')) == u' x') assert (term.lstrip(term.bold(u' x ')) == u'x ') assert (term.strip(term.bold_red) == u'') assert (term.rstrip(term.bold_red) == u'') assert (term.lstrip(term.bold_red) == u'') assert (term.strip(term.bold_on_red) == u'') assert (term.rstrip(term.bold_on_red) == u'') assert (term.lstrip(term.bold_on_red) == u'') assert (term.strip(term.bold_olivedrab4) == u'') assert (term.rstrip(term.bold_olivedrab4) == u'') assert (term.lstrip(term.bold_olivedrab4) == u'') assert (term.strip(term.bold_on_olivedrab4) == u'') assert (term.rstrip(term.bold_on_olivedrab4) == u'') assert (term.lstrip(term.bold_on_olivedrab4) == u'') assert (term.strip(term.bold_red(u' x ')) == u'x') assert (term.rstrip(term.bold_red(u' x ')) == u' x') assert (term.lstrip(term.bold_red(u' x ')) == u'x ') assert (term.strip(term.bold_on_red(u' x ')) == u'x') assert (term.rstrip(term.bold_on_red(u' x ')) == u' x') assert (term.lstrip(term.bold_on_red(u' x ')) == u'x ') assert (term.strip(term.bold_olivedrab4(u' x ')) == u'x') assert (term.rstrip(term.bold_olivedrab4(u' x ')) == u' x') assert (term.lstrip(term.bold_olivedrab4(u' x ')) == u'x ') assert (term.strip(term.bold_on_olivedrab4(u' x ')) == u'x') assert (term.rstrip(term.bold_on_olivedrab4(u' x ')) == u' x') assert (term.lstrip(term.bold_on_olivedrab4(u' x ')) == u'x ') assert (term.strip_seqs(term.bold) == u'') assert (term.strip_seqs(term.bold(u' x ')) == u' x ') assert (term.strip_seqs(term.bold_red) == u'') assert (term.strip_seqs(term.bold_red(u' x ')) == u' x ') assert (term.strip_seqs(term.bold_on_red) == u'') assert (term.strip_seqs(term.bold_on_red(u' x ')) == u' x ') assert (term.strip_seqs(term.bold_olivedrab4) == u'') assert (term.strip_seqs(term.bold_olivedrab4(u' x ')) == u' x ') assert (term.strip_seqs(term.bold_on_olivedrab4) == u'') assert (term.strip_seqs(term.bold_on_olivedrab4(u' x ')) == u' x ') if term.underline: assert (term.length(term.underline) == 0) assert (term.length(term.underline(u'x')) == 1) assert (term.length(term.underline_red) == 0) assert (term.length(term.underline_red(u'x')) == 1) assert (term.length(term.underline_on_red) == 0) assert (term.length(term.underline_on_red(u'x')) == 1) assert (term.length(term.underline_olivedrab4) == 0) assert (term.length(term.underline_olivedrab4(u'x')) == 1) assert (term.length(term.underline_on_olivedrab4) == 0) assert (term.length(term.underline_on_olivedrab4(u'x')) == 1) assert (term.strip(term.underline) == u'') assert (term.strip(term.underline(u' x ')) == u'x') assert (term.strip(term.underline_red) == u'') assert (term.strip(term.underline_red(u' x ')) == u'x') assert 
(term.rstrip(term.underline_red(u' x ')) == u' x') assert (term.lstrip(term.underline_red(u' x ')) == u'x ') assert (term.strip(term.underline_on_red) == u'') assert (term.strip(term.underline_on_red(u' x ')) == u'x') assert (term.rstrip(term.underline_on_red(u' x ')) == u' x') assert (term.lstrip(term.underline_on_red(u' x ')) == u'x ') assert (term.strip(term.underline_olivedrab4) == u'') assert (term.strip(term.underline_olivedrab4(u' x ')) == u'x') assert (term.rstrip(term.underline_olivedrab4(u' x ')) == u' x') assert (term.lstrip(term.underline_olivedrab4(u' x ')) == u'x ') assert (term.strip(term.underline_on_olivedrab4) == u'') assert (term.strip(term.underline_on_olivedrab4(u' x ')) == u'x') assert (term.rstrip(term.underline_on_olivedrab4(u' x ')) == u' x') assert (term.lstrip(term.underline_on_olivedrab4(u' x ')) == u'x ') assert (term.strip_seqs(term.underline) == u'') assert (term.strip_seqs(term.underline(u' x ')) == u' x ') assert (term.strip_seqs(term.underline_red) == u'') assert (term.strip_seqs(term.underline_red(u' x ')) == u' x ') assert (term.strip_seqs(term.underline_on_red) == u'') assert (term.strip_seqs(term.underline_on_red(u' x ')) == u' x ') assert (term.strip_seqs(term.underline_olivedrab4) == u'') assert (term.strip_seqs(term.underline_olivedrab4(u' x ')) == u' x ') assert (term.strip_seqs(term.underline_on_olivedrab4) == u'') assert (term.strip_seqs(term.underline_on_olivedrab4(u' x ')) == u' x ') if term.reverse: assert (term.length(term.reverse) == 0) assert (term.length(term.reverse(u'x')) == 1) assert (term.length(term.reverse_red) == 0) assert (term.length(term.reverse_red(u'x')) == 1) assert (term.length(term.reverse_on_red) == 0) assert (term.length(term.reverse_on_red(u'x')) == 1) assert (term.length(term.reverse_olivedrab4) == 0) assert (term.length(term.reverse_olivedrab4(u'x')) == 1) assert (term.length(term.reverse_on_olivedrab4) == 0) assert (term.length(term.reverse_on_olivedrab4(u'x')) == 1) assert (term.strip(term.reverse) == u'') assert (term.strip(term.reverse(u' x ')) == u'x') assert (term.strip(term.reverse_red) == u'') assert (term.strip(term.reverse_red(u' x ')) == u'x') assert (term.rstrip(term.reverse_red(u' x ')) == u' x') assert (term.lstrip(term.reverse_red(u' x ')) == u'x ') assert (term.strip(term.reverse_on_red) == u'') assert (term.strip(term.reverse_on_red(u' x ')) == u'x') assert (term.rstrip(term.reverse_on_red(u' x ')) == u' x') assert (term.lstrip(term.reverse_on_red(u' x ')) == u'x ') assert (term.strip(term.reverse_olivedrab4) == u'') assert (term.strip(term.reverse_olivedrab4(u' x ')) == u'x') assert (term.rstrip(term.reverse_olivedrab4(u' x ')) == u' x') assert (term.lstrip(term.reverse_olivedrab4(u' x ')) == u'x ') assert (term.strip(term.reverse_on_olivedrab4) == u'') assert (term.strip(term.reverse_on_olivedrab4(u' x ')) == u'x') assert (term.rstrip(term.reverse_on_olivedrab4(u' x ')) == u' x') assert (term.lstrip(term.reverse_on_olivedrab4(u' x ')) == u'x ') assert (term.strip_seqs(term.reverse) == u'') assert (term.strip_seqs(term.reverse(u' x ')) == u' x ') assert (term.strip_seqs(term.reverse_red) == u'') assert (term.strip_seqs(term.reverse_red(u' x ')) == u' x ') assert (term.strip_seqs(term.reverse_on_red) == u'') assert (term.strip_seqs(term.reverse_on_red(u' x ')) == u' x ') assert (term.strip_seqs(term.reverse_olivedrab4) == u'') assert (term.strip_seqs(term.reverse_olivedrab4(u' x ')) == u' x ') assert (term.strip_seqs(term.reverse_on_olivedrab4) == u'') assert 
(term.strip_seqs(term.reverse_on_olivedrab4(u' x ')) == u' x ') if term.blink: assert (term.length(term.blink) == 0) assert (term.length(term.blink(u'x')) == 1) assert (term.length(term.blink_red) == 0) assert (term.length(term.blink_red(u'x')) == 1) assert (term.length(term.blink_on_red) == 0) assert (term.length(term.blink_on_red(u'x')) == 1) assert (term.length(term.blink_olivedrab4) == 0) assert (term.length(term.blink_olivedrab4(u'x')) == 1) assert (term.length(term.blink_on_olivedrab4) == 0) assert (term.length(term.blink_on_olivedrab4(u'x')) == 1) assert (term.strip(term.blink) == u'') assert (term.strip(term.blink(u' x ')) == u'x') assert (term.strip(term.blink(u'z x q'), u'zq') == u' x ') assert (term.strip(term.blink_red) == u'') assert (term.strip(term.blink_red(u' x ')) == u'x') assert (term.strip(term.blink_on_red) == u'') assert (term.strip(term.blink_on_red(u' x ')) == u'x') assert (term.strip(term.blink_olivedrab4) == u'') assert (term.strip(term.blink_olivedrab4(u' x ')) == u'x') assert (term.strip(term.blink_on_olivedrab4) == u'') assert (term.strip(term.blink_on_olivedrab4(u' x ')) == u'x') assert (term.strip_seqs(term.blink) == u'') assert (term.strip_seqs(term.blink(u' x ')) == u' x ') assert (term.strip_seqs(term.blink_red) == u'') assert (term.strip_seqs(term.blink_red(u' x ')) == u' x ') assert (term.strip_seqs(term.blink_on_red) == u'') assert (term.strip_seqs(term.blink_on_red(u' x ')) == u' x ') assert (term.strip_seqs(term.blink_olivedrab4) == u'') assert (term.strip_seqs(term.blink_olivedrab4(u' x ')) == u' x ') assert (term.strip_seqs(term.blink_on_olivedrab4) == u'') assert (term.strip_seqs(term.blink_on_olivedrab4(u' x ')) == u' x ') if term.home: assert (term.length(term.home) == 0) assert (term.strip(term.home) == u'') if term.clear_eol: assert (term.length(term.clear_eol) == 0) assert (term.strip(term.clear_eol) == u'') if term.enter_fullscreen: assert (term.length(term.enter_fullscreen) == 0) assert (term.strip(term.enter_fullscreen) == u'') if term.exit_fullscreen: assert (term.length(term.exit_fullscreen) == 0) assert (term.strip(term.exit_fullscreen) == u'') # horizontally, we decide move_down and move_up are 0, assert (term.length(term.move_down) == 0) assert (term.length(term.move_down(2)) == 0) assert (term.length(term.move_up) == 0) assert (term.length(term.move_up(2)) == 0) # other things aren't so simple, somewhat edge cases, # moving backwards and forwards horizontally must be # accounted for as a "length", as <x><move right 10><y> # will result in a printed column length of 12 (even # though columns 2-11 are non-destructive space assert (term.length(u'x\b') == 0) assert (term.strip(u'x\b') == u'') # XXX why are some terminals width of 9 here ?? 
assert (term.length(u'\t') in (8, 9)) assert (term.strip(u'\t') == u'') assert (term.length(u'_' + term.move_left) == 0) assert (term.length(term.move_right) == 1) if term.cub: assert (term.length((u'_' * 10) + term.cub(10)) == 0) if term.cuf: assert (term.length(term.cuf(10)) == 10) # vertical spacing is unaccounted as a 'length' assert (term.length(term.move_up) == 0) assert (term.length(term.cuu(10)) == 0) assert (term.length(term.move_down) == 0) assert (term.length(term.cud(10)) == 0) # this is how manpages perform underlining, this is done # with the 'overstrike' capability of teletypes, and aparently # less(1), '123' -> '1\b_2\b_3\b_' text_wseqs = u''.join(itertools.chain( *zip(plain_text, itertools.cycle(['\b_'])))) assert (term.length(text_wseqs) == len(plain_text)) child(all_terms) def test_env_winsize(): """Test height and width is appropriately queried in a pty.""" @as_subprocess def child(): # set the pty's virtual window size os.environ['COLUMNS'] = '99' os.environ['LINES'] = '11' term = TestTerminal(stream=six.StringIO()) save_init = term._init_descriptor save_stdout = sys.__stdout__ try: term._init_descriptor = None sys.__stdout__ = None winsize = term._height_and_width() width = term.width height = term.height finally: term._init_descriptor = save_init sys.__stdout__ = save_stdout assert winsize.ws_col == width == 99 assert winsize.ws_row == height == 11 child() @pytest.mark.skipif(IS_WINDOWS, reason="requires fcntl") def test_winsize(many_lines, many_columns): """Test height and width is appropriately queried in a pty.""" pixel_width, pixel_height = 1024, 768 @as_subprocess def child(lines=25, cols=80): # set the pty's virtual window size val = struct.pack('HHHH', lines, cols, pixel_width, pixel_height) fcntl.ioctl(sys.__stdout__.fileno(), termios.TIOCSWINSZ, val) term = TestTerminal() winsize = term._height_and_width() assert term.width == cols assert term.height == lines assert winsize.ws_col == cols assert winsize.ws_row == lines assert term.pixel_width == pixel_width assert term.pixel_height == pixel_height child(lines=many_lines, cols=many_columns) def test_Sequence_alignment_fixed_width(all_terms): """Test alignment methods with width provided""" @as_subprocess def child(kind): term = TestTerminal(kind=kind) pony_msg = 'pony express, all aboard, choo, choo!' pony_len = len(pony_msg) pony_colored = u''.join('%s%s' % (term.color(n % 7), ch,) for n, ch in enumerate(pony_msg)) pony_colored += term.normal ladjusted = term.ljust(pony_colored, 88) radjusted = term.rjust(pony_colored, 88) centered = term.center(pony_colored, 88) assert (term.length(pony_colored) == pony_len) assert (term.length(centered.strip()) == pony_len) assert (term.length(centered) == len(pony_msg.center(88))) assert (term.length(ladjusted.strip()) == pony_len) assert (term.length(ladjusted) == len(pony_msg.ljust(88))) assert (term.length(radjusted.strip()) == pony_len) assert (term.length(radjusted) == len(pony_msg.rjust(88))) child(kind=all_terms) @pytest.mark.skipif(IS_WINDOWS, reason="requires fcntl") def test_Sequence_alignment(all_terms): """Tests methods related to Sequence class, namely ljust, rjust, center.""" @as_subprocess def child(kind, lines=25, cols=80): # set the pty's virtual window size val = struct.pack('HHHH', lines, cols, 0, 0) fcntl.ioctl(sys.__stdout__.fileno(), termios.TIOCSWINSZ, val) term = TestTerminal(kind=kind) pony_msg = 'pony express, all aboard, choo, choo!' 
pony_len = len(pony_msg) pony_colored = u''.join('%s%s' % (term.color(n % 7), ch,) for n, ch in enumerate(pony_msg)) pony_colored += term.normal ladjusted = term.ljust(pony_colored) radjusted = term.rjust(pony_colored) centered = term.center(pony_colored) assert (term.length(pony_colored) == pony_len) assert (term.length(centered.strip()) == pony_len) assert (term.length(centered) == len(pony_msg.center(term.width))) assert (term.length(ladjusted.strip()) == pony_len) assert (term.length(ladjusted) == len(pony_msg.ljust(term.width))) assert (term.length(radjusted.strip()) == pony_len) assert (term.length(radjusted) == len(pony_msg.rjust(term.width))) child(kind=all_terms) def test_hyperlink_nostyling(): """Test length our of hyperlink URL's.""" @as_subprocess def child(): # given, term = TestTerminal(force_styling=None) given_basic_url = term.link( 'https://blessed.readthedocs.org', 'blessed') assert given_basic_url == 'blessed' child() def test_basic_hyperlinks(): """Test length our of hyperlink URL's.""" @as_subprocess def child(): # given, term = TestTerminal() given_basic_url = term.link( 'https://blessed.readthedocs.org', 'blessed') # exercise, split_parts = term.split_seqs(given_basic_url) # verify if term.does_styling: assert split_parts[0] == '\x1b]8;;https://blessed.readthedocs.org\x1b\\' assert term.length(split_parts[0]) == 0 assert ''.join(split_parts[1:8]) == 'blessed' assert split_parts[8] == '\x1b]8;;\x1b\\' assert len(split_parts) == 9 else: assert ''.join(split_parts) == 'blessed' child() def test_hyperlink_with_id(): """Test length our of hyperlink URL's with ID.""" @as_subprocess def child(): # given, term = TestTerminal() given_advanced_urltext = term.link( 'https://blessed.readthedocs.org', 'blessed', '123') # exercise, split_parts = term.split_seqs(given_advanced_urltext) # verify, if term.does_styling: assert split_parts[0] == '\x1b]8;id=123;https://blessed.readthedocs.org\x1b\\' assert term.length(split_parts[0]) == 0 assert ''.join(split_parts[1:8]) == 'blessed' assert split_parts[8] == '\x1b]8;;\x1b\\' assert len(split_parts) == 9 else: assert ''.join(split_parts) == 'blessed' child() def test_sequence_is_movement_false(all_terms): """Test parser about sequences that do not move the cursor.""" @as_subprocess def child(kind): from blessed.sequences import measure_length term = TestTerminal(kind=kind) assert measure_length(u'', term) == 0 # not even a mbs assert measure_length(u'xyzzy', term) == 0 # negative numbers, though printable as %d, do not result # in movement; just garbage. Also not a valid sequence. 
assert measure_length(term.cuf(-333), term) == 0 assert (len(term.clear_eol) == measure_length(term.clear_eol, term)) # various erases don't *move* assert (len(term.clear_bol) == measure_length(term.clear_bol, term)) assert (len(term.clear_eos) == measure_length(term.clear_eos, term)) assert (len(term.bold) == measure_length(term.bold, term)) # various paints don't move assert (len(term.red) == measure_length(term.red, term)) assert (len(term.civis) == measure_length(term.civis, term)) if term.cvvis: assert (len(term.cvvis) == measure_length(term.cvvis, term)) assert (len(term.underline) == measure_length(term.underline, term)) assert (len(term.reverse) == measure_length(term.reverse, term)) for _num in (0, term.number_of_colors): expected = len(term.color(_num)) given = measure_length(term.color(_num), term) assert (expected == given) assert (len(term.normal_cursor) == measure_length(term.normal_cursor, term)) assert (len(term.hide_cursor) == measure_length(term.hide_cursor, term)) assert (len(term.save) == measure_length(term.save, term)) assert (len(term.italic) == measure_length(term.italic, term)) assert (len(term.standout) == measure_length(term.standout, term) ), (term.standout, term._wont_move) child(all_terms) def test_termcap_will_move_false(all_terms): # pylint: disable=too-complex,too-many-branches """Test parser about sequences that do not move the cursor.""" @as_subprocess def child(kind): # pylint: disable=too-many-branches from blessed.sequences import iter_parse term = TestTerminal(kind=kind) if term.clear_eol: assert not next(iter_parse(term, term.clear_eol))[1].will_move if term.clear_bol: assert not next(iter_parse(term, term.clear_bol))[1].will_move if term.clear_eos: assert not next(iter_parse(term, term.clear_eos))[1].will_move if term.bold: assert not next(iter_parse(term, term.bold))[1].will_move if term.red: assert not next(iter_parse(term, term.red))[1].will_move if term.civis: assert not next(iter_parse(term, term.civis))[1].will_move if term.cvvis: assert not next(iter_parse(term, term.cvvis))[1].will_move if term.underline: assert not next(iter_parse(term, term.underline))[1].will_move if term.reverse: assert not next(iter_parse(term, term.reverse))[1].will_move if term.color(0): assert not next(iter_parse(term, term.color(0)))[1].will_move if term.normal_cursor: assert not next(iter_parse(term, term.normal_cursor))[1].will_move if term.save: assert not next(iter_parse(term, term.save))[1].will_move if term.italic: assert not next(iter_parse(term, term.italic))[1].will_move if term.standout: assert not next(iter_parse(term, term.standout))[1].will_move child(all_terms) def test_sequence_is_movement_true(all_terms): """Test parsers about sequences that move the cursor.""" @as_subprocess def child(kind): from blessed.sequences import measure_length term = TestTerminal(kind=kind) # movements assert (len(term.move(98, 76)) == measure_length(term.move(98, 76), term)) assert (len(term.move(54)) == measure_length(term.move(54), term)) assert (len(term.move_xy(1, 2)) == measure_length(term.move(1, 2), term)) assert (len(term.move_yx(3, 4)) == measure_length(term.move(3, 4), term)) assert not term.cud1 or (len(term.cud1) == measure_length(term.cud1, term)) assert not term.cub1 or (len(term.cub1) == measure_length(term.cub1, term)) assert not term.cuf1 or (len(term.cuf1) == measure_length(term.cuf1, term)) assert not term.cuu1 or (len(term.cuu1) == measure_length(term.cuu1, term)) assert not term.cub or (len(term.cub(333)) == measure_length(term.cub(333), term)) assert 
not term.cuf or (len(term.cuf(333)) == measure_length(term.cuf(333), term)) assert not term.home or (len(term.home) == measure_length(term.home, term)) assert not term.restore or (len(term.restore) == measure_length(term.restore, term)) assert not term.clear or (len(term.clear) == measure_length(term.clear, term)) child(all_terms) def test_termcap_will_move_true(all_terms): """Test parser about sequences that move the cursor.""" @as_subprocess def child(kind): from blessed.sequences import iter_parse term = TestTerminal(kind=kind, force_styling=True) assert next(iter_parse(term, term.move(98, 76)))[1].will_move assert next(iter_parse(term, term.move_yx(8, 76)))[1].will_move assert next(iter_parse(term, term.move_xy(98, 7)))[1].will_move assert next(iter_parse(term, term.move(54)))[1].will_move assert next(iter_parse(term, term.cud1))[1].will_move assert next(iter_parse(term, term.cub1))[1].will_move assert next(iter_parse(term, term.cuf1))[1].will_move assert next(iter_parse(term, term.cuu1))[1].will_move if term.cub(333): assert next(iter_parse(term, term.cub(333)))[1].will_move if term.cuf(333): assert next(iter_parse(term, term.cuf(333)))[1].will_move assert next(iter_parse(term, term.home))[1].will_move assert next(iter_parse(term, term.restore))[1].will_move assert next(iter_parse(term, term.clear))[1].will_move child(all_terms) def test_foreign_sequences(): """Test parsers about sequences received from foreign sources.""" @as_subprocess def child(kind): from blessed.sequences import measure_length term = TestTerminal(kind=kind) assert measure_length(u'\x1b[m', term) == len('\x1b[m') child(kind='ansi')
[]
[]
[ "LINES", "COLUMNS" ]
[]
["LINES", "COLUMNS"]
python
2
0
devops/main.go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"
	"os/signal"
	"strconv"
	"syscall"
	"time"

	"github.com/LimouziCoDev/meetup-1-init/devops/elastic"
	elasticapi "gopkg.in/olivere/elastic.v5"
)

func main() {
	// Errors channel
	errc := make(chan error)

	// Create a client for elasticsearch
	elasticURL := os.Getenv("ES_URL")
	if elasticURL == "" {
		elasticURL = "http://127.0.0.1:9200/"
	}
	client, err := elasticapi.NewClient(elasticapi.SetSniff(false), elasticapi.SetURL(elasticURL))
	if err != nil {
		// trick for docker-compose wait until ES is ready
		fmt.Println("ES not ready, wait for 10s")
		time.Sleep(time.Second * 10)
		client, err = elasticapi.NewClient(elasticapi.SetSniff(false), elasticapi.SetURL(elasticURL))
		if err != nil {
			panic(err)
		}
	}

	// Interrupt handler.
	go func() {
		c := make(chan os.Signal, 1)
		signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
		errc <- fmt.Errorf("%s", <-c)
	}()

	go func() {
		// index endpoint
		http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
			fmt.Fprintln(w, "Welcome to the first Limouzi meetup !")
		})

		http.HandleFunc("/bank/search/", func(w http.ResponseWriter, r *http.Request) {
			ages, ok := r.URL.Query()["age"]
			if !ok || len(ages) < 1 {
				log.Println("Url Param 'age' is missing")
				w.WriteHeader(http.StatusBadRequest)
				return
			}
			age, _ := strconv.Atoi(ages[0])

			accounts, err := elastic.GetAccountByAge(client, int(age))
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				log.Println(err)
				return
			}

			w.Header().Set("Content-Type", "application/json; charset=utf-8")
			w.WriteHeader(http.StatusOK)
			json.NewEncoder(w).Encode(accounts)
		})

		log.Println("The microservice bookkeeping-data-migration is started on port 8080")
		errc <- http.ListenAndServe("localhost:8080", nil)
	}()

	log.Println("exit", <-errc)
}
[ "\"ES_URL\"" ]
[]
[ "ES_URL" ]
[]
["ES_URL"]
go
1
0
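Because the service above only exposes GET endpoints on localhost:8080, it can be smoke-tested with a plain net/http client. A minimal sketch, assuming the service is already running and that the queried age exists in the Elasticsearch index:

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// "age" is required by the /bank/search/ handler above; a missing
	// parameter yields 400, an Elasticsearch failure yields 500.
	resp, err := http.Get("http://localhost:8080/bank/search/?age=32")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		log.Fatalf("unexpected status: %s", resp.Status)
	}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body)) // JSON-encoded list of matching accounts
}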
zproto/chat_test_data.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: chat_test_data.proto /* Package zproto is a generated protocol buffer package. It is generated from these files: chat_test_data.proto message.proto rpc_metadata.proto sync.proto It has these top-level messages: ChatMessage ChatSession VoidRsp2 MessageDataEmpty MessageData RpcMetadata ServerAuthReq VoidRsp DeliveryUpdatesToUsers PushUpdatesData */ package zproto import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type ChatMessage struct { SenderSessionId string `protobuf:"bytes,1,opt,name=sender_session_id,json=senderSessionId" json:"sender_session_id,omitempty"` MessageData string `protobuf:"bytes,3,opt,name=message_data,json=messageData" json:"message_data,omitempty"` } func (m *ChatMessage) Reset() { *m = ChatMessage{} } func (m *ChatMessage) String() string { return proto.CompactTextString(m) } func (*ChatMessage) ProtoMessage() {} func (*ChatMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *ChatMessage) GetSenderSessionId() string { if m != nil { return m.SenderSessionId } return "" } func (m *ChatMessage) GetMessageData() string { if m != nil { return m.MessageData } return "" } type ChatSession struct { SessionId string `protobuf:"bytes,1,opt,name=session_id,json=sessionId" json:"session_id,omitempty"` } func (m *ChatSession) Reset() { *m = ChatSession{} } func (m *ChatSession) String() string { return proto.CompactTextString(m) } func (*ChatSession) ProtoMessage() {} func (*ChatSession) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *ChatSession) GetSessionId() string { if m != nil { return m.SessionId } return "" } type VoidRsp2 struct { } func (m *VoidRsp2) Reset() { *m = VoidRsp2{} } func (m *VoidRsp2) String() string { return proto.CompactTextString(m) } func (*VoidRsp2) ProtoMessage() {} func (*VoidRsp2) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } func init() { proto.RegisterType((*ChatMessage)(nil), "zproto.ChatMessage") proto.RegisterType((*ChatSession)(nil), "zproto.ChatSession") proto.RegisterType((*VoidRsp2)(nil), "zproto.VoidRsp2") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. 
const _ = grpc.SupportPackageIsVersion4 // Client API for ChatTest service type ChatTestClient interface { Connect(ctx context.Context, in *ChatSession, opts ...grpc.CallOption) (ChatTest_ConnectClient, error) SendChat(ctx context.Context, in *ChatMessage, opts ...grpc.CallOption) (*VoidRsp2, error) } type chatTestClient struct { cc *grpc.ClientConn } func NewChatTestClient(cc *grpc.ClientConn) ChatTestClient { return &chatTestClient{cc} } func (c *chatTestClient) Connect(ctx context.Context, in *ChatSession, opts ...grpc.CallOption) (ChatTest_ConnectClient, error) { stream, err := grpc.NewClientStream(ctx, &_ChatTest_serviceDesc.Streams[0], c.cc, "/zproto.ChatTest/Connect", opts...) if err != nil { return nil, err } x := &chatTestConnectClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type ChatTest_ConnectClient interface { Recv() (*ChatMessage, error) grpc.ClientStream } type chatTestConnectClient struct { grpc.ClientStream } func (x *chatTestConnectClient) Recv() (*ChatMessage, error) { m := new(ChatMessage) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *chatTestClient) SendChat(ctx context.Context, in *ChatMessage, opts ...grpc.CallOption) (*VoidRsp2, error) { out := new(VoidRsp2) err := grpc.Invoke(ctx, "/zproto.ChatTest/SendChat", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } // Server API for ChatTest service type ChatTestServer interface { Connect(*ChatSession, ChatTest_ConnectServer) error SendChat(context.Context, *ChatMessage) (*VoidRsp2, error) } func RegisterChatTestServer(s *grpc.Server, srv ChatTestServer) { s.RegisterService(&_ChatTest_serviceDesc, srv) } func _ChatTest_Connect_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(ChatSession) if err := stream.RecvMsg(m); err != nil { return err } return srv.(ChatTestServer).Connect(m, &chatTestConnectServer{stream}) } type ChatTest_ConnectServer interface { Send(*ChatMessage) error grpc.ServerStream } type chatTestConnectServer struct { grpc.ServerStream } func (x *chatTestConnectServer) Send(m *ChatMessage) error { return x.ServerStream.SendMsg(m) } func _ChatTest_SendChat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ChatMessage) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(ChatTestServer).SendChat(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/zproto.ChatTest/SendChat", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChatTestServer).SendChat(ctx, req.(*ChatMessage)) } return interceptor(ctx, in, info, handler) } var _ChatTest_serviceDesc = grpc.ServiceDesc{ ServiceName: "zproto.ChatTest", HandlerType: (*ChatTestServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "SendChat", Handler: _ChatTest_SendChat_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "Connect", Handler: _ChatTest_Connect_Handler, ServerStreams: true, }, }, Metadata: "chat_test_data.proto", } func init() { proto.RegisterFile("chat_test_data.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 240 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x8f, 0x4d, 0x4b, 0xc3, 0x40, 0x10, 0x86, 0x89, 0x42, 0x4d, 0xa7, 0xe2, 0xc7, 0xd4, 0x43, 0x29, 0x08, 0x9a, 0x93, 0x8a, 0x04, 
0xad, 0xf8, 0x07, 0x5a, 0x0f, 0x7a, 0x10, 0x4a, 0x2b, 0x1e, 0x44, 0x08, 0xd3, 0xec, 0x60, 0x03, 0x66, 0xb7, 0x64, 0xc6, 0x8b, 0xbf, 0x5e, 0x92, 0xdd, 0x05, 0xc5, 0x9e, 0x06, 0x9e, 0x77, 0xde, 0x67, 0x67, 0xe1, 0xa4, 0x5c, 0x93, 0x16, 0xca, 0xa2, 0x85, 0x21, 0xa5, 0x7c, 0xd3, 0x38, 0x75, 0xd8, 0xfb, 0xee, 0x66, 0xf6, 0x0e, 0x83, 0xd9, 0x9a, 0xf4, 0x99, 0x45, 0xe8, 0x83, 0xf1, 0x0a, 0x8e, 0x85, 0xad, 0xe1, 0xa6, 0x10, 0x16, 0xa9, 0x9c, 0x2d, 0x2a, 0x33, 0x4a, 0xce, 0x92, 0x8b, 0xfe, 0xe2, 0xd0, 0x07, 0x4b, 0xcf, 0x9f, 0x0c, 0x9e, 0xc3, 0x7e, 0xed, 0x6b, 0x9d, 0x78, 0xb4, 0xdb, 0xad, 0x0d, 0x02, 0x7b, 0x20, 0xa5, 0xec, 0xda, 0xdb, 0x43, 0x07, 0x4f, 0x01, 0xfe, 0x69, 0xfb, 0x12, 0x85, 0x19, 0x40, 0xfa, 0xea, 0x2a, 0xb3, 0x90, 0xcd, 0x64, 0xa2, 0x90, 0xb6, 0xcd, 0x17, 0x16, 0xc5, 0x7b, 0xd8, 0x9b, 0x39, 0x6b, 0xb9, 0x54, 0x1c, 0xe6, 0xfe, 0xee, 0xfc, 0x97, 0x76, 0xfc, 0x07, 0x86, 0x9f, 0xdc, 0x24, 0x78, 0x0b, 0xe9, 0x92, 0xad, 0x69, 0x21, 0x6e, 0x5b, 0x19, 0x1f, 0x45, 0x18, 0x5f, 0x9d, 0x5e, 0xc2, 0xb0, 0x74, 0x75, 0x6e, 0x79, 0xf5, 0xf5, 0x49, 0x55, 0x1d, 0xf2, 0xe9, 0xc1, 0xdb, 0xbc, 0x9d, 0xf1, 0xa0, 0xc7, 0x9d, 0x79, 0xb2, 0xea, 0x75, 0xd1, 0xdd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x73, 0xe7, 0x5b, 0x83, 0x5f, 0x01, 0x00, 0x00, }
[]
[]
[]
[]
[]
go
null
null
null
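A client for the generated ChatTest service above might be wired up as follows. This is only a sketch: the "zproto" import path and the listen address are assumptions (adjust both to your module and server), and error handling is kept minimal. It uses only the generated API shown above: NewChatTestClient, the Connect server stream, and the unary SendChat.

package main

import (
	"context"
	"io"
	"log"

	"google.golang.org/grpc"

	"zproto" // assumption: adjust to wherever the generated zproto package lives
)

func main() {
	// assumption: the ChatTest server listens on this address
	conn, err := grpc.Dial("localhost:10001", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := zproto.NewChatTestClient(conn)

	// Connect opens a server stream of ChatMessages for this session.
	stream, err := client.Connect(context.Background(), &zproto.ChatSession{SessionId: "session-1"})
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		for {
			msg, err := stream.Recv()
			if err == io.EOF {
				return
			}
			if err != nil {
				log.Println("recv:", err)
				return
			}
			log.Printf("%s: %s", msg.GetSenderSessionId(), msg.GetMessageData())
		}
	}()

	// SendChat is the unary half of the service.
	if _, err := client.SendChat(context.Background(), &zproto.ChatMessage{
		SenderSessionId: "session-1",
		MessageData:     "hello",
	}); err != nil {
		log.Fatal(err)
	}
	// A real client would block here (e.g. on the stream) instead of exiting immediately.
}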
routing/route_adding.py
import os
from pathlib import Path

from sanic import Sanic, response

import routing.ice_cream_or_pickle.ice_cream_or_pickle as ice_cream_or_pickle


def add_routes(app: Sanic):
    # create handler for index route
    home_index = Path('./home_index')

    async def index(request):
        return await response.file(str(home_index / 'index.html'))

    async def favicon(request):
        return await response.file(str(home_index / 'favicon.ico'))

    app.static('/static', str(home_index / 'static'))
    app.add_route(index, '/')
    app.add_route(favicon, '/favicon.ico')

    # Ice cream or pickle
    app.add_route(ice_cream_or_pickle.IceCreamOrPickle.as_view(), '/ice_cream_or_pickle')

    # IFTTT testing routes
    @app.route('/test_api/ifttt/v1/status', methods=['GET'])
    def ifttt_status(request):
        headers = request.headers
        channel_key = headers['ifttt-channel-key']
        r = response.text('ONLINE!')
        if channel_key != os.environ.get('IFTTT_SERVICE_KEY'):
            r = response.text('ERROR!')
            r.status = 401
        return r
[]
[]
[ "IFTTT_SERVICE_KEY" ]
[]
["IFTTT_SERVICE_KEY"]
python
1
0
soracom/generated/cmd/subscribers_list.go
// Code generated by soracom-cli generate-cmd. DO NOT EDIT. package cmd import ( "fmt" "net/url" "os" "github.com/spf13/cobra" ) // SubscribersListCmdLastEvaluatedKey holds value of 'last_evaluated_key' option var SubscribersListCmdLastEvaluatedKey string // SubscribersListCmdSerialNumberFilter holds value of 'serial_number_filter' option var SubscribersListCmdSerialNumberFilter string // SubscribersListCmdSpeedClassFilter holds value of 'speed_class_filter' option var SubscribersListCmdSpeedClassFilter string // SubscribersListCmdStatusFilter holds value of 'status_filter' option var SubscribersListCmdStatusFilter string // SubscribersListCmdTagName holds value of 'tag_name' option var SubscribersListCmdTagName string // SubscribersListCmdTagValue holds value of 'tag_value' option var SubscribersListCmdTagValue string // SubscribersListCmdTagValueMatchMode holds value of 'tag_value_match_mode' option var SubscribersListCmdTagValueMatchMode string // SubscribersListCmdLimit holds value of 'limit' option var SubscribersListCmdLimit int64 // SubscribersListCmdPaginate indicates to do pagination or not var SubscribersListCmdPaginate bool // SubscribersListCmdOutputJSONL indicates to output with jsonl format var SubscribersListCmdOutputJSONL bool func init() { SubscribersListCmd.Flags().StringVar(&SubscribersListCmdLastEvaluatedKey, "last-evaluated-key", "", TRAPI("The IMSI of the last subscriber retrieved on the current page. By specifying this parameter, you can continue to retrieve the list from the next subscriber onward.")) SubscribersListCmd.Flags().StringVar(&SubscribersListCmdSerialNumberFilter, "serial-number-filter", "", TRAPI("Serial number for filtering the search. Can specify multiple values delimited by `|`. Returns subscribers with serial number starting with the specified value(s).")) SubscribersListCmd.Flags().StringVar(&SubscribersListCmdSpeedClassFilter, "speed-class-filter", "", TRAPI("Speed class for filtering the search. Can specify multiple values delimited by `|`. Valid values include: `s1.minimum`, `s1.slow`, `s1.standard`, `s1.fast`")) SubscribersListCmd.Flags().StringVar(&SubscribersListCmdStatusFilter, "status-filter", "", TRAPI("Status for filtering the search. Can specify multiple values delimited by `|`. Valid values include: `active`, `inactive`, `ready`, `instock`, `shipped`, `suspended`, and `terminated`.")) SubscribersListCmd.Flags().StringVar(&SubscribersListCmdTagName, "tag-name", "", TRAPI("Tag name for filtering the search (exact match).")) SubscribersListCmd.Flags().StringVar(&SubscribersListCmdTagValue, "tag-value", "", TRAPI("Tag search string for filtering the search. Required when `tag_name` has been specified.")) SubscribersListCmd.Flags().StringVar(&SubscribersListCmdTagValueMatchMode, "tag-value-match-mode", "exact", TRAPI("Tag match mode.")) SubscribersListCmd.Flags().Int64Var(&SubscribersListCmdLimit, "limit", 0, TRAPI("Maximum number of subscribers to retrieve. Setting a limit does not guarantee the number of subscribers returned in the response (i.e. 
the response may contain fewer subscribers than the specified limit).")) SubscribersListCmd.Flags().BoolVar(&SubscribersListCmdPaginate, "fetch-all", false, TRCLI("cli.common_params.paginate.short_help")) SubscribersListCmd.Flags().BoolVar(&SubscribersListCmdOutputJSONL, "jsonl", false, TRCLI("cli.common_params.jsonl.short_help")) SubscribersCmd.AddCommand(SubscribersListCmd) } // SubscribersListCmd defines 'list' subcommand var SubscribersListCmd = &cobra.Command{ Use: "list", Short: TRAPI("/subscribers:get:summary"), Long: TRAPI(`/subscribers:get:description`), RunE: func(cmd *cobra.Command, args []string) error { if len(args) > 0 { return fmt.Errorf("unexpected arguments passed => %v", args) } opt := &apiClientOptions{ BasePath: "/v1", Language: getSelectedLanguage(), } ac := newAPIClient(opt) if v := os.Getenv("SORACOM_VERBOSE"); v != "" { ac.SetVerbose(true) } err := authHelper(ac, cmd, args) if err != nil { cmd.SilenceUsage = true return err } param, err := collectSubscribersListCmdParams(ac) if err != nil { return err } body, err := ac.callAPI(param) if err != nil { cmd.SilenceUsage = true return err } if body == "" { return nil } if rawOutput { _, err = os.Stdout.Write([]byte(body)) } else { if SubscribersListCmdOutputJSONL { return printStringAsJSONL(body) } return prettyPrintStringAsJSON(body) } return err }, } func collectSubscribersListCmdParams(ac *apiClient) (*apiParams, error) { return &apiParams{ method: "GET", path: buildPathForSubscribersListCmd("/subscribers"), query: buildQueryForSubscribersListCmd(), doPagination: SubscribersListCmdPaginate, paginationKeyHeaderInResponse: "x-soracom-next-key", paginationRequestParameterInQuery: "last_evaluated_key", noRetryOnError: noRetryOnError, }, nil } func buildPathForSubscribersListCmd(path string) string { return path } func buildQueryForSubscribersListCmd() url.Values { result := url.Values{} if SubscribersListCmdLastEvaluatedKey != "" { result.Add("last_evaluated_key", SubscribersListCmdLastEvaluatedKey) } if SubscribersListCmdSerialNumberFilter != "" { result.Add("serial_number_filter", SubscribersListCmdSerialNumberFilter) } if SubscribersListCmdSpeedClassFilter != "" { result.Add("speed_class_filter", SubscribersListCmdSpeedClassFilter) } if SubscribersListCmdStatusFilter != "" { result.Add("status_filter", SubscribersListCmdStatusFilter) } if SubscribersListCmdTagName != "" { result.Add("tag_name", SubscribersListCmdTagName) } if SubscribersListCmdTagValue != "" { result.Add("tag_value", SubscribersListCmdTagValue) } if SubscribersListCmdTagValueMatchMode != "exact" { result.Add("tag_value_match_mode", SubscribersListCmdTagValueMatchMode) } if SubscribersListCmdLimit != 0 { result.Add("limit", sprintf("%d", SubscribersListCmdLimit)) } return result }
[ "\"SORACOM_VERBOSE\"" ]
[]
[ "SORACOM_VERBOSE" ]
[]
["SORACOM_VERBOSE"]
go
1
0
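The interesting part of the command above is how buildQueryForSubscribersListCmd turns flag values into query parameters: anything left at its default ("" for strings, 0 for limit, "exact" for the tag match mode) is skipped, everything else is added and URL-encoded. A small standalone sketch of that pattern, with made-up filter values:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Mirror the query construction above: only non-default values are added.
	q := url.Values{}
	q.Add("status_filter", "active|ready")
	q.Add("speed_class_filter", "s1.standard")
	q.Add("limit", fmt.Sprintf("%d", 10))

	fmt.Println("/v1/subscribers?" + q.Encode())
	// Output: /v1/subscribers?limit=10&speed_class_filter=s1.standard&status_filter=active%7Cready
}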
plugin/mq2db/vendor/github.com/influxdata/influxdb/tsdb/store.go
package tsdb // import "github.com/influxdata/influxdb/tsdb" import ( "bytes" "errors" "fmt" "io" "io/ioutil" "os" "path/filepath" "runtime" "sort" "strconv" "strings" "sync" "time" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/pkg/bytesutil" "github.com/influxdata/influxdb/pkg/estimator" "github.com/influxdata/influxdb/pkg/limiter" "github.com/influxdata/influxdb/query" "github.com/influxdata/influxql" "go.uber.org/zap" ) var ( // ErrShardNotFound is returned when trying to get a non existing shard. ErrShardNotFound = fmt.Errorf("shard not found") // ErrStoreClosed is returned when trying to use a closed Store. ErrStoreClosed = fmt.Errorf("store is closed") ) // Statistics gathered by the store. const ( statDatabaseSeries = "numSeries" // number of series in a database statDatabaseMeasurements = "numMeasurements" // number of measurements in a database ) // Store manages shards and indexes for databases. type Store struct { mu sync.RWMutex // databases keeps track of the number of databases being managed by the store. databases map[string]struct{} path string // shared per-database indexes, only if using "inmem". indexes map[string]interface{} // shards is a map of shard IDs to the associated Shard. shards map[uint64]*Shard EngineOptions EngineOptions baseLogger *zap.Logger Logger *zap.Logger closing chan struct{} wg sync.WaitGroup opened bool } // NewStore returns a new store with the given path and a default configuration. // The returned store must be initialized by calling Open before using it. func NewStore(path string) *Store { logger := zap.NewNop() return &Store{ databases: make(map[string]struct{}), path: path, indexes: make(map[string]interface{}), EngineOptions: NewEngineOptions(), Logger: logger, baseLogger: logger, } } // WithLogger sets the logger for the store. func (s *Store) WithLogger(log *zap.Logger) { s.baseLogger = log s.Logger = log.With(zap.String("service", "store")) for _, sh := range s.shards { sh.WithLogger(s.baseLogger) } } // Statistics returns statistics for period monitoring. func (s *Store) Statistics(tags map[string]string) []models.Statistic { s.mu.RLock() shards := s.shardsSlice() s.mu.RUnlock() // Add all the series and measurements cardinality estimations. databases := s.Databases() statistics := make([]models.Statistic, 0, len(databases)) for _, database := range databases { sc, err := s.SeriesCardinality(database) if err != nil { s.Logger.Error("cannot retrieve series cardinality", zap.Error(err)) continue } mc, err := s.MeasurementsCardinality(database) if err != nil { s.Logger.Error("cannot retrieve measurement cardinality", zap.Error(err)) continue } statistics = append(statistics, models.Statistic{ Name: "database", Tags: models.StatisticTags{"database": database}.Merge(tags), Values: map[string]interface{}{ statDatabaseSeries: sc, statDatabaseMeasurements: mc, }, }) } // Gather all statistics for all shards. for _, shard := range shards { statistics = append(statistics, shard.Statistics(tags)...) } return statistics } // Path returns the store's root path. func (s *Store) Path() string { return s.path } // Open initializes the store, creating all necessary directories, loading all // shards as well as initializing periodic maintenance of them. func (s *Store) Open() error { s.mu.Lock() defer s.mu.Unlock() if s.opened { // Already open return nil } s.closing = make(chan struct{}) s.shards = map[uint64]*Shard{} s.Logger.Info(fmt.Sprintf("Using data dir: %v", s.Path())) // Create directory. 
if err := os.MkdirAll(s.path, 0777); err != nil { return err } if err := s.loadShards(); err != nil { return err } s.opened = true s.wg.Add(1) go s.monitorShards() return nil } func (s *Store) loadShards() error { // res holds the result from opening each shard in a goroutine type res struct { s *Shard err error } // Setup a shared limiter for compactions lim := s.EngineOptions.Config.MaxConcurrentCompactions if lim == 0 { lim = runtime.GOMAXPROCS(0) / 2 // Default to 50% of cores for compactions // On systems with more cores, cap at 4 to reduce disk utilization if lim > 4 { lim = 4 } if lim < 1 { lim = 1 } } // Don't allow more compactions to run than cores. if lim > runtime.GOMAXPROCS(0) { lim = runtime.GOMAXPROCS(0) } s.EngineOptions.CompactionLimiter = limiter.NewFixed(lim) // Env var to disable throughput limiter. This will be moved to a config option in 1.5. if os.Getenv("INFLUXDB_DATA_COMPACTION_THROUGHPUT") == "" { s.EngineOptions.CompactionThroughputLimiter = limiter.NewRate(48*1024*1024, 48*1024*1024) } else { s.Logger.Info("Compaction throughput limit disabled") } t := limiter.NewFixed(runtime.GOMAXPROCS(0)) resC := make(chan *res) var n int // Determine how many shards we need to open by checking the store path. dbDirs, err := ioutil.ReadDir(s.path) if err != nil { return err } for _, db := range dbDirs { if !db.IsDir() { s.Logger.Info("Not loading. Not a database directory.", zap.String("name", db.Name())) continue } // Retrieve database index. idx, err := s.createIndexIfNotExists(db.Name()) if err != nil { return err } // Load each retention policy within the database directory. rpDirs, err := ioutil.ReadDir(filepath.Join(s.path, db.Name())) if err != nil { return err } for _, rp := range rpDirs { if !rp.IsDir() { s.Logger.Info(fmt.Sprintf("Skipping retention policy dir: %s. Not a directory", rp.Name())) continue } shardDirs, err := ioutil.ReadDir(filepath.Join(s.path, db.Name(), rp.Name())) if err != nil { return err } for _, sh := range shardDirs { n++ go func(db, rp, sh string) { t.Take() defer t.Release() start := time.Now() path := filepath.Join(s.path, db, rp, sh) walPath := filepath.Join(s.EngineOptions.Config.WALDir, db, rp, sh) // Shard file names are numeric shardIDs shardID, err := strconv.ParseUint(sh, 10, 64) if err != nil { resC <- &res{err: fmt.Errorf("%s is not a valid ID. Skipping shard.", sh)} return } // Copy options and assign shared index. opt := s.EngineOptions opt.InmemIndex = idx // Existing shards should continue to use inmem index. if _, err := os.Stat(filepath.Join(path, "index")); os.IsNotExist(err) { opt.IndexVersion = "inmem" } // Open engine. shard := NewShard(shardID, path, walPath, opt) // Disable compactions, writes and queries until all shards are loaded shard.EnableOnOpen = false shard.WithLogger(s.baseLogger) err = shard.Open() if err != nil { resC <- &res{err: fmt.Errorf("Failed to open shard: %d: %s", shardID, err)} return } resC <- &res{s: shard} s.Logger.Info(fmt.Sprintf("%s opened in %s", path, time.Since(start))) }(db.Name(), rp.Name(), sh.Name()) } } } // Gather results of opening shards concurrently, keeping track of how // many databases we are managing. 
for i := 0; i < n; i++ { res := <-resC if res.err != nil { s.Logger.Info(res.err.Error()) continue } s.shards[res.s.id] = res.s s.databases[res.s.database] = struct{}{} } close(resC) // Enable all shards for _, sh := range s.shards { sh.SetEnabled(true) if sh.IsIdle() { if err := sh.Free(); err != nil { return err } } } return nil } // Close closes the store and all associated shards. After calling Close accessing // shards through the Store will result in ErrStoreClosed being returned. func (s *Store) Close() error { s.mu.Lock() if s.opened { close(s.closing) } s.mu.Unlock() s.wg.Wait() // No other goroutines accessing the store, so no need for a Lock. // Close all the shards in parallel. if err := s.walkShards(s.shardsSlice(), func(sh *Shard) error { return sh.CloseFast() }); err != nil { return err } s.mu.Lock() s.shards = nil s.opened = false // Store may now be opened again. s.mu.Unlock() return nil } // createIndexIfNotExists returns a shared index for a database, if the inmem // index is being used. If the TSI index is being used, then this method is // basically a no-op. func (s *Store) createIndexIfNotExists(name string) (interface{}, error) { if idx := s.indexes[name]; idx != nil { return idx, nil } idx, err := NewInmemIndex(name) if err != nil { return nil, err } s.indexes[name] = idx return idx, nil } // Shard returns a shard by id. func (s *Store) Shard(id uint64) *Shard { s.mu.RLock() defer s.mu.RUnlock() sh, ok := s.shards[id] if !ok { return nil } return sh } // Shards returns a list of shards by id. func (s *Store) Shards(ids []uint64) []*Shard { s.mu.RLock() defer s.mu.RUnlock() a := make([]*Shard, 0, len(ids)) for _, id := range ids { sh, ok := s.shards[id] if !ok { continue } a = append(a, sh) } return a } // ShardGroup returns a ShardGroup with a list of shards by id. func (s *Store) ShardGroup(ids []uint64) ShardGroup { return Shards(s.Shards(ids)) } // ShardN returns the number of shards in the store. func (s *Store) ShardN() int { s.mu.RLock() defer s.mu.RUnlock() return len(s.shards) } // ShardDigest returns a digest of the shard with the specified ID. func (s *Store) ShardDigest(id uint64) (io.ReadCloser, error) { sh := s.Shard(id) if sh == nil { return nil, ErrShardNotFound } return sh.Digest() } // CreateShard creates a shard with the given id and retention policy on a database. func (s *Store) CreateShard(database, retentionPolicy string, shardID uint64, enabled bool) error { s.mu.Lock() defer s.mu.Unlock() select { case <-s.closing: return ErrStoreClosed default: } // Shard already exists. if _, ok := s.shards[shardID]; ok { return nil } // Create the db and retention policy directories if they don't exist. if err := os.MkdirAll(filepath.Join(s.path, database, retentionPolicy), 0700); err != nil { return err } // Create the WAL directory. walPath := filepath.Join(s.EngineOptions.Config.WALDir, database, retentionPolicy, fmt.Sprintf("%d", shardID)) if err := os.MkdirAll(walPath, 0700); err != nil { return err } // Retrieve shared index, if needed. idx, err := s.createIndexIfNotExists(database) if err != nil { return err } // Copy index options and pass in shared index. opt := s.EngineOptions opt.InmemIndex = idx path := filepath.Join(s.path, database, retentionPolicy, strconv.FormatUint(shardID, 10)) shard := NewShard(shardID, path, walPath, opt) shard.WithLogger(s.baseLogger) shard.EnableOnOpen = enabled if err := shard.Open(); err != nil { return err } s.shards[shardID] = shard s.databases[database] = struct{}{} // Ensure we are tracking any new db. 
return nil } // CreateShardSnapShot will create a hard link to the underlying shard and return a path. // The caller is responsible for cleaning up (removing) the file path returned. func (s *Store) CreateShardSnapshot(id uint64) (string, error) { sh := s.Shard(id) if sh == nil { return "", ErrShardNotFound } return sh.CreateSnapshot() } // SetShardEnabled enables or disables a shard for read and writes. func (s *Store) SetShardEnabled(shardID uint64, enabled bool) error { sh := s.Shard(shardID) if sh == nil { return ErrShardNotFound } sh.SetEnabled(enabled) return nil } // DeleteShard removes a shard from disk. func (s *Store) DeleteShard(shardID uint64) error { sh := s.Shard(shardID) if sh == nil { return nil } // Remove the shard from the database indexes before closing the shard. // Closing the shard will do this as well, but it will unload it while // the shard is locked which can block stats collection and other calls. sh.UnloadIndex() if err := sh.Close(); err != nil { return err } if err := os.RemoveAll(sh.path); err != nil { return err } if err := os.RemoveAll(sh.walPath); err != nil { return err } s.mu.Lock() delete(s.shards, shardID) s.mu.Unlock() return nil } // DeleteDatabase will close all shards associated with a database and remove the directory and files from disk. func (s *Store) DeleteDatabase(name string) error { s.mu.RLock() if _, ok := s.databases[name]; !ok { s.mu.RUnlock() // no files locally, so nothing to do return nil } shards := s.filterShards(func(sh *Shard) bool { return sh.database == name }) s.mu.RUnlock() if err := s.walkShards(shards, func(sh *Shard) error { if sh.database != name { return nil } return sh.CloseFast() }); err != nil { return err } dbPath := filepath.Clean(filepath.Join(s.path, name)) // extra sanity check to make sure that even if someone named their database "../.." // that we don't delete everything because of it, they'll just have extra files forever if filepath.Clean(s.path) != filepath.Dir(dbPath) { return fmt.Errorf("invalid database directory location for database '%s': %s", name, dbPath) } if err := os.RemoveAll(dbPath); err != nil { return err } if err := os.RemoveAll(filepath.Join(s.EngineOptions.Config.WALDir, name)); err != nil { return err } s.mu.Lock() for _, sh := range shards { delete(s.shards, sh.id) } // Remove database from store list of databases delete(s.databases, name) // Remove shared index for database if using inmem index. delete(s.indexes, name) s.mu.Unlock() return nil } // DeleteRetentionPolicy will close all shards associated with the // provided retention policy, remove the retention policy directories on // both the DB and WAL, and remove all shard files from disk. func (s *Store) DeleteRetentionPolicy(database, name string) error { s.mu.RLock() if _, ok := s.databases[database]; !ok { s.mu.RUnlock() // unknown database, nothing to do return nil } shards := s.filterShards(func(sh *Shard) bool { return sh.database == database && sh.retentionPolicy == name }) s.mu.RUnlock() // Close and delete all shards under the retention policy on the // database. if err := s.walkShards(shards, func(sh *Shard) error { if sh.database != database || sh.retentionPolicy != name { return nil } return sh.Close() }); err != nil { return err } // Remove the retention policy folder. 
rpPath := filepath.Clean(filepath.Join(s.path, database, name)) // ensure Store's path is the grandparent of the retention policy if filepath.Clean(s.path) != filepath.Dir(filepath.Dir(rpPath)) { return fmt.Errorf("invalid path for database '%s', retention policy '%s': %s", database, name, rpPath) } // Remove the retention policy folder. if err := os.RemoveAll(filepath.Join(s.path, database, name)); err != nil { return err } // Remove the retention policy folder from the the WAL. if err := os.RemoveAll(filepath.Join(s.EngineOptions.Config.WALDir, database, name)); err != nil { return err } s.mu.Lock() for _, sh := range shards { delete(s.shards, sh.id) } s.mu.Unlock() return nil } // DeleteMeasurement removes a measurement and all associated series from a database. func (s *Store) DeleteMeasurement(database, name string) error { s.mu.RLock() shards := s.filterShards(byDatabase(database)) s.mu.RUnlock() // Limit to 1 delete for each shard since expanding the measurement into the list // of series keys can be very memory intensive if run concurrently. limit := limiter.NewFixed(1) return s.walkShards(shards, func(sh *Shard) error { limit.Take() defer limit.Release() if err := sh.DeleteMeasurement([]byte(name)); err != nil { return err } return nil }) } // filterShards returns a slice of shards where fn returns true // for the shard. If the provided predicate is nil then all shards are returned. func (s *Store) filterShards(fn func(sh *Shard) bool) []*Shard { var shards []*Shard if fn == nil { shards = make([]*Shard, 0, len(s.shards)) fn = func(*Shard) bool { return true } } else { shards = make([]*Shard, 0) } for _, sh := range s.shards { if fn(sh) { shards = append(shards, sh) } } return shards } // byDatabase provides a predicate for filterShards that matches on the name of // the database passed in. func byDatabase(name string) func(sh *Shard) bool { return func(sh *Shard) bool { return sh.database == name } } // walkShards apply a function to each shard in parallel. If any of the // functions return an error, the first error is returned. func (s *Store) walkShards(shards []*Shard, fn func(sh *Shard) error) error { // struct to hold the result of opening each reader in a goroutine type res struct { err error } resC := make(chan res) var n int for _, sh := range shards { n++ go func(sh *Shard) { if err := fn(sh); err != nil { resC <- res{err: fmt.Errorf("shard %d: %s", sh.id, err)} return } resC <- res{} }(sh) } var err error for i := 0; i < n; i++ { res := <-resC if res.err != nil { err = res.err } } close(resC) return err } // ShardIDs returns a slice of all ShardIDs under management. func (s *Store) ShardIDs() []uint64 { s.mu.RLock() defer s.mu.RUnlock() return s.shardIDs() } func (s *Store) shardIDs() []uint64 { a := make([]uint64, 0, len(s.shards)) for shardID := range s.shards { a = append(a, shardID) } return a } // shardsSlice returns an ordered list of shards. func (s *Store) shardsSlice() []*Shard { a := make([]*Shard, 0, len(s.shards)) for _, sh := range s.shards { a = append(a, sh) } sort.Sort(Shards(a)) return a } // Databases returns the names of all databases managed by the store. func (s *Store) Databases() []string { s.mu.RLock() defer s.mu.RUnlock() databases := make([]string, 0, len(s.databases)) for k, _ := range s.databases { databases = append(databases, k) } return databases } // DiskSize returns the size of all the shard files in bytes. // This size does not include the WAL size. 
func (s *Store) DiskSize() (int64, error) { var size int64 s.mu.RLock() allShards := s.filterShards(nil) s.mu.RUnlock() for _, sh := range allShards { sz, err := sh.DiskSize() if err != nil { return 0, err } size += sz } return size, nil } func (s *Store) estimateCardinality(dbName string, getSketches func(*Shard) (estimator.Sketch, estimator.Sketch, error)) (int64, error) { var ( ss estimator.Sketch // Sketch estimating number of items. ts estimator.Sketch // Sketch estimating number of tombstoned items. ) s.mu.RLock() shards := s.filterShards(byDatabase(dbName)) s.mu.RUnlock() // Iterate over all shards for the database and combine all of the sketches. for _, shard := range shards { s, t, err := getSketches(shard) if err != nil { return 0, err } if ss == nil { ss, ts = s, t } else if err = ss.Merge(s); err != nil { return 0, err } else if err = ts.Merge(t); err != nil { return 0, err } } if ss != nil { return int64(ss.Count() - ts.Count()), nil } return 0, nil } // SeriesCardinality returns the series cardinality for the provided database. func (s *Store) SeriesCardinality(database string) (int64, error) { return s.estimateCardinality(database, func(sh *Shard) (estimator.Sketch, estimator.Sketch, error) { if sh == nil { return nil, nil, errors.New("shard nil, can't get cardinality") } return sh.SeriesSketches() }) } // MeasurementsCardinality returns the measurement cardinality for the provided // database. func (s *Store) MeasurementsCardinality(database string) (int64, error) { return s.estimateCardinality(database, func(sh *Shard) (estimator.Sketch, estimator.Sketch, error) { if sh == nil { return nil, nil, errors.New("shard nil, can't get cardinality") } return sh.MeasurementsSketches() }) } // BackupShard will get the shard and have the engine backup since the passed in // time to the writer. func (s *Store) BackupShard(id uint64, since time.Time, w io.Writer) error { shard := s.Shard(id) if shard == nil { return fmt.Errorf("shard %d doesn't exist on this server", id) } path, err := relativePath(s.path, shard.path) if err != nil { return err } return shard.Backup(w, path, since) } func (s *Store) ExportShard(id uint64, start time.Time, end time.Time, w io.Writer) error { shard := s.Shard(id) if shard == nil { return fmt.Errorf("shard %d doesn't exist on this server", id) } path, err := relativePath(s.path, shard.path) if err != nil { return err } return shard.Export(w, path, start, end) } // RestoreShard restores a backup from r to a given shard. // This will only overwrite files included in the backup. func (s *Store) RestoreShard(id uint64, r io.Reader) error { shard := s.Shard(id) if shard == nil { return fmt.Errorf("shard %d doesn't exist on this server", id) } path, err := relativePath(s.path, shard.path) if err != nil { return err } return shard.Restore(r, path) } // ImportShard imports the contents of r to a given shard. // All files in the backup are added as new files which may // cause duplicated data to occur requiring more expensive // compactions. func (s *Store) ImportShard(id uint64, r io.Reader) error { shard := s.Shard(id) if shard == nil { return fmt.Errorf("shard %d doesn't exist on this server", id) } path, err := relativePath(s.path, shard.path) if err != nil { return err } return shard.Import(r, path) } // ShardRelativePath will return the relative path to the shard, i.e., // <database>/<retention>/<id>. 
func (s *Store) ShardRelativePath(id uint64) (string, error) { shard := s.Shard(id) if shard == nil { return "", fmt.Errorf("shard %d doesn't exist on this server", id) } return relativePath(s.path, shard.path) } // DeleteSeries loops through the local shards and deletes the series data for // the passed in series keys. func (s *Store) DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error { // Expand regex expressions in the FROM clause. a, err := s.ExpandSources(sources) if err != nil { return err } else if sources != nil && len(sources) != 0 && len(a) == 0 { return nil } sources = a // Determine deletion time range. condition, timeRange, err := influxql.ConditionExpr(condition, nil) if err != nil { return err } var min, max int64 if !timeRange.Min.IsZero() { min = timeRange.Min.UnixNano() } else { min = influxql.MinTime } if !timeRange.Max.IsZero() { max = timeRange.Max.UnixNano() } else { max = influxql.MaxTime } s.mu.RLock() shards := s.filterShards(byDatabase(database)) s.mu.RUnlock() s.mu.RLock() defer s.mu.RUnlock() // Limit to 1 delete for each shard since expanding the measurement into the list // of series keys can be very memory intensive if run concurrently. limit := limiter.NewFixed(1) return s.walkShards(shards, func(sh *Shard) error { // Determine list of measurements from sources. // Use all measurements if no FROM clause was provided. var names []string if len(sources) > 0 { for _, source := range sources { names = append(names, source.(*influxql.Measurement).Name) } } else { if err := sh.ForEachMeasurementName(func(name []byte) error { names = append(names, string(name)) return nil }); err != nil { return err } } sort.Strings(names) limit.Take() defer limit.Release() // Find matching series keys for each measurement. for _, name := range names { itr, err := sh.MeasurementSeriesKeysByExprIterator([]byte(name), condition) if err != nil { return err } else if itr == nil { continue } if err := sh.DeleteSeriesRange(itr, min, max); err != nil { return err } } return nil }) } // ExpandSources expands sources against all local shards. func (s *Store) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { shards := func() Shards { s.mu.RLock() defer s.mu.RUnlock() return Shards(s.shardsSlice()) }() return shards.ExpandSources(sources) } // WriteToShard writes a list of points to a shard identified by its ID. func (s *Store) WriteToShard(shardID uint64, points []models.Point) error { s.mu.RLock() select { case <-s.closing: s.mu.RUnlock() return ErrStoreClosed default: } sh := s.shards[shardID] if sh == nil { s.mu.RUnlock() return ErrShardNotFound } s.mu.RUnlock() // Ensure snapshot compactions are enabled since the shard might have been cold // and disabled by the monitor. if sh.IsIdle() { sh.SetCompactionsEnabled(true) } return sh.WritePoints(points) } // MeasurementNames returns a slice of all measurements. Measurements accepts an // optional condition expression. If cond is nil, then all measurements for the // database will be returned. func (s *Store) MeasurementNames(auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) { s.mu.RLock() shards := s.filterShards(byDatabase(database)) s.mu.RUnlock() // If we're using the inmem index then all shards contain a duplicate // version of the global index. We don't need to iterate over all shards // since we have everything we need from the first shard. 
if len(shards) > 0 && shards[0].IndexType() == "inmem" { shards = shards[:1] } // Map to deduplicate measurement names across all shards. This is kind of naive // and could be improved using a sorted merge of the already sorted measurements in // each shard. set := make(map[string]struct{}) var names [][]byte for _, sh := range shards { a, err := sh.MeasurementNamesByExpr(auth, cond) if err != nil { return nil, err } for _, m := range a { if _, ok := set[string(m)]; !ok { set[string(m)] = struct{}{} names = append(names, m) } } } bytesutil.Sort(names) return names, nil } // MeasurementSeriesCounts returns the number of measurements and series in all // the shards' indices. func (s *Store) MeasurementSeriesCounts(database string) (measuments int, series int) { // TODO: implement me return 0, 0 } type TagKeys struct { Measurement string Keys []string } type TagKeysSlice []TagKeys func (a TagKeysSlice) Len() int { return len(a) } func (a TagKeysSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a TagKeysSlice) Less(i, j int) bool { return a[i].Measurement < a[j].Measurement } type tagKeys struct { name []byte keys []string } type tagKeysSlice []tagKeys func (a tagKeysSlice) Len() int { return len(a) } func (a tagKeysSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a tagKeysSlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[j].name) == -1 } // TagKeys returns the tag keys in the given database, matching the condition. func (s *Store) TagKeys(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]TagKeys, error) { measurementExpr := influxql.CloneExpr(cond) measurementExpr = influxql.Reduce(influxql.RewriteExpr(measurementExpr, func(e influxql.Expr) influxql.Expr { switch e := e.(type) { case *influxql.BinaryExpr: switch e.Op { case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX: tag, ok := e.LHS.(*influxql.VarRef) if !ok || tag.Val != "_name" { return nil } } } return e }), nil) filterExpr := influxql.CloneExpr(cond) filterExpr = influxql.Reduce(influxql.RewriteExpr(filterExpr, func(e influxql.Expr) influxql.Expr { switch e := e.(type) { case *influxql.BinaryExpr: switch e.Op { case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX: tag, ok := e.LHS.(*influxql.VarRef) if !ok || strings.HasPrefix(tag.Val, "_") { return nil } } } return e }), nil) // Get all the shards we're interested in. shards := make([]*Shard, 0, len(shardIDs)) s.mu.RLock() for _, sid := range shardIDs { shard, ok := s.shards[sid] if !ok { continue } shards = append(shards, shard) } s.mu.RUnlock() // If we're using the inmem index then all shards contain a duplicate // version of the global index. We don't need to iterate over all shards // since we have everything we need from the first shard. if len(shards) > 0 && shards[0].IndexType() == "inmem" { shards = shards[:1] } // Determine list of measurements. nameSet := make(map[string]struct{}) for _, sh := range shards { // Checking for authorisation can be done later on, when non-matching // series might have been filtered out based on other conditions. names, err := sh.MeasurementNamesByExpr(nil, measurementExpr) if err != nil { return nil, err } for _, name := range names { nameSet[string(name)] = struct{}{} } } // Sort names. names := make([]string, 0, len(nameSet)) for name := range nameSet { names = append(names, name) } sort.Strings(names) // Iterate over each measurement. var results []TagKeys for _, name := range names { // Build keyset over all shards for measurement. 
keySet := map[string]struct{}{} for _, sh := range shards { shardKeySet, err := sh.MeasurementTagKeysByExpr([]byte(name), nil) if err != nil { return nil, err } else if len(shardKeySet) == 0 { continue } // If no tag value filter is present then all the tag keys can be returned // If they have authorized series associated with them. if filterExpr == nil { for tagKey := range shardKeySet { if sh.TagKeyHasAuthorizedSeries(auth, []byte(name), tagKey) { keySet[tagKey] = struct{}{} } } continue } // A tag value condition has been supplied. For each tag key filter // the set of tag values by the condition. Only tag keys with remaining // tag values will be included in the result set. // Sort the tag keys. shardKeys := make([]string, 0, len(shardKeySet)) for k := range shardKeySet { shardKeys = append(shardKeys, k) } sort.Strings(shardKeys) // TODO(edd): This is very expensive. We're materialising all unfiltered // tag values for all required tag keys, only to see if we have any. // Then we're throwing them all away as we only care about the tag // keys in the result set. shardValues, err := sh.MeasurementTagKeyValuesByExpr(auth, []byte(name), shardKeys, filterExpr, true) if err != nil { return nil, err } for i := range shardKeys { if len(shardValues[i]) == 0 { continue } keySet[shardKeys[i]] = struct{}{} } } // Sort key set. keys := make([]string, 0, len(keySet)) for key := range keySet { keys = append(keys, key) } sort.Strings(keys) // Add to resultset. results = append(results, TagKeys{ Measurement: name, Keys: keys, }) } return results, nil } type TagValues struct { Measurement string Values []KeyValue } type TagValuesSlice []TagValues func (a TagValuesSlice) Len() int { return len(a) } func (a TagValuesSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a TagValuesSlice) Less(i, j int) bool { return a[i].Measurement < a[j].Measurement } // tagValues is a temporary representation of a TagValues. Rather than allocating // KeyValues as we build up a TagValues object, We hold off allocating KeyValues // until we have merged multiple tagValues together. type tagValues struct { name []byte keys []string values [][]string } // Is a slice of tagValues that can be sorted by measurement. type tagValuesSlice []tagValues func (a tagValuesSlice) Len() int { return len(a) } func (a tagValuesSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a tagValuesSlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[j].name) == -1 } // TagValues returns the tag keys and values for the provided shards, where the // tag values satisfy the provided condition. 
func (s *Store) TagValues(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]TagValues, error) { if cond == nil { return nil, errors.New("a condition is required") } measurementExpr := influxql.CloneExpr(cond) measurementExpr = influxql.Reduce(influxql.RewriteExpr(measurementExpr, func(e influxql.Expr) influxql.Expr { switch e := e.(type) { case *influxql.BinaryExpr: switch e.Op { case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX: tag, ok := e.LHS.(*influxql.VarRef) if !ok || tag.Val != "_name" { return nil } } } return e }), nil) filterExpr := influxql.CloneExpr(cond) filterExpr = influxql.Reduce(influxql.RewriteExpr(filterExpr, func(e influxql.Expr) influxql.Expr { switch e := e.(type) { case *influxql.BinaryExpr: switch e.Op { case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX: tag, ok := e.LHS.(*influxql.VarRef) if !ok || strings.HasPrefix(tag.Val, "_") { return nil } } } return e }), nil) // Get set of Shards to work on. shards := make([]*Shard, 0, len(shardIDs)) s.mu.RLock() for _, sid := range shardIDs { shard, ok := s.shards[sid] if !ok { continue } shards = append(shards, shard) } s.mu.RUnlock() // If we're using the inmem index then all shards contain a duplicate // version of the global index. We don't need to iterate over all shards // since we have everything we need from the first shard. if len(shards) > 0 && shards[0].IndexType() == "inmem" { shards = shards[:1] } // Stores each list of TagValues for each measurement. var allResults []tagValues var maxMeasurements int // Hint as to lower bound on number of measurements. for _, sh := range shards { // names will be sorted by MeasurementNamesByExpr. // Authorisation can be done later one, when series may have been filtered // out by other conditions. names, err := sh.MeasurementNamesByExpr(nil, measurementExpr) if err != nil { return nil, err } if len(names) > maxMeasurements { maxMeasurements = len(names) } if allResults == nil { allResults = make([]tagValues, 0, len(shards)*len(names)) // Assuming all series in all shards. } // Iterate over each matching measurement in the shard. For each // measurement we'll get the matching tag keys (e.g., when a WITH KEYS) // statement is used, and we'll then use those to fetch all the relevant // values from matching series. Series may be filtered using a WHERE // filter. for _, name := range names { // Determine a list of keys from condition. keySet, err := sh.MeasurementTagKeysByExpr(name, cond) if err != nil { return nil, err } if len(keySet) == 0 { // No matching tag keys for this measurement continue } result := tagValues{ name: name, keys: make([]string, 0, len(keySet)), } // Add the keys to the tagValues and sort them. for k := range keySet { result.keys = append(result.keys, k) } sort.Sort(sort.StringSlice(result.keys)) // get all the tag values for each key in the keyset. // Each slice in the results contains the sorted values associated // associated with each tag key for the measurement from the key set. 
if result.values, err = sh.MeasurementTagKeyValuesByExpr(auth, name, result.keys, filterExpr, true); err != nil { return nil, err } // remove any tag keys that didn't have any authorized values j := 0 for i := range result.keys { if len(result.values[i]) == 0 { continue } result.keys[j] = result.keys[i] result.values[j] = result.values[i] j++ } result.keys = result.keys[:j] result.values = result.values[:j] // only include result if there are keys with values if len(result.keys) > 0 { allResults = append(allResults, result) } } } result := make([]TagValues, 0, maxMeasurements) // We need to sort all results by measurement name. if len(shards) > 1 { sort.Sort(tagValuesSlice(allResults)) } // The next stage is to merge the tagValue results for each shard's measurements. var i, j int // Used as a temporary buffer in mergeTagValues. There can be at most len(shards) // instances of tagValues for a given measurement. idxBuf := make([][2]int, 0, len(shards)) for i < len(allResults) { // Gather all occurrences of the same measurement for merging. for j+1 < len(allResults) && bytes.Equal(allResults[j+1].name, allResults[i].name) { j++ } // An invariant is that there can't be more than n instances of tag // key value pairs for a given measurement, where n is the number of // shards. if got, exp := j-i+1, len(shards); got > exp { return nil, fmt.Errorf("unexpected results returned engine. Got %d measurement sets for %d shards", got, exp) } nextResult := mergeTagValues(idxBuf, allResults[i:j+1]...) i = j + 1 if len(nextResult.Values) > 0 { result = append(result, nextResult) } } return result, nil } // mergeTagValues merges multiple sorted sets of temporary tagValues using a // direct k-way merge whilst also removing duplicated entries. The result is a // single TagValue type. // // TODO(edd): a Tournament based merge (see: Knuth's TAOCP 5.4.1) might be more // appropriate at some point. // func mergeTagValues(valueIdxs [][2]int, tvs ...tagValues) TagValues { var result TagValues if len(tvs) == 0 { return TagValues{} } else if len(tvs) == 1 { result.Measurement = string(tvs[0].name) // TODO(edd): will be too small likely. Find a hint? result.Values = make([]KeyValue, 0, len(tvs[0].values)) for ki, key := range tvs[0].keys { for _, value := range tvs[0].values[ki] { result.Values = append(result.Values, KeyValue{Key: key, Value: value}) } } return result } result.Measurement = string(tvs[0].name) var maxSize int for _, tv := range tvs { if len(tv.values) > maxSize { maxSize = len(tv.values) } } result.Values = make([]KeyValue, 0, maxSize) // This will likely be too small but it's a start. // Resize and reset to the number of TagValues we're merging. valueIdxs = valueIdxs[:len(tvs)] for i := 0; i < len(valueIdxs); i++ { valueIdxs[i][0], valueIdxs[i][1] = 0, 0 } var ( j int keyCmp, valCmp int ) for { // Which of the provided TagValue sets currently holds the smallest element. // j is the candidate we're going to next pick for the result set. j = -1 // Find the smallest element for i := 0; i < len(tvs); i++ { if valueIdxs[i][0] >= len(tvs[i].keys) { continue // We have completely drained all tag keys and values for this shard. } else if len(tvs[i].values[valueIdxs[i][0]]) == 0 { // There are no tag values for these keys. valueIdxs[i][0]++ valueIdxs[i][1] = 0 continue } else if j == -1 { // We haven't picked a best TagValues set yet. Pick this one. 
j = i continue } // It this tag key is lower than the candidate's tag key keyCmp = strings.Compare(tvs[i].keys[valueIdxs[i][0]], tvs[j].keys[valueIdxs[j][0]]) if keyCmp == -1 { j = i } else if keyCmp == 0 { valCmp = strings.Compare(tvs[i].values[valueIdxs[i][0]][valueIdxs[i][1]], tvs[j].values[valueIdxs[j][0]][valueIdxs[j][1]]) // Same tag key but this tag value is lower than the candidate. if valCmp == -1 { j = i } else if valCmp == 0 { // Duplicate tag key/value pair.... Remove and move onto // the next value for shard i. valueIdxs[i][1]++ if valueIdxs[i][1] >= len(tvs[i].values[valueIdxs[i][0]]) { // Drained all these tag values, move onto next key. valueIdxs[i][0]++ valueIdxs[i][1] = 0 } } } } // We could have drained all of the TagValue sets and be done... if j == -1 { break } // Append the smallest KeyValue result.Values = append(result.Values, KeyValue{ Key: string(tvs[j].keys[valueIdxs[j][0]]), Value: tvs[j].values[valueIdxs[j][0]][valueIdxs[j][1]], }) // Increment the indexes for the chosen TagValue. valueIdxs[j][1]++ if valueIdxs[j][1] >= len(tvs[j].values[valueIdxs[j][0]]) { // Drained all these tag values, move onto next key. valueIdxs[j][0]++ valueIdxs[j][1] = 0 } } return result } func (s *Store) monitorShards() { defer s.wg.Done() t := time.NewTicker(10 * time.Second) defer t.Stop() t2 := time.NewTicker(time.Minute) defer t2.Stop() for { select { case <-s.closing: return case <-t.C: s.mu.RLock() for _, sh := range s.shards { if sh.IsIdle() { if err := sh.Free(); err != nil { s.Logger.Warn("error free cold shard resources:", zap.Error(err)) } } else { sh.SetCompactionsEnabled(true) } } s.mu.RUnlock() case <-t2.C: if s.EngineOptions.Config.MaxValuesPerTag == 0 { continue } s.mu.RLock() shards := s.filterShards(func(sh *Shard) bool { return sh.IndexType() == "inmem" }) s.mu.RUnlock() // No inmem shards... if len(shards) == 0 { continue } var dbLock sync.Mutex databases := make(map[string]struct{}, len(shards)) s.walkShards(shards, func(sh *Shard) error { db := sh.database // Only process 1 shard from each database dbLock.Lock() if _, ok := databases[db]; ok { dbLock.Unlock() return nil } databases[db] = struct{}{} dbLock.Unlock() // inmem shards share the same index instance so just use the first one to avoid // allocating the same measurements repeatedly first := shards[0] names, err := first.MeasurementNamesByExpr(nil, nil) if err != nil { s.Logger.Warn("cannot retrieve measurement names", zap.Error(err)) return nil } for _, name := range names { sh.ForEachMeasurementTagKey(name, func(k []byte) error { n := sh.TagKeyCardinality(name, k) perc := int(float64(n) / float64(s.EngineOptions.Config.MaxValuesPerTag) * 100) if perc > 100 { perc = 100 } // Log at 80, 85, 90-100% levels if perc == 80 || perc == 85 || perc >= 90 { s.Logger.Info(fmt.Sprintf("WARN: %d%% of max-values-per-tag limit exceeded: (%d/%d), db=%s measurement=%s tag=%s", perc, n, s.EngineOptions.Config.MaxValuesPerTag, db, name, k)) } return nil }) } return nil }) } } } // KeyValue holds a string key and a string value. type KeyValue struct { Key, Value string } // KeyValues is a sortable slice of KeyValue. type KeyValues []KeyValue // Len implements sort.Interface. func (a KeyValues) Len() int { return len(a) } // Swap implements sort.Interface. func (a KeyValues) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // Less implements sort.Interface. Keys are compared before values. 
func (a KeyValues) Less(i, j int) bool { ki, kj := a[i].Key, a[j].Key if ki == kj { return a[i].Value < a[j].Value } return ki < kj } // filterShowSeriesResult will limit the number of series returned based on the limit and the offset. // Unlike limit and offset on SELECT statements, the limit and offset don't apply to the number of Rows, but // to the number of total Values returned, since each Value represents a unique series. func (e *Store) filterShowSeriesResult(limit, offset int, rows models.Rows) models.Rows { var filteredSeries models.Rows seriesCount := 0 for _, r := range rows { var currentSeries [][]interface{} // filter the values for _, v := range r.Values { if seriesCount >= offset && seriesCount-offset < limit { currentSeries = append(currentSeries, v) } seriesCount++ } // only add the row back in if there are some values in it if len(currentSeries) > 0 { r.Values = currentSeries filteredSeries = append(filteredSeries, r) if seriesCount > limit+offset { return filteredSeries } } } return filteredSeries } // decodeStorePath extracts the database and retention policy names // from a given shard or WAL path. func decodeStorePath(shardOrWALPath string) (database, retentionPolicy string) { // shardOrWALPath format: /maybe/absolute/base/then/:database/:retentionPolicy/:nameOfShardOrWAL // Discard the last part of the path (the shard name or the wal name). path, _ := filepath.Split(filepath.Clean(shardOrWALPath)) // Extract the database and retention policy. path, rp := filepath.Split(filepath.Clean(path)) _, db := filepath.Split(filepath.Clean(path)) return db, rp } // relativePath will expand out the full paths passed in and return // the relative shard path from the store func relativePath(storePath, shardPath string) (string, error) { path, err := filepath.Abs(storePath) if err != nil { return "", fmt.Errorf("store abs path: %s", err) } fp, err := filepath.Abs(shardPath) if err != nil { return "", fmt.Errorf("file abs path: %s", err) } name, err := filepath.Rel(path, fp) if err != nil { return "", fmt.Errorf("file rel path: %s", err) } return name, nil }
[ "\"INFLUXDB_DATA_COMPACTION_THROUGHPUT\"" ]
[]
[ "INFLUXDB_DATA_COMPACTION_THROUGHPUT" ]
[]
["INFLUXDB_DATA_COMPACTION_THROUGHPUT"]
go
1
0
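For context on the environment-variable annotations above: in store.go, Store.loadShards installs a 48 MB/s compaction throughput limiter only when INFLUXDB_DATA_COMPACTION_THROUGHPUT is unset; setting the variable (to any value) disables the limit, and the code notes the switch will become a config option in 1.5. Below is a minimal, standalone sketch of that gating pattern, not the influxdb pkg/limiter API; the rateLimiter type, newRateLimiter, and compactionThroughputLimiter names are hypothetical stand-ins introduced only for illustration.

package main

import (
	"fmt"
	"os"
)

// rateLimiter is a hypothetical stand-in for a byte-throughput limiter
// (the real code uses influxdb's pkg/limiter). A nil limiter means "no limit".
type rateLimiter struct {
	bytesPerSec int
	burst       int
}

func newRateLimiter(bytesPerSec, burst int) *rateLimiter {
	return &rateLimiter{bytesPerSec: bytesPerSec, burst: burst}
}

// compactionThroughputLimiter mirrors the env-var gate in Store.loadShards:
// the default 48 MB/s limit applies only while the variable is unset.
func compactionThroughputLimiter() *rateLimiter {
	if os.Getenv("INFLUXDB_DATA_COMPACTION_THROUGHPUT") == "" {
		return newRateLimiter(48*1024*1024, 48*1024*1024)
	}
	// Variable present: throughput limiting disabled.
	return nil
}

func main() {
	if lim := compactionThroughputLimiter(); lim != nil {
		fmt.Printf("compaction throughput limited to %d bytes/sec\n", lim.bytesPerSec)
	} else {
		fmt.Println("Compaction throughput limit disabled")
	}
}

Keying the "disabled" case off mere presence of the variable (its value is ignored) matches the comment in loadShards that this is a stopgap until a proper config option exists.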
availability_app/lib/ezb_v1_handler.py
# -*- coding: utf-8 -*- """ Helper for views.handler() """ import datetime, json, logging, os, pickle, pprint, subprocess, urllib import isbnlib import pymarc from availability_app import settings_app from django.core.cache import cache log = logging.getLogger(__name__) slog = logging.getLogger( 'stats_logger' ) class EzbV1Helper( object ): """ Helper for v1 api route. """ def __init__( self ): log.debug( 'initializing EzbV1Helper instance' ) # self.legit_services = [ 'isbn', 'oclc' ] self.legit_services = [ 'bib', 'isbn', 'oclc' ] self.parser = Parser() self.ezb_available_locations = None self.ezb_available_statuses = None def validate( self, key, value ): """ Initial validator. IP checking another possibility. Returns dct because isbn value may be changed. Called by views.ezb_v1(). """ validator = Validator() rslt_dct = { 'validity': False, 'key': key, 'value': value, 'error': None } if key not in self.legit_services: rslt_dct['error'] = 'bad query-key' rslt_dct['validity'] = False elif key == 'isbn': if validator.validate_isbn(value) is False: rslt_dct['error'] = 'bad isbn' rslt_dct['validity'] = False else: rslt_dct['value'] = validator.EAN13 rslt_dct['validity'] = True else: rslt_dct['validity'] = True log.debug( 'rslt_dct, ```%s```' % pprint.pformat(rslt_dct) ) return rslt_dct def build_data_dct( self, key, value, show_marc_param, request ): """ Manager for z39.50 query, and result-processor. Called by views.ezb_v1(). """ rq_now = datetime.datetime.now() data_dct = { 'request': self.build_query_dct( request, rq_now ), 'response': {'basics': 'init', 'sierra': 'init', 'time_taken': 'init'} } pickled_data = self.grab_z3950_data( key, value, show_marc_param ) assert type(pickled_data) == bytes, 'type(pickled_data), %s' % type(pickled_data) unpickled_data = pickle.loads( pickled_data ) log.debug( 'unpickled_data, ```%s```' % pprint.pformat(unpickled_data) ) data_dct['response']['sierra'] = self.build_holdings_dct( unpickled_data ) data_dct['response']['basics'] = self.build_summary_dct( data_dct['response']['sierra'] ) data_dct['response']['searched'] = { 'key': key, 'value': value } # because the isbn value may have been changed ( see EzbV1HelperTest.test_good_short_isbn() ) data_dct['response']['time_taken'] = str( datetime.datetime.now() - rq_now ) return data_dct def grab_z3950_data( self, key, value, show_marc_param ): """ Returns data from cache if available; otherwise calls sierra. Called by build_data_dct() """ cache_key = '%s_%s' % (key, value) pickled_data = cache.get( cache_key ) if pickled_data is None: log.debug( 'pickled_data was not in cache' ) pickled_data = self.query_josiah( key, value, show_marc_param ) cache.set( cache_key, pickled_data ) # time could be last argument; defaults to settings.py entry else: log.debug( 'pickled_data was in cache' ) return pickled_data def query_josiah( self, key, value, show_marc_param ): """ Perform actual query. Called by grab_z3950_data(). 
""" log.debug( 'starting query_josiah()' ) cmd_1 = 'cd %s' % ( settings_app.CMD_START_DIR_PATH ) cmd_2 = 'source %s/activate' % ( settings_app.CMD_ENV_BIN_DIR_PATH ) cmd_3 = '%s/python2 %s/py2_z3950_wrapper.py --key %s --value %s' % ( settings_app.CMD_ENV_BIN_DIR_PATH, settings_app.CMD_WRAPPER_DIR_PATH, key, value ) py3_cmd = cmd_1 + '; ' + cmd_2 + '; ' + cmd_3 log.debug( 'py3_cmd, ```%s```' % py3_cmd ) process = subprocess.Popen( py3_cmd, shell=True, stdout=subprocess.PIPE ) output, error = process.communicate() # receive output from the python2 script log.debug( 'output, ```%s```; error, ```%s```' % (output, error) ) return output def build_query_dct( self, request, rq_now ): """ Builds query-dct part of response. Called by: build_data_dct() """ query_dct = { 'url': '%s://%s%s' % ( request.scheme, request.META.get( 'HTTP_HOST', '127.0.0.1' ), # HTTP_HOST doesn't exist for client-tests request.META.get('REQUEST_URI', request.META['PATH_INFO']) ), 'timestamp': str( rq_now ) } self.build_stats_dct( query_dct['url'], request.META.get('HTTP_REFERER', None), request.META.get('HTTP_USER_AGENT', None), request.META.get('REMOTE_ADDR', None) ) log.debug( 'query_dct, ```%s``' % pprint.pformat(query_dct) ) return query_dct def build_stats_dct( self, query_url, referrer, user_agent, ip ): """ Builds and logs data for stats. Called by build_query_dct() """ stats_dct = { 'datetime': datetime.datetime.now().isoformat(), 'query': query_url, 'referrer': None, 'user_agent': user_agent, 'ip': ip } if referrer: output = urllib.parse.urlparse( referrer ) stats_dct['referrer'] = output slog.info( json.dumps(stats_dct) ) return def build_holdings_dct( self, unpickled_dct ): """ Processes z3950 data into response. Called by build_data_dct() """ items = [] z_items = unpickled_dct['backend_response'] for z_item in z_items: pymrc_obj = z_item['pymarc_obj'] log.debug( 'pymrc_obj.as_dict(), ```%s```' % pprint.pformat(pymrc_obj.as_dict()) ) holdings = z_item['holdings_data'] # # log.debug( 'bib?, ```%s```' % pymrc_obj.get_fields('907')[0].format_field() ) # locations_val = [] locations = pymrc_obj.location() for loc in locations: locations_val.append( loc.format_field() ) # notes_val = [] notes = pymrc_obj.notes() for note in notes: notes_val.append( note.format_field() ) # phys_desc_val = [] physicaldescriptions = pymrc_obj.physicaldescription() for phys_desc in physicaldescriptions: phys_desc_val.append( phys_desc.format_field() ) # series_val = [] series_entries = pymrc_obj.series() for series in series_entries: series_val.append( series.format_field() ) # subjects_val = [] subjects = pymrc_obj.subjects() for subject in subjects: subjects_val.append( subject.format_field() ) # item_dct = { 'bib': self.parser.grab_bib( pymrc_obj ), 'author': pymrc_obj.author(), 'isbn': pymrc_obj.isbn(), 'locations': locations_val, 'notes': notes_val, 'physicaldescription': phys_desc_val, 'publisher': pymrc_obj.publisher(), 'pubyear': pymrc_obj.pubyear(), 'series': series_val, 'subjects': subjects_val, 'title': pymrc_obj.title(), 'uniformtitle': pymrc_obj.uniformtitle(), 'holdings': holdings } items.append( item_dct ) log.debug( 'items, ```%s```' % items ) return items def build_summary_dct( self, sierra_holdings ): """ Builds summary data needed by easyBorrow. 
Called by build_data_dct() """ self.prep_ezb_available_locations() self.prep_ezb_available_statuses() summary_dct = { 'ezb_available_bibs': [], 'ezb_available_holdings': [], 'online_holdings': [], 'ezb_other_holdings': [] } summary_dct = self.determine_ezb_requestability( sierra_holdings, summary_dct ) summary_dct = self.check_online_holdings( sierra_holdings, summary_dct ) summary_dct = self.check_other_holdings( sierra_holdings, summary_dct ) log.debug( 'summary_dct, ```%s```' % pprint.pformat(summary_dct) ) return summary_dct # def build_summary_dct( self, sierra_holdings ): # """ Builds simple summary data. # Called by build_data_dct() """ # self.prep_ezb_available_locations() # self.prep_ezb_available_statuses() # summary_dct = { 'ezb_available_bibs': [], 'ezb_available_holdings': [], 'online_holdings': [] } # summary_dct = self.determine_ezb_requestability( sierra_holdings, summary_dct ) # summary_dct = self.check_online_holdings( sierra_holdings, summary_dct ) # log.debug( 'summary_dct, ```%s```' % pprint.pformat(summary_dct) ) # return summary_dct def determine_ezb_requestability( self, sierra_holdings, summary_dct ): """ Adds easyBorrow-requestable holdings and bibs to the summary dct. Called by build_summary_dct() """ for item in sierra_holdings: item_available_holdings = [] for holding_info in item['holdings']: if holding_info['localLocation'] in self.ezb_available_locations and holding_info['publicNote'] in self.ezb_available_statuses: item_available_holdings.append( holding_info ) if len( item_available_holdings ) > 0: summary_dct['ezb_available_holdings'] = summary_dct['ezb_available_holdings'] + item_available_holdings bib_dct = { 'bib': item['bib'], 'title': item['title'], 'url': 'https://search.library.brown.edu/catalog/%s' % item['bib'] } summary_dct['ezb_available_bibs'].append( bib_dct ) log.debug( 'summary_dct, ```%s```' % pprint.pformat(summary_dct) ) return summary_dct def check_online_holdings( self, sierra_holdings, summary_dct ): """ Adds any online holdings to the summary basics dct. Called by: build_summary_dct() """ for item in sierra_holdings: for holding_info in item['holdings']: if 'online' in holding_info['localLocation'].lower(): online_holding_info = holding_info.copy() online_holding_info['title'] = item['title'] online_holding_info['url'] = 'https://search.library.brown.edu/catalog/%s' % item['bib'] summary_dct['online_holdings'].append( online_holding_info ) log.debug( 'summary_dct, ```%s```' % pprint.pformat(summary_dct) ) return summary_dct def check_other_holdings( self, sierra_holdings, summary_dct ): """ Adds any Hay (other) holdings to the summary basics dct. Called by: build_summary_dct() """ for item in sierra_holdings: for holding_info in item['holdings']: if 'hay' in holding_info['localLocation'].lower(): other_holding_info = holding_info.copy() other_holding_info['title'] = item['title'] other_holding_info['url'] = 'https://search.library.brown.edu/catalog/%s' % item['bib'] summary_dct['ezb_other_holdings'].append( other_holding_info ) log.debug( 'summary_dct, ```%s```' % pprint.pformat(summary_dct) ) return summary_dct def prep_ezb_available_locations( self ): """ Populates ezb_available_locations. Called by build_summary_dct() TODO: load from editable admin-db.
""" ezb_available_locations = json.loads( os.environ['AVL_API__EZB_AVAILABLE_LOCATIONS'] ) log.debug( 'ezb_available_locations, ```%s```' % ezb_available_locations ) self.ezb_available_locations = ezb_available_locations return def prep_ezb_available_statuses( self ): """ Populates ezb_available_statuses. Called by build_summary_dct() TODO: load from editable admin-db. """ ezb_available_statuses = json.loads( os.environ['AVL_API__EZB_AVAILABLE_STATUSES'] ) log.debug( 'ezb_available_statuses, ```%s```' % ezb_available_statuses ) self.ezb_available_statuses = ezb_available_statuses return ## end EzbV1Helper() class Parser( object ): """ Parses data from marc. """ def __init__( self ): pass def grab_bib( self, pymrc_rcrd ): """ Parses bib. Called by EzbV1Helper.build_holdings_dct() """ try: bib = pymrc_rcrd['907']['a'] bib = bib[1:-1] # removes initial '.', and ending check-digit except AttributeError as e: log.warning( 'exception getting bib, ```%s```' % e ) bib = None log.debug( 'bib, `%s`' % bib ) return bib ## end Parser() class Validator( object ): """ Validates isbn and eventually oclc number. Useful to prevent unnecessary z3950 queries. """ def __init__( self ): self.EAN13 = None def validate_isbn( self, isbn ): """ Returns boolean. Called by TBD """ self.EAN13 = isbnlib.EAN13( isbn ) if self.EAN13: rslt = True else: rslt = False log.debug( 'isbn, `%s` is not valid' ) return rslt ## end Validator()
[]
[]
[ "AVL_API__EZB_AVAILABLE_LOCATIONS", "AVL_API__EZB_AVAILABLE_STATUSES" ]
[]
["AVL_API__EZB_AVAILABLE_LOCATIONS", "AVL_API__EZB_AVAILABLE_STATUSES"]
python
2
0
src/runtime/crash_test.go
// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime_test import ( "bytes" "errors" "flag" "fmt" "internal/testenv" "os" "os/exec" "path/filepath" "regexp" "runtime" "strconv" "strings" "sync" "testing" "time" ) var toRemove []string func TestMain(m *testing.M) { status := m.Run() for _, file := range toRemove { os.RemoveAll(file) } os.Exit(status) } var testprog struct { sync.Mutex dir string target map[string]*buildexe } type buildexe struct { once sync.Once exe string err error } func runTestProg(t *testing.T, binary, name string, env ...string) string { if *flagQuick { t.Skip("-quick") } testenv.MustHaveGoBuild(t) exe, err := buildTestProg(t, binary) if err != nil { t.Fatal(err) } return runBuiltTestProg(t, exe, name, env...) } func runBuiltTestProg(t *testing.T, exe, name string, env ...string) string { if *flagQuick { t.Skip("-quick") } testenv.MustHaveGoBuild(t) cmd := testenv.CleanCmdEnv(exec.Command(exe, name)) cmd.Env = append(cmd.Env, env...) if testing.Short() { cmd.Env = append(cmd.Env, "RUNTIME_TEST_SHORT=1") } var b bytes.Buffer cmd.Stdout = &b cmd.Stderr = &b if err := cmd.Start(); err != nil { t.Fatalf("starting %s %s: %v", exe, name, err) } // If the process doesn't complete within 1 minute, // assume it is hanging and kill it to get a stack trace. p := cmd.Process done := make(chan bool) go func() { scale := 1 // This GOARCH/GOOS test is copied from cmd/dist/test.go. // TODO(iant): Have cmd/dist update the environment variable. if runtime.GOARCH == "arm" || runtime.GOOS == "windows" { scale = 2 } if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" { if sc, err := strconv.Atoi(s); err == nil { scale = sc } } select { case <-done: case <-time.After(time.Duration(scale) * time.Minute): p.Signal(sigquit) } }() if err := cmd.Wait(); err != nil { t.Logf("%s %s exit status: %v", exe, name, err) } close(done) return b.String() } var serializeBuild = make(chan bool, 2) func buildTestProg(t *testing.T, binary string, flags ...string) (string, error) { if *flagQuick { t.Skip("-quick") } testenv.MustHaveGoBuild(t) testprog.Lock() if testprog.dir == "" { dir, err := os.MkdirTemp("", "go-build") if err != nil { t.Fatalf("failed to create temp directory: %v", err) } testprog.dir = dir toRemove = append(toRemove, dir) } if testprog.target == nil { testprog.target = make(map[string]*buildexe) } name := binary if len(flags) > 0 { name += "_" + strings.Join(flags, "_") } target, ok := testprog.target[name] if !ok { target = &buildexe{} testprog.target[name] = target } dir := testprog.dir // Unlock testprog while actually building, so that other // tests can look up executables that were already built. testprog.Unlock() target.once.Do(func() { // Only do two "go build"'s at a time, // to keep load from getting too high. serializeBuild <- true defer func() { <-serializeBuild }() // Don't get confused if testenv.GoToolPath calls t.Skip. target.err = errors.New("building test called t.Skip") exe := filepath.Join(dir, name+".exe") t.Logf("running go build -o %s %s", exe, strings.Join(flags, " ")) cmd := exec.Command(testenv.GoToolPath(t), append([]string{"build", "-o", exe}, flags...)...) 
cmd.Dir = "testdata/" + binary out, err := testenv.CleanCmdEnv(cmd).CombinedOutput() if err != nil { target.err = fmt.Errorf("building %s %v: %v\n%s", binary, flags, err, out) } else { target.exe = exe target.err = nil } }) return target.exe, target.err } func TestVDSO(t *testing.T) { t.Parallel() output := runTestProg(t, "testprog", "SignalInVDSO") want := "success\n" if output != want { t.Fatalf("output:\n%s\n\nwanted:\n%s", output, want) } } func testCrashHandler(t *testing.T, cgo bool) { type crashTest struct { Cgo bool } var output string if cgo { output = runTestProg(t, "testprogcgo", "Crash") } else { output = runTestProg(t, "testprog", "Crash") } want := "main: recovered done\nnew-thread: recovered done\nsecond-new-thread: recovered done\nmain-again: recovered done\n" if output != want { t.Fatalf("output:\n%s\n\nwanted:\n%s", output, want) } } func TestCrashHandler(t *testing.T) { testCrashHandler(t, false) } func testDeadlock(t *testing.T, name string) { // External linking brings in cgo, causing deadlock detection not working. testenv.MustInternalLink(t) output := runTestProg(t, "testprog", name) want := "fatal error: all goroutines are asleep - deadlock!\n" if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } } func TestSimpleDeadlock(t *testing.T) { testDeadlock(t, "SimpleDeadlock") } func TestInitDeadlock(t *testing.T) { testDeadlock(t, "InitDeadlock") } func TestLockedDeadlock(t *testing.T) { testDeadlock(t, "LockedDeadlock") } func TestLockedDeadlock2(t *testing.T) { testDeadlock(t, "LockedDeadlock2") } func TestGoexitDeadlock(t *testing.T) { // External linking brings in cgo, causing deadlock detection not working. testenv.MustInternalLink(t) output := runTestProg(t, "testprog", "GoexitDeadlock") want := "no goroutines (main called runtime.Goexit) - deadlock!" 
if !strings.Contains(output, want) { t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want) } } func TestStackOverflow(t *testing.T) { output := runTestProg(t, "testprog", "StackOverflow") want := []string{ "runtime: goroutine stack exceeds 1474560-byte limit\n", "fatal error: stack overflow", // information about the current SP and stack bounds "runtime: sp=", "stack=[", } if !strings.HasPrefix(output, want[0]) { t.Errorf("output does not start with %q", want[0]) } for _, s := range want[1:] { if !strings.Contains(output, s) { t.Errorf("output does not contain %q", s) } } if t.Failed() { t.Logf("output:\n%s", output) } } func TestThreadExhaustion(t *testing.T) { output := runTestProg(t, "testprog", "ThreadExhaustion") want := "runtime: program exceeds 10-thread limit\nfatal error: thread exhaustion" if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } } func TestRecursivePanic(t *testing.T) { output := runTestProg(t, "testprog", "RecursivePanic") want := `wrap: bad panic: again ` if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } } func TestRecursivePanic2(t *testing.T) { output := runTestProg(t, "testprog", "RecursivePanic2") want := `first panic second panic panic: third panic ` if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } } func TestRecursivePanic3(t *testing.T) { output := runTestProg(t, "testprog", "RecursivePanic3") want := `panic: first panic ` if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } } func TestRecursivePanic4(t *testing.T) { output := runTestProg(t, "testprog", "RecursivePanic4") want := `panic: first panic [recovered] panic: second panic ` if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } } func TestRecursivePanic5(t *testing.T) { output := runTestProg(t, "testprog", "RecursivePanic5") want := `first panic second panic panic: third panic ` if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } } func TestGoexitCrash(t *testing.T) { // External linking brings in cgo, causing deadlock detection not working. testenv.MustInternalLink(t) output := runTestProg(t, "testprog", "GoexitExit") want := "no goroutines (main called runtime.Goexit) - deadlock!" 
if !strings.Contains(output, want) { t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want) } } func TestGoexitDefer(t *testing.T) { c := make(chan struct{}) go func() { defer func() { r := recover() if r != nil { t.Errorf("non-nil recover during Goexit") } c <- struct{}{} }() runtime.Goexit() }() // Note: if the defer fails to run, we will get a deadlock here <-c } func TestGoNil(t *testing.T) { output := runTestProg(t, "testprog", "GoNil") want := "go of nil func value" if !strings.Contains(output, want) { t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want) } } func TestMainGoroutineID(t *testing.T) { output := runTestProg(t, "testprog", "MainGoroutineID") want := "panic: test\n\ngoroutine 1 [running]:\n" if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } } func TestNoHelperGoroutines(t *testing.T) { output := runTestProg(t, "testprog", "NoHelperGoroutines") matches := regexp.MustCompile(`goroutine [0-9]+ \[`).FindAllStringSubmatch(output, -1) if len(matches) != 1 || matches[0][0] != "goroutine 1 [" { t.Fatalf("want to see only goroutine 1, see:\n%s", output) } } func TestBreakpoint(t *testing.T) { output := runTestProg(t, "testprog", "Breakpoint") // If runtime.Breakpoint() is inlined, then the stack trace prints // "runtime.Breakpoint(...)" instead of "runtime.Breakpoint()". want := "runtime.Breakpoint(" if !strings.Contains(output, want) { t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want) } } func TestGoexitInPanic(t *testing.T) { // External linking brings in cgo, causing deadlock detection not working. testenv.MustInternalLink(t) // see issue 8774: this code used to trigger an infinite recursion output := runTestProg(t, "testprog", "GoexitInPanic") want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!" if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } } // Issue 14965: Runtime panics should be of type runtime.Error func TestRuntimePanicWithRuntimeError(t *testing.T) { testCases := [...]func(){ 0: func() { var m map[uint64]bool m[1234] = true }, 1: func() { ch := make(chan struct{}) close(ch) close(ch) }, 2: func() { var ch = make(chan struct{}) close(ch) ch <- struct{}{} }, 3: func() { var s = make([]int, 2) _ = s[2] }, 4: func() { n := -1 _ = make(chan bool, n) }, 5: func() { close((chan bool)(nil)) }, } for i, fn := range testCases { got := panicValue(fn) if _, ok := got.(runtime.Error); !ok { t.Errorf("test #%d: recovered value %v(type %T) does not implement runtime.Error", i, got, got) } } } func panicValue(fn func()) (recovered interface{}) { defer func() { recovered = recover() }() fn() return } func TestPanicAfterGoexit(t *testing.T) { // an uncaught panic should still work after goexit output := runTestProg(t, "testprog", "PanicAfterGoexit") want := "panic: hello" if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } } func TestRecoveredPanicAfterGoexit(t *testing.T) { // External linking brings in cgo, causing deadlock detection not working. testenv.MustInternalLink(t) output := runTestProg(t, "testprog", "RecoveredPanicAfterGoexit") want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!" if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } } func TestRecoverBeforePanicAfterGoexit(t *testing.T) { // External linking brings in cgo, causing deadlock detection not working. 
testenv.MustInternalLink(t) t.Parallel() output := runTestProg(t, "testprog", "RecoverBeforePanicAfterGoexit") want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!" if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } } func TestRecoverBeforePanicAfterGoexit2(t *testing.T) { // External linking brings in cgo, causing deadlock detection not working. testenv.MustInternalLink(t) t.Parallel() output := runTestProg(t, "testprog", "RecoverBeforePanicAfterGoexit2") want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!" if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } } func TestNetpollDeadlock(t *testing.T) { t.Parallel() output := runTestProg(t, "testprognet", "NetpollDeadlock") want := "done\n" if !strings.HasSuffix(output, want) { t.Fatalf("output does not end with %q:\n%s", want, output) } } func TestPanicTraceback(t *testing.T) { t.Parallel() output := runTestProg(t, "testprog", "PanicTraceback") want := "panic: hello\n\tpanic: panic pt2\n\tpanic: panic pt1\n" if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } // Check functions in the traceback. fns := []string{"main.pt1.func1", "panic", "main.pt2.func1", "panic", "main.pt2", "main.pt1"} for _, fn := range fns { re := regexp.MustCompile(`(?m)^` + regexp.QuoteMeta(fn) + `\(.*\n`) idx := re.FindStringIndex(output) if idx == nil { t.Fatalf("expected %q function in traceback:\n%s", fn, output) } output = output[idx[1]:] } } func testPanicDeadlock(t *testing.T, name string, want string) { // test issue 14432 output := runTestProg(t, "testprog", name) if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } } func TestPanicDeadlockGosched(t *testing.T) { testPanicDeadlock(t, "GoschedInPanic", "panic: errorThatGosched\n\n") } func TestPanicDeadlockSyscall(t *testing.T) { testPanicDeadlock(t, "SyscallInPanic", "1\n2\npanic: 3\n\n") } func TestPanicLoop(t *testing.T) { output := runTestProg(t, "testprog", "PanicLoop") if want := "panic while printing panic value"; !strings.Contains(output, want) { t.Errorf("output does not contain %q:\n%s", want, output) } } func TestMemPprof(t *testing.T) { testenv.MustHaveGoRun(t) exe, err := buildTestProg(t, "testprog") if err != nil { t.Fatal(err) } got, err := testenv.CleanCmdEnv(exec.Command(exe, "MemProf")).CombinedOutput() if err != nil { t.Fatal(err) } fn := strings.TrimSpace(string(got)) defer os.Remove(fn) for try := 0; try < 2; try++ { cmd := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), "tool", "pprof", "-alloc_space", "-top")) // Check that pprof works both with and without explicit executable on command line.
if try == 0 { cmd.Args = append(cmd.Args, exe, fn) } else { cmd.Args = append(cmd.Args, fn) } found := false for i, e := range cmd.Env { if strings.HasPrefix(e, "PPROF_TMPDIR=") { cmd.Env[i] = "PPROF_TMPDIR=" + os.TempDir() found = true break } } if !found { cmd.Env = append(cmd.Env, "PPROF_TMPDIR="+os.TempDir()) } top, err := cmd.CombinedOutput() t.Logf("%s:\n%s", cmd.Args, top) if err != nil { t.Error(err) } else if !bytes.Contains(top, []byte("MemProf")) { t.Error("missing MemProf in pprof output") } } } var concurrentMapTest = flag.Bool("run_concurrent_map_tests", false, "also run flaky concurrent map tests") func TestConcurrentMapWrites(t *testing.T) { if !*concurrentMapTest { t.Skip("skipping without -run_concurrent_map_tests") } testenv.MustHaveGoRun(t) output := runTestProg(t, "testprog", "concurrentMapWrites") want := "fatal error: concurrent map writes" if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } } func TestConcurrentMapReadWrite(t *testing.T) { if !*concurrentMapTest { t.Skip("skipping without -run_concurrent_map_tests") } testenv.MustHaveGoRun(t) output := runTestProg(t, "testprog", "concurrentMapReadWrite") want := "fatal error: concurrent map read and map write" if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } } func TestConcurrentMapIterateWrite(t *testing.T) { if !*concurrentMapTest { t.Skip("skipping without -run_concurrent_map_tests") } testenv.MustHaveGoRun(t) output := runTestProg(t, "testprog", "concurrentMapIterateWrite") want := "fatal error: concurrent map iteration and map write" if !strings.HasPrefix(output, want) { t.Fatalf("output does not start with %q:\n%s", want, output) } } type point struct { x, y *int } func (p *point) negate() { *p.x = *p.x * -1 *p.y = *p.y * -1 } // Test for issue #10152. func TestPanicInlined(t *testing.T) { defer func() { r := recover() if r == nil { t.Fatalf("recover failed") } buf := make([]byte, 2048) n := runtime.Stack(buf, false) buf = buf[:n] if !bytes.Contains(buf, []byte("(*point).negate(")) { t.Fatalf("expecting stack trace to contain call to (*point).negate()") } }() pt := new(point) pt.negate() } // Test for issues #3934 and #20018. // We want to delay exiting until a panic print is complete. func TestPanicRace(t *testing.T) { testenv.MustHaveGoRun(t) exe, err := buildTestProg(t, "testprog") if err != nil { t.Fatal(err) } // The test is intentionally racy, and in my testing does not // produce the expected output about 0.05% of the time. // So run the program in a loop and only fail the test if we // get the wrong output ten times in a row. const tries = 10 retry: for i := 0; i < tries; i++ { got, err := testenv.CleanCmdEnv(exec.Command(exe, "PanicRace")).CombinedOutput() if err == nil { t.Logf("try %d: program exited successfully, should have failed", i+1) continue } if i > 0 { t.Logf("try %d:\n", i+1) } t.Logf("%s\n", got) wants := []string{ "panic: crash", "PanicRace", "created by ", } for _, want := range wants { if !bytes.Contains(got, []byte(want)) { t.Logf("did not find expected string %q", want) continue retry } } // Test generated expected output. 
return } t.Errorf("test ran %d times without producing expected output", tries) } func TestBadTraceback(t *testing.T) { output := runTestProg(t, "testprog", "BadTraceback") for _, want := range []string{ "runtime: unexpected return pc", "called from 0xbad", "00000bad", // Smashed LR in hex dump "<main.badLR", // Symbolization in hex dump (badLR1 or badLR2) } { if !strings.Contains(output, want) { t.Errorf("output does not contain %q:\n%s", want, output) } } } func TestTimePprof(t *testing.T) { // This test is unreliable on any system in which nanotime // calls into libc. switch runtime.GOOS { case "aix", "darwin", "openbsd", "solaris": t.Skipf("skipping on %s because nanotime calls libc", runtime.GOOS) } // Pass GOTRACEBACK for issue #41120 to try to get more // information on timeout. fn := runTestProg(t, "testprog", "TimeProf", "GOTRACEBACK=crash") fn = strings.TrimSpace(fn) defer os.Remove(fn) cmd := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), "tool", "pprof", "-top", "-nodecount=1", fn)) cmd.Env = append(cmd.Env, "PPROF_TMPDIR="+os.TempDir()) top, err := cmd.CombinedOutput() t.Logf("%s", top) if err != nil { t.Error(err) } else if bytes.Contains(top, []byte("ExternalCode")) { t.Error("profiler refers to ExternalCode") } } // Test that runtime.abort does so. func TestAbort(t *testing.T) { // Pass GOTRACEBACK to ensure we get runtime frames. output := runTestProg(t, "testprog", "Abort", "GOTRACEBACK=system") if want := "runtime.abort"; !strings.Contains(output, want) { t.Errorf("output does not contain %q:\n%s", want, output) } if strings.Contains(output, "BAD") { t.Errorf("output contains BAD:\n%s", output) } // Check that it's a signal traceback. want := "PC=" // For systems that use a breakpoint, check specifically for that. switch runtime.GOARCH { case "386", "amd64": switch runtime.GOOS { case "plan9": want = "sys: breakpoint" case "windows": want = "Exception 0x80000003" default: want = "SIGTRAP" } } if !strings.Contains(output, want) { t.Errorf("output does not contain %q:\n%s", want, output) } } // For TestRuntimePanic: test a panic in the runtime package without // involving the testing harness. func init() { if os.Getenv("GO_TEST_RUNTIME_PANIC") == "1" { defer func() { if r := recover(); r != nil { // We expect to crash, so exit 0 // to indicate failure. os.Exit(0) } }() runtime.PanicForTesting(nil, 1) // We expect to crash, so exit 0 to indicate failure. os.Exit(0) } } func TestRuntimePanic(t *testing.T) { testenv.MustHaveExec(t) cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestRuntimePanic")) cmd.Env = append(cmd.Env, "GO_TEST_RUNTIME_PANIC=1") out, err := cmd.CombinedOutput() t.Logf("%s", out) if err == nil { t.Error("child process did not fail") } else if want := "runtime.unexportedPanicForTesting"; !bytes.Contains(out, []byte(want)) { t.Errorf("output did not contain expected string %q", want) } } // Test that g0 stack overflows are handled gracefully. func TestG0StackOverflow(t *testing.T) { testenv.MustHaveExec(t) switch runtime.GOOS { case "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "android": t.Skipf("g0 stack is wrong on pthread platforms (see golang.org/issue/26061)") } if os.Getenv("TEST_G0_STACK_OVERFLOW") != "1" { cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestG0StackOverflow", "-test.v")) cmd.Env = append(cmd.Env, "TEST_G0_STACK_OVERFLOW=1") out, err := cmd.CombinedOutput() // Don't check err since it's expected to crash. 
if n := strings.Count(string(out), "morestack on g0\n"); n != 1 { t.Fatalf("%s\n(exit status %v)", out, err) } // Check that it's a signal-style traceback. if runtime.GOOS != "windows" { if want := "PC="; !strings.Contains(string(out), want) { t.Errorf("output does not contain %q:\n%s", want, out) } } return } runtime.G0StackOverflow() } // Test that panic message is not clobbered. // See issue 30150. func TestDoublePanic(t *testing.T) { output := runTestProg(t, "testprog", "DoublePanic", "GODEBUG=clobberfree=1") wants := []string{"panic: XXX", "panic: YYY"} for _, want := range wants { if !strings.Contains(output, want) { t.Errorf("output:\n%s\n\nwant output containing: %s", output, want) } } }
[ "\"GO_TEST_TIMEOUT_SCALE\"", "\"GO_TEST_RUNTIME_PANIC\"", "\"TEST_G0_STACK_OVERFLOW\"" ]
[]
[ "GO_TEST_TIMEOUT_SCALE", "GO_TEST_RUNTIME_PANIC", "TEST_G0_STACK_OVERFLOW" ]
[]
["GO_TEST_TIMEOUT_SCALE", "GO_TEST_RUNTIME_PANIC", "TEST_G0_STACK_OVERFLOW"]
go
3
0
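Several of the crash tests in the row above (TestRuntimePanic, TestG0StackOverflow) follow the same pattern: the test re-executes its own binary as a child process gated on a marker environment variable, then inspects the child's combined output and exit status. Below is a minimal sketch of that pattern; the test name TestChildCrash and the RETEST_CHILD variable are illustrative stand-ins and are not part of the Go runtime test suite.

package retest

import (
	"os"
	"os/exec"
	"strings"
	"testing"
)

// TestChildCrash re-runs the test binary with a marker variable set; the
// child branch performs the work that is expected to fail, while the parent
// checks that the child exited non-zero with a recognizable message.
// RETEST_CHILD is a hypothetical marker, analogous to GO_TEST_RUNTIME_PANIC.
func TestChildCrash(t *testing.T) {
	if os.Getenv("RETEST_CHILD") == "1" {
		// Child mode: crash on purpose so the parent sees a failure.
		panic("expected child panic")
	}
	cmd := exec.Command(os.Args[0], "-test.run=TestChildCrash")
	cmd.Env = append(os.Environ(), "RETEST_CHILD=1")
	out, err := cmd.CombinedOutput()
	if err == nil {
		t.Fatalf("child did not fail; output:\n%s", out)
	}
	if want := "expected child panic"; !strings.Contains(string(out), want) {
		t.Fatalf("output does not contain %q:\n%s", want, out)
	}
}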
controllers/specialresource_controller.go
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controllers import ( "context" "os" buildv1 "github.com/openshift/api/build/v1" imagev1 "github.com/openshift/api/image/v1" secv1 "github.com/openshift/api/security/v1" "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" storagev1 "k8s.io/api/storage/v1" k8sruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/reconcile" srov1beta1 "github.com/openshift-psap/special-resource-operator/api/v1beta1" "github.com/openshift-psap/special-resource-operator/internal/controllers/finalizers" "github.com/openshift-psap/special-resource-operator/internal/controllers/state" "github.com/openshift-psap/special-resource-operator/pkg/assets" "github.com/openshift-psap/special-resource-operator/pkg/clients" "github.com/openshift-psap/special-resource-operator/pkg/cluster" "github.com/openshift-psap/special-resource-operator/pkg/filter" "github.com/openshift-psap/special-resource-operator/pkg/helmer" "github.com/openshift-psap/special-resource-operator/pkg/kernel" "github.com/openshift-psap/special-resource-operator/pkg/metrics" "github.com/openshift-psap/special-resource-operator/pkg/poll" "github.com/openshift-psap/special-resource-operator/pkg/proxy" "github.com/openshift-psap/special-resource-operator/pkg/resource" "github.com/openshift-psap/special-resource-operator/pkg/runtime" "github.com/openshift-psap/special-resource-operator/pkg/storage" "github.com/openshift-psap/special-resource-operator/pkg/upgrade" ) // SpecialResourceReconciler reconciles a SpecialResource object type SpecialResourceReconciler struct { Scheme *k8sruntime.Scheme Metrics metrics.Metrics Cluster cluster.Cluster ClusterInfo upgrade.ClusterInfo Creator resource.Creator Filter filter.Filter Finalizer finalizers.SpecialResourceFinalizer Helmer helmer.Helmer Assets assets.Assets PollActions poll.PollActions StatusUpdater state.StatusUpdater Storage storage.Storage KernelData kernel.KernelData ProxyAPI proxy.ProxyAPI RuntimeAPI runtime.RuntimeAPI KubeClient clients.ClientsInterface } // Reconcile Reconiliation entry point func (r *SpecialResourceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) log.Info("Reconciling", "mode", r.Filter.GetMode()) log.Info("TODO: preflight checks") sr, srs, err := r.getSpecialResources(ctx, req) if err != nil { log.Error(err, "Failed to get SpecialResources") return ctrl.Result{}, err } else if sr == nil { log.Info("SpecialResource not found - probably deleted. 
Not reconciling.") return ctrl.Result{}, nil } r.Metrics.SetSpecialResourcesCreated(len(srs.Items)) wi := &WorkItem{ SpecialResource: sr, AllSRs: srs, } // Reconcile all specialresources if res, err := r.SpecialResourcesReconcile(ctx, wi); err != nil || res.Requeue { return res, errors.Wrap(err, "Failed to reconcile SpecialResource") } log.Info("Reconciliation successful") return reconcile.Result{}, nil } func (r *SpecialResourceReconciler) getSpecialResources(ctx context.Context, req ctrl.Request) (*srov1beta1.SpecialResource, *srov1beta1.SpecialResourceList, error) { specialresources := &srov1beta1.SpecialResourceList{} opts := []client.ListOption{} err := r.KubeClient.List(ctx, specialresources, opts...) if err != nil { return nil, nil, err } var idx int var found bool if idx, found = FindSR(specialresources.Items, req.Name, "Name"); !found { // If we do not find the specialresource it might be deleted, // if it is a dependency of another specialresource assign the // parent specialresource for processing. obj := types.NamespacedName{ Namespace: os.Getenv("OPERATOR_NAMESPACE"), Name: "special-resource-dependencies", } parent, err := r.Storage.CheckConfigMapEntry(ctx, req.Name, obj) if err != nil { return nil, nil, err } idx, found = FindSR(specialresources.Items, parent, "Name") if !found { return nil, nil, nil } } return &specialresources.Items[idx], specialresources, nil } // SetupWithManager main initialization for manager func (r *SpecialResourceReconciler) SetupWithManager(mgr ctrl.Manager) error { platform, err := r.KubeClient.GetPlatform() if err != nil { return err } if platform == "OCP" { return ctrl.NewControllerManagedBy(mgr). Named("specialresource"). For(&srov1beta1.SpecialResource{}). Owns(&v1.Pod{}). Owns(&appsv1.DaemonSet{}). Owns(&appsv1.Deployment{}). Owns(&storagev1.CSIDriver{}). Owns(&imagev1.ImageStream{}). Owns(&buildv1.BuildConfig{}). Owns(&v1.ConfigMap{}). Owns(&v1.ServiceAccount{}). Owns(&rbacv1.Role{}). Owns(&rbacv1.RoleBinding{}). Owns(&rbacv1.ClusterRole{}). Owns(&rbacv1.ClusterRoleBinding{}). Owns(&secv1.SecurityContextConstraints{}). Owns(&v1.Secret{}). WithOptions(controller.Options{ MaxConcurrentReconciles: 1, }). WithEventFilter(r.Filter.GetPredicates()). Complete(r) } else { return ctrl.NewControllerManagedBy(mgr). Named("specialresource"). For(&srov1beta1.SpecialResource{}). Owns(&v1.Pod{}). Owns(&appsv1.DaemonSet{}). Owns(&appsv1.Deployment{}). Owns(&storagev1.CSIDriver{}). Owns(&v1.ConfigMap{}). Owns(&v1.ServiceAccount{}). Owns(&rbacv1.Role{}). Owns(&rbacv1.RoleBinding{}). Owns(&rbacv1.ClusterRole{}). Owns(&rbacv1.ClusterRoleBinding{}). Owns(&v1.Secret{}). WithOptions(controller.Options{ MaxConcurrentReconciles: 1, }). WithEventFilter(r.Filter.GetPredicates()). Complete(r) } }
[ "\"OPERATOR_NAMESPACE\"" ]
[]
[ "OPERATOR_NAMESPACE" ]
[]
["OPERATOR_NAMESPACE"]
go
1
0
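In getSpecialResources above, a request that names a SpecialResource which no longer exists is resolved by consulting the "special-resource-dependencies" ConfigMap in the namespace taken from OPERATOR_NAMESPACE and reconciling the parent resource instead. The sketch below isolates that fallback flow; resolveTarget and lookupParent are hypothetical stand-ins for FindSR and Storage.CheckConfigMapEntry, not functions from the operator.

package example

import (
	"fmt"
	"os"
)

// resolveTarget returns the name to reconcile: the requested resource if it
// exists, otherwise its parent as recorded in the dependency map kept in the
// OPERATOR_NAMESPACE namespace. An empty result means nothing to reconcile.
func resolveTarget(existing []string, requested string, lookupParent func(ns, name string) (string, error)) (string, error) {
	for _, name := range existing {
		if name == requested {
			return name, nil
		}
	}
	// Requested SR may be a deleted dependency; ask the dependency map
	// (conceptually the "special-resource-dependencies" ConfigMap) which
	// parent owns it.
	ns := os.Getenv("OPERATOR_NAMESPACE")
	parent, err := lookupParent(ns, requested)
	if err != nil {
		return "", fmt.Errorf("looking up parent of %s: %w", requested, err)
	}
	for _, name := range existing {
		if name == parent {
			return name, nil
		}
	}
	return "", nil
}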
cmd/playground/playground.go
package main import ( "fmt" "log" "os" "github.com/kashyaprahul94/go-playground/pkg/algorithm" datamodel "github.com/kashyaprahul94/go-playground/pkg/data-model" _ "github.com/kashyaprahul94/go-playground/pkg/web/mux" _ "github.com/kashyaprahul94/go-playground/pkg/web/native" ) func main() { if err := run(); err != nil { log.Println("error :", err) os.Exit(1) } } func algos() { // Sorting sorter := algorithm.BubbleSort{Numbers: []int{4, 3, 1, 5, 2}} sortingResult := sorter.Sort() fmt.Printf("%v", sortingResult) fmt.Println() // LinkedList list := algorithm.SinglyLinkedList() list.Push(4) list.Push(3) list.Push(1) list.Push(12.2) list.Push("s") fmt.Println(list) } func webStuff() { port := os.Getenv("PORT") if port == "" { port = "4444" } // native.StartServer(port) // mux.StartServer(port) } func dataModels() { datamodel.PlayWithJSON() } func run() error { // algos() // webStuff() // dataModels() return nil }
[ "\"PORT\"" ]
[]
[ "PORT" ]
[]
["PORT"]
go
1
0
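playground.go reads PORT and falls back to "4444" when it is unset before (optionally) starting a server. A minimal, runnable sketch of that default-from-environment pattern follows; the envOr helper is an illustrative name, not something defined in the repository.

package main

import (
	"fmt"
	"os"
)

// envOr returns the value of the named environment variable, or the fallback
// when it is unset or empty -- the same pattern webStuff uses for PORT.
func envOr(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}

func main() {
	port := envOr("PORT", "4444") // "4444" matches the fallback in playground.go
	fmt.Println("would listen on port", port)
}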
HackerRank Solutions/Algorithms/Implementation/Migratory Birds.java
import java.io.*; import java.math.*; import java.security.*; import java.text.*; import java.util.*; import java.util.concurrent.*; import java.util.function.*; import java.util.regex.*; import java.util.stream.*; import static java.util.stream.Collectors.joining; import static java.util.stream.Collectors.toList; public class Solution { // Complete the migratoryBirds function below. static int migratoryBirds(List<Integer> arr) { int[] frequencyArray = new int[5]; for (Integer i : arr) { frequencyArray[i-1] += 1; } int maxFrequency = frequencyArray[0]; int maxFrequencyBird = 1; for (int i = 1; i < frequencyArray.length; i++) { if (frequencyArray[i] > maxFrequency) { maxFrequency = frequencyArray[i]; maxFrequencyBird = i+1; } } return maxFrequencyBird; } public static void main(String[] args) throws IOException { BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(System.in)); BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH"))); int arrCount = Integer.parseInt(bufferedReader.readLine().trim()); List<Integer> arr = Stream.of(bufferedReader.readLine().replaceAll("\\s+$", "").split(" ")) .map(Integer::parseInt) .collect(toList()); int result = migratoryBirds(arr); bufferedWriter.write(String.valueOf(result)); bufferedWriter.newLine(); bufferedReader.close(); bufferedWriter.close(); } }
[ "\"OUTPUT_PATH\"" ]
[]
[ "OUTPUT_PATH" ]
[]
["OUTPUT_PATH"]
java
1
0
python/helpers/pycharm/behave_runner.py
# coding=utf-8 """ Behave BDD runner. See _bdd_utils#get_path_by_env for information on how to pass a list of features here. Each feature could be a file, a folder with feature files, or a folder with a "features" subfolder. Other args are tag expressions in the format (--tags=.. --tags=..). See https://pythonhosted.org/behave/behave.html#tag-expression """ import functools import glob import re import sys import traceback from behave import __version__ as behave_version from behave.formatter.base import Formatter from behave.model import Step, ScenarioOutline, Feature, Scenario from behave.tag_expression import TagExpression from distutils import version from _jb_django_behave import run_as_django_behave import _bdd_utils import tcmessages from _jb_utils import VersionAgnosticUtils _MAX_STEPS_SEARCH_FEATURES = 5000 # Do not look for features in a folder that has more than this number of children _FEATURES_FOLDER = 'features' # "features" folder name. __author__ = 'Ilya.Kazakevich' from behave import configuration, runner import os def _get_dirs_to_run(base_dir_to_search): """ Searches for "features" dirs in some base_dir :return: list of feature dirs to run :rtype: list :param base_dir_to_search root directory to search (should not have too many children!) :type base_dir_to_search str """ result = set() for (step, (folder, sub_folders, files)) in enumerate(os.walk(base_dir_to_search)): if os.path.basename(folder) == _FEATURES_FOLDER and os.path.isdir(folder): result.add(os.path.abspath(folder)) if step == _MAX_STEPS_SEARCH_FEATURES: # Guard err = "Folder {0} is too deep to find any features folder. Please provide a concrete folder".format( base_dir_to_search) raise Exception(err) return list(result) def _merge_hooks_wrapper(*hooks): """ Creates a wrapper that runs the provided behave hooks sequentially :param hooks: hooks to run :return: wrapper """ # TODO: Wheel reinvented!!!! def wrapper(*args, **kwargs): for hook in hooks: hook(*args, **kwargs) return wrapper class _RunnerWrapper(runner.Runner): """ Wrapper around behave native wrapper. Has nothing to do with BddRunner! We need it to support dry runs (to fetch data from scenarios) and hooks API """ def __init__(self, config, hooks): """ :type config configuration.Configuration :param config behave configuration :type hooks dict or empty if new runner mode :param hooks hooks in format "before_scenario" => f(context, scenario) to load after/before hooks, provided by user """ super(_RunnerWrapper, self).__init__(config) self.dry_run = False """ Does not run tests (only fetches "self.features") if true. Runs tests otherwise. """ self.__hooks = hooks def load_hooks(self, filename='environment.py'): """ Overrides parent "load_hooks" to add "self.__hooks" :param filename: env. file name """ super(_RunnerWrapper, self).load_hooks(filename) for (hook_name, hook) in self.__hooks.items(): hook_to_add = hook if hook_name in self.hooks: user_hook = self.hooks[hook_name] if hook_name.startswith("before"): user_and_custom_hook = [user_hook, hook] else: user_and_custom_hook = [hook, user_hook] hook_to_add = _merge_hooks_wrapper(*user_and_custom_hook) self.hooks[hook_name] = hook_to_add def run_model(self, features=None): """ Overrides parent method to stop (do nothing) in case of "dry_run" :param features: features to run :return: """ if self.dry_run: # To stop further execution return return super(_RunnerWrapper, self).run_model(features) def clean(self): """ Cleans runner after dry run (clears hooks, features etc). To be called before real run!
""" self.dry_run = False self.hooks.clear() self.features = [] class _BehaveRunner(_bdd_utils.BddRunner): """ BddRunner for behave """ def __process_hook(self, is_started, context, element): """ Hook to be installed. Reports steps, features etc. :param is_started true if test/feature/scenario is started :type is_started bool :param context behave context :type context behave.runner.Context :param element feature/suite/step """ element.location.file = element.location.filename # To preserve _bdd_utils contract utils = VersionAgnosticUtils() if isinstance(element, Step): # Process step step_name = u"{0} {1}".format(utils.to_unicode(element.keyword), utils.to_unicode(element.name)) duration_ms = element.duration * 1000 if is_started: self._test_started(step_name, element.location) elif element.status == 'passed': self._test_passed(step_name, duration_ms) elif element.status == 'failed': # Correct way is to use element.errormessage # but assertions do not have trace there (due to Behave internals) # do, we collect it manually error_message = element.error_message fetch_log = not error_message # If no error_message provided, need to fetch log manually trace = "" if isinstance(element.exception, AssertionError) or not error_message: trace = self._collect_trace(element, utils) # May be empty https://github.com/behave/behave/issues/468 for some exceptions if not trace and not error_message: try: error_message = traceback.format_exc() except AttributeError: # Exception may have empty stracktrace, and traceback.format_exc() throws # AttributeError in this case trace = self._collect_trace(element, utils) if not error_message: # Format exception as last resort error_message = element.exception message_as_string = utils.to_unicode(error_message) if fetch_log and self.__real_runner.config.log_capture: try: capture = self.__real_runner.log_capture # 1.2.5 except AttributeError: capture = self.__real_runner.capture_controller.log_capture # 1.2.6 message_as_string += u"\n" + utils.to_unicode(capture.getvalue()) self._test_failed(step_name, message_as_string, trace, duration=duration_ms) elif element.status == 'undefined': self._test_undefined(step_name, element.location) else: self._test_skipped(step_name, element.status, element.location) elif not is_started and isinstance(element, Scenario) and element.status == 'failed': # To process scenarios with undefined/skipped tests for step in element.steps: assert isinstance(step, Step), step if step.status not in ['passed', 'failed']: # Something strange, probably skipped or undefined self.__process_hook(False, context, step) self._feature_or_scenario(is_started, element.name, element.location) elif isinstance(element, ScenarioOutline): self._feature_or_scenario(is_started, str(element.examples), element.location) else: self._feature_or_scenario(is_started, element.name, element.location) def _collect_trace(self, element, utils): return u"".join([utils.to_unicode(l) for l in traceback.format_tb(element.exc_traceback)]) def __init__(self, config, base_dir, use_old_runner): """ :type config configuration.Configuration """ super(_BehaveRunner, self).__init__(base_dir) self.__config = config # Install hooks self.__real_runner = _RunnerWrapper(config, { "before_feature": functools.partial(self.__process_hook, True), "after_feature": functools.partial(self.__process_hook, False), "before_scenario": functools.partial(self.__process_hook, True), "after_scenario": functools.partial(self.__process_hook, False), "before_step": functools.partial(self.__process_hook, True), 
"after_step": functools.partial(self.__process_hook, False) } if use_old_runner else dict()) def _run_tests(self): self.__real_runner.run() def __filter_scenarios_by_args(self, scenario): """ Filters out scenarios that should be skipped by tags or scenario names :param scenario scenario to check :return true if should pass """ assert isinstance(scenario, Scenario), scenario expected_tags = self.__config.tags scenario_name_re = self.__config.name_re if scenario_name_re and not scenario_name_re.match(scenario.name): return False if not expected_tags: return True # No tags nor names are required return isinstance(expected_tags, TagExpression) and expected_tags.check(scenario.tags) def _get_features_to_run(self): self.__real_runner.dry_run = True self.__real_runner.run() features_to_run = self.__real_runner.features self.__real_runner.clean() # To make sure nothing left after dry run # Change outline scenario skeletons with real scenarios for feature in features_to_run: assert isinstance(feature, Feature), feature scenarios = [] for scenario in feature.scenarios: try: scenario.tags.extend(feature.tags) except AttributeError: pass if isinstance(scenario, ScenarioOutline): scenarios.extend(scenario.scenarios) else: scenarios.append(scenario) feature.scenarios = filter(self.__filter_scenarios_by_args, scenarios) return features_to_run if __name__ == "__main__": # TODO: support all other params instead command_args = list(filter(None, sys.argv[1:])) if command_args: if "--junit" in command_args: raise Exception("--junit report type for Behave is unsupported in PyCharm. \n " "See: https://youtrack.jetbrains.com/issue/PY-14219") _bdd_utils.fix_win_drive(command_args[0]) (base_dir, scenario_names, what_to_run) = _bdd_utils.get_what_to_run_by_env(os.environ) for scenario_name in scenario_names: command_args += ["-n", re.escape(scenario_name)] # TODO : rewite pythonic my_config = configuration.Configuration(command_args=command_args) loose_version = version.LooseVersion(behave_version) assert loose_version >= version.LooseVersion("1.2.5"), "Version not supported, please upgrade Behave" # New version supports 1.2.6 only use_old_runner = "PYCHARM_BEHAVE_OLD_RUNNER" in os.environ or loose_version < version.LooseVersion("1.2.6") from behave.formatter import _registry FORMAT_NAME = "com.jetbrains.pycharm.formatter" if use_old_runner: class _Null(Formatter): """ Null formater to prevent stdout output """ pass _registry.register_as(FORMAT_NAME, _Null) else: custom_messages = tcmessages.TeamcityServiceMessages() # Not safe to import it in old mode from teamcity.jb_behave_formatter import TeamcityFormatter class TeamcityFormatterWithLocation(TeamcityFormatter): def _report_suite_started(self, suite, suite_name): location = suite.location custom_messages.testSuiteStarted(suite_name, _bdd_utils.get_location(base_dir, location.filename, location.line)) def _report_test_started(self, test, test_name): location = test.location custom_messages.testStarted(test_name, _bdd_utils.get_location(base_dir, location.filename, location.line)) _registry.register_as(FORMAT_NAME, TeamcityFormatterWithLocation) my_config.format = [FORMAT_NAME] # To prevent output to stdout my_config.reporters = [] # To prevent summary to stdout my_config.stdout_capture = False # For test output my_config.stderr_capture = False # For test output features = set() for feature in what_to_run: if os.path.isfile(feature) or glob.glob( os.path.join(feature, "*.feature")): # File of folder with "features" provided, load it features.add(feature) elif 
os.path.isdir(feature): features |= set(_get_dirs_to_run(feature)) # Find "features" subfolder my_config.paths = list(features) if what_to_run and not my_config.paths: raise Exception("Nothing to run in {0}".format(what_to_run)) # Run as Django if supported, run plain otherwise if not run_as_django_behave(FORMAT_NAME, what_to_run, command_args): _BehaveRunner(my_config, base_dir, use_old_runner).run()
[]
[]
[]
[]
[]
python
0
0
cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java
/* * Copyright 2017-2018, Strimzi authors. * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). */ package io.strimzi.operator.cluster.model; import io.fabric8.kubernetes.api.model.Affinity; import io.fabric8.kubernetes.api.model.ConfigMap; import io.fabric8.kubernetes.api.model.ConfigMapBuilder; import io.fabric8.kubernetes.api.model.ConfigMapVolumeSource; import io.fabric8.kubernetes.api.model.ConfigMapVolumeSourceBuilder; import io.fabric8.kubernetes.api.model.Container; import io.fabric8.kubernetes.api.model.ContainerBuilder; import io.fabric8.kubernetes.api.model.ContainerPort; import io.fabric8.kubernetes.api.model.ContainerPortBuilder; import io.fabric8.kubernetes.api.model.EnvVar; import io.fabric8.kubernetes.api.model.EnvVarBuilder; import io.fabric8.kubernetes.api.model.EnvVarSource; import io.fabric8.kubernetes.api.model.EnvVarSourceBuilder; import io.fabric8.kubernetes.api.model.HasMetadata; import io.fabric8.kubernetes.api.model.LabelSelector; import io.fabric8.kubernetes.api.model.LabelSelectorBuilder; import io.fabric8.kubernetes.api.model.OwnerReference; import io.fabric8.kubernetes.api.model.OwnerReferenceBuilder; import io.fabric8.kubernetes.api.model.PersistentVolumeClaim; import io.fabric8.kubernetes.api.model.PersistentVolumeClaimBuilder; import io.fabric8.kubernetes.api.model.PodSecurityContext; import io.fabric8.kubernetes.api.model.PodSecurityContextBuilder; import io.fabric8.kubernetes.api.model.Probe; import io.fabric8.kubernetes.api.model.ProbeBuilder; import io.fabric8.kubernetes.api.model.Quantity; import io.fabric8.kubernetes.api.model.ResourceRequirements; import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder; import io.fabric8.kubernetes.api.model.Secret; import io.fabric8.kubernetes.api.model.SecretBuilder; import io.fabric8.kubernetes.api.model.SecretVolumeSource; import io.fabric8.kubernetes.api.model.SecretVolumeSourceBuilder; import io.fabric8.kubernetes.api.model.Service; import io.fabric8.kubernetes.api.model.ServiceBuilder; import io.fabric8.kubernetes.api.model.ServicePort; import io.fabric8.kubernetes.api.model.ServicePortBuilder; import io.fabric8.kubernetes.api.model.Toleration; import io.fabric8.kubernetes.api.model.Volume; import io.fabric8.kubernetes.api.model.VolumeBuilder; import io.fabric8.kubernetes.api.model.VolumeMount; import io.fabric8.kubernetes.api.model.VolumeMountBuilder; import io.fabric8.kubernetes.api.model.extensions.Deployment; import io.fabric8.kubernetes.api.model.extensions.DeploymentBuilder; import io.fabric8.kubernetes.api.model.extensions.DeploymentStrategy; import io.fabric8.kubernetes.api.model.extensions.StatefulSet; import io.fabric8.kubernetes.api.model.extensions.StatefulSetBuilder; import io.fabric8.kubernetes.api.model.extensions.StatefulSetUpdateStrategyBuilder; import io.strimzi.api.kafka.model.CpuMemory; import io.strimzi.api.kafka.model.ExternalLogging; import io.strimzi.api.kafka.model.InlineLogging; import io.strimzi.api.kafka.model.JvmOptions; import io.strimzi.api.kafka.model.Logging; import io.strimzi.api.kafka.model.PersistentClaimStorage; import io.strimzi.api.kafka.model.Resources; import io.strimzi.api.kafka.model.Storage; import io.strimzi.certs.CertAndKey; import io.strimzi.certs.CertManager; import io.strimzi.certs.Subject; import io.strimzi.operator.cluster.ClusterOperator; import io.strimzi.operator.common.model.Labels; import io.vertx.core.json.JsonObject; import org.apache.logging.log4j.LogManager; import 
org.apache.logging.log4j.Logger; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.StringWriter; import java.nio.file.Files; import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.function.BiFunction; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; import static io.strimzi.api.kafka.model.Quantities.normalizeCpu; import static io.strimzi.api.kafka.model.Quantities.normalizeMemory; import static java.util.Arrays.asList; public abstract class AbstractModel { protected static final Logger log = LogManager.getLogger(AbstractModel.class.getName()); // the Kubernetes service DNS domain is customizable on cluster creation but it's "cluster.local" by default // there is no clean way to get it from a running application so we are passing it through an env var public static final String KUBERNETES_SERVICE_DNS_DOMAIN = System.getenv().getOrDefault("KUBERNETES_SERVICE_DNS_DOMAIN", "cluster.local"); protected static final int CERTS_EXPIRATION_DAYS = 365; private static final String VOLUME_MOUNT_HACK_IMAGE = "busybox"; protected static final String VOLUME_MOUNT_HACK_NAME = "volume-mount-hack"; private static final Long VOLUME_MOUNT_HACK_GROUPID = 1001L; public static final String ANCILLARY_CM_KEY_METRICS = "metrics-config.yml"; public static final String ANCILLARY_CM_KEY_LOG_CONFIG = "log4j.properties"; public static final String ENV_VAR_DYNAMIC_HEAP_FRACTION = "DYNAMIC_HEAP_FRACTION"; public static final String ENV_VAR_KAFKA_HEAP_OPTS = "KAFKA_HEAP_OPTS"; public static final String ENV_VAR_KAFKA_JVM_PERFORMANCE_OPTS = "KAFKA_JVM_PERFORMANCE_OPTS"; public static final String ENV_VAR_DYNAMIC_HEAP_MAX = "DYNAMIC_HEAP_MAX"; public static final String NETWORK_POLICY_KEY_SUFFIX = "-network-policy"; private static final String DELETE_CLAIM_ANNOTATION = ClusterOperator.STRIMZI_CLUSTER_OPERATOR_DOMAIN + "/delete-claim"; protected final String cluster; protected final String namespace; protected final Labels labels; // Docker image configuration protected String image; // Number of replicas protected int replicas; protected String readinessPath; protected int readinessTimeout; protected int readinessInitialDelay; protected String livenessPath; protected int livenessTimeout; protected int livenessInitialDelay; protected String serviceName; protected String headlessServiceName; protected String name; protected static final int METRICS_PORT = 9404; protected static final String METRICS_PORT_NAME = "metrics"; protected boolean isMetricsEnabled; protected Iterable<Map.Entry<String, Object>> metricsConfig; protected String ancillaryConfigName; protected String logConfigName; protected Storage storage; protected AbstractConfiguration configuration; protected String mountPath; public static final String VOLUME_NAME = "data"; protected String logAndMetricsConfigMountPath; protected String logAndMetricsConfigVolumeName; private JvmOptions jvmOptions; private Resources resources; private Affinity userAffinity; private List<Toleration> tolerations; protected Map validLoggerFields; private String[] validLoggerValues = new String[]{"INFO", "ERROR", "WARN", "TRACE", "DEBUG", "FATAL", "OFF" }; private Logging logging; protected CertAndKey clusterCA; // Owner Reference information private String ownerApiVersion; private String ownerKind; private String ownerUid; /** * 
Constructor * * @param namespace Kubernetes/OpenShift namespace where cluster resources are going to be created * @param cluster overall cluster name */ protected AbstractModel(String namespace, String cluster, Labels labels) { this.cluster = cluster; this.namespace = namespace; this.labels = labels.withCluster(cluster); } public Labels getLabels() { return labels; } public int getReplicas() { return replicas; } protected void setReplicas(int replicas) { this.replicas = replicas; } protected void setImage(String image) { this.image = image; } protected void setReadinessTimeout(int readinessTimeout) { this.readinessTimeout = readinessTimeout; } protected void setReadinessInitialDelay(int readinessInitialDelay) { this.readinessInitialDelay = readinessInitialDelay; } protected void setLivenessTimeout(int livenessTimeout) { this.livenessTimeout = livenessTimeout; } protected void setLivenessInitialDelay(int livenessInitialDelay) { this.livenessInitialDelay = livenessInitialDelay; } /** * Returns the Docker image which should be used by this cluster * * @return */ public String getName() { return name; } public String getServiceName() { return serviceName; } public String getHeadlessServiceName() { return headlessServiceName; } protected Map<String, String> getSelectorLabels() { return labels.withName(name).strimziLabels().toMap(); } protected Map<String, String> getLabelsWithName() { return getLabelsWithName(name); } protected Map<String, String> getLabelsWithName(String name) { return labels.withName(name).toMap(); } public boolean isMetricsEnabled() { return isMetricsEnabled; } protected void setMetricsEnabled(boolean isMetricsEnabled) { this.isMetricsEnabled = isMetricsEnabled; } protected abstract String getDefaultLogConfigFileName(); /** * Returns map with all available loggers for current pod and default values. * @return */ protected Properties getDefaultLogConfig() { Properties properties = new Properties(); String defaultLogConfigFileName = getDefaultLogConfigFileName(); try { properties = getDefaultLoggingProperties(defaultLogConfigFileName); } catch (IOException e) { log.warn("Unable to read default log config from '{}'", defaultLogConfigFileName); } return properties; } /** * Takes resource file containing default log4j properties and returns it as a Properties. * @param defaultConfigResourceFileName name of file, where default log4j properties are stored * @return */ protected Properties getDefaultLoggingProperties(String defaultConfigResourceFileName) throws IOException { Properties defaultSettings = new Properties(); InputStream is = null; try { is = AbstractModel.class.getResourceAsStream("/" + defaultConfigResourceFileName); defaultSettings.load(is); } finally { if (is != null) { is.close(); } } return defaultSettings; } /** * Transforms map to log4j properties file format * @param newSettings map with properties * @return */ protected static String createPropertiesString(Properties newSettings) { StringWriter sw = new StringWriter(); try { newSettings.store(sw, "Do not change this generated file. 
Logging can be configured in the corresponding kubernetes/openshift resource."); } catch (IOException e) { e.printStackTrace(); } // remove date comment, because it is updated with each reconciliation which leads to restarting pods return sw.toString().replaceAll("#[A-Za-z]+ [A-Za-z]+ [0-9]+ [0-9]+:[0-9]+:[0-9]+ [A-Z]+ [0-9]+", ""); } public Logging getLogging() { return logging; } protected void setLogging(Logging logging) { this.logging = logging; } public String parseLogging(Logging logging, ConfigMap externalCm) { if (logging instanceof InlineLogging) { // validate all entries ((InlineLogging) logging).getLoggers().forEach((key, tmpEntry) -> { if (validLoggerFields.containsKey(key)) { // correct logger, test appender appearance for log4j.rootLogger String appender = tmpEntry.replaceAll(" ", ""); if (key.equals("log4j.rootLogger") && !appender.contains(",CONSOLE")) { ((InlineLogging) logging).getLoggers().replace(key, tmpEntry + ", CONSOLE"); log.warn("Appender for {} was not set. Using \"{}: {}, CONSOLE\"", key, key, tmpEntry); } } else { // incorrect logger log.warn(key + " is not a valid logger"); return; } if (key.toString().contains("log4j.appender.CONSOLE")) { log.warn("You cannot set appender"); return; } if ((asList(validLoggerValues).contains(tmpEntry.toString().replaceAll(",[ ]+CONSOLE", ""))) || (asList(validLoggerValues).contains(tmpEntry))) { // correct value } else { Pattern p = Pattern.compile("\\$\\{(.*)\\}, ([A-Z]+)"); Matcher m = p.matcher(tmpEntry.toString()); String logger = ""; String value = ""; boolean regexMatch = false; while (m.find()) { logger = m.group(1); value = m.group(2); regexMatch = true; } if (regexMatch) { if (!validLoggerFields.containsKey(logger)) { log.warn(logger + " is not a valid logger"); return; } if (!value.equals("CONSOLE")) { log.warn(value + " is not a valid value."); return; } } else { log.warn(tmpEntry + " is not a valid value. Use one of " + Arrays.toString(validLoggerValues)); return; } } }); // update fields otherwise use default values Properties newSettings = getDefaultLogConfig(); newSettings.putAll(((InlineLogging) logging).getLoggers()); return createPropertiesString(newSettings); } else if (logging instanceof ExternalLogging) { if (externalCm != null) { return externalCm.getData().get(getAncillaryConfigMapKeyLogConfig()); } else { log.warn("Configmap " + ((ExternalLogging) getLogging()).getName() + " does not exist. 
Default settings are used"); return createPropertiesString(getDefaultLogConfig()); } } else { // field is not in the cluster CM return createPropertiesString(getDefaultLogConfig()); } } /** * Generates a metrics and logging ConfigMap according to configured defaults * @return The generated ConfigMap */ public ConfigMap generateMetricsAndLogConfigMap(ConfigMap cm) { Map<String, String> data = new HashMap<>(); data.put(getAncillaryConfigMapKeyLogConfig(), parseLogging(getLogging(), cm)); if (isMetricsEnabled()) { HashMap m = new HashMap(); for (Map.Entry<String, Object> entry : getMetricsConfig()) { m.put(entry.getKey(), entry.getValue()); } data.put(ANCILLARY_CM_KEY_METRICS, new JsonObject(m).toString()); } return createConfigMap(getAncillaryConfigName(), data); } public String getLogConfigName() { return logConfigName; } /** * Sets name of field in cluster config map, where logging configuration is stored * @param logConfigName */ protected void setLogConfigName(String logConfigName) { this.logConfigName = logConfigName; } protected Iterable<Map.Entry<String, Object>> getMetricsConfig() { return metricsConfig; } protected void setMetricsConfig(Iterable<Map.Entry<String, Object>> metricsConfig) { this.metricsConfig = metricsConfig; } /** * Returns name of config map used for storing metrics and logging configuration * @return */ public String getAncillaryConfigName() { return ancillaryConfigName; } protected void setMetricsConfigName(String metricsAndLogsConfigName) { this.ancillaryConfigName = metricsAndLogsConfigName; } protected List<EnvVar> getEnvVars() { return null; } public Storage getStorage() { return storage; } protected void setStorage(Storage storage) { this.storage = storage; } /** * Returns the Configuration object which is passed to the cluster as EnvVar * * @return Configuration object with cluster configuration */ public AbstractConfiguration getConfiguration() { return configuration; } /** * Set the configuration object which might be passed to the cluster as EnvVar * * @param configuration Configuration object with cluster configuration */ protected void setConfiguration(AbstractConfiguration configuration) { this.configuration = configuration; } public String getVolumeName() { return this.VOLUME_NAME; } public String getImage() { return this.image; } /** * @return the service account used by the deployed cluster for Kubernetes/OpenShift API operations */ protected String getServiceAccountName() { return null; } /** * @return the cluster name */ public String getCluster() { return cluster; } public String getPersistentVolumeClaimName(int podId) { return getPersistentVolumeClaimName(name, podId); } public static String getPersistentVolumeClaimName(String kafkaClusterName, int podId) { return VOLUME_NAME + "-" + kafkaClusterName + "-" + podId; } public String getPodName(int podId) { return name + "-" + podId; } /** * Sets the affinity as configured by the user in the cluster CR * @param affinity */ protected void setUserAffinity(Affinity affinity) { this.userAffinity = affinity; } /** * Gets the affinity as configured by the user in the cluster CR */ protected Affinity getUserAffinity() { return this.userAffinity; } /** * Gets the tolerations as configured by the user in the cluster CR */ public List<Toleration> getTolerations() { return tolerations; } /** * Sets the tolerations as configured by the user in the cluster CR * * @param tolerations */ public void setTolerations(List<Toleration> tolerations) { this.tolerations = tolerations; } /** * Gets the affinity to use in a 
template Pod (in a StatefulSet, or Deployment). * In general this may include extra rules than just the {@link #userAffinity}. * By default it is just the {@link #userAffinity}. */ protected Affinity getMergedAffinity() { return getUserAffinity(); } /** * @return a list of init containers to add to the StatefulSet/Deployment */ protected List<Container> getInitContainers() { return null; } /** * @return a list of containers to add to the StatefulSet/Deployment */ protected abstract List<Container> getContainers(); protected VolumeMount createVolumeMount(String name, String path) { VolumeMount volumeMount = new VolumeMountBuilder() .withName(name) .withMountPath(path) .build(); log.trace("Created volume mount {}", volumeMount); return volumeMount; } protected ContainerPort createContainerPort(String name, int port, String protocol) { ContainerPort containerPort = new ContainerPortBuilder() .withName(name) .withProtocol(protocol) .withContainerPort(port) .build(); log.trace("Created container port {}", containerPort); return containerPort; } protected ServicePort createServicePort(String name, int port, int targetPort, String protocol) { ServicePort servicePort = new ServicePortBuilder() .withName(name) .withProtocol(protocol) .withPort(port) .withNewTargetPort(targetPort) .build(); log.trace("Created service port {}", servicePort); return servicePort; } protected PersistentVolumeClaim createPersistentVolumeClaim(String name) { PersistentClaimStorage storage = (PersistentClaimStorage) this.storage; Map<String, Quantity> requests = new HashMap<>(); requests.put("storage", new Quantity(storage.getSize(), null)); LabelSelector selector = null; if (storage.getSelector() != null && !storage.getSelector().isEmpty()) { selector = new LabelSelector(null, storage.getSelector()); } PersistentVolumeClaimBuilder pvcb = new PersistentVolumeClaimBuilder() .withNewMetadata() .withName(name) .endMetadata() .withNewSpec() .withAccessModes("ReadWriteOnce") .withNewResources() .withRequests(requests) .endResources() .withStorageClassName(storage.getStorageClass()) .withSelector(selector) .endSpec(); return pvcb.build(); } protected Volume createEmptyDirVolume(String name) { Volume volume = new VolumeBuilder() .withName(name) .withNewEmptyDir() .endEmptyDir() .build(); log.trace("Created emptyDir Volume named '{}'", name); return volume; } protected Volume createConfigMapVolume(String name, String configMapName) { ConfigMapVolumeSource configMapVolumeSource = new ConfigMapVolumeSourceBuilder() .withName(configMapName) .build(); Volume volume = new VolumeBuilder() .withName(name) .withConfigMap(configMapVolumeSource) .build(); log.trace("Created configMap Volume named '{}' with source configMap '{}'", name, configMapName); return volume; } protected ConfigMap createConfigMap(String name, Map<String, String> data) { return new ConfigMapBuilder() .withNewMetadata() .withName(name) .withNamespace(namespace) .withLabels(labels.toMap()) .withOwnerReferences(createOwnerReference()) .endMetadata() .withData(data) .build(); } protected Volume createSecretVolume(String name, String secretName) { SecretVolumeSource secretVolumeSource = new SecretVolumeSourceBuilder() .withSecretName(secretName) .build(); Volume volume = new VolumeBuilder() .withName(name) .withSecret(secretVolumeSource) .build(); log.trace("Created secret Volume named '{}' with source secret '{}'", name, secretName); return volume; } protected Secret createSecret(String name, Map<String, String> data) { Secret s = new SecretBuilder() .withNewMetadata() 
.withName(name) .withNamespace(namespace) .withLabels(labels.toMap()) .withOwnerReferences(createOwnerReference()) .endMetadata() .withData(data) .build(); return s; } protected Probe createExecProbe(String command, int initialDelay, int timeout) { Probe probe = new ProbeBuilder().withNewExec() .withCommand(command) .endExec() .withInitialDelaySeconds(initialDelay) .withTimeoutSeconds(timeout) .build(); log.trace("Created exec probe {}", probe); return probe; } protected Probe createTcpSocketProbe(int port, int initialDelay, int timeout) { Probe probe = new ProbeBuilder() .withNewTcpSocket() .withNewPort() .withIntVal(port) .endPort() .endTcpSocket() .withInitialDelaySeconds(initialDelay) .withTimeoutSeconds(timeout) .build(); log.trace("Created TCP socket probe {}", probe); return probe; } protected Probe createHttpProbe(String path, String port, int initialDelay, int timeout) { Probe probe = new ProbeBuilder().withNewHttpGet() .withPath(path) .withNewPort(port) .endHttpGet() .withInitialDelaySeconds(initialDelay) .withTimeoutSeconds(timeout) .build(); log.trace("Created http probe {}", probe); return probe; } protected Service createService(String type, List<ServicePort> ports) { return createService(type, ports, Collections.emptyMap()); } protected Service createService(String type, List<ServicePort> ports, Map<String, String> annotations) { return createService(serviceName, type, ports, getLabelsWithName(serviceName), getSelectorLabels(), annotations); } protected Service createService(String name, String type, List<ServicePort> ports, Map<String, String> labels, Map<String, String> selector, Map<String, String> annotations) { Service service = new ServiceBuilder() .withNewMetadata() .withName(name) .withLabels(labels) .withNamespace(namespace) .withAnnotations(annotations) .withOwnerReferences(createOwnerReference()) .endMetadata() .withNewSpec() .withType(type) .withSelector(selector) .withPorts(ports) .endSpec() .build(); log.trace("Created service {}", service); return service; } protected Service createHeadlessService(List<ServicePort> ports) { return createHeadlessService(ports, Collections.emptyMap()); } protected Service createHeadlessService(List<ServicePort> ports, Map<String, String> annotations) { Service service = new ServiceBuilder() .withNewMetadata() .withName(headlessServiceName) .withLabels(getLabelsWithName(headlessServiceName)) .withNamespace(namespace) .withAnnotations(annotations) .withOwnerReferences(createOwnerReference()) .endMetadata() .withNewSpec() .withType("ClusterIP") .withClusterIP("None") .withSelector(getSelectorLabels()) .withPorts(ports) .endSpec() .build(); log.trace("Created headless service {}", service); return service; } protected StatefulSet createStatefulSet( List<Volume> volumes, List<PersistentVolumeClaim> volumeClaims, List<VolumeMount> volumeMounts, Affinity affinity, List<Container> initContainers, List<Container> containers, boolean isOpenShift) { Map<String, String> annotations = new HashMap<>(); annotations.put(DELETE_CLAIM_ANNOTATION, String.valueOf(storage instanceof PersistentClaimStorage && ((PersistentClaimStorage) storage).isDeleteClaim())); List<Container> initContainersInternal = new ArrayList<>(); PodSecurityContext securityContext = null; // if a persistent volume claim is requested and the running cluster is a Kubernetes one // there is an hack on volume mounting which needs an "init-container" if (this.storage instanceof PersistentClaimStorage && !isOpenShift) { String chown = String.format("chown -R %d:%d %s", 
AbstractModel.VOLUME_MOUNT_HACK_GROUPID, AbstractModel.VOLUME_MOUNT_HACK_GROUPID, volumeMounts.get(0).getMountPath()); Container initContainer = new ContainerBuilder() .withName(AbstractModel.VOLUME_MOUNT_HACK_NAME) .withImage(AbstractModel.VOLUME_MOUNT_HACK_IMAGE) .withVolumeMounts(volumeMounts.get(0)) .withCommand("sh", "-c", chown) .build(); initContainersInternal.add(initContainer); securityContext = new PodSecurityContextBuilder() .withFsGroup(AbstractModel.VOLUME_MOUNT_HACK_GROUPID) .build(); } // add all the other init containers provided by the specific model implementation if (initContainers != null) { initContainersInternal.addAll(initContainers); } StatefulSet statefulSet = new StatefulSetBuilder() .withNewMetadata() .withName(name) .withLabels(getLabelsWithName()) .withNamespace(namespace) .withAnnotations(annotations) .withOwnerReferences(createOwnerReference()) .endMetadata() .withNewSpec() .withPodManagementPolicy("Parallel") .withUpdateStrategy(new StatefulSetUpdateStrategyBuilder().withType("OnDelete").build()) .withSelector(new LabelSelectorBuilder().withMatchLabels(getSelectorLabels()).build()) .withServiceName(headlessServiceName) .withReplicas(replicas) .withNewTemplate() .withNewMetadata() .withName(name) .withLabels(getLabelsWithName()) .endMetadata() .withNewSpec() .withServiceAccountName(getServiceAccountName()) .withAffinity(affinity) .withSecurityContext(securityContext) .withInitContainers(initContainersInternal) .withContainers(containers) .withVolumes(volumes) .withTolerations(getTolerations()) .endSpec() .endTemplate() .withVolumeClaimTemplates(volumeClaims) .endSpec() .build(); return statefulSet; } protected Deployment createDeployment( DeploymentStrategy updateStrategy, Map<String, String> deploymentAnnotations, Map<String, String> podAnnotations, Affinity affinity, List<Container> initContainers, List<Container> containers, List<Volume> volumes) { Deployment dep = new DeploymentBuilder() .withNewMetadata() .withName(name) .withLabels(getLabelsWithName()) .withNamespace(namespace) .withAnnotations(deploymentAnnotations) .withOwnerReferences(createOwnerReference()) .endMetadata() .withNewSpec() .withStrategy(updateStrategy) .withReplicas(replicas) .withSelector(new LabelSelectorBuilder().withMatchLabels(getSelectorLabels()).build()) .withNewTemplate() .withNewMetadata() .withLabels(getLabelsWithName()) .withAnnotations(podAnnotations) .endMetadata() .withNewSpec() .withAffinity(affinity) .withServiceAccountName(getServiceAccountName()) .withInitContainers(initContainers) .withContainers(containers) .withVolumes(volumes) .withTolerations(getTolerations()) .endSpec() .endTemplate() .endSpec() .build(); return dep; } /** * Build an environment variable instance with the provided name and value * * @param name The name of the environment variable * @param value The value of the environment variable * @return The environment variable instance */ protected static EnvVar buildEnvVar(String name, String value) { return new EnvVarBuilder().withName(name).withValue(value).build(); } /** * Build an environment variable instance with the provided name from a field reference * using Downward API * * @param name The name of the environment variable * @param field The field path from which getting the value * @return The environment variable instance */ protected static EnvVar buildEnvVarFromFieldRef(String name, String field) { EnvVarSource envVarSource = new EnvVarSourceBuilder() .withNewFieldRef() .withFieldPath(field) .endFieldRef() .build(); return new 
EnvVarBuilder().withName(name).withValueFrom(envVarSource).build(); } /** * Gets the given container's environment. */ public static Map<String, String> containerEnvVars(Container container) { return container.getEnv().stream().collect( Collectors.toMap(EnvVar::getName, EnvVar::getValue, // On duplicates, last in wins (u, v) -> v)); } public static ResourceRequirements resources(Resources resources) { if (resources != null) { ResourceRequirementsBuilder builder = new ResourceRequirementsBuilder(); CpuMemory limits = resources.getLimits(); if (limits != null && limits.milliCpuAsInt() > 0) { builder.addToLimits("cpu", new Quantity(normalizeCpu(limits.getMilliCpu()))); } if (limits != null && limits.memoryAsLong() > 0) { builder.addToLimits("memory", new Quantity(normalizeMemory(limits.getMemory()))); } CpuMemory requests = resources.getRequests(); if (requests != null && requests.milliCpuAsInt() > 0) { builder.addToRequests("cpu", new Quantity(normalizeCpu(requests.getMilliCpu()))); } if (requests != null && requests.memoryAsLong() > 0) { builder.addToRequests("memory", new Quantity(normalizeMemory(requests.getMemory()))); } return builder.build(); } return null; } public void setResources(Resources resources) { this.resources = resources; } public Resources getResources() { return resources; } public void setJvmOptions(JvmOptions jvmOptions) { this.jvmOptions = jvmOptions; } /** * Adds KAFKA_HEAP_OPTS variable to the EnvVar list if any heap related options were specified. * * @param envVars List of Environment Variables */ protected void heapOptions(List<EnvVar> envVars, double dynamicHeapFraction, long dynamicHeapMaxBytes) { StringBuilder kafkaHeapOpts = new StringBuilder(); String xms = jvmOptions != null ? jvmOptions.getXms() : null; if (xms != null) { kafkaHeapOpts.append("-Xms").append(xms); } String xmx = jvmOptions != null ? jvmOptions.getXmx() : null; if (xmx != null) { // Honour explicit max heap kafkaHeapOpts.append(' ').append("-Xmx").append(xmx); } else { // Otherwise delegate to the container to figure out // Using whatever cgroup memory limit has been set by the k8s infra envVars.add(buildEnvVar(ENV_VAR_DYNAMIC_HEAP_FRACTION, Double.toString(dynamicHeapFraction))); if (dynamicHeapMaxBytes > 0) { envVars.add(buildEnvVar(ENV_VAR_DYNAMIC_HEAP_MAX, Long.toString(dynamicHeapMaxBytes))); } } String trim = kafkaHeapOpts.toString().trim(); if (!trim.isEmpty()) { envVars.add(buildEnvVar(ENV_VAR_KAFKA_HEAP_OPTS, trim)); } } /** * Adds KAFKA_JVM_PERFORMANCE_OPTS variable to the EnvVar list if any performance related options were specified. * * @param envVars List of Environment Variables */ protected void jvmPerformanceOptions(List<EnvVar> envVars) { StringBuilder jvmPerformanceOpts = new StringBuilder(); Boolean server = jvmOptions != null ? jvmOptions.isServer() : null; if (server != null && server) { jvmPerformanceOpts.append("-server"); } Map<String, String> xx = jvmOptions != null ? 
jvmOptions.getXx() : null; if (xx != null) { xx.forEach((k, v) -> { jvmPerformanceOpts.append(' ').append("-XX:"); if ("true".equalsIgnoreCase(v)) { jvmPerformanceOpts.append("+").append(k); } else if ("false".equalsIgnoreCase(v)) { jvmPerformanceOpts.append("-").append(k); } else { jvmPerformanceOpts.append(k).append("=").append(v); } }); } String trim = jvmPerformanceOpts.toString().trim(); if (!trim.isEmpty()) { envVars.add(buildEnvVar(ENV_VAR_KAFKA_JVM_PERFORMANCE_OPTS, trim)); } } /** * Decode from Base64 a keyed value from a Secret * * @param secret Secret from which decoding the value * @param key Key of the value to decode * @return decoded value */ protected byte[] decodeFromSecret(Secret secret, String key) { return Base64.getDecoder().decode(secret.getData().get(key)); } /** * Copy already existing certificates from provided Secret based on number of effective replicas * and maybe generate new ones for new replicas (i.e. scale-up) * * @param certManager CertManager instance for handling certificates creation * @param secret The Secret from which getting already existing certificates * @param replicasInSecret How many certificates are in the Secret * @param caCert CA certificate to use for signing new certificates * @param podName A function for resolving the Pod name * @return Collection with certificates * @throws IOException */ protected Map<String, CertAndKey> maybeCopyOrGenerateCerts(CertManager certManager, Secret secret, int replicasInSecret, CertAndKey caCert, BiFunction<String, Integer, String> podName) throws IOException { return maybeCopyOrGenerateCerts(certManager, secret, replicasInSecret, caCert, podName, null, Collections.EMPTY_MAP); } /** * Copy already existing certificates from provided Secret based on number of effective replicas * and maybe generate new ones for new replicas (i.e. 
scale-up) * * @param certManager CertManager instance for handling certificates creation * @param secret The Secret from which getting already existing certificates * @param replicasInSecret How many certificates are in the Secret * @param caCert CA certificate to use for signing new certificates * @param podName A function for resolving the Pod name * @param externalBootstrapAddress External address to the bootstrap service * @param externalAddresses Map with external addresses under which the individual pods are available * @return Collection with certificates * @throws IOException */ protected Map<String, CertAndKey> maybeCopyOrGenerateCerts(CertManager certManager, Secret secret, int replicasInSecret, CertAndKey caCert, BiFunction<String, Integer, String> podName, String externalBootstrapAddress, Map<Integer, String> externalAddresses) throws IOException { Map<String, CertAndKey> certs = new HashMap<>(); // copying the minimum number of certificates already existing in the secret // scale up -> it will copy all certificates // scale down -> it will copy just the requested number of replicas for (int i = 0; i < Math.min(replicasInSecret, replicas); i++) { log.debug("{} already exists", podName.apply(cluster, i)); certs.put( podName.apply(cluster, i), new CertAndKey( decodeFromSecret(secret, podName.apply(cluster, i) + ".key"), decodeFromSecret(secret, podName.apply(cluster, i) + ".crt"))); } File brokerCsrFile = File.createTempFile("tls", "broker-csr"); File brokerKeyFile = File.createTempFile("tls", "broker-key"); File brokerCertFile = File.createTempFile("tls", "broker-cert"); // generate the missing number of certificates // scale up -> generate new certificates for added replicas // scale down -> does nothing for (int i = replicasInSecret; i < replicas; i++) { log.debug("{} to generate", podName.apply(cluster, i)); Subject sbj = new Subject(); sbj.setOrganizationName("io.strimzi"); sbj.setCommonName(getName()); Map<String, String> sbjAltNames = new HashMap<>(); sbjAltNames.put("DNS.1", getServiceName()); sbjAltNames.put("DNS.2", String.format("%s.%s.svc.%s", getServiceName(), namespace, KUBERNETES_SERVICE_DNS_DOMAIN)); sbjAltNames.put("DNS.3", String.format("%s.%s.%s.svc.%s", podName.apply(cluster, i), getHeadlessServiceName(), namespace, KUBERNETES_SERVICE_DNS_DOMAIN)); int nextDnsId = 4; if (externalBootstrapAddress != null) { sbjAltNames.put("DNS." + nextDnsId, externalBootstrapAddress); nextDnsId++; } if (externalAddresses.get(i) != null) { sbjAltNames.put("DNS." 
+ nextDnsId, externalAddresses.get(i)); nextDnsId++; } sbj.setSubjectAltNames(sbjAltNames); certManager.generateCsr(brokerKeyFile, brokerCsrFile, sbj); certManager.generateCert(brokerCsrFile, caCert.key(), caCert.cert(), brokerCertFile, sbj, CERTS_EXPIRATION_DAYS); certs.put(podName.apply(cluster, i), new CertAndKey(Files.readAllBytes(brokerKeyFile.toPath()), Files.readAllBytes(brokerCertFile.toPath()))); } if (!brokerCsrFile.delete()) { log.warn("{} cannot be deleted", brokerCsrFile.getName()); } if (!brokerKeyFile.delete()) { log.warn("{} cannot be deleted", brokerKeyFile.getName()); } if (!brokerCertFile.delete()) { log.warn("{} cannot be deleted", brokerCertFile.getName()); } return certs; } /** * Generate the OwnerReference object to link newly created objects to their parent (the custom resource) * * @return The OwnerReference object */ protected OwnerReference createOwnerReference() { return new OwnerReferenceBuilder() .withApiVersion(ownerApiVersion) .withKind(ownerKind) .withName(cluster) .withUid(ownerUid) .withBlockOwnerDeletion(false) .withController(false) .build(); } /** * Set fields needed to generate the OwnerReference object * * @param parent The resource which should be used as parent. It will be used to gather the data needed for generating OwnerReferences. */ protected void setOwnerReference(HasMetadata parent) { this.ownerApiVersion = parent.getApiVersion(); this.ownerKind = parent.getKind(); this.ownerUid = parent.getMetadata().getUid(); } public static boolean deleteClaim(StatefulSet ss) { if (!ss.getSpec().getVolumeClaimTemplates().isEmpty() && ss.getMetadata().getAnnotations() != null) { return Boolean.valueOf(ss.getMetadata().getAnnotations().computeIfAbsent(DELETE_CLAIM_ANNOTATION, s -> "false")); } else { return false; } } /** * Generates a Map with Prometheus annotations * * @return Map with Prometheus annotations using the default port (9404) and path (/metrics) */ protected Map<String, String> getPrometheusAnnotations() { Map<String, String> annotations = new HashMap<String, String>(3); annotations.put("prometheus.io/port", String.valueOf(METRICS_PORT)); annotations.put("prometheus.io/scrape", "true"); annotations.put("prometheus.io/path", "/metrics"); return annotations; } String getAncillaryConfigMapKeyLogConfig() { return ANCILLARY_CM_KEY_LOG_CONFIG; } public static String getClusterCaName(String cluster) { return cluster + "-cluster-ca"; } }
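A brief, hedged illustration of the environment-variable helpers above: the standalone sketch below builds the same two kinds of EnvVar (a plain name/value pair and a Downward API field reference) with the fabric8 builders used throughout this class. The class name EnvVarSketch, the variable names, and the example values are assumptions for illustration only, not part of the original file.

// Minimal sketch of the EnvVar shapes produced by buildEnvVar(...) and buildEnvVarFromFieldRef(...).
import io.fabric8.kubernetes.api.model.EnvVar;
import io.fabric8.kubernetes.api.model.EnvVarBuilder;
import io.fabric8.kubernetes.api.model.EnvVarSource;
import io.fabric8.kubernetes.api.model.EnvVarSourceBuilder;

import java.util.ArrayList;
import java.util.List;

public class EnvVarSketch {
    public static void main(String[] args) {
        List<EnvVar> envVars = new ArrayList<>();

        // Plain name/value pair, the shape produced by buildEnvVar(name, value).
        envVars.add(new EnvVarBuilder()
                .withName("KAFKA_HEAP_OPTS")
                .withValue("-Xms128M -Xmx1G")    // illustrative value only
                .build());

        // Downward API field reference, the shape produced by buildEnvVarFromFieldRef(name, field).
        EnvVarSource namespaceRef = new EnvVarSourceBuilder()
                .withNewFieldRef()
                    .withFieldPath("metadata.namespace")
                .endFieldRef()
                .build();
        envVars.add(new EnvVarBuilder()
                .withName("MY_NAMESPACE")        // hypothetical variable name
                .withValueFrom(namespaceRef)
                .build());

        // Print the variable names that would be injected into the container spec.
        envVars.forEach(e -> System.out.println(e.getName()));
    }
}

The fieldRef variant mirrors how the class injects pod metadata (e.g. the namespace) at runtime via the Downward API rather than hard-coding it.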
[]
[]
[]
[]
[]
java
0
0
nemo/collections/tts/parts/helpers.py
# Copyright (c) 2019 NVIDIA Corporation import librosa import matplotlib.pylab as plt import numpy as np import torch from nemo.utils import logging __all__ = [ "waveglow_log_to_tb_func", "waveglow_process_eval_batch", "waveglow_eval_log_to_tb_func", "tacotron2_log_to_tb_func", "tacotron2_process_eval_batch", "tacotron2_process_final_eval", "tacotron2_eval_log_to_tb_func", ] def waveglow_log_to_tb_func( swriter, tensors, step, tag="train", log_images=False, log_images_freq=1, n_fft=1024, hop_length=256, window="hann", mel_fb=None, ): loss, audio_pred, spec_target, mel_length = tensors if loss: swriter.add_scalar("loss", loss, step) if log_images and step % log_images_freq == 0: mel_length = mel_length[0] spec_target = spec_target[0].data.cpu().numpy()[:, :mel_length] swriter.add_image( f"{tag}_mel_target", plot_spectrogram_to_numpy(spec_target), step, dataformats="HWC", ) if mel_fb is not None: mag, _ = librosa.core.magphase( librosa.core.stft( np.nan_to_num(audio_pred[0].cpu().detach().numpy()), n_fft=n_fft, hop_length=hop_length, window=window, ) ) mel_pred = np.matmul(mel_fb.cpu().numpy(), mag).squeeze() log_mel_pred = np.log(np.clip(mel_pred, a_min=1e-5, a_max=None)) swriter.add_image( f"{tag}_mel_predicted", plot_spectrogram_to_numpy(log_mel_pred[:, :mel_length]), step, dataformats="HWC", ) def waveglow_process_eval_batch(tensors: dict, global_vars: dict): if 'tensorboard' not in global_vars.keys(): global_vars['tensorboard'] = {} for k, v in tensors.items(): if k.startswith("processed_signal"): global_vars['tensorboard']['mel_target'] = v[0] if k.startswith("audio"): global_vars['tensorboard']['audio_pred'] = v[0] if k.startswith("processed_length"): global_vars['tensorboard']['mel_length'] = v[0] def waveglow_eval_log_to_tb_func( swriter, global_vars, step, tag=None, n_fft=1024, hop_length=256, window="hann", mel_fb=None, ): spec_target = global_vars['tensorboard']["mel_target"] audio_pred = global_vars['tensorboard']["audio_pred"] mel_length = global_vars['tensorboard']['mel_length'] waveglow_log_to_tb_func( swriter, [None, audio_pred, spec_target, mel_length], step, tag=tag, log_images=True, n_fft=n_fft, hop_length=hop_length, window=window, mel_fb=mel_fb, ) def tacotron2_log_to_tb_func(swriter, tensors, step, tag="train", log_images=False, log_images_freq=1): loss, spec_target, mel_postnet, gate, gate_target, alignments = tensors if loss: swriter.add_scalar("loss", loss, step) if log_images and step % log_images_freq == 0: swriter.add_image( f"{tag}_alignment", plot_alignment_to_numpy(alignments[0].data.cpu().numpy().T), step, dataformats="HWC", ) swriter.add_image( f"{tag}_mel_target", plot_spectrogram_to_numpy(spec_target[0].data.cpu().numpy()), step, dataformats="HWC", ) swriter.add_image( f"{tag}_mel_predicted", plot_spectrogram_to_numpy(mel_postnet[0].data.cpu().numpy()), step, dataformats="HWC", ) swriter.add_image( f"{tag}_gate", plot_gate_outputs_to_numpy(gate_target[0].data.cpu().numpy(), torch.sigmoid(gate[0]).data.cpu().numpy(),), step, dataformats="HWC", ) def tacotron2_process_eval_batch(tensors: dict, global_vars: dict): if 'EvalLoss' not in global_vars.keys(): global_vars['EvalLoss'] = [] if 'tensorboard' not in global_vars.keys(): global_vars['tensorboard'] = {} for k, v in tensors.items(): if k.startswith("processed_signal"): global_vars['tensorboard']['mel_target'] = v[0] if k.startswith("mel_output"): global_vars['tensorboard']['mel_pred'] = v[0] if k.startswith("gate_output"): global_vars['tensorboard']['gate'] = v[0] if k.startswith("alignments"): 
global_vars['tensorboard']['alignments'] = v[0] if k.startswith("gate_target"): global_vars['tensorboard']['gate_target'] = v[0] for k in tensors.keys(): if k.startswith("loss"): loss_key = k global_vars['EvalLoss'].append(torch.mean(torch.stack(tensors[loss_key]))) def tacotron2_process_final_eval(global_vars: dict, tag=None): eloss = torch.mean(torch.stack(global_vars['EvalLoss'])).item() global_vars['EvalLoss'] = eloss logging.info(f"==========>>>>>>Evaluation Loss {tag}: {eloss}") return global_vars def tacotron2_eval_log_to_tb_func(swriter, global_vars, step, tag=None): spec_target = global_vars['tensorboard']["mel_target"] mel_postnet = global_vars['tensorboard']["mel_pred"] gate = global_vars['tensorboard']["gate"] gate_target = global_vars['tensorboard']["gate_target"] alignments = global_vars['tensorboard']["alignments"] swriter.add_scalar(f"{tag}.loss", global_vars['EvalLoss'], step) tacotron2_log_to_tb_func( swriter, [None, spec_target, mel_postnet, gate, gate_target, alignments], step, tag=tag, log_images=True, ) def save_figure_to_numpy(fig): # save it to a numpy array. data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) return data def plot_alignment_to_numpy(alignment, info=None): fig, ax = plt.subplots(figsize=(6, 4)) im = ax.imshow(alignment, aspect='auto', origin='lower', interpolation='none') fig.colorbar(im, ax=ax) xlabel = 'Decoder timestep' if info is not None: xlabel += '\n\n' + info plt.xlabel(xlabel) plt.ylabel('Encoder timestep') plt.tight_layout() fig.canvas.draw() data = save_figure_to_numpy(fig) plt.close() return data def plot_spectrogram_to_numpy(spectrogram): fig, ax = plt.subplots(figsize=(12, 3)) im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation='none') plt.colorbar(im, ax=ax) plt.xlabel("Frames") plt.ylabel("Channels") plt.tight_layout() fig.canvas.draw() data = save_figure_to_numpy(fig) plt.close() return data def plot_gate_outputs_to_numpy(gate_targets, gate_outputs): fig, ax = plt.subplots(figsize=(12, 3)) ax.scatter( range(len(gate_targets)), gate_targets, alpha=0.5, color='green', marker='+', s=1, label='target', ) ax.scatter( range(len(gate_outputs)), gate_outputs, alpha=0.5, color='red', marker='.', s=1, label='predicted', ) plt.xlabel("Frames (Green target, Red predicted)") plt.ylabel("Gate State") plt.tight_layout() fig.canvas.draw() data = save_figure_to_numpy(fig) plt.close() return data
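A brief, hedged usage sketch for the plotting helpers above: the snippet feeds a synthetic mel spectrogram through plot_spectrogram_to_numpy and logs the resulting HWC image with a torch.utils.tensorboard SummaryWriter, mirroring the add_image calls in tacotron2_log_to_tb_func. The log directory, tag, and array shape are illustrative assumptions.

# Minimal usage sketch; not part of the module above, values are synthetic.
import numpy as np
from torch.utils.tensorboard import SummaryWriter

# plot_spectrogram_to_numpy is the helper defined in the module above.
from nemo.collections.tts.parts.helpers import plot_spectrogram_to_numpy

swriter = SummaryWriter(log_dir="/tmp/tts_demo")   # hypothetical log directory
fake_spec = np.random.rand(80, 200)                # (mel channels, frames), synthetic data
swriter.add_image(
    "demo_mel",                                    # hypothetical tag
    plot_spectrogram_to_numpy(fake_spec),          # returns an HWC uint8 image array
    global_step=0,
    dataformats="HWC",
)
swriter.close()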
[]
[]
[]
[]
[]
python
null
null
null
tensorflow/python/keras/_impl/keras/backend.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access # pylint: disable=redefined-outer-name # pylint: disable=redefined-builtin """Keras backend API. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import json import os import numpy as np from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session as session_module from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes as dtypes_module from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.layers import base as tf_base_layers from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import ctc_ops as ctc from tensorflow.python.ops import functional_ops from tensorflow.python.ops import gradients as gradients_module from tensorflow.python.ops import image_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import logging_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import random_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops import variables as variables_module from tensorflow.python.training import moving_averages from tensorflow.python.util import tf_inspect py_all = all py_sum = sum # INTERNAL UTILS # This is the default internal TF session used by Keras. # It can be set manually via `set_session(sess)`. _SESSION = None # This dictionary holds a mapping {graph: learning_phase}. # A learning phase is a bool tensor used to run Keras models in # either train mode (learning_phase == 1) or test mode (learning_phase == 0). _GRAPH_LEARNING_PHASES = {} # This dictionary holds a mapping {graph: UID_DICT}. # each UID_DICT is a dictionary mapping name prefixes to a current index, # used for generating graph-specific string UIDs # for various names (e.g. layer names). _GRAPH_UID_DICTS = {} # This boolean flag can be set to True to leave variable initialization # up to the user. # Change its value via `manual_variable_initialization(value)`. _MANUAL_VAR_INIT = False # The type of float to use throughout a session. _FLOATX = 'float32' # Epsilon fuzz factor used throughout the codebase. _EPSILON = 10e-8 # Default image data format, one of "channels_last", "channels_first". _IMAGE_DATA_FORMAT = 'channels_last' def backend(): """Publicly accessible method for determining the current backend. 
Only exists for API compatibility with multi-backend Keras. Returns: The string "tensorflow". """ return 'tensorflow' def epsilon(): """Returns the value of the fuzz factor used in numeric expressions. Returns: A float. Example: ```python >>> keras.backend.epsilon() 1e-08 ``` """ return _EPSILON def set_epsilon(value): """Sets the value of the fuzz factor used in numeric expressions. Arguments: value: float. New value of epsilon. Example: ```python >>> from keras import backend as K >>> K.epsilon() 1e-08 >>> K.set_epsilon(1e-05) >>> K.epsilon() 1e-05 ``` """ global _EPSILON _EPSILON = value def floatx(): """Returns the default float type, as a string. E.g. 'float16', 'float32', 'float64'. Returns: String, the current default float type. Example: ```python >>> keras.backend.floatx() 'float32' ``` """ return _FLOATX def set_floatx(value): """Sets the default float type. Arguments: value: String; 'float16', 'float32', or 'float64'. Example: ```python >>> from keras import backend as K >>> K.floatx() 'float32' >>> K.set_floatx('float16') >>> K.floatx() 'float16' ``` Raises: ValueError: In case of invalid value. """ global _FLOATX if value not in {'float16', 'float32', 'float64'}: raise ValueError('Unknown floatx type: ' + str(value)) _FLOATX = str(value) def cast_to_floatx(x): """Cast a Numpy array to the default Keras float type. Arguments: x: Numpy array. Returns: The same Numpy array, cast to its new type. Example: ```python >>> from keras import backend as K >>> K.floatx() 'float32' >>> arr = numpy.array([1.0, 2.0], dtype='float64') >>> arr.dtype dtype('float64') >>> new_arr = K.cast_to_floatx(arr) >>> new_arr array([ 1., 2.], dtype=float32) >>> new_arr.dtype dtype('float32') ``` """ return np.asarray(x, dtype=_FLOATX) def image_data_format(): """Returns the default image data format convention. Returns: A string, either `'channels_first'` or `'channels_last'` Example: ```python >>> keras.backend.image_data_format() 'channels_first' ``` """ return _IMAGE_DATA_FORMAT def set_image_data_format(data_format): """Sets the value of the image data format convention. Arguments: data_format: string. `'channels_first'` or `'channels_last'`. Example: ```python >>> from keras import backend as K >>> K.image_data_format() 'channels_first' >>> K.set_image_data_format('channels_last') >>> K.image_data_format() 'channels_last' ``` Raises: ValueError: In case of invalid `data_format` value. """ global _IMAGE_DATA_FORMAT if data_format not in {'channels_last', 'channels_first'}: raise ValueError('Unknown data_format:', data_format) _IMAGE_DATA_FORMAT = str(data_format) def get_uid(prefix=''): """Associates a string prefix with an integer counter in a TensorFlow graph. Arguments: prefix: String prefix to index. Returns: Unique integer ID. Example: ``` >>> get_uid('dense') 1 >>> get_uid('dense') 2 ``` """ graph = ops.get_default_graph() if graph not in tf_base_layers.PER_GRAPH_LAYER_NAME_UIDS: tf_base_layers.PER_GRAPH_LAYER_NAME_UIDS[graph] = collections.defaultdict( int) layer_name_uids = tf_base_layers.PER_GRAPH_LAYER_NAME_UIDS[graph] layer_name_uids[prefix] += 1 return layer_name_uids[prefix] def reset_uids(): per_graph_layer_name_uids = tf_base_layers.PER_GRAPH_LAYER_NAME_UIDS keys = list(per_graph_layer_name_uids.keys()) for key in keys: del per_graph_layer_name_uids[key] def clear_session(): """Destroys the current TF graph and creates a new one. Useful to avoid clutter from old models / layers. 
""" global _SESSION global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned ops.reset_default_graph() reset_uids() _SESSION = None phase = array_ops.placeholder(dtype='bool', name='keras_learning_phase') _GRAPH_LEARNING_PHASES = {} _GRAPH_LEARNING_PHASES[ops.get_default_graph()] = phase def manual_variable_initialization(value): """Sets the manual variable initialization flag. This boolean flag determines whether variables should be initialized as they are instantiated (default), or if the user should handle the initialization (e.g. via `tf.initialize_all_variables()`). Arguments: value: Python boolean. """ global _MANUAL_VAR_INIT _MANUAL_VAR_INIT = value def learning_phase(): """Returns the learning phase flag. The learning phase flag is a bool tensor (0 = test, 1 = train) to be passed as input to any Keras function that uses a different behavior at train time and test time. Returns: Learning phase (scalar integer tensor or Python integer). """ graph = ops.get_default_graph() if graph not in _GRAPH_LEARNING_PHASES: phase = array_ops.placeholder(dtype='bool', name='keras_learning_phase') _GRAPH_LEARNING_PHASES[graph] = phase return _GRAPH_LEARNING_PHASES[graph] def set_learning_phase(value): """Sets the learning phase to a fixed value. Arguments: value: Learning phase value, either 0 or 1 (integers). Raises: ValueError: if `value` is neither `0` nor `1`. """ global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned if value not in {0, 1}: raise ValueError('Expected learning phase to be ' '0 or 1.') _GRAPH_LEARNING_PHASES[ops.get_default_graph()] = value def get_session(): """Returns the TF session to be used by the backend. If a default TensorFlow session is available, we will return it. Else, we will return the global Keras session. If no global Keras session exists at this point: we will create a new global session. Note that you can manually set the global session via `K.set_session(sess)`. Returns: A TensorFlow session. """ global _SESSION if ops.get_default_session() is not None: session = ops.get_default_session() else: if _SESSION is None: if not os.environ.get('OMP_NUM_THREADS'): config = config_pb2.ConfigProto(allow_soft_placement=True) else: num_thread = int(os.environ.get('OMP_NUM_THREADS')) config = config_pb2.ConfigProto( intra_op_parallelism_threads=num_thread, allow_soft_placement=True) _SESSION = session_module.Session(config=config) session = _SESSION if not _MANUAL_VAR_INIT: with session.graph.as_default(): _initialize_variables(session) return session def set_session(session): """Sets the global TensorFlow session. Arguments: session: A TF Session. """ global _SESSION _SESSION = session # DEVICE MANIPULATION class _TfDeviceCaptureOp(object): """Class for capturing the TF device scope.""" def __init__(self): self.device = None def _set_device(self, device): """This method captures TF's explicit device scope setting.""" self.device = device def _get_current_tf_device(): """Return explicit device of current context, otherwise returns `None`. Returns: If the current device scope is explicitly set, it returns a string with the device (`CPU` or `GPU`). If the scope is not explicitly set, it will return `None`. """ g = ops.get_default_graph() op = _TfDeviceCaptureOp() g._apply_device_functions(op) return op.device def _is_current_explicit_device(device_type): """Check if the current device is explicitly set on the device type specified. Arguments: device_type: A string containing `GPU` or `CPU` (case-insensitive). 
Returns: A boolean indicating if the current device scope is explicitly set on the device type. Raises: ValueError: If the `device_type` string indicates an unsupported device. """ device_type = device_type.upper() if device_type not in ['CPU', 'GPU']: raise ValueError('device_type should be either "CPU" or "GPU".') device = _get_current_tf_device() return device is not None and device.device_type == device_type.upper() def _get_available_gpus(): """Get a list of available gpu devices (formatted as strings). Returns: A list of available GPU devices. """ devices = get_session().list_devices() return [x.name for x in devices if x.device_type == 'GPU'] def _has_nchw_support(): """Check whether the current scope supports NCHW ops. Tensorflow does not support NCHW on CPU. Therefore we check if we are not explicitly put on CPU, and have GPUs available. In this case there will be soft-placing on the GPU device. Returns: bool: if the current scope device placement would support nchw """ explicitly_on_cpu = _is_current_explicit_device('CPU') gpus_available = bool(_get_available_gpus()) return not explicitly_on_cpu and gpus_available # VARIABLE MANIPULATION def _to_tensor(x, dtype): """Convert the input `x` to a tensor of type `dtype`. Arguments: x: An object to be converted (numpy array, list, tensors). dtype: The destination type. Returns: A tensor. """ return ops.convert_to_tensor(x, dtype=dtype) def is_sparse(tensor): """Returns whether a tensor is a sparse tensor. Arguments: tensor: A tensor instance. Returns: A boolean. Example: ```python >>> from keras import backend as K >>> a = K.placeholder((2, 2), sparse=False) >>> print(K.is_sparse(a)) False >>> b = K.placeholder((2, 2), sparse=True) >>> print(K.is_sparse(b)) True ``` """ return isinstance(tensor, sparse_tensor.SparseTensor) def to_dense(tensor): """Converts a sparse tensor into a dense tensor and returns it. Arguments: tensor: A tensor instance (potentially sparse). Returns: A dense tensor. Examples: ```python >>> from keras import backend as K >>> b = K.placeholder((2, 2), sparse=True) >>> print(K.is_sparse(b)) True >>> c = K.to_dense(b) >>> print(K.is_sparse(c)) False ``` """ if is_sparse(tensor): return sparse_ops.sparse_tensor_to_dense(tensor) else: return tensor name_scope = ops.name_scope def variable(value, dtype=None, name=None, constraint=None): """Instantiates a variable and returns it. Arguments: value: Numpy array, initial value of the tensor. dtype: Tensor type. name: Optional name string for the tensor. constraint: Optional projection function to be applied to the variable after an optimizer update. Returns: A variable instance (with Keras metadata included). 
Examples: ```python >>> from keras import backend as K >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = K.variable(value=val, dtype='float64', name='example_var') >>> K.dtype(kvar) 'float64' >>> print(kvar) example_var >>> kvar.eval() array([[ 1., 2.], [ 3., 4.]]) ``` """ if dtype is None: dtype = floatx() if hasattr(value, 'tocoo'): sparse_coo = value.tocoo() indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims( sparse_coo.col, 1)), 1) v = sparse_tensor.SparseTensor( indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape) v._keras_shape = sparse_coo.shape v._uses_learning_phase = False return v v = variables_module.Variable( value, dtype=dtypes_module.as_dtype(dtype), name=name, constraint=constraint) if isinstance(value, np.ndarray): v._keras_shape = value.shape elif hasattr(value, 'get_shape'): v._keras_shape = int_shape(value) v._uses_learning_phase = False return v def _initialize_variables(session): """Utility to initialize uninitialized variables on the fly.""" variables = variables_module.global_variables() candidate_vars = [] for v in variables: if not getattr(v, '_keras_initialized', False): candidate_vars.append(v) if candidate_vars: # This step is expensive, so we only run it on variables not already # marked as initialized. is_initialized = session.run( [variables_module.is_variable_initialized(v) for v in candidate_vars]) uninitialized_vars = [] for flag, v in zip(is_initialized, candidate_vars): if not flag: uninitialized_vars.append(v) v._keras_initialized = True if uninitialized_vars: session.run(variables_module.variables_initializer(uninitialized_vars)) def constant(value, dtype=None, shape=None, name=None): """Creates a constant tensor. Arguments: value: A constant value (or list) dtype: The type of the elements of the resulting tensor. shape: Optional dimensions of resulting tensor. name: Optional name for the tensor. Returns: A Constant Tensor. """ if dtype is None: dtype = floatx() return constant_op.constant(value, dtype=dtype, shape=shape, name=name) def is_keras_tensor(x): """Returns whether `x` is a Keras tensor. A "Keras tensor" is a tensor that was returned by a Keras layer, (`Layer` class) or by `Input`. Arguments: x: A candidate tensor. Returns: A boolean: Whether the argument is a Keras tensor. Raises: ValueError: In case `x` is not a symbolic tensor. Examples: ```python >>> from keras import backend as K >>> from keras.layers import Input, Dense >>> np_var = numpy.array([1, 2]) >>> K.is_keras_tensor(np_var) # A numpy array is not a symbolic tensor. ValueError >>> k_var = tf.placeholder('float32', shape=(1,1)) >>> K.is_keras_tensor(k_var) # A variable indirectly created outside of keras is not a Keras tensor. False >>> keras_var = K.variable(np_var) >>> K.is_keras_tensor(keras_var) # A variable created with the keras backend is not a Keras tensor. False >>> keras_placeholder = K.placeholder(shape=(2, 4, 5)) >>> K.is_keras_tensor(keras_placeholder) # A placeholder is not a Keras tensor. False >>> keras_input = Input([10]) >>> K.is_keras_tensor(keras_input) # An Input is a Keras tensor. True >>> keras_layer_output = Dense(10)(keras_input) >>> K.is_keras_tensor(keras_layer_output) # Any Keras layer output is a Keras tensor. True ``` """ if not isinstance(x, (ops.Tensor, variables_module.Variable, sparse_tensor.SparseTensor)): raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) + '`. 
Expected a symbolic tensor instance.') return hasattr(x, '_keras_history') def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None): """Instantiates a placeholder tensor and returns it. Arguments: shape: Shape of the placeholder (integer tuple, may include `None` entries). ndim: Number of axes of the tensor. At least one of {`shape`, `ndim`} must be specified. If both are specified, `shape` is used. dtype: Placeholder type. sparse: Boolean, whether the placeholder should have a sparse type. name: Optional name string for the placeholder. Returns: Tensor instance (with Keras metadata included). Examples: ```python >>> from keras import backend as K >>> input_ph = K.placeholder(shape=(2, 4, 5)) >>> input_ph <tf.Tensor 'Placeholder_4:0' shape=(2, 4, 5) dtype=float32> ``` """ if dtype is None: dtype = floatx() if not shape: if ndim: shape = tuple([None for _ in range(ndim)]) if sparse: x = array_ops.sparse_placeholder(dtype, shape=shape, name=name) else: x = array_ops.placeholder(dtype, shape=shape, name=name) x._uses_learning_phase = False return x def is_placeholder(x): """Returns whether `x` is a placeholder. Arguments: x: A candidate placeholder. Returns: Boolean. """ try: return x.op.type == 'Placeholder' except AttributeError: return False def shape(x): """Returns the symbolic shape of a tensor or variable. Arguments: x: A tensor or variable. Returns: A symbolic shape (which is itself a tensor). Examples: ```python # TensorFlow example >>> from keras import backend as K >>> tf_session = K.get_session() >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = K.variable(value=val) >>> input = keras.backend.placeholder(shape=(2, 4, 5)) >>> K.shape(kvar) <tf.Tensor 'Shape_8:0' shape=(2,) dtype=int32> >>> K.shape(input) <tf.Tensor 'Shape_9:0' shape=(3,) dtype=int32> # To get integer shape (Instead, you can use K.int_shape(x)) >>> K.shape(kvar).eval(session=tf_session) array([2, 2], dtype=int32) >>> K.shape(input).eval(session=tf_session) array([2, 4, 5], dtype=int32) ``` """ return array_ops.shape(x) def int_shape(x): """Returns the shape of tensor or variable as a tuple of int or None entries. Arguments: x: Tensor or variable. Returns: A tuple of integers (or None entries). Examples: ```python >>> from keras import backend as K >>> input = K.placeholder(shape=(2, 4, 5)) >>> K.int_shape(input) (2, 4, 5) >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = K.variable(value=val) >>> K.int_shape(kvar) (2, 2) ``` """ try: return tuple(x.get_shape().as_list()) except ValueError: return None def ndim(x): """Returns the number of axes in a tensor, as an integer. Arguments: x: Tensor or variable. Returns: Integer (scalar), number of axes. Examples: ```python >>> from keras import backend as K >>> input = K.placeholder(shape=(2, 4, 5)) >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = K.variable(value=val) >>> K.ndim(input) 3 >>> K.ndim(kvar) 2 ``` """ dims = x.get_shape()._dims if dims is not None: return len(dims) return None def dtype(x): """Returns the dtype of a Keras tensor or variable, as a string. Arguments: x: Tensor or variable. Returns: String, dtype of `x`. 
Examples: ```python >>> from keras import backend as K >>> K.dtype(K.placeholder(shape=(2,4,5))) 'float32' >>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float32')) 'float32' >>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float64')) 'float64' # Keras variable >>> kvar = K.variable(np.array([[1, 2], [3, 4]])) >>> K.dtype(kvar) 'float32_ref' >>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32') >>> K.dtype(kvar) 'float32_ref' ``` """ return x.dtype.base_dtype.name def eval(x): """Evaluates the value of a variable. Arguments: x: A variable. Returns: A Numpy array. Examples: ```python >>> from keras import backend as K >>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32') >>> K.eval(kvar) array([[ 1., 2.], [ 3., 4.]], dtype=float32) ``` """ return to_dense(x).eval(session=get_session()) def zeros(shape, dtype=None, name=None): """Instantiates an all-zeros variable and returns it. Arguments: shape: Tuple of integers, shape of returned Keras variable dtype: String, data type of returned Keras variable name: String, name of returned Keras variable Returns: A variable (including Keras metadata), filled with `0.0`. Example: ```python >>> from keras import backend as K >>> kvar = K.zeros((3,4)) >>> K.eval(kvar) array([[ 0., 0., 0., 0.], [ 0., 0., 0., 0.], [ 0., 0., 0., 0.]], dtype=float32) ``` """ if dtype is None: dtype = floatx() tf_dtype = dtypes_module.as_dtype(dtype) return variable( init_ops.constant_initializer(0., dtype=tf_dtype)(shape), dtype, name) def ones(shape, dtype=None, name=None): """Instantiates an all-ones tensor variable and returns it. Arguments: shape: Tuple of integers, shape of returned Keras variable. dtype: String, data type of returned Keras variable. name: String, name of returned Keras variable. Returns: A Keras variable, filled with `1.0`. Example: ```python >>> from keras import backend as K >>> kvar = K.ones((3,4)) >>> K.eval(kvar) array([[ 1., 1., 1., 1.], [ 1., 1., 1., 1.], [ 1., 1., 1., 1.]], dtype=float32) ``` """ if dtype is None: dtype = floatx() tf_dtype = dtypes_module.as_dtype(dtype) return variable( init_ops.constant_initializer(1., dtype=tf_dtype)(shape), dtype, name) def eye(size, dtype=None, name=None): """Instantiate an identity matrix and returns it. Arguments: size: Integer, number of rows/columns. dtype: String, data type of returned Keras variable. name: String, name of returned Keras variable. Returns: A Keras variable, an identity matrix. Example: ```python >>> from keras import backend as K >>> kvar = K.eye(3) >>> K.eval(kvar) array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]], dtype=float32) ``` """ if dtype is None: dtype = floatx() tf_dtype = dtypes_module.as_dtype(dtype) return variable(linalg_ops.eye(size, dtype=tf_dtype), dtype, name) def zeros_like(x, dtype=None, name=None): """Instantiates an all-zeros variable of the same shape as another tensor. Arguments: x: Keras variable or Keras tensor. dtype: String, dtype of returned Keras variable. None uses the dtype of x. name: String, name for the variable to create. Returns: A Keras variable with the shape of x filled with zeros. Example: ```python >>> from keras import backend as K >>> kvar = K.variable(np.random.random((2,3))) >>> kvar_zeros = K.zeros_like(kvar) >>> K.eval(kvar_zeros) array([[ 0., 0., 0.], [ 0., 0., 0.]], dtype=float32) ``` """ return array_ops.zeros_like(x, dtype=dtype, name=name) def ones_like(x, dtype=None, name=None): """Instantiates an all-ones variable of the same shape as another tensor. Arguments: x: Keras variable or tensor. 
dtype: String, dtype of returned Keras variable. None uses the dtype of x. name: String, name for the variable to create. Returns: A Keras variable with the shape of x filled with ones. Example: ```python >>> from keras import backend as K >>> kvar = K.variable(np.random.random((2,3))) >>> kvar_ones = K.ones_like(kvar) >>> K.eval(kvar_ones) array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) ``` """ return array_ops.ones_like(x, dtype=dtype, name=name) def identity(x, name=None): """Returns a tensor with the same content as the input tensor. Arguments: x: The input tensor. name: String, name for the variable to create. Returns: A tensor of the same shape, type and content. """ return array_ops.identity(x, name=name) def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None): """Instantiates a variable with values drawn from a uniform distribution. Arguments: shape: Tuple of integers, shape of returned Keras variable. low: Float, lower boundary of the output interval. high: Float, upper boundary of the output interval. dtype: String, dtype of returned Keras variable. name: String, name of returned Keras variable. seed: Integer, random seed. Returns: A Keras variable, filled with drawn samples. Example: ```python # TensorFlow example >>> kvar = K.random_uniform_variable((2,3), 0, 1) >>> kvar <tensorflow.python.ops.variables.Variable object at 0x10ab40b10> >>> K.eval(kvar) array([[ 0.10940075, 0.10047495, 0.476143 ], [ 0.66137183, 0.00869417, 0.89220798]], dtype=float32) ``` """ if dtype is None: dtype = floatx() tf_dtype = dtypes_module.as_dtype(dtype) if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e8) value = init_ops.random_uniform_initializer( low, high, dtype=tf_dtype, seed=seed)(shape) return variable(value, dtype=dtype, name=name) def random_normal_variable(shape, mean, scale, dtype=None, name=None, seed=None): """Instantiates a variable with values drawn from a normal distribution. Arguments: shape: Tuple of integers, shape of returned Keras variable. mean: Float, mean of the normal distribution. scale: Float, standard deviation of the normal distribution. dtype: String, dtype of returned Keras variable. name: String, name of returned Keras variable. seed: Integer, random seed. Returns: A Keras variable, filled with drawn samples. Example: ```python # TensorFlow example >>> kvar = K.random_normal_variable((2,3), 0, 1) >>> kvar <tensorflow.python.ops.variables.Variable object at 0x10ab12dd0> >>> K.eval(kvar) array([[ 1.19591331, 0.68685907, -0.63814116], [ 0.92629528, 0.28055015, 1.70484698]], dtype=float32) ``` """ if dtype is None: dtype = floatx() tf_dtype = dtypes_module.as_dtype(dtype) if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e8) value = init_ops.random_normal_initializer( mean, scale, dtype=tf_dtype, seed=seed)(shape) return variable(value, dtype=dtype, name=name) def count_params(x): """Returns the static number of elements in a variable or tensor. Arguments: x: Variable or tensor. Returns: Integer, the number of scalars in `x`. Example: ```python >>> kvar = K.zeros((2,3)) >>> K.count_params(kvar) 6 >>> K.eval(kvar) array([[ 0., 0., 0.], [ 0., 0., 0.]], dtype=float32) ``` """ return np.prod(x.get_shape().as_list()) def cast(x, dtype): """Casts a tensor to a different dtype and returns it. You can cast a Keras variable but it still returns a Keras tensor. Arguments: x: Keras tensor (or variable). 
dtype: String, either (`'float16'`, `'float32'`, or `'float64'`). Returns: Keras tensor with dtype `dtype`. Example: ```python >>> from keras import backend as K >>> input = K.placeholder((2, 3), dtype='float32') >>> input <tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32> # It doesn't work in-place as below. >>> K.cast(input, dtype='float16') <tf.Tensor 'Cast_1:0' shape=(2, 3) dtype=float16> >>> input <tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32> # you need to assign it. >>> input = K.cast(input, dtype='float16') >>> input <tf.Tensor 'Cast_2:0' shape=(2, 3) dtype=float16> ``` """ return math_ops.cast(x, dtype) # UPDATES OPS def update(x, new_x): return state_ops.assign(x, new_x) def update_add(x, increment): """Update the value of `x` by adding `increment`. Arguments: x: A Variable. increment: A tensor of same shape as `x`. Returns: The variable `x` updated. """ return state_ops.assign_add(x, increment) def update_sub(x, decrement): """Update the value of `x` by subtracting `decrement`. Arguments: x: A Variable. decrement: A tensor of same shape as `x`. Returns: The variable `x` updated. """ return state_ops.assign_sub(x, decrement) def moving_average_update(x, value, momentum): """Compute the moving average of a variable. Arguments: x: A Variable. value: A tensor with the same shape as `variable`. momentum: The moving average momentum. Returns: An Operation to update the variable. """ return moving_averages.assign_moving_average( x, value, momentum, zero_debias=False) # LINEAR ALGEBRA def dot(x, y): """Multiplies 2 tensors (and/or variables) and returns a *tensor*. When attempting to multiply a nD tensor with a nD tensor, it reproduces the Theano behavior. (e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`) Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A tensor, dot product of `x` and `y`. Examples: ```python # dot product between tensors >>> x = K.placeholder(shape=(2, 3)) >>> y = K.placeholder(shape=(3, 4)) >>> xy = K.dot(x, y) >>> xy <tf.Tensor 'MatMul_9:0' shape=(2, 4) dtype=float32> ``` ```python # dot product between tensors >>> x = K.placeholder(shape=(32, 28, 3)) >>> y = K.placeholder(shape=(3, 4)) >>> xy = K.dot(x, y) >>> xy <tf.Tensor 'MatMul_9:0' shape=(32, 28, 4) dtype=float32> ``` ```python # Theano-like behavior example >>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1) >>> y = K.ones((4, 3, 5)) >>> xy = K.dot(x, y) >>> K.int_shape(xy) (2, 4, 5) ``` """ if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2): x_shape = [] for i, s in zip(int_shape(x), array_ops.unstack(array_ops.shape(x))): if i is not None: x_shape.append(i) else: x_shape.append(s) x_shape = tuple(x_shape) y_shape = [] for i, s in zip(int_shape(y), array_ops.unstack(array_ops.shape(y))): if i is not None: y_shape.append(i) else: y_shape.append(s) y_shape = tuple(y_shape) y_permute_dim = list(range(ndim(y))) y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim xt = array_ops.reshape(x, [-1, x_shape[-1]]) yt = array_ops.reshape( array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1]) return array_ops.reshape( math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:]) if is_sparse(x): out = sparse_ops.sparse_tensor_dense_matmul(x, y) else: out = math_ops.matmul(x, y) return out def batch_dot(x, y, axes=None): """Batchwise dot product. `batch_dot` is used to compute dot product of `x` and `y` when `x` and `y` are data in batch, i.e. in a shape of `(batch_size, :)`. `batch_dot` results in a tensor or variable with less dimensions than the input. 
If the number of dimensions is reduced to 1, we use `expand_dims` to make sure that ndim is at least 2. Arguments: x: Keras tensor or variable with `ndim >= 2`. y: Keras tensor or variable with `ndim >= 2`. axes: list of (or single) int with target dimensions. The lengths of `axes[0]` and `axes[1]` should be the same. Returns: A tensor with shape equal to the concatenation of `x`'s shape (less the dimension that was summed over) and `y`'s shape (less the batch dimension and the dimension that was summed over). If the final rank is 1, we reshape it to `(batch_size, 1)`. Examples: Assume `x = [[1, 2], [3, 4]]` and `y = [[5, 6], [7, 8]]` `batch_dot(x, y, axes=1) = [[17, 53]]` which is the main diagonal of `x.dot(y.T)`, although we never have to calculate the off-diagonal elements. Shape inference: Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`. If `axes` is (1, 2), to find the output shape of resultant tensor, loop through each dimension in `x`'s shape and `y`'s shape: * `x.shape[0]` : 100 : append to output shape * `x.shape[1]` : 20 : do not append to output shape, dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1) * `y.shape[0]` : 100 : do not append to output shape, always ignore first dimension of `y` * `y.shape[1]` : 30 : append to output shape * `y.shape[2]` : 20 : do not append to output shape, dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2) `output_shape` = `(100, 30)` ```python >>> x_batch = K.ones(shape=(32, 20, 1)) >>> y_batch = K.ones(shape=(32, 30, 20)) >>> xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=[1, 2]) >>> K.int_shape(xy_batch_dot) (32, 1, 30) ``` """ if isinstance(axes, int): axes = (axes, axes) x_ndim = ndim(x) y_ndim = ndim(y) if x_ndim > y_ndim: diff = x_ndim - y_ndim y = array_ops.reshape(y, array_ops.concat( [array_ops.shape(y), [1] * (diff)], axis=0)) elif y_ndim > x_ndim: diff = y_ndim - x_ndim x = array_ops.reshape(x, array_ops.concat( [array_ops.shape(x), [1] * (diff)], axis=0)) else: diff = 0 if ndim(x) == 2 and ndim(y) == 2: if axes[0] == axes[1]: out = math_ops.reduce_sum(math_ops.multiply(x, y), axes[0]) else: out = math_ops.reduce_sum( math_ops.multiply(array_ops.transpose(x, [1, 0]), y), axes[1]) else: if axes is not None: adj_x = None if axes[0] == ndim(x) - 1 else True adj_y = True if axes[1] == ndim(y) - 1 else None else: adj_x = None adj_y = None out = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y) if diff: if x_ndim > y_ndim: idx = x_ndim + y_ndim - 3 else: idx = x_ndim - 1 out = array_ops.squeeze(out, list(range(idx, idx + diff))) if ndim(out) == 1: out = expand_dims(out, 1) return out def transpose(x): """Transposes a tensor and returns it. Arguments: x: Tensor or variable. Returns: A tensor. Examples: ```python >>> var = K.variable([[1, 2, 3], [4, 5, 6]]) >>> K.eval(var) array([[ 1., 2., 3.], [ 4., 5., 6.]], dtype=float32) >>> var_transposed = K.transpose(var) >>> K.eval(var_transposed) array([[ 1., 4.], [ 2., 5.], [ 3., 6.]], dtype=float32) ``` ```python >>> input = K.placeholder((2, 3)) >>> input <tf.Tensor 'Placeholder_11:0' shape=(2, 3) dtype=float32> >>> input_transposed = K.transpose(input) >>> input_transposed <tf.Tensor 'transpose_4:0' shape=(3, 2) dtype=float32> ``` """ return array_ops.transpose(x) def gather(reference, indices): """Retrieves the elements of indices `indices` in the tensor `reference`. Arguments: reference: A tensor. indices: An integer tensor of indices. Returns: A tensor of same type as `reference`. 
""" return array_ops.gather(reference, indices) # ELEMENT-WISE OPERATIONS def max(x, axis=None, keepdims=False): """Maximum value in a tensor. Arguments: x: A tensor or variable. axis: An integer, the axis to find maximum values. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: A tensor with maximum values of `x`. """ return math_ops.reduce_max(x, axis=axis, keep_dims=keepdims) def min(x, axis=None, keepdims=False): """Minimum value in a tensor. Arguments: x: A tensor or variable. axis: An integer, the axis to find minimum values. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: A tensor with miminum values of `x`. """ return math_ops.reduce_min(x, axis=axis, keep_dims=keepdims) def sum(x, axis=None, keepdims=False): """Sum of the values in a tensor, alongside the specified axis. Arguments: x: A tensor or variable. axis: An integer, the axis to sum over. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: A tensor with sum of `x`. """ return math_ops.reduce_sum(x, axis=axis, keep_dims=keepdims) def prod(x, axis=None, keepdims=False): """Multiplies the values in a tensor, alongside the specified axis. Arguments: x: A tensor or variable. axis: An integer, the axis to compute the product. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: A tensor with the product of elements of `x`. """ return math_ops.reduce_prod(x, axis=axis, keep_dims=keepdims) def cumsum(x, axis=0): """Cumulative sum of the values in a tensor, alongside the specified axis. Arguments: x: A tensor or variable. axis: An integer, the axis to compute the sum. Returns: A tensor of the cumulative sum of values of `x` along `axis`. """ return math_ops.cumsum(x, axis=axis) def cumprod(x, axis=0): """Cumulative product of the values in a tensor, alongside the specified axis. Arguments: x: A tensor or variable. axis: An integer, the axis to compute the product. Returns: A tensor of the cumulative product of values of `x` along `axis`. """ return math_ops.cumprod(x, axis=axis) def var(x, axis=None, keepdims=False): """Variance of a tensor, alongside the specified axis. Arguments: x: A tensor or variable. axis: An integer, the axis to compute the variance. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: A tensor with the variance of elements of `x`. """ if x.dtype.base_dtype == dtypes_module.bool: x = math_ops.cast(x, floatx()) m = math_ops.reduce_mean(x, axis=axis, keep_dims=True) devs_squared = math_ops.square(x - m) return math_ops.reduce_mean( devs_squared, axis=axis, keep_dims=keepdims) def std(x, axis=None, keepdims=False): """Standard deviation of a tensor, alongside the specified axis. Arguments: x: A tensor or variable. axis: An integer, the axis to compute the standard deviation. keepdims: A boolean, whether to keep the dimensions or not. 
If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: A tensor with the standard deviation of elements of `x`. """ return math_ops.sqrt(var(x, axis=axis, keepdims=keepdims)) def mean(x, axis=None, keepdims=False): """Mean of a tensor, alongside the specified axis. Arguments: x: A tensor or variable. axis: A list of integer. Axes to compute the mean. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keep_dims` is `True`, the reduced dimensions are retained with length 1. Returns: A tensor with the mean of elements of `x`. """ if x.dtype.base_dtype == dtypes_module.bool: x = math_ops.cast(x, floatx()) return math_ops.reduce_mean(x, axis=axis, keep_dims=keepdims) def any(x, axis=None, keepdims=False): """Bitwise reduction (logical OR). Arguments: x: Tensor or variable. axis: axis along which to perform the reduction. keepdims: whether the drop or broadcast the reduction axes. Returns: A uint8 tensor (0s and 1s). """ x = math_ops.cast(x, dtypes_module.bool) return math_ops.reduce_any(x, axis=axis, keep_dims=keepdims) def all(x, axis=None, keepdims=False): """Bitwise reduction (logical AND). Arguments: x: Tensor or variable. axis: axis along which to perform the reduction. keepdims: whether the drop or broadcast the reduction axes. Returns: A uint8 tensor (0s and 1s). """ x = math_ops.cast(x, dtypes_module.bool) return math_ops.reduce_all(x, axis=axis, keep_dims=keepdims) def argmax(x, axis=-1): """Returns the index of the maximum value along an axis. Arguments: x: Tensor or variable. axis: axis along which to perform the reduction. Returns: A tensor. """ return math_ops.argmax(x, axis) def argmin(x, axis=-1): """Returns the index of the minimum value along an axis. Arguments: x: Tensor or variable. axis: axis along which to perform the reduction. Returns: A tensor. """ return math_ops.argmin(x, axis) def square(x): """Element-wise square. Arguments: x: Tensor or variable. Returns: A tensor. """ return math_ops.square(x) def abs(x): """Element-wise absolute value. Arguments: x: Tensor or variable. Returns: A tensor. """ return math_ops.abs(x) def sqrt(x): """Element-wise square root. Arguments: x: Tensor or variable. Returns: A tensor. """ zero = _to_tensor(0., x.dtype.base_dtype) inf = _to_tensor(np.inf, x.dtype.base_dtype) x = clip_ops.clip_by_value(x, zero, inf) return math_ops.sqrt(x) def exp(x): """Element-wise exponential. Arguments: x: Tensor or variable. Returns: A tensor. """ return math_ops.exp(x) def log(x): """Element-wise log. Arguments: x: Tensor or variable. Returns: A tensor. """ return math_ops.log(x) def logsumexp(x, axis=None, keepdims=False): """Computes log(sum(exp(elements across dimensions of a tensor))). This function is more numerically stable than log(sum(exp(x))). It avoids overflows caused by taking the exp of large inputs and underflows caused by taking the log of small inputs. Arguments: x: A tensor or variable. axis: An integer, the axis to reduce over. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: The reduced tensor. """ return math_ops.reduce_logsumexp(x, axis=axis, keep_dims=keepdims) def round(x): """Element-wise rounding to the closest integer. In case of tie, the rounding mode used is "half to even". 
Arguments: x: Tensor or variable. Returns: A tensor. """ return math_ops.round(x) def sign(x): """Element-wise sign. Arguments: x: Tensor or variable. Returns: A tensor. """ return math_ops.sign(x) def pow(x, a): """Element-wise exponentiation. Arguments: x: Tensor or variable. a: Python integer. Returns: A tensor. """ return math_ops.pow(x, a) def clip(x, min_value, max_value): """Element-wise value clipping. Arguments: x: Tensor or variable. min_value: Python float or integer. max_value: Python float or integer. Returns: A tensor. """ if max_value is not None and max_value < min_value: max_value = min_value if max_value is None: max_value = np.inf min_value = _to_tensor(min_value, x.dtype.base_dtype) max_value = _to_tensor(max_value, x.dtype.base_dtype) return clip_ops.clip_by_value(x, min_value, max_value) def equal(x, y): """Element-wise equality between two tensors. Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor. """ return math_ops.equal(x, y) def not_equal(x, y): """Element-wise inequality between two tensors. Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor. """ return math_ops.not_equal(x, y) def greater(x, y): """Element-wise truth value of (x > y). Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor. """ return math_ops.greater(x, y) def greater_equal(x, y): """Element-wise truth value of (x >= y). Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor. """ return math_ops.greater_equal(x, y) def less(x, y): """Element-wise truth value of (x < y). Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor. """ return math_ops.less(x, y) def less_equal(x, y): """Element-wise truth value of (x <= y). Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor. """ return math_ops.less_equal(x, y) def maximum(x, y): """Element-wise maximum of two tensors. Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A tensor. """ return math_ops.maximum(x, y) def minimum(x, y): """Element-wise minimum of two tensors. Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A tensor. """ return math_ops.minimum(x, y) def sin(x): """Computes sin of x element-wise. Arguments: x: Tensor or variable. Returns: A tensor. """ return math_ops.sin(x) def cos(x): """Computes cos of x element-wise. Arguments: x: Tensor or variable. Returns: A tensor. """ return math_ops.cos(x) def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3): """Computes mean and std for batch then apply batch_normalization on batch. Arguments: x: Input tensor or variable. gamma: Tensor by which to scale the input. beta: Tensor with which to center the input. reduction_axes: iterable of integers, axes over which to normalize. epsilon: Fuzz factor. Returns: A tuple length of 3, `(normalized_tensor, mean, variance)`. 
""" mean, var = nn.moments( x, reduction_axes, shift=None, name=None, keep_dims=False) if sorted(reduction_axes) == list(range(ndim(x)))[:-1]: normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon) else: # need broadcasting target_shape = [] for axis in range(ndim(x)): if axis in reduction_axes: target_shape.append(1) else: target_shape.append(array_ops.shape(x)[axis]) target_shape = array_ops.stack(target_shape) broadcast_mean = array_ops.reshape(mean, target_shape) broadcast_var = array_ops.reshape(var, target_shape) if gamma is None: broadcast_gamma = None else: broadcast_gamma = array_ops.reshape(gamma, target_shape) if beta is None: broadcast_beta = None else: broadcast_beta = array_ops.reshape(beta, target_shape) normed = nn.batch_normalization(x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon) return normed, mean, var def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3): """Applies batch normalization on x given mean, var, beta and gamma. I.e. returns: `output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta` Arguments: x: Input tensor or variable. mean: Mean of batch. var: Variance of batch. beta: Tensor with which to center the input. gamma: Tensor by which to scale the input. epsilon: Fuzz factor. Returns: A tensor. """ return nn.batch_normalization(x, mean, var, beta, gamma, epsilon) # SHAPE OPERATIONS def concatenate(tensors, axis=-1): """Concatenates a list of tensors alongside the specified axis. Arguments: tensors: list of tensors to concatenate. axis: concatenation axis. Returns: A tensor. """ if axis < 0: rank = ndim(tensors[0]) if rank: axis %= rank else: axis = 0 if py_all([is_sparse(x) for x in tensors]): return sparse_ops.sparse_concat(axis, tensors) else: return array_ops.concat([to_dense(x) for x in tensors], axis) def reshape(x, shape): """Reshapes a tensor to the specified shape. Arguments: x: Tensor or variable. shape: Target shape tuple. Returns: A tensor. """ return array_ops.reshape(x, shape) def permute_dimensions(x, pattern): """Permutes axes in a tensor. Arguments: x: Tensor or variable. pattern: A tuple of dimension indices, e.g. `(0, 2, 1)`. Returns: A tensor. """ return array_ops.transpose(x, perm=pattern) def resize_images(x, height_factor, width_factor, data_format): """Resizes the images contained in a 4D tensor. Arguments: x: Tensor or variable to resize. height_factor: Positive integer. width_factor: Positive integer. data_format: One of `"channels_first"`, `"channels_last"`. Returns: A tensor. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. 
""" if data_format == 'channels_first': original_shape = int_shape(x) new_shape = array_ops.shape(x)[2:] new_shape *= constant_op.constant( np.array([height_factor, width_factor]).astype('int32')) x = permute_dimensions(x, [0, 2, 3, 1]) x = image_ops.resize_nearest_neighbor(x, new_shape) x = permute_dimensions(x, [0, 3, 1, 2]) x.set_shape((None, None, original_shape[2] * height_factor if original_shape[2] is not None else None, original_shape[3] * width_factor if original_shape[3] is not None else None)) return x elif data_format == 'channels_last': original_shape = int_shape(x) new_shape = array_ops.shape(x)[1:3] new_shape *= constant_op.constant( np.array([height_factor, width_factor]).astype('int32')) x = image_ops.resize_nearest_neighbor(x, new_shape) x.set_shape((None, original_shape[1] * height_factor if original_shape[1] is not None else None, original_shape[2] * width_factor if original_shape[2] is not None else None, None)) return x else: raise ValueError('Invalid data_format:', data_format) def resize_volumes(x, depth_factor, height_factor, width_factor, data_format): """Resizes the volume contained in a 5D tensor. Arguments: x: Tensor or variable to resize. depth_factor: Positive integer. height_factor: Positive integer. width_factor: Positive integer. data_format: One of `"channels_first"`, `"channels_last"`. Returns: A tensor. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. """ if data_format == 'channels_first': output = repeat_elements(x, depth_factor, axis=2) output = repeat_elements(output, height_factor, axis=3) output = repeat_elements(output, width_factor, axis=4) return output elif data_format == 'channels_last': output = repeat_elements(x, depth_factor, axis=1) output = repeat_elements(output, height_factor, axis=2) output = repeat_elements(output, width_factor, axis=3) return output else: raise ValueError('Invalid data_format:', data_format) def repeat_elements(x, rep, axis): """Repeats the elements of a tensor along an axis, like `np.repeat`. If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output will have shape `(s1, s2 * rep, s3)`. Arguments: x: Tensor or variable. rep: Python integer, number of times to repeat. axis: Axis along which to repeat. Returns: A tensor. """ x_shape = x.get_shape().as_list() # For static axis if x_shape[axis] is not None: # slices along the repeat axis splits = array_ops.split(value=x, num_or_size_splits=x_shape[axis], axis=axis) # repeat each slice the given number of reps x_rep = [s for s in splits for _ in range(rep)] return concatenate(x_rep, axis) # Here we use tf.tile to mimic behavior of np.repeat so that # we can handle dynamic shapes (that include None). # To do that, we need an auxiliary axis to repeat elements along # it and then merge them along the desired axis. # Repeating auxiliary_axis = axis + 1 x_shape = array_ops.shape(x) x_rep = array_ops.expand_dims(x, axis=auxiliary_axis) reps = np.ones(len(x.get_shape()) + 1) reps[auxiliary_axis] = rep x_rep = array_ops.tile(x_rep, reps) # Merging reps = np.delete(reps, auxiliary_axis) reps[axis] = rep reps = array_ops.constant(reps, dtype='int32') x_shape *= reps x_rep = array_ops.reshape(x_rep, x_shape) # Fix shape representation x_shape = x.get_shape().as_list() x_rep.set_shape(x_shape) x_rep._keras_shape = tuple(x_shape) return x_rep def repeat(x, n): """Repeats a 2D tensor. if `x` has shape (samples, dim) and `n` is `2`, the output will have shape `(samples, 2, dim)`. Arguments: x: Tensor or variable. 
n: Python integer, number of times to repeat. Returns: A tensor. """ assert ndim(x) == 2 x = array_ops.expand_dims(x, 1) pattern = array_ops.stack([1, n, 1]) return array_ops.tile(x, pattern) def arange(start, stop=None, step=1, dtype='int32'): """Creates a 1D tensor containing a sequence of integers. The function arguments use the same convention as Theano's arange: if only one argument is provided, it is in fact the "stop" argument. The default type of the returned tensor is `'int32'` to match TensorFlow's default. Arguments: start: Start value. stop: Stop value. step: Difference between two successive values. dtype: Integer dtype to use. Returns: An integer tensor. """ # Match the behavior of numpy and Theano by returning an empty seqence. if stop is None and start < 0: start = 0 result = math_ops.range(start, limit=stop, delta=step, name='arange') if dtype != 'int32': result = cast(result, dtype) return result def tile(x, n): """Creates a tensor by tiling `x` by `n`. Arguments: x: A tensor or variable n: A list of integer. The length must be the same as the number of dimensions in `x`. Returns: A tiled tensor. """ if isinstance(n, int): n = [n] return array_ops.tile(x, n) def flatten(x): """Flatten a tensor. Arguments: x: A tensor or variable. Returns: A tensor, reshaped into 1-D """ return array_ops.reshape(x, [-1]) def batch_flatten(x): """Turn a nD tensor into a 2D tensor with same 0th dimension. In other words, it flattens each data samples of a batch. Arguments: x: A tensor or variable. Returns: A tensor. """ x = array_ops.reshape(x, array_ops.stack([-1, prod(shape(x)[1:])])) return x def expand_dims(x, axis=-1): """Adds a 1-sized dimension at index "axis". Arguments: x: A tensor or variable. axis: Position where to add a new axis. Returns: A tensor with expanded dimensions. """ return array_ops.expand_dims(x, axis) def squeeze(x, axis): """Removes a 1-dimension from the tensor at index "axis". Arguments: x: A tensor or variable. axis: Axis to drop. Returns: A tensor with the same data as `x` but reduced dimensions. """ return array_ops.squeeze(x, [axis]) def temporal_padding(x, padding=(1, 1)): """Pads the middle dimension of a 3D tensor. Arguments: x: Tensor or variable. padding: Tuple of 2 integers, how many zeros to add at the start and end of dim 1. Returns: A padded 3D tensor. """ assert len(padding) == 2 pattern = [[0, 0], [padding[0], padding[1]], [0, 0]] return array_ops.pad(x, pattern) def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None): """Pads the 2nd and 3rd dimensions of a 4D tensor. Arguments: x: Tensor or variable. padding: Tuple of 2 tuples, padding pattern. data_format: One of `channels_last` or `channels_first`. Returns: A padded 4D tensor. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. """ assert len(padding) == 2 assert len(padding[0]) == 2 assert len(padding[1]) == 2 if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) if data_format == 'channels_first': pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])] else: pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]] return array_ops.pad(x, pattern) def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None): """Pads 5D tensor with zeros along the depth, height, width dimensions. Pads these dimensions with respectively "padding[0]", "padding[1]" and "padding[2]" zeros left and right. 
For 'channels_last' data_format, the 2nd, 3rd and 4th dimension will be padded. For 'channels_first' data_format, the 3rd, 4th and 5th dimension will be padded. Arguments: x: Tensor or variable. padding: Tuple of 3 tuples, padding pattern. data_format: One of `channels_last` or `channels_first`. Returns: A padded 5D tensor. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. """ assert len(padding) == 3 assert len(padding[0]) == 2 assert len(padding[1]) == 2 assert len(padding[2]) == 2 if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) if data_format == 'channels_first': pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]], [padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]] else: pattern = [[0, 0], [padding[0][0], padding[0][1]], [padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]], [0, 0]] return array_ops.pad(x, pattern) def stack(x, axis=0): """Stacks a list of rank `R` tensors into a rank `R+1` tensor. Arguments: x: List of tensors. axis: Axis along which to perform stacking. Returns: A tensor. """ return array_ops.stack(x, axis=axis) def one_hot(indices, num_classes): """Computes the one-hot representation of an integer tensor. Arguments: indices: nD integer tensor of shape `(batch_size, dim1, dim2, ... dim(n-1))` num_classes: Integer, number of classes to consider. Returns: (n + 1)D one hot representation of the input with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)` Returns: The one-hot tensor. """ return array_ops.one_hot(indices, depth=num_classes, axis=-1) def reverse(x, axes): """Reverse a tensor along the specified axes. Arguments: x: Tensor to reverse. axes: Integer or iterable of integers. Axes to reverse. Returns: A tensor. """ if isinstance(axes, int): axes = [axes] return array_ops.reverse(x, axes) # VALUE MANIPULATION def get_value(x): """Returns the value of a variable. Arguments: x: input variable. Returns: A Numpy array. """ return x.eval(session=get_session()) def batch_get_value(tensors): """Returns the value of more than one tensor variable. Arguments: tensors: list of ops to run. Returns: A list of Numpy arrays. """ if tensors: return get_session().run(tensors) else: return [] def set_value(x, value): """Sets the value of a variable, from a Numpy array. Arguments: x: Tensor to set to a new value. value: Value to set the tensor to, as a Numpy array (of the same shape). """ value = np.asarray(value, dtype=dtype(x)) tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0]) if hasattr(x, '_assign_placeholder'): assign_placeholder = x._assign_placeholder assign_op = x._assign_op else: assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape) assign_op = x.assign(assign_placeholder) x._assign_placeholder = assign_placeholder x._assign_op = assign_op get_session().run(assign_op, feed_dict={assign_placeholder: value}) def batch_set_value(tuples): """Sets the values of many tensor variables at once. Arguments: tuples: a list of tuples `(tensor, value)`. `value` should be a Numpy array. 
""" if tuples: assign_ops = [] feed_dict = {} for x, value in tuples: value = np.asarray(value, dtype=dtype(x)) tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0]) if hasattr(x, '_assign_placeholder'): assign_placeholder = x._assign_placeholder assign_op = x._assign_op else: assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape) assign_op = x.assign(assign_placeholder) x._assign_placeholder = assign_placeholder x._assign_op = assign_op assign_ops.append(assign_op) feed_dict[assign_placeholder] = value get_session().run(assign_ops, feed_dict=feed_dict) def print_tensor(x, message=''): """Prints `message` and the tensor value when evaluated. Note that `print_tensor` returns a new tensor identical to `x` which should be used in the following code. Otherwise the print operation is not taken into account during evaluation. Example: ```python >>> x = K.print_tensor(x, message="x is: ") ``` Arguments: x: Tensor to print. message: Message to print jointly with the tensor. Returns: The same tensor `x`, unchanged. """ return logging_ops.Print(x, [x], message) # GRAPH MANIPULATION class Function(object): """Runs a computation graph. Arguments: inputs: Feed placeholders to the computation graph. outputs: Output tensors to fetch. updates: Additional update ops to be run at function call. name: a name to help users identify what this function does. """ def __init__(self, inputs, outputs, updates=None, name=None, **session_kwargs): updates = updates or [] if not isinstance(inputs, (list, tuple)): raise TypeError('`inputs` to a TensorFlow backend function ' 'should be a list or tuple.') if not isinstance(outputs, (list, tuple)): raise TypeError('`outputs` of a TensorFlow backend function ' 'should be a list or tuple.') if not isinstance(updates, (list, tuple)): raise TypeError('`updates` in a TensorFlow backend function ' 'should be a list or tuple.') self.inputs = list(inputs) self.outputs = list(outputs) with ops.control_dependencies(self.outputs): updates_ops = [] for update in updates: if isinstance(update, tuple): p, new_p = update updates_ops.append(state_ops.assign(p, new_p)) else: # assumed already an op updates_ops.append(update) self.updates_op = control_flow_ops.group(*updates_ops) self.name = name self.session_kwargs = session_kwargs def __call__(self, inputs): if not isinstance(inputs, (list, tuple)): raise TypeError('`inputs` should be a list or tuple.') feed_dict = {} for tensor, value in zip(self.inputs, inputs): if is_sparse(tensor): sparse_coo = value.tocoo() indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(sparse_coo.col, 1)), 1) value = (indices, sparse_coo.data, sparse_coo.shape) feed_dict[tensor] = value session = get_session() updated = session.run( self.outputs + [self.updates_op], feed_dict=feed_dict, **self.session_kwargs) return updated[:len(self.outputs)] def function(inputs, outputs, updates=None, **kwargs): """Instantiates a Keras function. Arguments: inputs: List of placeholder tensors. outputs: List of output tensors. updates: List of update ops. **kwargs: Passed to `tf.Session.run`. Returns: Output values as Numpy arrays. Raises: ValueError: if invalid kwargs are passed in. 
""" if kwargs: for key in kwargs: if (key not in tf_inspect.getargspec(session_module.Session.run)[0] and key not in tf_inspect.getargspec(Function.__init__)[0]): msg = ('Invalid argument "%s" passed to K.function with Tensorflow ' 'backend') % key raise ValueError(msg) return Function(inputs, outputs, updates=updates, **kwargs) def gradients(loss, variables): """Returns the gradients of `variables` w.r.t. `loss`. Arguments: loss: Scalar tensor to minimize. variables: List of variables. Returns: A gradients tensor. """ return gradients_module.gradients( loss, variables, colocate_gradients_with_ops=True) def stop_gradient(variables): """Returns `variables` but with zero gradient w.r.t. every other variable. Arguments: variables: Tensor or list of tensors to consider constant with respect to any other variable. Returns: A single tensor or a list of tensors (depending on the passed argument) that has no gradient with respect to any other variable. """ if isinstance(variables, (list, tuple)): return map(array_ops.stop_gradient, variables) return array_ops.stop_gradient(variables) # CONTROL FLOW def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False): """Iterates over the time dimension of a tensor. Arguments: step_function: RNN step function. Parameters; input; tensor with shape `(samples, ...)` (no time dimension), representing input for the batch of samples at a certain time step. states; list of tensors. Returns; output; tensor with shape `(samples, output_dim)` (no time dimension). new_states; list of tensors, same length and shapes as 'states'. The first state in the list must be the output tensor at the previous timestep. inputs: tensor of temporal data of shape `(samples, time, ...)` (at least 3D). initial_states: tensor with shape (samples, output_dim) (no time dimension), containing the initial values for the states used in the step function. go_backwards: boolean. If True, do the iteration over the time dimension in reverse order and return the reversed sequence. mask: binary tensor with shape `(samples, time, 1)`, with a zero for every element that is masked. constants: a list of constant values passed at each step. unroll: whether to unroll the RNN or to use a symbolic loop (`while_loop` or `scan` depending on backend). Returns: A tuple, `(last_output, outputs, new_states)`. last_output: the latest output of the rnn, of shape `(samples, ...)` outputs: tensor with shape `(samples, time, ...)` where each entry `outputs[s, t]` is the output of the step function at time `t` for sample `s`. new_states: list of tensors, latest states returned by the step function, of shape `(samples, ...)`. Raises: ValueError: if input dimension is less than 3. ValueError: if `unroll` is `True` but input timestep is not a fixed number. ValueError: if `mask` is provided (not `None`) but states is not provided (`len(states)` == 0). 
""" ndim = len(inputs.get_shape()) if ndim < 3: raise ValueError('Input should be at least 3D.') axes = [1, 0] + list(range(2, ndim)) inputs = array_ops.transpose(inputs, (axes)) if mask is not None: if mask.dtype != dtypes_module.bool: mask = math_ops.cast(mask, dtypes_module.bool) if len(mask.get_shape()) == ndim - 1: mask = expand_dims(mask) mask = array_ops.transpose(mask, axes) if constants is None: constants = [] global uses_learning_phase # pylint: disable=global-variable-undefined uses_learning_phase = False if unroll: if not inputs.get_shape()[0]: raise ValueError('Unrolling requires a ' 'fixed number of timesteps.') states = initial_states successive_states = [] successive_outputs = [] input_list = array_ops.unstack(inputs) if go_backwards: input_list.reverse() if mask is not None: mask_list = array_ops.unstack(mask) if go_backwards: mask_list.reverse() for inp, mask_t in zip(input_list, mask_list): output, new_states = step_function(inp, states + constants) if getattr(output, '_uses_learning_phase', False): uses_learning_phase = True # tf.where needs its condition tensor # to be the same shape as its two # result tensors, but in our case # the condition (mask) tensor is # (nsamples, 1), and A and B are (nsamples, ndimensions). # So we need to # broadcast the mask to match the shape of A and B. # That's what the tile call does, # it just repeats the mask along its second dimension # n times. tiled_mask_t = array_ops.tile(mask_t, array_ops.stack( [1, array_ops.shape(output)[1]])) if not successive_outputs: prev_output = zeros_like(output) else: prev_output = successive_outputs[-1] output = array_ops.where(tiled_mask_t, output, prev_output) return_states = [] for state, new_state in zip(states, new_states): # (see earlier comment for tile explanation) tiled_mask_t = array_ops.tile(mask_t, array_ops.stack( [1, array_ops.shape(new_state)[1]])) return_states.append(array_ops.where(tiled_mask_t, new_state, state)) states = return_states successive_outputs.append(output) successive_states.append(states) last_output = successive_outputs[-1] new_states = successive_states[-1] outputs = array_ops.stack(successive_outputs) else: for inp in input_list: output, states = step_function(inp, states + constants) if getattr(output, '_uses_learning_phase', False): uses_learning_phase = True successive_outputs.append(output) successive_states.append(states) last_output = successive_outputs[-1] new_states = successive_states[-1] outputs = array_ops.stack(successive_outputs) else: if go_backwards: inputs = reverse(inputs, 0) states = tuple(initial_states) time_steps = array_ops.shape(inputs)[0] outputs, _ = step_function(inputs[0], initial_states + constants) output_ta = tensor_array_ops.TensorArray( dtype=outputs.dtype, size=time_steps, tensor_array_name='output_ta') input_ta = tensor_array_ops.TensorArray( dtype=inputs.dtype, size=time_steps, tensor_array_name='input_ta') input_ta = input_ta.unstack(inputs) time = constant_op.constant(0, dtype='int32', name='time') if mask is not None: if not states: raise ValueError('No initial states provided! ' 'When using masking in an RNN, you should ' 'provide initial states ' '(and your step function should return ' 'as its first state at time `t` ' 'the output at time `t-1`).') if go_backwards: mask = reverse(mask, 0) mask_ta = tensor_array_ops.TensorArray( dtype=dtypes_module.bool, size=time_steps, tensor_array_name='mask_ta') mask_ta = mask_ta.unstack(mask) def _step(time, output_ta_t, *states): """RNN step function. 
Arguments: time: Current timestep value. output_ta_t: TensorArray. *states: List of states. Returns: Tuple: `(time + 1,output_ta_t) + tuple(new_states)` """ current_input = input_ta.read(time) mask_t = mask_ta.read(time) output, new_states = step_function(current_input, tuple(states) + tuple(constants)) if getattr(output, '_uses_learning_phase', False): global uses_learning_phase # pylint: disable=global-variable-undefined uses_learning_phase = True for state, new_state in zip(states, new_states): new_state.set_shape(state.get_shape()) tiled_mask_t = array_ops.tile(mask_t, array_ops.stack( [1, array_ops.shape(output)[1]])) output = array_ops.where(tiled_mask_t, output, states[0]) new_states = [ array_ops.where(tiled_mask_t, new_states[i], states[i]) for i in range(len(states)) ] output_ta_t = output_ta_t.write(time, output) return (time + 1, output_ta_t) + tuple(new_states) else: def _step(time, output_ta_t, *states): """RNN step function. Arguments: time: Current timestep value. output_ta_t: TensorArray. *states: List of states. Returns: Tuple: `(time + 1,output_ta_t) + tuple(new_states)` """ current_input = input_ta.read(time) output, new_states = step_function(current_input, tuple(states) + tuple(constants)) if getattr(output, '_uses_learning_phase', False): global uses_learning_phase # pylint: disable=global-variable-undefined uses_learning_phase = True for state, new_state in zip(states, new_states): new_state.set_shape(state.get_shape()) output_ta_t = output_ta_t.write(time, output) return (time + 1, output_ta_t) + tuple(new_states) final_outputs = control_flow_ops.while_loop( cond=lambda time, *_: time < time_steps, body=_step, loop_vars=(time, output_ta) + states, parallel_iterations=32, swap_memory=True) last_time = final_outputs[0] output_ta = final_outputs[1] new_states = final_outputs[2:] outputs = output_ta.stack() last_output = output_ta.read(last_time - 1) axes = [1, 0] + list(range(2, len(outputs.get_shape()))) outputs = array_ops.transpose(outputs, axes) last_output._uses_learning_phase = uses_learning_phase return last_output, outputs, new_states def switch(condition, then_expression, else_expression): """Switches between two operations depending on a scalar value. Note that both `then_expression` and `else_expression` should be symbolic tensors of the *same shape*. Arguments: condition: tensor (`int` or `bool`). then_expression: either a tensor, or a callable that returns a tensor. else_expression: either a tensor, or a callable that returns a tensor. Returns: The selected tensor. Raises: ValueError: If rank of `condition` is greater than rank of expressions. """ if condition.dtype != dtypes_module.bool: condition = math_ops.cast(condition, 'bool') cond_ndim = ndim(condition) if not cond_ndim: if not callable(then_expression): def then_expression_fn(): return then_expression else: then_expression_fn = then_expression if not callable(else_expression): def else_expression_fn(): return else_expression else: else_expression_fn = else_expression x = control_flow_ops.cond(condition, then_expression_fn, else_expression_fn) else: # tf.where needs its condition tensor # to be the same shape as its two # result tensors if callable(then_expression): then_expression = then_expression() if callable(else_expression): else_expression = else_expression() expr_ndim = ndim(then_expression) if cond_ndim > expr_ndim: raise ValueError('Rank of `condition` should be less than or' ' equal to rank of `then_expression` and ' '`else_expression`. 
ndim(condition)=' + str(cond_ndim) + ', ndim(then_expression)' '=' + str(expr_ndim)) if cond_ndim > 1: ndim_diff = expr_ndim - cond_ndim cond_shape = array_ops.concat( [array_ops.shape(condition), [1] * ndim_diff], axis=0) condition = array_ops.reshape(condition, cond_shape) expr_shape = array_ops.shape(then_expression) shape_diff = expr_shape - cond_shape tile_shape = array_ops.where(shape_diff > 0, expr_shape, array_ops.ones_like(expr_shape)) condition = array_ops.tile(condition, tile_shape) x = array_ops.where(condition, then_expression, else_expression) return x def in_train_phase(x, alt, training=None): """Selects `x` in train phase, and `alt` otherwise. Note that `alt` should have the *same shape* as `x`. Arguments: x: What to return in train phase (tensor or callable that returns a tensor). alt: What to return otherwise (tensor or callable that returns a tensor). training: Optional scalar tensor (or Python boolean, or Python integer) specifying the learning phase. Returns: Either `x` or `alt` based on the `training` flag. the `training` flag defaults to `K.learning_phase()`. """ if training is None: training = learning_phase() uses_learning_phase = True else: uses_learning_phase = False if training is 1 or training is True: if callable(x): return x() else: return x elif training is 0 or training is False: if callable(alt): return alt() else: return alt # else: assume learning phase is a placeholder tensor. x = switch(training, x, alt) if uses_learning_phase: x._uses_learning_phase = True return x def in_test_phase(x, alt, training=None): """Selects `x` in test phase, and `alt` otherwise. Note that `alt` should have the *same shape* as `x`. Arguments: x: What to return in test phase (tensor or callable that returns a tensor). alt: What to return otherwise (tensor or callable that returns a tensor). training: Optional scalar tensor (or Python boolean, or Python integer) specifying the learning phase. Returns: Either `x` or `alt` based on `K.learning_phase`. """ return in_train_phase(alt, x, training=training) # NN OPERATIONS def relu(x, alpha=0., max_value=None): """Rectified linear unit. With default values, it returns element-wise `max(x, 0)`. Arguments: x: A tensor or variable. alpha: A scalar, slope of negative section (default=`0.`). max_value: Saturation threshold. Returns: A tensor. """ if alpha != 0.: negative_part = nn.relu(-x) x = nn.relu(x) if max_value is not None: max_value = _to_tensor(max_value, x.dtype.base_dtype) zero = _to_tensor(0., x.dtype.base_dtype) x = clip_ops.clip_by_value(x, zero, max_value) if alpha != 0.: alpha = _to_tensor(alpha, x.dtype.base_dtype) x -= alpha * negative_part return x def elu(x, alpha=1.): """Exponential linear unit. Arguments: x: A tensor or variable to compute the activation function for. alpha: A scalar, slope of positive section. Returns: A tensor. """ res = nn.elu(x) if alpha == 1: return res else: return array_ops.where(x > 0, res, alpha * res) def softmax(x): """Softmax of a tensor. Arguments: x: A tensor or variable. Returns: A tensor. """ return nn.softmax(x) def softplus(x): """Softplus of a tensor. Arguments: x: A tensor or variable. Returns: A tensor. """ return nn.softplus(x) def softsign(x): """Softsign of a tensor. Arguments: x: A tensor or variable. Returns: A tensor. """ return nn.softsign(x) def categorical_crossentropy(target, output, from_logits=False): """Categorical crossentropy between an output tensor and a target tensor. Arguments: target: A tensor of the same shape as `output`. 
output: A tensor resulting from a softmax (unless `from_logits` is True, in which case `output` is expected to be the logits). from_logits: Boolean, whether `output` is the result of a softmax, or is a tensor of logits. Returns: Output tensor. """ # Note: nn.softmax_cross_entropy_with_logits # expects logits, Keras expects probabilities. if not from_logits: # scale preds so that the class probas of each sample sum to 1 output /= math_ops.reduce_sum( output, axis=len(output.get_shape()) - 1, keep_dims=True) # manual computation of crossentropy epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype) output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_) return -math_ops.reduce_sum( target * math_ops.log(output), axis=len(output.get_shape()) - 1) else: return nn.softmax_cross_entropy_with_logits(labels=target, logits=output) def sparse_categorical_crossentropy(target, output, from_logits=False): """Categorical crossentropy with integer targets. Arguments: target: An integer tensor. output: A tensor resulting from a softmax (unless `from_logits` is True, in which case `output` is expected to be the logits). from_logits: Boolean, whether `output` is the result of a softmax, or is a tensor of logits. Returns: Output tensor. """ # Note: nn.sparse_softmax_cross_entropy_with_logits # expects logits, Keras expects probabilities. if not from_logits: epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype) output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_) output = math_ops.log(output) output_shape = output.get_shape() targets = cast(flatten(target), 'int64') logits = array_ops.reshape(output, [-1, int(output_shape[-1])]) res = nn.sparse_softmax_cross_entropy_with_logits( labels=targets, logits=logits) if len(output_shape) == 3: # if our output includes timesteps we need to reshape return array_ops.reshape(res, array_ops.shape(output)[:-1]) else: return res def binary_crossentropy(target, output, from_logits=False): """Binary crossentropy between an output tensor and a target tensor. Arguments: target: A tensor with the same shape as `output`. output: A tensor. from_logits: Whether `output` is expected to be a logits tensor. By default, we consider that `output` encodes a probability distribution. Returns: A tensor. """ # Note: nn.softmax_cross_entropy_with_logits # expects logits, Keras expects probabilities. if not from_logits: # transform back to logits epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype) output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_) output = math_ops.log(output / (1 - output)) return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output) def sigmoid(x): """Element-wise sigmoid. Arguments: x: A tensor or variable. Returns: A tensor. """ return nn.sigmoid(x) def hard_sigmoid(x): """Segment-wise linear approximation of sigmoid. Faster than sigmoid. Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`. In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`. Arguments: x: A tensor or variable. Returns: A tensor. """ x = (0.2 * x) + 0.5 zero = _to_tensor(0., x.dtype.base_dtype) one = _to_tensor(1., x.dtype.base_dtype) x = clip_ops.clip_by_value(x, zero, one) return x def tanh(x): """Element-wise tanh. Arguments: x: A tensor or variable. Returns: A tensor. """ return nn.tanh(x) def dropout(x, level, noise_shape=None, seed=None): """Sets entries in `x` to zero at random, while scaling the entire tensor. Arguments: x: tensor level: fraction of the entries in the tensor that will be set to 0. 
noise_shape: shape for randomly generated keep/drop flags, must be broadcastable to the shape of `x` seed: random seed to ensure determinism. Returns: A tensor. """ retain_prob = 1. - level if seed is None: seed = np.random.randint(10e6) # the dummy 1. works around a TF bug # (float32_ref vs. float32 incompatibility) return nn.dropout(x * 1., retain_prob, noise_shape, seed=seed) def l2_normalize(x, axis=None): """Normalizes a tensor wrt the L2 norm alongside the specified axis. Arguments: x: Tensor or variable. axis: axis along which to perform normalization. Returns: A tensor. """ return nn.l2_normalize(x, dim=axis) def in_top_k(predictions, targets, k): """Returns whether the `targets` are in the top `k` `predictions`. Arguments: predictions: A tensor of shape `(batch_size, classes)` and type `float32`. targets: A 1D tensor of length `batch_size` and type `int32` or `int64`. k: An `int`, number of top elements to consider. Returns: A 1D tensor of length `batch_size` and type `bool`. `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k` values of `predictions[i]`. """ return nn.in_top_k(predictions, targets, k) # CONVOLUTIONS def _preprocess_conv2d_input(x, data_format): """Transpose and cast the input before the conv2d. Arguments: x: input tensor. data_format: string, `"channels_last"` or `"channels_first"`. Returns: A tensor. """ tf_data_format = 'NHWC' if data_format == 'channels_first': if not _has_nchw_support(): x = array_ops.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC else: tf_data_format = 'NCHW' return x, tf_data_format def _preprocess_conv3d_input(x, data_format): """Transpose and cast the input before the conv3d. Arguments: x: input tensor. data_format: string, `"channels_last"` or `"channels_first"`. Returns: A tensor. """ tf_data_format = 'NDHWC' if data_format == 'channels_first': if not _has_nchw_support(): x = array_ops.transpose(x, (0, 2, 3, 4, 1)) else: tf_data_format = 'NCDHW' return x, tf_data_format def _preprocess_padding(padding): """Convert keras' padding to tensorflow's padding. Arguments: padding: string, one of 'same' , 'valid' Returns: a string, one of 'SAME', 'VALID'. Raises: ValueError: if invalid `padding'` """ if padding == 'same': padding = 'SAME' elif padding == 'valid': padding = 'VALID' else: raise ValueError('Invalid padding:', padding) return padding def conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): """1D convolution. Arguments: x: Tensor or variable. kernel: kernel tensor. strides: stride integer. padding: string, `"same"`, `"causal"` or `"valid"`. data_format: string, one of "channels_last", "channels_first". dilation_rate: integer dilate rate. Returns: A tensor, result of 1D convolution. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. 
""" if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) kernel_shape = kernel.get_shape().as_list() if padding == 'causal': # causal (dilated) convolution: left_pad = dilation_rate * (kernel_shape[0] - 1) x = temporal_padding(x, (left_pad, 0)) padding = 'valid' padding = _preprocess_padding(padding) if data_format == 'channels_last': tf_data_format = 'NWC' else: tf_data_format = 'NCW' x = nn.convolution( input=x, filter=kernel, dilation_rate=(dilation_rate,), strides=(strides,), padding=padding, data_format=tf_data_format) return x def conv2d(x, kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): """2D convolution. Arguments: x: Tensor or variable. kernel: kernel tensor. strides: strides tuple. padding: string, `"same"` or `"valid"`. data_format: `"channels_last"` or `"channels_first"`. Whether to use Theano or TensorFlow data format for inputs/kernels/outputs. dilation_rate: tuple of 2 integers. Returns: A tensor, result of 2D convolution. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. """ if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x, tf_data_format = _preprocess_conv2d_input(x, data_format) padding = _preprocess_padding(padding) x = nn.convolution( input=x, filter=kernel, dilation_rate=dilation_rate, strides=strides, padding=padding, data_format=tf_data_format) if data_format == 'channels_first' and tf_data_format == 'NHWC': x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW return x def conv2d_transpose(x, kernel, output_shape, strides=(1, 1), padding='valid', data_format=None): """2D deconvolution (i.e. transposed convolution). Arguments: x: Tensor or variable. kernel: kernel tensor. output_shape: 1D int tensor for the output shape. strides: strides tuple. padding: string, `"same"` or `"valid"`. data_format: string, `"channels_last"` or `"channels_first"`. Whether to use Theano or TensorFlow/CNTK data format for inputs/kernels/outputs. Returns: A tensor, result of transposed 2D convolution. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. """ if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) if isinstance(output_shape, (tuple, list)): output_shape = array_ops.stack(output_shape) x, tf_data_format = _preprocess_conv2d_input(x, data_format) if data_format == 'channels_first' and tf_data_format == 'NHWC': output_shape = (output_shape[0], output_shape[2], output_shape[3], output_shape[1]) if output_shape[0] is None: output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:]) output_shape = array_ops.stack(list(output_shape)) padding = _preprocess_padding(padding) if tf_data_format == 'NHWC': strides = (1,) + strides + (1,) else: strides = (1, 1) + strides x = nn.conv2d_transpose( x, kernel, output_shape, strides, padding=padding, data_format=tf_data_format) if data_format == 'channels_first' and tf_data_format == 'NHWC': x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW return x def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): """2D convolution with separable filters. 
Arguments: x: input tensor depthwise_kernel: convolution kernel for the depthwise convolution. pointwise_kernel: kernel for the 1x1 convolution. strides: strides tuple (length 2). padding: string, `"same"` or `"valid"`. data_format: string, `"channels_last"` or `"channels_first"`. dilation_rate: tuple of integers, dilation rates for the separable convolution. Returns: Output tensor. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. """ if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x, tf_data_format = _preprocess_conv2d_input(x, data_format) padding = _preprocess_padding(padding) if tf_data_format == 'NHWC': strides = (1,) + strides + (1,) else: strides = (1, 1) + strides x = nn.separable_conv2d( x, depthwise_kernel, pointwise_kernel, strides=strides, padding=padding, rate=dilation_rate, data_format=tf_data_format) if data_format == 'channels_first' and tf_data_format == 'NHWC': x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW return x def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): """2D convolution with separable filters. Arguments: x: input tensor depthwise_kernel: convolution kernel for the depthwise convolution. strides: strides tuple (length 2). padding: string, `"same"` or `"valid"`. data_format: string, `"channels_last"` or `"channels_first"`. dilation_rate: tuple of integers, dilation rates for the separable convolution. Returns: Output tensor. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. """ if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x, tf_data_format = _preprocess_conv2d_input(x, data_format) padding = _preprocess_padding(padding) if tf_data_format == 'NHWC': strides = (1,) + strides + (1,) else: strides = (1, 1) + strides x = nn.depthwise_conv2d( x, depthwise_kernel, strides=strides, padding=padding, rate=dilation_rate, data_format=tf_data_format) if data_format == 'channels_first' and tf_data_format == 'NHWC': x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW return x def conv3d(x, kernel, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1)): """3D convolution. Arguments: x: Tensor or variable. kernel: kernel tensor. strides: strides tuple. padding: string, `"same"` or `"valid"`. data_format: string, `"channels_last"` or `"channels_first"`. Whether to use Theano or TensorFlow/CNTK data format for inputs/kernels/outputs. dilation_rate: tuple of 3 integers. Returns: A tensor, result of 3D convolution. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. """ if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x, tf_data_format = _preprocess_conv3d_input(x, data_format) padding = _preprocess_padding(padding) x = nn.convolution( input=x, filter=kernel, dilation_rate=dilation_rate, strides=strides, padding=padding, data_format=tf_data_format) if data_format == 'channels_first' and tf_data_format == 'NDHWC': x = array_ops.transpose(x, (0, 4, 1, 2, 3)) return x def conv3d_transpose(x, kernel, output_shape, strides=(1, 1, 1), padding='valid', data_format=None): """3D deconvolution (i.e. 
transposed convolution). Arguments: x: input tensor. kernel: kernel tensor. output_shape: 1D int tensor for the output shape. strides: strides tuple. padding: string, "same" or "valid". data_format: string, `"channels_last"` or `"channels_first"`. Whether to use Theano or TensorFlow/CNTK data format for inputs/kernels/outputs. Returns: A tensor, result of transposed 3D convolution. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. """ if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) if isinstance(output_shape, (tuple, list)): output_shape = array_ops.stack(output_shape) x, tf_data_format = _preprocess_conv3d_input(x, data_format) if data_format == 'channels_first' and tf_data_format == 'NDHWC': output_shape = (output_shape[0], output_shape[2], output_shape[3], output_shape[4], output_shape[1]) if output_shape[0] is None: output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:]) output_shape = array_ops.stack(list(output_shape)) padding = _preprocess_padding(padding) if tf_data_format == 'NDHWC': strides = (1,) + strides + (1,) else: strides = (1, 1) + strides x = nn.conv3d_transpose( x, kernel, output_shape, strides, padding=padding, data_format=tf_data_format) if data_format == 'channels_first' and tf_data_format == 'NDHWC': x = array_ops.transpose(x, (0, 4, 1, 2, 3)) return x def pool2d(x, pool_size, strides=(1, 1), padding='valid', data_format=None, pool_mode='max'): """2D Pooling. Arguments: x: Tensor or variable. pool_size: tuple of 2 integers. strides: tuple of 2 integers. padding: string, `"same"` or `"valid"`. data_format: string, `"channels_last"` or `"channels_first"`. pool_mode: string, `"max"` or `"avg"`. Returns: A tensor, result of 2D pooling. Raises: ValueError: if `data_format` is neither `"channels_last"` or `"channels_first"`. ValueError: if `pool_mode` is neither `"max"` or `"avg"`. """ if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x, tf_data_format = _preprocess_conv2d_input(x, data_format) padding = _preprocess_padding(padding) if tf_data_format == 'NHWC': strides = (1,) + strides + (1,) pool_size = (1,) + pool_size + (1,) else: strides = (1, 1) + strides pool_size = (1, 1) + pool_size if pool_mode == 'max': x = nn.max_pool( x, pool_size, strides, padding=padding, data_format=tf_data_format) elif pool_mode == 'avg': x = nn.avg_pool( x, pool_size, strides, padding=padding, data_format=tf_data_format) else: raise ValueError('Invalid pooling mode:', pool_mode) if data_format == 'channels_first' and tf_data_format == 'NHWC': x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW return x def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid', data_format=None, pool_mode='max'): """3D Pooling. Arguments: x: Tensor or variable. pool_size: tuple of 3 integers. strides: tuple of 3 integers. padding: string, `"same"` or `"valid"`. data_format: string, `"channels_last"` or `"channels_first"`. pool_mode: string, `"max"` or `"avg"`. Returns: A tensor, result of 3D pooling. Raises: ValueError: if `data_format` is neither `"channels_last"` or `"channels_first"`. ValueError: if `pool_mode` is neither `"max"` or `"avg"`. 
""" if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x, tf_data_format = _preprocess_conv3d_input(x, data_format) padding = _preprocess_padding(padding) if tf_data_format == 'NDHWC': strides = (1,) + strides + (1,) pool_size = (1,) + pool_size + (1,) else: strides = (1, 1) + strides pool_size = (1, 1) + pool_size if pool_mode == 'max': x = nn.max_pool3d( x, pool_size, strides, padding=padding, data_format=tf_data_format) elif pool_mode == 'avg': x = nn.avg_pool3d( x, pool_size, strides, padding=padding, data_format=tf_data_format) else: raise ValueError('Invalid pooling mode:', pool_mode) if data_format == 'channels_first' and tf_data_format == 'NDHWC': x = array_ops.transpose(x, (0, 4, 1, 2, 3)) return x def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None): """Apply 1D conv with un-shared weights. Arguments: inputs: 3D tensor with shape: (batch_size, steps, input_dim) kernel: the unshared weight for convolution, with shape (output_length, feature_dim, filters) kernel_size: a tuple of a single integer, specifying the length of the 1D convolution window strides: a tuple of a single integer, specifying the stride length of the convolution data_format: the data format, channels_first or channels_last Returns: the tensor after 1d conv with un-shared weights, with shape (batch_size, output_length, filters) Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. """ if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) stride = strides[0] kernel_shape = int_shape(kernel) output_length = kernel_shape[0] feature_dim = kernel_shape[1] xs = [] for i in range(output_length): slice_length = slice(i * stride, i * stride + kernel_size[0]) xs.append(reshape(inputs[:, slice_length, :], (1, -1, feature_dim))) x_aggregate = concatenate(xs, axis=0) # Shape: `(output_length, batch_size, filters)`. output = batch_dot(x_aggregate, kernel) return permute_dimensions(output, (1, 0, 2)) def local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None): """Apply 2D conv with un-shared weights. Arguments: inputs: 4D tensor with shape: (batch_size, filters, new_rows, new_cols) if data_format='channels_first' or 4D tensor with shape: (batch_size, new_rows, new_cols, filters) if data_format='channels_last'. kernel: the unshared weight for convolution, with shape (output_items, feature_dim, filters) kernel_size: a tuple of 2 integers, specifying the width and height of the 2D convolution window. strides: a tuple of 2 integers, specifying the strides of the convolution along the width and height. output_shape: a tuple with (output_row, output_col) data_format: the data format, channels_first or channels_last Returns: A 4d tensor with shape: (batch_size, filters, new_rows, new_cols) if data_format='channels_first' or 4D tensor with shape: (batch_size, new_rows, new_cols, filters) if data_format='channels_last'. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. 
""" if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) stride_row, stride_col = strides output_row, output_col = output_shape kernel_shape = int_shape(kernel) feature_dim = kernel_shape[1] filters = kernel_shape[2] xs = [] for i in range(output_row): for j in range(output_col): slice_row = slice(i * stride_row, i * stride_row + kernel_size[0]) slice_col = slice(j * stride_col, j * stride_col + kernel_size[1]) if data_format == 'channels_first': xs.append( reshape(inputs[:, :, slice_row, slice_col], (1, -1, feature_dim))) else: xs.append( reshape(inputs[:, slice_row, slice_col, :], (1, -1, feature_dim))) x_aggregate = concatenate(xs, axis=0) output = batch_dot(x_aggregate, kernel) output = reshape(output, (output_row, output_col, -1, filters)) if data_format == 'channels_first': output = permute_dimensions(output, (2, 3, 0, 1)) else: output = permute_dimensions(output, (2, 0, 1, 3)) return output def bias_add(x, bias, data_format=None): """Adds a bias vector to a tensor. Arguments: x: Tensor or variable. bias: Bias tensor to add. data_format: string, `"channels_last"` or `"channels_first"`. Returns: Output tensor. Raises: ValueError: In one of the two cases below: 1. invalid `data_format` argument. 2. invalid bias shape. the bias should be either a vector or a tensor with ndim(x) - 1 dimension """ if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) bias_shape = int_shape(bias) if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1: raise ValueError( 'Unexpected bias dimensions %d, expect to be 1 or %d dimensions' % (len(bias_shape), ndim(x))) if ndim(x) == 5: if data_format == 'channels_first': if len(bias_shape) == 1: x += reshape(bias, (1, bias_shape[0], 1, 1, 1)) else: x += reshape(bias, (1, bias_shape[3]) + bias_shape[:3]) elif data_format == 'channels_last': if len(bias_shape) == 1: x += reshape(bias, (1, 1, 1, bias_shape[0])) else: x += reshape(bias, (1,) + bias_shape) elif ndim(x) == 4: if data_format == 'channels_first': if len(bias_shape) == 1: x += reshape(bias, (1, bias_shape[0], 1, 1)) else: x += reshape(bias, (1, bias_shape[2]) + bias_shape[:2]) elif data_format == 'channels_last': if len(bias_shape) == 1: x = nn.bias_add(x, bias, data_format='NHWC') else: x += reshape(bias, (1,) + bias_shape) elif ndim(x) == 3: if data_format == 'channels_first': if len(bias_shape) == 1: x += reshape(bias, (1, bias_shape[0], 1)) else: x += reshape(bias, (1, bias_shape[1], bias_shape[0])) elif data_format == 'channels_last': if len(bias_shape) == 1: x += reshape(bias, (1, 1, bias_shape[0])) else: x += reshape(bias, (1,) + bias_shape) else: x = nn.bias_add(x, bias) return x # RANDOMNESS def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): """Returns a tensor with normal distribution of values. Arguments: shape: A tuple of integers, the shape of tensor to create. mean: A float, mean of the normal distribution to draw samples. stddev: A float, standard deviation of the normal distribution to draw samples. dtype: String, dtype of returned tensor. seed: Integer, random seed. Returns: A tensor. 
""" if dtype is None: dtype = floatx() if seed is None: seed = np.random.randint(10e6) return random_ops.random_normal( shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed) def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): """Returns a tensor with uniform distribution of values. Arguments: shape: A tuple of integers, the shape of tensor to create. minval: A float, lower boundary of the uniform distribution to draw samples. maxval: A float, upper boundary of the uniform distribution to draw samples. dtype: String, dtype of returned tensor. seed: Integer, random seed. Returns: A tensor. """ if dtype is None: dtype = floatx() if seed is None: seed = np.random.randint(10e6) return random_ops.random_uniform( shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed) def random_binomial(shape, p=0.0, dtype=None, seed=None): """Returns a tensor with random binomial distribution of values. Arguments: shape: A tuple of integers, the shape of tensor to create. p: A float, `0. <= p <= 1`, probability of binomial distribution. dtype: String, dtype of returned tensor. seed: Integer, random seed. Returns: A tensor. """ if dtype is None: dtype = floatx() if seed is None: seed = np.random.randint(10e6) return array_ops.where( random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p, array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype)) def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): """Returns a tensor with truncated random normal distribution of values. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than two standard deviations from the mean are dropped and re-picked. Arguments: shape: A tuple of integers, the shape of tensor to create. mean: Mean of the values. stddev: Standard deviation of the values. dtype: String, dtype of returned tensor. seed: Integer, random seed. Returns: A tensor. """ if dtype is None: dtype = floatx() if seed is None: seed = np.random.randint(10e6) return random_ops.truncated_normal( shape, mean, stddev, dtype=dtype, seed=seed) # CTC # TensorFlow has a native implementation, but it uses sparse tensors # and therefore requires a wrapper for Keras. The functions below convert # dense to sparse tensors and also wraps up the beam search code that is # in TensorFlow's CTC implementation def ctc_label_dense_to_sparse(labels, label_lengths): """Converts CTC labels from dense to sparse. Arguments: labels: dense CTC labels. label_lengths: length of the labels. Returns: A sparse tensor representation of the labels. 
""" label_shape = array_ops.shape(labels) num_batches_tns = array_ops.stack([label_shape[0]]) max_num_labels_tns = array_ops.stack([label_shape[1]]) def range_less_than(_, current_input): return array_ops.expand_dims( math_ops.range(label_shape[1]), 0) < array_ops.fill( max_num_labels_tns, current_input) init = math_ops.cast( array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool) dense_mask = functional_ops.scan( range_less_than, label_lengths, initializer=init, parallel_iterations=1) dense_mask = dense_mask[:, 0, :] label_array = array_ops.reshape( array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns), label_shape) label_ind = array_ops.boolean_mask(label_array, dense_mask) batch_array = array_ops.transpose( array_ops.reshape( array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns), reverse(label_shape, 0))) batch_ind = array_ops.boolean_mask(batch_array, dense_mask) indices = array_ops.transpose( array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1])) vals_sparse = array_ops.gather_nd(labels, indices) return sparse_tensor.SparseTensor( math_ops.to_int64(indices), vals_sparse, math_ops.to_int64(label_shape)) def ctc_batch_cost(y_true, y_pred, input_length, label_length): """Runs CTC loss algorithm on each batch element. Arguments: y_true: tensor `(samples, max_string_length)` containing the truth labels. y_pred: tensor `(samples, time_steps, num_categories)` containing the prediction, or output of the softmax. input_length: tensor `(samples, 1)` containing the sequence length for each batch item in `y_pred`. label_length: tensor `(samples, 1)` containing the sequence length for each batch item in `y_true`. Returns: Tensor with shape (samples,1) containing the CTC loss of each element. """ label_length = math_ops.to_int32(array_ops.squeeze(label_length)) input_length = math_ops.to_int32(array_ops.squeeze(input_length)) sparse_labels = math_ops.to_int32( ctc_label_dense_to_sparse(y_true, label_length)) y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + 1e-8) return array_ops.expand_dims( ctc.ctc_loss( inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1) def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1): """Decodes the output of a softmax. Can use either greedy search (also known as best path) or a constrained dictionary search. Arguments: y_pred: tensor `(samples, time_steps, num_categories)` containing the prediction, or output of the softmax. input_length: tensor `(samples, )` containing the sequence length for each batch item in `y_pred`. greedy: perform much faster best-path search if `true`. This does not use a dictionary. beam_width: if `greedy` is `false`: a beam search decoder will be used with a beam of this width. top_paths: if `greedy` is `false`, how many of the most probable paths will be returned. Returns: Tuple: List: if `greedy` is `true`, returns a list of one element that contains the decoded sequence. If `false`, returns the `top_paths` most probable decoded sequences. Important: blank labels are returned as `-1`. Tensor `(top_paths, )` that contains the log probability of each decoded sequence. 
""" y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + 1e-8) input_length = math_ops.to_int32(input_length) if greedy: (decoded, log_prob) = ctc.ctc_greedy_decoder( inputs=y_pred, sequence_length=input_length) else: (decoded, log_prob) = ctc.ctc_beam_search_decoder( inputs=y_pred, sequence_length=input_length, beam_width=beam_width, top_paths=top_paths) decoded_dense = [ sparse_ops.sparse_to_dense( st.indices, st.dense_shape, st.values, default_value=-1) for st in decoded ] return (decoded_dense, log_prob) # HIGH ORDER FUNCTIONS def map_fn(fn, elems, name=None, dtype=None): """Map the function fn over the elements elems and return the outputs. Arguments: fn: Callable that will be called upon each element in elems elems: tensor name: A string name for the map node in the graph dtype: Output data type. Returns: Tensor with dtype `dtype`. """ return functional_ops.map_fn(fn, elems, name=name, dtype=dtype) def foldl(fn, elems, initializer=None, name=None): """Reduce elems using fn to combine them from left to right. Arguments: fn: Callable that will be called upon each element in elems and an accumulator, for instance `lambda acc, x: acc + x` elems: tensor initializer: The first value used (`elems[0]` in case of None) name: A string name for the foldl node in the graph Returns: Tensor with same type and shape as `initializer`. """ return functional_ops.foldl(fn, elems, initializer=initializer, name=name) def foldr(fn, elems, initializer=None, name=None): """Reduce elems using fn to combine them from right to left. Arguments: fn: Callable that will be called upon each element in elems and an accumulator, for instance `lambda acc, x: acc + x` elems: tensor initializer: The first value used (`elems[-1]` in case of None) name: A string name for the foldr node in the graph Returns: Same type and shape as initializer """ return functional_ops.foldr(fn, elems, initializer=initializer, name=name) # Load Keras default configuration from config file if present. _keras_base_dir = os.path.expanduser('~') _keras_dir = os.path.join(_keras_base_dir, '.keras') _config_path = os.path.expanduser(os.path.join(_keras_dir, 'keras.json')) if os.path.exists(_config_path): try: _config = json.load(open(_config_path)) except ValueError: _config = {} _floatx = _config.get('floatx', floatx()) assert _floatx in {'float16', 'float32', 'float64'} _epsilon = _config.get('epsilon', epsilon()) assert isinstance(_epsilon, float) _image_data_format = _config.get('image_data_format', image_data_format()) assert _image_data_format in {'channels_last', 'channels_first'} set_floatx(_floatx) set_epsilon(_epsilon) set_image_data_format(_image_data_format) # Save config file. if not os.path.exists(_keras_dir): try: os.makedirs(_keras_dir) except OSError: # Except permission denied and potential race conditions # in multi-threaded environments. pass if not os.path.exists(_config_path): _config = { 'floatx': floatx(), 'epsilon': epsilon(), 'backend': 'tensorflow', 'image_data_format': image_data_format() } try: with open(_config_path, 'w') as f: f.write(json.dumps(_config, indent=4)) except IOError: # Except permission denied. pass
[]
[]
[ "OMP_NUM_THREADS" ]
[]
["OMP_NUM_THREADS"]
python
1
0
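The Keras backend source above implements random_binomial by comparing uniform noise against p and selecting ones or zeros. A minimal NumPy sketch of that trick, purely illustrative and not the backend implementation:

import numpy as np

def random_binomial(shape, p=0.0, seed=None):
    # Same idea as the backend code: uniform noise <= p becomes 1.0, else 0.0.
    rng = np.random.RandomState(seed)
    return np.where(rng.uniform(size=shape) <= p, 1.0, 0.0)

print(random_binomial((2, 3), p=0.5, seed=0))

The backend version does the same with TensorFlow ops so that the sampling stays inside the computation graph.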
LokahiProject/LokahiProject/wsgi.py
""" WSGI config for LokahiProject project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "LokahiProject.settings") application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
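The wsgi module above relies on os.environ.setdefault, which only writes the value when the variable is not already present, so an externally exported DJANGO_SETTINGS_MODULE always wins. A two-line sketch of that behaviour:

import os

# setdefault leaves an existing value untouched; otherwise it installs the default.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "LokahiProject.settings")
print(os.environ["DJANGO_SETTINGS_MODULE"])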
soracom/generated/cmd/devices_get_data.go
// Code generated by soracom-cli generate-cmd. DO NOT EDIT. package cmd import ( "net/url" "os" "github.com/spf13/cobra" ) // DevicesGetDataCmdDeviceId holds value of 'device_id' option var DevicesGetDataCmdDeviceId string // DevicesGetDataCmdLastEvaluatedKey holds value of 'last_evaluated_key' option var DevicesGetDataCmdLastEvaluatedKey string // DevicesGetDataCmdSort holds value of 'sort' option var DevicesGetDataCmdSort string // DevicesGetDataCmdFrom holds value of 'from' option var DevicesGetDataCmdFrom int64 // DevicesGetDataCmdLimit holds value of 'limit' option var DevicesGetDataCmdLimit int64 // DevicesGetDataCmdTo holds value of 'to' option var DevicesGetDataCmdTo int64 // DevicesGetDataCmdPaginate indicates to do pagination or not var DevicesGetDataCmdPaginate bool func init() { DevicesGetDataCmd.Flags().StringVar(&DevicesGetDataCmdDeviceId, "device-id", "", TRAPI("Device ID of the target subscriber that generated data entries.")) DevicesGetDataCmd.Flags().StringVar(&DevicesGetDataCmdLastEvaluatedKey, "last-evaluated-key", "", TRAPI("The value of `time` in the last log entry retrieved in the previous page. By specifying this parameter, you can continue to retrieve the list from the next page onward.")) DevicesGetDataCmd.Flags().StringVar(&DevicesGetDataCmdSort, "sort", "desc", TRAPI("Sort order of the data entries. Either descending (latest data entry first) or ascending (oldest data entry first).")) DevicesGetDataCmd.Flags().Int64Var(&DevicesGetDataCmdFrom, "from", 0, TRAPI("Start time for the data entries search range (unixtime in milliseconds).")) DevicesGetDataCmd.Flags().Int64Var(&DevicesGetDataCmdLimit, "limit", 0, TRAPI("Maximum number of data entries to retrieve.")) DevicesGetDataCmd.Flags().Int64Var(&DevicesGetDataCmdTo, "to", 0, TRAPI("End time for the data entries search range (unixtime in milliseconds).")) DevicesGetDataCmd.Flags().BoolVar(&DevicesGetDataCmdPaginate, "fetch-all", false, TRCLI("cli.common_params.paginate.short_help")) DevicesCmd.AddCommand(DevicesGetDataCmd) } // DevicesGetDataCmd defines 'get-data' subcommand var DevicesGetDataCmd = &cobra.Command{ Use: "get-data", Short: TRAPI("/devices/{device_id}/data:get:summary"), Long: TRAPI(`/devices/{device_id}/data:get:description`), RunE: func(cmd *cobra.Command, args []string) error { opt := &apiClientOptions{ BasePath: "/v1", Language: getSelectedLanguage(), } ac := newAPIClient(opt) if v := os.Getenv("SORACOM_VERBOSE"); v != "" { ac.SetVerbose(true) } err := authHelper(ac, cmd, args) if err != nil { cmd.SilenceUsage = true return err } param, err := collectDevicesGetDataCmdParams(ac) if err != nil { return err } body, err := ac.callAPI(param) if err != nil { cmd.SilenceUsage = true return err } if body == "" { return nil } if rawOutput { _, err = os.Stdout.Write([]byte(body)) } else { return prettyPrintStringAsJSON(body) } return err }, } func collectDevicesGetDataCmdParams(ac *apiClient) (*apiParams, error) { var parsedBody interface{} var err error err = checkIfRequiredStringParameterIsSupplied("device_id", "device-id", "path", parsedBody, DevicesGetDataCmdDeviceId) if err != nil { return nil, err } return &apiParams{ method: "GET", path: buildPathForDevicesGetDataCmd("/devices/{device_id}/data"), query: buildQueryForDevicesGetDataCmd(), doPagination: DevicesGetDataCmdPaginate, paginationKeyHeaderInResponse: "x-soracom-next-key", paginationRequestParameterInQuery: "last_evaluated_key", noRetryOnError: noRetryOnError, }, nil } func buildPathForDevicesGetDataCmd(path string) string { escapedDeviceId := 
url.PathEscape(DevicesGetDataCmdDeviceId) path = strReplace(path, "{"+"device_id"+"}", escapedDeviceId, -1) return path } func buildQueryForDevicesGetDataCmd() url.Values { result := url.Values{} if DevicesGetDataCmdLastEvaluatedKey != "" { result.Add("last_evaluated_key", DevicesGetDataCmdLastEvaluatedKey) } if DevicesGetDataCmdSort != "desc" { result.Add("sort", DevicesGetDataCmdSort) } if DevicesGetDataCmdFrom != 0 { result.Add("from", sprintf("%d", DevicesGetDataCmdFrom)) } if DevicesGetDataCmdLimit != 0 { result.Add("limit", sprintf("%d", DevicesGetDataCmdLimit)) } if DevicesGetDataCmdTo != 0 { result.Add("to", sprintf("%d", DevicesGetDataCmdTo)) } return result }
[ "\"SORACOM_VERBOSE\"" ]
[]
[ "SORACOM_VERBOSE" ]
[]
["SORACOM_VERBOSE"]
go
1
0
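The record above lists SORACOM_VERBOSE because the Go source calls os.Getenv with that string literal. As a loose, hypothetical sketch of how such constant names could be pulled out of the content field, a regex pass is shown below; the actual pipeline that fills these columns is not part of this document.

import re

# Hypothetical extraction of constant os.Getenv arguments from Go source text.
go_source = 'if v := os.Getenv("SORACOM_VERBOSE"); v != "" { ac.SetVerbose(true) }'
const_args = re.findall(r'os\.Getenv\("([^"]+)"\)', go_source)
print(const_args)  # ['SORACOM_VERBOSE']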
openmcp-metric-collector/master/src/main/main.go
package main import ( "log" "openmcp/openmcp/omcplog" "openmcp/openmcp/openmcp-metric-collector/master/src/metricCollector" "openmcp/openmcp/util/clusterManager" "openmcp/openmcp/util/controller/logLevel" "openmcp/openmcp/util/controller/reshape" "os" "runtime" "admiralty.io/multicluster-controller/pkg/cluster" "admiralty.io/multicluster-controller/pkg/manager" ) const ( GRPC_PORT = "2051" ) func main() { logLevel.KetiLogInit() cm := clusterManager.NewClusterManager() go MasterMetricCollector(cm) //cm := clusterManager.NewClusterManager() host_ctx := "openmcp" namespace := "openmcp" host_cfg := cm.Host_config //live := cluster.New(host_ctx, host_cfg, cluster.Options{CacheOptions: cluster.CacheOptions{Namespace: namespace}}) live := cluster.New(host_ctx, host_cfg, cluster.Options{}) ghosts := []*cluster.Cluster{} for _, ghost_cluster := range cm.Cluster_list.Items { ghost_ctx := ghost_cluster.Name ghost_cfg := cm.Cluster_configs[ghost_ctx] //ghost := cluster.New(ghost_ctx, ghost_cfg, cluster.Options{CacheOptions: cluster.CacheOptions{Namespace: namespace}}) ghost := cluster.New(ghost_ctx, ghost_cfg, cluster.Options{}) ghosts = append(ghosts, ghost) } reshape_cont, err_reshape := reshape.NewController(live, ghosts, namespace, cm) if err_reshape != nil { omcplog.V(2).Info("err_reshape : ", err_reshape) return } loglevel_cont, err_log := logLevel.NewController(live, ghosts, namespace) if err_log != nil { omcplog.V(2).Info("err_log : ", err_log) return } m := manager.New() m.AddController(reshape_cont) m.AddController(loglevel_cont) stop := reshape.SetupSignalHandler() if err := m.Start(stop); err != nil { log.Fatal(err) } } func MasterMetricCollector(cm *clusterManager.ClusterManager) { omcplog.V(4).Info("MasterMetricCollector Called") runtime.GOMAXPROCS(runtime.NumCPU()) INFLUX_IP := os.Getenv("INFLUX_IP") INFLUX_PORT := os.Getenv("INFLUX_PORT") INFLUX_USERNAME := os.Getenv("INFLUX_USERNAME") INFLUX_PASSWORD := os.Getenv("INFLUX_PASSWORD") omcplog.V(5).Info("INFLUX_IP: ", INFLUX_IP) omcplog.V(5).Info("INFLUX_PORT: ", INFLUX_PORT) omcplog.V(5).Info("INFLUX_USERNAME: ", INFLUX_USERNAME) omcplog.V(5).Info("INFLUX_PASSWORD: ", INFLUX_PASSWORD) mc := metricCollector.NewMetricCollector(cm, INFLUX_IP, INFLUX_PORT, INFLUX_USERNAME, INFLUX_PASSWORD) omcplog.V(2).Info("Created NewMetricCollector Structure") mc.Influx.CreateDatabase() //mc.Influx.CreateMeasurements() mc.StartGRPC(GRPC_PORT) //mc := &metricCollector.MetricCollector{} //mc.StartGRPC(GRPC_PORT) }
[ "\"INFLUX_IP\"", "\"INFLUX_PORT\"", "\"INFLUX_USERNAME\"", "\"INFLUX_PASSWORD\"" ]
[]
[ "INFLUX_PORT", "INFLUX_USERNAME", "INFLUX_IP", "INFLUX_PASSWORD" ]
[]
["INFLUX_PORT", "INFLUX_USERNAME", "INFLUX_IP", "INFLUX_PASSWORD"]
go
4
0
inventory/app.py
import flask_admin, flask_wtf, os from flask import Flask, url_for, redirect, render_template, request, abort from flask_admin import helpers as admin_helpers from flask_security import Security, SQLAlchemyUserDatastore, current_user from flask_mail import Mail from inventory.models import db, User, Role, Ip, Inventory, Location, Networkdevice, Otherdevice, Networkdevicetype, Otherdevicetype from inventory.views.protected import ProtectedModelView from inventory.views.ip import IpAddressesView from inventory.views.user import UserAdminView from inventory.views.register import ExtendedRegisterForm from inventory.views.networkdevice import InventoryNetworkDevicesView from inventory.views.otherdevice import InventoryOtherDevicesView from flask_apscheduler import APScheduler #from inventory.jobs.job1 import job1 # Create Flask application app = Flask(__name__) # Load configuration stuff app.config.from_pyfile('config.py') # Connect app with db db.init_app(app) mail = Mail(app) flask_wtf.CSRFProtect(app) # Setup Flask-Security user_datastore = SQLAlchemyUserDatastore(db, User, Role) security = Security(app, user_datastore, register_form = ExtendedRegisterForm) # Setup IP-Monitoring Job from inventory.jobs.monitoring import ping_job # important: Not delete, needed to import the in the config file specified job if not app.debug or os.environ.get('WERKZEUG_RUN_MAIN') == 'true': # Prevent second execution of job in debug-mode scheduler = APScheduler() scheduler.init_app(app) scheduler.start() # Flask views @app.route('/') def index(): return render_template('index.html') # Create admin admin = flask_admin.Admin( app, 'VKM: IT-Inventar', base_template='mybase.html', template_mode='bootstrap3' ) # Add model views admin.add_view(UserAdminView(User, db.session, name='Benutzer/Verantwortliche', category='Nutzerverwaltung')) admin.add_view(ProtectedModelView(Role, db.session, name='Berechtigungen', category='Nutzerverwaltung')) admin.add_view(IpAddressesView(Ip, db.session, category='Ip-Addressverwaltung', name='Alle IPs', endpoint='ip_all')) admin.add_view(IpAddressesView(Ip, db.session, category='Ip-Addressverwaltung', name='Freie IPs', endpoint='ip_free')) admin.add_view(IpAddressesView(Ip, db.session, category='Ip-Addressverwaltung', name='Belegte IPs', endpoint='ip_notfree')) admin.add_view(IpAddressesView(Ip, db.session, category='Lizenzen', name='Alle Softwarelizenzen', endpoint='todo3')) # TODO admin.add_view(InventoryNetworkDevicesView(Inventory, db.session, endpoint='inv_network_active', category='Inventar',name='Netzwerkfähige Geräte')) admin.add_view(InventoryOtherDevicesView(Inventory, db.session, endpoint='inv_other_active', category='Inventar',name='Andere inventarisierte Geräte')) #TODO admin.add_view(InventoryNetworkDevicesView(Inventory, db.session, endpoint='inv_network_inactive', category='Inventar',name='Netzwerkfähige Geräte (ausgemustert)')) admin.add_view(InventoryOtherDevicesView(Inventory, db.session, endpoint='inv_other_inactive', category='Inventar',name='Andere inventarisierte Geräte (ausgemustert)')) #TODO admin.add_view(ProtectedModelView(Inventory, db.session, category='Erweitert', name='Alle Inventarnummern (ohne zugeordnete Geräte)')) admin.add_view(ProtectedModelView(Networkdevice, db.session, category='Erweitert', name='Netzwerkfähige Geräte (ohne Inventarnummer)')) admin.add_view(ProtectedModelView(Otherdevice, db.session, category='Erweitert', name='Andere Geräte (ohne Inventarnummer)')) admin.add_view(ProtectedModelView(Networkdevicetype, db.session, 
category='Erweitert', name='Typen netzwerkfähiger Geräte')) admin.add_view(ProtectedModelView(Otherdevicetype, db.session, category='Erweitert', name='Typen anderer Geräte')) admin.add_view(ProtectedModelView(Location, db.session, category='Erweitert', name='Verfügbare Standorte')) admin.add_view(ProtectedModelView(Ip, db.session, category='Erweitert', name='Verfügbare IPs')) # define a context processor for merging flask-admin's template context into the # flask-security views. @security.context_processor def security_context_processor(): return dict( admin_base_template=admin.base_template, admin_view=admin.index_view, h=admin_helpers, get_url=url_for )
[]
[]
[ "WERKZEUG_RUN_MAIN" ]
[]
["WERKZEUG_RUN_MAIN"]
python
1
0
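inventory/app.py above starts its APScheduler job only when the app is not in debug mode or when WERKZEUG_RUN_MAIN equals "true", because Werkzeug's reloader executes the module twice in debug mode. A minimal sketch of that guard (the scheduler setup itself is omitted; `debug` stands in for app.debug):

import os

debug = True  # stand-in for app.debug

# Under the Werkzeug reloader the module runs twice; only the child process
# that actually serves requests has WERKZEUG_RUN_MAIN set to "true".
if not debug or os.environ.get("WERKZEUG_RUN_MAIN") == "true":
    print("start the scheduler exactly once")
else:
    print("reloader parent process: skip the scheduler")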
main.go
package main import ( "./net" httpProxy "./proxy/http" "flag" "fmt" log "github.com/sirupsen/logrus" "os" "os/exec" "syscall" ) func main() { log.SetLevel(log.DebugLevel) fh, err := os.OpenFile("watchmedo.log", os.O_WRONLY | os.O_APPEND | os.O_CREATE, 0644) orPanic(err, "write to watchmedo.log") log.SetOutput(fh) httpProxyAddress := flag.String("http-proxy-address", "127.0.0.1:0", "HTTP proxy listen address") listener, err := net.Listen("tcp",*httpProxyAddress) orPanic(err, "listen") log.Printf("listening on %s", listener.Addr().String()) prx := httpProxy.NewProxy() go func() { log.Fatalf("Failed to serve: %s\n", prx.Serve(listener)) }() cmd := []string{os.Getenv("SHELL")} if len(os.Args)>1 { cmd = os.Args[1:] } log.Infof("starting %v", cmd) proxyAddress := fmt.Sprintf("http://%s", listener.Addr().String()) proc, err := startCommand(cmd, []string{ "http_proxy="+proxyAddress, "https_proxy="+proxyAddress, "HTTP_PROXY="+proxyAddress, "HTTPS_PROXY="+proxyAddress, }) orPanic(err, "start command") log.Infof("Waiting for process to exit.") _, err = proc.Wait() if err != nil { if exitErr, ok := err.(*exec.ExitError); ok { if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { os.Exit(status.ExitStatus()) } } } orPanic(err, "wait") os.Exit(0) } func orPanic(err error, args... interface{}) { if err == nil { return } if len(args) == 0 { panic(err) } format := fmt.Sprintf("%s: %%s", args[0]) newArgs := append(args[1:], err) panic(fmt.Sprintf(format, newArgs...)) } func startCommand(cmd []string, env []string) (*os.Process, error) { cwd, err := os.Getwd() if err != nil { return nil, fmt.Errorf("get cwd: %s", err) } procAttr := &os.ProcAttr{ Files: []*os.File{ os.Stdin, os.Stdout, os.Stderr, }, Dir: cwd, Env: env, } return os.StartProcess(cmd[0], cmd, procAttr) }
[ "\"SHELL\"" ]
[]
[ "SHELL" ]
[]
["SHELL"]
go
1
0
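main.go above starts a local HTTP proxy and then launches the user's command with http_proxy/https_proxy (and the uppercase variants) pointing at it. A rough Python sketch of that environment-injection step; the address 127.0.0.1:8080 is a placeholder rather than the dynamically chosen listener port, and unlike the Go code the parent environment is inherited here:

import os
import subprocess

proxy = "http://127.0.0.1:8080"  # placeholder for the proxy listener address
env = dict(os.environ,
           http_proxy=proxy, https_proxy=proxy,
           HTTP_PROXY=proxy, HTTPS_PROXY=proxy)

# Fall back to the user's shell when no command is given, as the Go code does.
cmd = [os.environ.get("SHELL", "/bin/sh"), "-c", "echo $http_proxy"]
subprocess.run(cmd, env=env)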
ExpenseProject/manage.py
#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ExpenseProject.settings") try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
k3s_test.go
package dtest import ( "context" "fmt" "os" "runtime" "testing" "github.com/stretchr/testify/assert" "github.com/datawire/dlib/dexec" "github.com/datawire/dlib/dlog" ) // requireDocker calls t.SkipNow() if we're running in CI and Docker isn't available. func requireDocker(t *testing.T) { if os.Getenv("CI") == "" { // Always run when not in CI. return } docker, err := dexec.LookPath("docker") if docker == "" || err != nil { if runtime.GOOS == "linux" { t.Fatal("The CI setup is broken, it doesn't even have docker on Linux") } t.Log("Skipping because 'docker' is not installed") t.SkipNow() } if runtime.GOOS == "windows" { t.Log("Skipping because 'docker' is set to run Windows containers not Linux containers") t.SkipNow() } } func TestContainer(t *testing.T) { requireDocker(t) ctx := dlog.NewTestContext(t, false) WithMachineLock(ctx, func(ctx context.Context) { id := dockerUp(ctx, "dtest-test-tag", "nginx") running := dockerPs(ctx) assert.Contains(t, running, id) dockerKill(ctx, id) running = dockerPs(ctx) assert.NotContains(t, running, id) }) } func TestCluster(t *testing.T) { requireDocker(t) for minor := range k3sImages { ver := KubeVersion{minor} t.Run(fmt.Sprintf("1.%d", minor), func(t *testing.T) { ctx := dlog.NewTestContext(t, false) WithMachineLock(ctx, func(ctx context.Context) { defer func() { if r := recover(); r != nil { t.Fatal(r) } }() K3sDown(ctx) os.Setenv("DTEST_REGISTRY", DockerRegistry(ctx)) // Prevent extra calls to dtest.RegistryUp() which may panic defer func() { RegistryDown(ctx) }() kubeconfig := KubeVersionConfig(ctx, ver) defer func() { K3sDown(ctx) assert.NoError(t, os.Remove(kubeconfig)) }() }) }) } }
[ "\"CI\"" ]
[]
[ "CI" ]
[]
["CI"]
go
1
0
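requireDocker in k3s_test.go only skips when the CI variable is set and docker is unavailable; outside CI the tests always run. A comparable guard sketched with pytest and shutil as stand-ins for the Go helpers:

import os
import shutil
import pytest

def require_docker():
    # Outside CI, always run; inside CI, skip when docker is missing.
    if os.getenv("CI", "") == "":
        return
    if shutil.which("docker") is None:
        pytest.skip("'docker' is not installed")

def test_container():
    require_docker()
    assert True  # placeholder for the real container test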
pkg/cmd/opts/common.go
package opts import ( "fmt" "io" "os" "path/filepath" "strings" "time" "github.com/jenkins-x/jx/pkg/kube/cluster" gojenkins "github.com/jenkins-x/golang-jenkins" "github.com/jenkins-x/jx/pkg/cloud/gke" "github.com/jenkins-x/jx/pkg/prow" "github.com/jenkins-x/jx/pkg/versionstream" "github.com/spf13/viper" "github.com/jenkins-x/jx/pkg/secreturl" "github.com/spf13/pflag" "github.com/heptio/sonobuoy/pkg/client" "github.com/jenkins-x/jx/pkg/cmd/clients" "github.com/jenkins-x/jx/pkg/io/secrets" "github.com/jenkins-x/jx/pkg/vault" "github.com/jenkins-x/jx/pkg/kube/resources" "github.com/jenkins-x/jx/pkg/kube/services" "github.com/pkg/errors" vaultoperatorclient "github.com/banzaicloud/bank-vaults/operator/pkg/client/clientset/versioned" jenkinsv1 "github.com/jenkins-x/jx/pkg/apis/jenkins.io/v1" "github.com/jenkins-x/jx/pkg/auth" "github.com/jenkins-x/jx/pkg/client/clientset/versioned" "github.com/jenkins-x/jx/pkg/gits" "github.com/jenkins-x/jx/pkg/helm" "github.com/jenkins-x/jx/pkg/kube" "github.com/jenkins-x/jx/pkg/log" "github.com/jenkins-x/jx/pkg/table" "github.com/jenkins-x/jx/pkg/util" certmngclient "github.com/jetstack/cert-manager/pkg/client/clientset/versioned" buildclient "github.com/knative/build/pkg/client/clientset/versioned" istioclient "github.com/knative/pkg/client/clientset/versioned" kserve "github.com/knative/serving/pkg/client/clientset/versioned" "github.com/spf13/cobra" tektonclient "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" "gopkg.in/AlecAivazis/survey.v1" "gopkg.in/AlecAivazis/survey.v1/terminal" gitcfg "gopkg.in/src-d/go-git.v4/config" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/client-go/kubernetes" ) // LogLevel represents the logging level when reporting feedback type LogLevel string const ( OptionAlias = "alias" OptionApplication = "app" OptionBatchMode = "batch-mode" OptionClusterName = "cluster-name" OptionEnvironment = "env" OptionInstallDeps = "install-dependencies" OptionLabel = "label" OptionName = "name" OptionNamespace = "namespace" OptionNoBrew = "no-brew" OptionRelease = "release" OptionServerName = "name" OptionOutputDir = "output-dir" OptionServerURL = "url" OptionSkipAuthSecMerge = "skip-auth-secrets-merge" OptionTimeout = "timeout" OptionVerbose = "verbose" BranchPatternCommandName = "branchpattern" QuickStartLocationCommandName = "quickstartlocation" // LogInfo info level logging LogInfo LogLevel = "INFO" // LogWarning warning level logging LogWarning LogLevel = "WARN" // LogError error level logging LogError LogLevel = "ERROR" ) var ( BranchPatternCommandAliases = []string{ "branch pattern", } QuickStartLocationCommandAliases = []string{ QuickStartLocationCommandName + "s", "quickstartloc", "qsloc", } ) // ModifyDevEnvironmentFn a callback to create/update the development Environment type ModifyDevEnvironmentFn func(callback func(env *jenkinsv1.Environment) error) error // ModifyEnvironmentFn a callback to create/update an Environment type ModifyEnvironmentFn func(name string, callback func(env *jenkinsv1.Environment) error) error // CommonOptions contains common options and helper methods type CommonOptions struct { prow.Prow AdvancedMode bool Args []string BatchMode bool Cmd *cobra.Command ConfigFile string Domain string Err io.Writer ExternalJenkinsBaseURL string In terminal.FileReader InstallDependencies bool ModifyDevEnvironmentFn ModifyDevEnvironmentFn ModifyEnvironmentFn ModifyEnvironmentFn NameServers []string NoBrew bool RemoteCluster bool Out terminal.FileWriter 
ServiceAccount string SkipAuthSecretsMerge bool Username string Verbose bool NotifyCallback func(LogLevel, string) apiExtensionsClient apiextensionsclientset.Interface certManagerClient certmngclient.Interface complianceClient *client.SonobuoyClient currentNamespace string devNamespace string environmentsDir string factory clients.Factory fakeGitProvider *gits.FakeProvider git gits.Gitter helm helm.Helmer jenkinsClient gojenkins.JenkinsClient jxClient versioned.Interface gcloudClient gke.GClouder knbClient buildclient.Interface kserveClient kserve.Interface kubeClient kubernetes.Interface kuber kube.Kuber resourcesInstaller resources.Installer systemVaultClient vault.Client tektonClient tektonclient.Interface vaultClient vault.Client secretURLClient secreturl.Client vaultOperatorClient vaultoperatorclient.Interface versionResolver *versionstream.VersionResolver } type ServerFlags struct { ServerName string ServerURL string } // IsEmpty returns true if the server flags and server URL are tempry func (f *ServerFlags) IsEmpty() bool { return f.ServerName == "" && f.ServerURL == "" } // GetFactory lazily creates a Factory if its not already created func (o *CommonOptions) GetFactory() clients.Factory { if o.factory == nil { o.factory = clients.NewFactory() } return o.factory } // SetFactory sets the factory to use func (o *CommonOptions) SetFactory(f clients.Factory) { o.factory = f } // CreateTable creates a new Table func (o *CommonOptions) CreateTable() table.Table { return o.factory.CreateTable(o.Out) } // NotifyProgress by default logs info to the console but a custom callback can be added to send feedback to, say, a web UI func (o *CommonOptions) NotifyProgress(level LogLevel, format string, args ...interface{}) { if o.NotifyCallback != nil { text := fmt.Sprintf(format, args...) o.NotifyCallback(level, text) return } switch level { case LogInfo: log.Logger().Infof(format, args...) case LogWarning: log.Logger().Warnf(format, args...) default: log.Logger().Errorf(format, args...) 
} } // NewCommonOptionsWithTerm creates a new CommonOptions instance with given terminal input, output and error func NewCommonOptionsWithTerm(factory clients.Factory, in terminal.FileReader, out terminal.FileWriter, err io.Writer) *CommonOptions { return &CommonOptions{ factory: factory, In: in, Out: out, Err: err, } } // NewCommonOptionsWithFactory creates a new CommonOptions instance with the // given factory func NewCommonOptionsWithFactory(factory clients.Factory) CommonOptions { return CommonOptions{ factory: factory, } } // SetDevNamespace configures the current dev namespace func (o *CommonOptions) SetDevNamespace(ns string) { o.devNamespace = ns o.currentNamespace = ns o.kubeClient = nil log.Logger().Debugf("Setting the dev namespace to: %s", util.ColorInfo(ns)) } func (o *CommonOptions) SetCurrentNamespace(ns string) { o.currentNamespace = ns o.kubeClient = nil log.Logger().Debugf("Setting the current namespace to: %s", util.ColorInfo(ns)) } // AddBaseFlags adds the base flags for all commands func (o *CommonOptions) AddBaseFlags(cmd *cobra.Command) { defaultBatchMode := false if os.Getenv("JX_BATCH_MODE") == "true" { defaultBatchMode = true } cmd.PersistentFlags().BoolVarP(&o.BatchMode, OptionBatchMode, "b", defaultBatchMode, "Runs in batch mode without prompting for user input") cmd.PersistentFlags().BoolVarP(&o.Verbose, OptionVerbose, "", false, "Enables verbose output") o.Cmd = cmd } // GetConfiguration read the config file marshal into a config struct func (o *CommonOptions) GetConfiguration(config interface{}) error { configFile := o.ConfigFile if configFile != "" { viper.SetConfigFile(configFile) viper.SetConfigType("yaml") if err := viper.ReadInConfig(); err != nil { if _, ok := err.(viper.ConfigFileNotFoundError); !ok { log.Logger().Warnf("Config file %s not found", configFile) } else { return err } } else { err = viper.Unmarshal(config) if err != nil { return errors.Wrap(err, "unable to decode into config struct") } } } createDebugConfigFile("debug", "config.yaml") return nil } func createDebugConfigFile(dir string, file string) { wkDir, err := util.ConfigDir() if err != nil { log.Logger().Debugf("error determining config dir %v", err) } else { dir := filepath.Join(wkDir, dir) if err = os.MkdirAll(dir, util.DefaultWritePermissions); err != nil { log.Logger().Warnf("Error making directory: %s %s", dir, err) } configFile := filepath.Join(dir, file) if err = viper.WriteConfigAs(configFile); err != nil { log.Logger().Warnf("Error writing config file %s", err) } } } // ApiExtensionsClient return or creates the api extension client func (o *CommonOptions) ApiExtensionsClient() (apiextensionsclientset.Interface, error) { var err error if o.apiExtensionsClient == nil { o.apiExtensionsClient, err = o.factory.CreateApiExtensionsClient() if err != nil { return nil, err } } return o.apiExtensionsClient, nil } // SetAPIExtensionsClient sets the api extensions client func (o *CommonOptions) SetAPIExtensionsClient(client apiextensionsclientset.Interface) { o.apiExtensionsClient = client } // KubeClient returns or creates the kube client func (o *CommonOptions) KubeClient() (kubernetes.Interface, error) { if o.kubeClient == nil { kubeClient, currentNs, err := o.factory.CreateKubeClient() if err != nil { return nil, err } o.kubeClient = kubeClient if o.currentNamespace == "" { o.currentNamespace = currentNs } } if o.kubeClient == nil { return o.kubeClient, fmt.Errorf("failed to create KubeClient") } return o.kubeClient, nil } // KubeClientAndNamespace returns or creates the kube client 
and the current namespace func (o *CommonOptions) KubeClientAndNamespace() (kubernetes.Interface, string, error) { client, err := o.KubeClient() return client, o.currentNamespace, err } // SetKubeClient sets the kube client func (o *CommonOptions) SetKubeClient(kubeClient kubernetes.Interface) { o.kubeClient = kubeClient } // KubeClientAndDevNamespace returns a kube client and the development namespace func (o *CommonOptions) KubeClientAndDevNamespace() (kubernetes.Interface, string, error) { kubeClient, curNs, err := o.KubeClientAndNamespace() if err != nil { return nil, "", err } if o.devNamespace == "" { o.devNamespace, _, err = kube.GetDevNamespace(kubeClient, curNs) } return kubeClient, o.devNamespace, err } // GetDeployNamespace returns the namespace option from the command line option if defined otherwise we try // the $DEPLOY_NAMESPACE environment variable. If none of those are found lets use the current // kubernetes namespace value func (o *CommonOptions) GetDeployNamespace(namespaceOption string) (string, error) { ns := namespaceOption if ns == "" { ns = os.Getenv("DEPLOY_NAMESPACE") } if ns == "" { var err error _, ns, err = o.KubeClientAndNamespace() if err != nil { return ns, err } log.Logger().Infof("No --namespace option specified or $DEPLOY_NAMESPACE environment variable available so defaulting to using namespace %s", ns) } return ns, nil } // SetJxClient set the jx client func (o *CommonOptions) SetJxClient(jxClient versioned.Interface) { o.jxClient = jxClient } // JXClient returns or creates the jx client and current namespace func (o *CommonOptions) JXClient() (versioned.Interface, string, error) { if o.factory == nil { return nil, "", errors.New("command factory is not initialized") } if o.jxClient == nil { jxClient, ns, err := o.factory.CreateJXClient() if err != nil { return nil, ns, err } o.jxClient = jxClient if o.currentNamespace == "" { o.currentNamespace = ns } } return o.jxClient, o.currentNamespace, nil } // SetGCloudClient set the gcloud client func (o *CommonOptions) SetGCloudClient(gcloudClient gke.GClouder) { o.gcloudClient = gcloudClient } // GCloud returns the implementation of a gcloud helper func (o *CommonOptions) GCloud() gke.GClouder { if o.gcloudClient == nil { return &gke.GCloud{} } return o.gcloudClient } // TektonClient lazily creates a new Knative Pipeline client func (o *CommonOptions) TektonClient() (tektonclient.Interface, string, error) { if o.factory == nil { return nil, "", errors.New("command factory is not initialized") } if o.tektonClient == nil { tektonClient, ns, err := o.factory.CreateTektonClient() if err != nil { return nil, ns, err } o.tektonClient = tektonClient if o.currentNamespace == "" { o.currentNamespace = ns } } return o.tektonClient, o.currentNamespace, nil } // KnativeBuildClient returns or creates the knative build client func (o *CommonOptions) KnativeBuildClient() (buildclient.Interface, string, error) { if o.factory == nil { return nil, "", errors.New("command factory is not initialized") } if o.knbClient == nil { knbClient, ns, err := o.factory.CreateKnativeBuildClient() if err != nil { return nil, ns, err } o.knbClient = knbClient if o.currentNamespace == "" { o.currentNamespace = ns } } return o.knbClient, o.currentNamespace, nil } // KnativeServeClient returns or creates the knative serve client func (o *CommonOptions) KnativeServeClient() (kserve.Interface, string, error) { if o.factory == nil { return nil, "", errors.New("command factory is not initialized") } if o.kserveClient == nil { kserveClient, ns, err 
:= o.factory.CreateKnativeServeClient() if err != nil { return nil, ns, err } o.kserveClient = kserveClient if o.currentNamespace == "" { o.currentNamespace = ns } } return o.kserveClient, o.currentNamespace, nil } // SetKnativeServeClient sets the kantive serve client func (o *CommonOptions) SetKnativeServeClient(client kserve.Interface) { o.kserveClient = client } // JXClientAndAdminNamespace returns or creates the jx client and admin namespace func (o *CommonOptions) JXClientAndAdminNamespace() (versioned.Interface, string, error) { kubeClient, _, err := o.KubeClientAndNamespace() if err != nil { return nil, "", err } jxClient, devNs, err := o.JXClientAndDevNamespace() if err != nil { return nil, "", err } ns, err := kube.GetAdminNamespace(kubeClient, devNs) return jxClient, ns, err } // JXClientAndDevNamespace returns and creates the jx client and dev namespace func (o *CommonOptions) JXClientAndDevNamespace() (versioned.Interface, string, error) { if o.jxClient == nil { jxClient, ns, err := o.JXClient() if err != nil { return nil, ns, err } o.jxClient = jxClient if o.currentNamespace == "" { o.currentNamespace = ns } } if o.devNamespace == "" { client, ns, err := o.KubeClientAndNamespace() if err != nil { return nil, "", err } devNs, _, err := kube.GetDevNamespace(client, ns) if err != nil { return nil, "", err } if devNs == "" { devNs = ns } o.devNamespace = devNs } return o.jxClient, o.devNamespace, nil } // JXClientDevAndAdminNamespace returns or creates the jx client, dev and admin namespaces func (o *CommonOptions) JXClientDevAndAdminNamespace() (versioned.Interface, string, string, error) { kubeClient, _, err := o.KubeClientAndNamespace() if err != nil { return nil, "", "", err } jxClient, devNs, err := o.JXClientAndDevNamespace() if err != nil { return nil, "", "", err } adminNs, err := kube.GetAdminNamespace(kubeClient, devNs) return jxClient, devNs, adminNs, err } // Git returns the git client func (o *CommonOptions) Git() gits.Gitter { if o.git == nil { o.git = gits.NewGitCLI() } return o.git } // SetGit sets the git client func (o *CommonOptions) SetGit(git gits.Gitter) { o.git = git } // SetFakeGitProvider set the fake git provider for testing purposes func (o *CommonOptions) SetFakeGitProvider(provider *gits.FakeProvider) { o.fakeGitProvider = provider } // NewHelm cerates a new helm client from the given list of parameters func (o *CommonOptions) NewHelm(verbose bool, helmBinary string, noTiller bool, helmTemplate bool) helm.Helmer { o.helm = o.factory.CreateHelm(o.Verbose, helmBinary, noTiller, helmTemplate) return o.helm } // Helm returns or creates the helm client func (o *CommonOptions) Helm() helm.Helmer { if o.helm == nil { helm3Flag := os.Getenv("JX_HELM3") if helm3Flag == "true" { o.RemoteCluster = true // let disable loading/modifying team environments as we typically install on empty k8s clusters o.ModifyEnvironmentFn = o.IgnoreModifyEnvironment o.ModifyDevEnvironmentFn = o.IgnoreModifyDevEnvironment helmer := o.NewHelm(false, "helm3", true, false) o.SetHelm(helmer) return helmer } noTillerFlag := os.Getenv("JX_NO_TILLER") if noTillerFlag == "true" || o.RemoteCluster { o.EnableRemoteKubeCluster() if o.helm != nil { return o.helm } } helmBinary, noTiller, helmTemplate, err := o.TeamHelmBin() if err != nil { if noTillerFlag == "true" { helmTemplate = true } else { log.Logger().Warnf("Failed to retrieve team settings: %v - falling back to default settings...", err) } } return o.NewHelm(o.Verbose, helmBinary, noTiller, helmTemplate) } return o.helm } // SetHelm 
sets the helmer used for this object func (o *CommonOptions) SetHelm(helmer helm.Helmer) { o.helm = helmer } // Kube returns the k8s config client func (o *CommonOptions) Kube() kube.Kuber { if o.kuber == nil { o.kuber = kube.NewKubeConfig() } return o.kuber } // SetKube sets the kube config client func (o *CommonOptions) SetKube(kuber kube.Kuber) { o.kuber = kuber } // SetResourcesInstaller configures the installer for Kubernetes resources func (o *CommonOptions) SetResourcesInstaller(installer resources.Installer) { o.resourcesInstaller = installer } // ResourcesInstaller returns the installer for Kubernetes resources func (o *CommonOptions) ResourcesInstaller() resources.Installer { if o.resourcesInstaller == nil { o.resourcesInstaller = resources.NewKubeCtlInstaller("", true, true) } return o.resourcesInstaller } // TeamAndEnvironmentNames returns team and environment namespace func (o *CommonOptions) TeamAndEnvironmentNames() (string, string, error) { kubeClient, currentNs, err := o.KubeClientAndNamespace() if err != nil { return "", "", err } return kube.GetDevNamespace(kubeClient, currentNs) } // AddGitServerFlags add git server flags to the given cobra command func (o *ServerFlags) AddGitServerFlags(cmd *cobra.Command) { cmd.Flags().StringVarP(&o.ServerName, OptionServerName, "n", "", "The name of the Git server to add a user") cmd.Flags().StringVarP(&o.ServerURL, OptionServerURL, "u", "", "The URL of the Git server to add a user") } // FindGitServer finds the Git server from the given flags or returns an error func (o *CommonOptions) FindGitServer(config *auth.AuthConfig, serverFlags *ServerFlags) (*auth.AuthServer, error) { return o.FindServer(config, serverFlags, "git", "Try creating one via: jx create git server", false) } // FindIssueTrackerServer finds the issue tracker server from the given flags or returns an error func (o *CommonOptions) FindIssueTrackerServer(config *auth.AuthConfig, serverFlags *ServerFlags) (*auth.AuthServer, error) { return o.FindServer(config, serverFlags, "issues", "Try creating one via: jx create tracker server", false) } // FindChatServer finds the chat server from the given flags or returns an error func (o *CommonOptions) FindChatServer(config *auth.AuthConfig, serverFlags *ServerFlags) (*auth.AuthServer, error) { return o.FindServer(config, serverFlags, "chat", "Try creating one via: jx create chat server", false) } // FindAddonServer finds the addon server from the given flags or returns an error func (o *CommonOptions) FindAddonServer(config *auth.AuthConfig, serverFlags *ServerFlags, kind string) (*auth.AuthServer, error) { return o.FindServer(config, serverFlags, kind, "Try creating one via: jx create addon", true) } // FindServer find the server flags from the given flags or returns an error func (o *CommonOptions) FindServer(config *auth.AuthConfig, serverFlags *ServerFlags, defaultKind string, missingServerDescription string, lazyCreate bool) (*auth.AuthServer, error) { kind := defaultKind var server *auth.AuthServer if serverFlags.ServerURL != "" { server = config.GetServer(serverFlags.ServerURL) if server == nil { if lazyCreate { return config.GetOrCreateServerName(serverFlags.ServerURL, serverFlags.ServerName, kind), nil } return nil, util.InvalidOption(OptionServerURL, serverFlags.ServerURL, config.GetServerURLs()) } } if server == nil && serverFlags.ServerName != "" { name := serverFlags.ServerName if lazyCreate { server = config.GetOrCreateServerName(serverFlags.ServerURL, name, kind) } else { server = config.GetServerByName(name) } 
if server == nil { return nil, util.InvalidOption(OptionServerName, name, config.GetServerNames()) } } if server == nil { name := config.CurrentServer if name != "" && o.BatchMode { server = config.GetServerByName(name) if server == nil { log.Logger().Warnf("Current server %s no longer exists", name) } } } if server == nil && len(config.Servers) == 1 { server = config.Servers[0] } if server == nil && len(config.Servers) > 1 { if o.BatchMode { return nil, fmt.Errorf("Multiple servers found. Please specify one via the %s option", OptionServerName) } defaultServerName := "" if config.CurrentServer != "" { s := config.GetServer(config.CurrentServer) if s != nil { defaultServerName = s.Name } } name, err := util.PickNameWithDefault(config.GetServerNames(), "Pick server to use: ", defaultServerName, "", o.In, o.Out, o.Err) if err != nil { return nil, err } server = config.GetServerByName(name) if server == nil { return nil, fmt.Errorf("Could not find the server for name %s", name) } } if server == nil { return nil, fmt.Errorf("Could not find a %s. %s", kind, missingServerDescription) } return server, nil } // FindService finds the given service and returns its URL func (o *CommonOptions) FindService(name string) (string, error) { client, ns, err := o.KubeClientAndNamespace() if err != nil { return "", err } devNs, _, err := kube.GetDevNamespace(client, ns) if err != nil { return "", err } url, err := services.FindServiceURL(client, ns, name) if url == "" { url, err = services.FindServiceURL(client, devNs, name) } if url == "" { names, err := services.GetServiceNames(client, ns, name) if err != nil { return "", err } if len(names) > 1 { name, err = util.PickName(names, "Pick service to open: ", "", o.In, o.Out, o.Err) if err != nil { return "", err } if name != "" { url, err = services.FindServiceURL(client, ns, name) } } else if len(names) == 1 { // must have been a filter url, err = services.FindServiceURL(client, ns, names[0]) } if url == "" { return "", fmt.Errorf("Could not find URL for service %s in namespace %s", name, ns) } } return url, nil } // FindEnvironmentNamespace returns the namespace of a given environment func (o *CommonOptions) FindEnvironmentNamespace(envName string) (string, error) { client, ns, err := o.KubeClientAndNamespace() if err != nil { return "", err } jxClient, _, err := o.JXClient() if err != nil { return "", err } devNs, _, err := kube.GetDevNamespace(client, ns) if err != nil { return "", err } envMap, envNames, err := kube.GetEnvironments(jxClient, devNs) if err != nil { return "", err } env := envMap[envName] if env == nil { return "", util.InvalidOption(OptionEnvironment, envName, envNames) } answer := env.Spec.Namespace if answer == "" { return "", fmt.Errorf("Environment %s does not have a Namespace!", envName) } return answer, nil } // FindServiceInNamespace searches a service in a given namespace. 
If found, it returns the service URL func (o *CommonOptions) FindServiceInNamespace(name string, ns string) (string, error) { client, curNs, err := o.KubeClientAndNamespace() if err != nil { return "", err } if ns == "" { ns = curNs } url, err := services.FindServiceURL(client, ns, name) if url == "" { names, err := services.GetServiceNames(client, ns, name) if err != nil { return "", err } if len(names) > 1 { name, err = util.PickName(names, "Pick service to open: ", "", o.In, o.Out, o.Err) if err != nil { return "", err } if name != "" { url, err = services.FindServiceURL(client, ns, name) } } else if len(names) == 1 { // must have been a filter url, err = services.FindServiceURL(client, ns, names[0]) } if url == "" { return "", fmt.Errorf("Could not find URL for service %s in namespace %s", name, ns) } } return url, nil } // Retry executes a given function and reties 'attempts' times with a delay of 'sleep' between the executions func (o *CommonOptions) Retry(attempts int, sleep time.Duration, call func() error) (err error) { for i := 0; ; i++ { err = call() if err == nil { return } if i >= (attempts - 1) { break } time.Sleep(sleep) log.Logger().Warnf("\nretrying after error:%s\n", err) } return fmt.Errorf("after %d attempts, last error: %s", attempts, err) } // FatalError is a wrapper structure around regular error indicating that re(try) processing flow should be interrupted // immediately. type FatalError struct { E error } // Error converts a fatal error into a string func (err *FatalError) Error() string { return fmt.Sprintf("fatal error: %s", err.E.Error()) } // RetryUntilFatalError executes a given function call with retry when the function fails. It stops retrying when a fatal // error is encountered. func (o *CommonOptions) RetryUntilFatalError(attempts int, sleep time.Duration, call func() (*FatalError, error)) (err error) { for i := 0; ; i++ { fatalErr, err := call() if fatalErr != nil { return fatalErr.E } if err == nil { return nil } if i >= (attempts - 1) { break } time.Sleep(sleep) log.Logger().Infof("retrying after error:%s", err) } return fmt.Errorf("after %d attempts, last error: %s", attempts, err) } // RetryQuiet executes a given function call with retry when an error occurs without printing any logs func (o *CommonOptions) RetryQuiet(attempts int, sleep time.Duration, call func() error) (err error) { lastMessage := "" dot := false for i := 0; ; i++ { err = call() if err == nil { if dot { log.Blank() } return } if i >= (attempts - 1) { break } time.Sleep(sleep) message := fmt.Sprintf("retrying after error: %s", err) if lastMessage == message { log.Logger().Info(".") dot = true } else { lastMessage = message if dot { dot = false log.Blank() } log.Logger().Warnf("%s\n", lastMessage) } } return fmt.Errorf("after %d attempts, last error: %s", attempts, err) } // RetryQuietlyUntilTimeout executes a function call with retry when an error occurs. It stops retrying when the timeout is reached. 
func (o *CommonOptions) RetryQuietlyUntilTimeout(timeout time.Duration, sleep time.Duration, call func() error) (err error) { timeoutTime := time.Now().Add(timeout) lastMessage := "" dot := false for i := 0; ; i++ { err = call() if err == nil { if dot { log.Blank() } return } if time.Now().After(timeoutTime) { return fmt.Errorf("Timed out after %s, last error: %s", timeout.String(), err) } time.Sleep(sleep) message := fmt.Sprintf("retrying after error: %s", err) if lastMessage == message { log.Logger().Info(".") dot = true } else { lastMessage = message if dot { dot = false log.Blank() } log.Logger().Warnf("%s\n", lastMessage) } } } // RetryUntilTrueOrTimeout waits until complete is true, an error occurs or the timeout func (o *CommonOptions) RetryUntilTrueOrTimeout(timeout time.Duration, sleep time.Duration, call func() (bool, error)) (err error) { timeoutTime := time.Now().Add(timeout) for i := 0; ; i++ { complete, err := call() if complete || err != nil { return err } if time.Now().After(timeoutTime) { return fmt.Errorf("Timed out after %s, last error: %s", timeout.String(), err) } time.Sleep(sleep) } } // PickGitRemoteURL picks a git remote URL from git config, or prompts to the user if no URL is found func (o *CommonOptions) PickGitRemoteURL(config *gitcfg.Config) (string, error) { surveyOpts := survey.WithStdio(o.In, o.Out, o.Err) urls := []string{} if config.Remotes != nil { for _, r := range config.Remotes { if r.URLs != nil { for _, u := range r.URLs { urls = append(urls, u) } } } } if len(urls) == 1 { return urls[0], nil } url := "" if len(urls) > 1 { prompt := &survey.Select{ Message: "Choose a remote git URL:", Options: urls, } err := survey.AskOne(prompt, &url, nil, surveyOpts) if err != nil { return "", err } } return url, nil } // VaultOperatorClient returns or creates the vault operator client func (o *CommonOptions) VaultOperatorClient() (vaultoperatorclient.Interface, error) { if o.factory == nil { return nil, errors.New("command factory is not initialized") } if o.vaultOperatorClient == nil { vaultOperatorClient, err := o.factory.CreateVaultOperatorClient() if err != nil { return nil, err } o.vaultOperatorClient = vaultOperatorClient } return o.vaultOperatorClient, nil } // SystemVaultClient return or creates the system vault client func (o *CommonOptions) SystemVaultClient(namespace string) (vault.Client, error) { if o.factory == nil { return nil, errors.New("command factory is not initialized") } if o.systemVaultClient == nil { if namespace == "" { var err error _, namespace, err = o.KubeClientAndDevNamespace() if err != nil { return nil, errors.Wrapf(err, "failed to find development namespace") } } systemVaultClient, err := o.factory.CreateSystemVaultClient(namespace) if err != nil { return nil, err } o.systemVaultClient = systemVaultClient } return o.systemVaultClient, nil } // VaultClient returns or creates the vault client func (o *CommonOptions) VaultClient(name string, namespace string) (vault.Client, error) { if o.factory == nil { return nil, errors.New("command factory is not initialized") } if o.systemVaultClient == nil { if namespace == "" { var err error _, namespace, err = o.KubeClientAndDevNamespace() if err != nil { return nil, errors.Wrapf(err, "failed to find development namespace") } } vaultClient, err := o.factory.CreateVaultClient(name, namespace) if err != nil { return nil, err } o.vaultClient = vaultClient } return o.vaultClient, nil } // GetSecretsLocation returns the location of the secrets func (o *CommonOptions) GetSecretsLocation() 
secrets.SecretsLocationKind { if o.factory == nil { return secrets.FileSystemLocationKind } return o.factory.SecretsLocation() } // SetSecretsLocation sets the secrets location func (o *CommonOptions) SetSecretsLocation(location secrets.SecretsLocationKind, persist bool) error { if o.factory == nil { return errors.New("command factory is not initialized") } return o.factory.SetSecretsLocation(location, persist) } // ResetSecretsLocation resets the secrets location func (o *CommonOptions) ResetSecretsLocation() error { if o.factory == nil { return errors.New("command factory is not initialized") } o.factory.ResetSecretsLocation() return nil } // GetWebHookEndpoint returns the webhook endpoint func (o *CommonOptions) GetWebHookEndpoint() (string, error) { _, _, err := o.JXClient() if err != nil { return "", errors.Wrap(err, "failed to get jxclient") } _, err = o.KubeClient() if err != nil { return "", errors.Wrap(err, "failed to get kube client") } isProwEnabled, err := o.IsProw() if err != nil { return "", err } ns, _, err := kube.GetDevNamespace(o.kubeClient, o.currentNamespace) if err != nil { return "", err } var webHookUrl string if isProwEnabled { baseURL, err := services.GetServiceURLFromName(o.kubeClient, "hook", ns) if err != nil { return "", err } webHookUrl = util.UrlJoin(baseURL, "hook") } else { baseURL, err := services.GetServiceURLFromName(o.kubeClient, "jenkins", ns) if err != nil { return "", err } webHookUrl = util.UrlJoin(baseURL, "github-webhook/") } return webHookUrl, nil } // ResetClientsAndNamespaces resets the current clients and namespaces func (o *CommonOptions) ResetClientsAndNamespaces() { //Reset all the cached clients & namespace values when switching so that they can be properly recalculated for //the new namespace. o.kubeClient = nil o.jxClient = nil o.currentNamespace = "" o.devNamespace = "" } // GetIn returns the command inputs writer func (o *CommonOptions) GetIn() terminal.FileReader { return o.In } // GetOut returns the command output writer func (o *CommonOptions) GetOut() terminal.FileWriter { return o.Out } // GetErr returns the command error writer func (o *CommonOptions) GetErr() io.Writer { return o.Err } // EnvironmentsDir is the local directory the environments are stored in - can be faked out for tests func (o *CommonOptions) EnvironmentsDir() (string, error) { if o.environmentsDir == "" { var err error o.environmentsDir, err = util.EnvironmentsDir() if err != nil { return "", err } } return o.environmentsDir, nil } // SetEnvironmentsDir sets the environment directory func (o *CommonOptions) SetEnvironmentsDir(dir string) { o.environmentsDir = dir } // ComplianceClient returns or creates the compliance client func (o *CommonOptions) ComplianceClient() (*client.SonobuoyClient, error) { if o.factory == nil { return nil, errors.New("command factory is not initialized") } if o.complianceClient == nil { complianceClient, err := o.factory.CreateComplianceClient() if err != nil { return nil, err } o.complianceClient = complianceClient } return o.complianceClient, nil } // CertManagerClient returns or creates the cert-manager client func (o *CommonOptions) CertManagerClient() (certmngclient.Interface, error) { if o.factory == nil { return nil, errors.New("command factory is not initialized") } if o.certManagerClient == nil { certManagerClient, err := o.factory.CreateCertManagerClient() if err != nil { return nil, err } o.certManagerClient = certManagerClient } return o.certManagerClient, nil } // InCluster return true if the command execution takes 
place in k8s cluster func (o *CommonOptions) InCluster() bool { return cluster.IsInCluster() } // InCDPipeline return true if the command execution takes place in the CD pipeline func (o *CommonOptions) InCDPipeline() bool { return o.factory.IsInCDPipeline() } // SetBatchMode configures the batch mode func (o *CommonOptions) SetBatchMode(batchMode bool) { o.factory.SetBatch(batchMode) } // IstioClient creates a new Kubernetes client for Istio resources func (o *CommonOptions) IstioClient() (istioclient.Interface, error) { config, err := o.factory.CreateKubeConfig() if err != nil { return nil, err } return istioclient.NewForConfig(config) } // IsFlagExplicitlySet checks whether the flag with the specified name is explicitly set by the user. // If so, true is returned, false otherwise. func (o *CommonOptions) IsFlagExplicitlySet(flagName string) bool { explicit := false explicitlySetFunc := func(f *pflag.Flag) { if f.Name == flagName { explicit = true } } o.Cmd.Flags().Visit(explicitlySetFunc) return explicit } // IsConfigExplicitlySet checks whether the flag or config with the specified name is explicitly set by the user. // If so, true is returned, false otherwise. func (o *CommonOptions) IsConfigExplicitlySet(configPath, configKey string) bool { if o.IsFlagExplicitlySet(configKey) || configExists(configPath, configKey) { return true } return false } func configExists(configPath, configKey string) bool { if configPath != "" { path := append(strings.Split(configPath, "."), configKey) configMap := viper.GetStringMap(path[0]) m := map[string]interface{}{path[0]: configMap} for _, k := range path { m2, ok := m[k] if !ok { return false } m3, ok := m2.(map[string]interface{}) if !ok { if k != configKey { return false } } m = m3 } return true } return viper.InConfig(configKey) }
[ "\"JX_BATCH_MODE\"", "\"DEPLOY_NAMESPACE\"", "\"JX_HELM3\"", "\"JX_NO_TILLER\"" ]
[]
[ "JX_NO_TILLER", "JX_HELM3", "JX_BATCH_MODE", "DEPLOY_NAMESPACE" ]
[]
["JX_NO_TILLER", "JX_HELM3", "JX_BATCH_MODE", "DEPLOY_NAMESPACE"]
go
4
0
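Among the helpers in pkg/cmd/opts/common.go above, Retry calls a function up to `attempts` times, sleeping between failures and reporting the last error. A small Python sketch of the same loop, with invented names and exceptions standing in for returned Go errors:

import time

def retry(attempts, sleep_seconds, call):
    # Try up to `attempts` times; sleep between failures; surface the last error.
    last_error = None
    for i in range(attempts):
        try:
            return call()
        except Exception as err:
            last_error = err
            if i < attempts - 1:
                time.sleep(sleep_seconds)
    raise RuntimeError(f"after {attempts} attempts, last error: {last_error}")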
pkg/daemon/ceph/config/config.go
/* Copyright 2018 The Rook Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package config provides methods for creating and formatting Ceph configuration files for daemons. package config import ( "fmt" "net" "os" "path" "strconv" "strings" "github.com/coreos/pkg/capnslog" "github.com/go-ini/ini" "github.com/pkg/errors" "github.com/rook/rook/pkg/clusterd" "github.com/rook/rook/pkg/daemon/ceph/client" cephutil "github.com/rook/rook/pkg/daemon/ceph/util" cephver "github.com/rook/rook/pkg/operator/ceph/version" ) var logger = capnslog.NewPackageLogger("github.com/rook/rook", "cephconfig") const ( // DefaultKeyringFile is the default name of the file where Ceph stores its keyring info DefaultKeyringFile = "keyring" // Msgr2port is the listening port of the messenger v2 protocol Msgr2port = 3300 msgr1Prefix = "v1:" msgr2Prefix = "v2:" ) var ( // DefaultConfigDir is the default dir where Ceph stores its configs. Can be overridden for unit // tests. DefaultConfigDir = "/etc/ceph" // DefaultConfigFile is the default name of the file where Ceph stores its configs. Can be // overridden for unit tests. DefaultConfigFile = "ceph.conf" ) // GlobalConfig represents the [global] sections of Ceph's config file. type GlobalConfig struct { FSID string `ini:"fsid,omitempty"` MonMembers string `ini:"mon initial members,omitempty"` MonHost string `ini:"mon host"` PublicAddr string `ini:"public addr,omitempty"` PublicNetwork string `ini:"public network,omitempty"` ClusterAddr string `ini:"cluster addr,omitempty"` ClusterNetwork string `ini:"cluster network,omitempty"` } // CephConfig represents an entire Ceph config including all sections. type CephConfig struct { *GlobalConfig `ini:"global,omitempty"` } // DefaultConfigFilePath returns the full path to Ceph's default config file func DefaultConfigFilePath() string { return path.Join(DefaultConfigDir, DefaultConfigFile) } // GetConfFilePath gets the path of a given cluster's config file func GetConfFilePath(root, clusterName string) string { return fmt.Sprintf("%s/%s.config", root, clusterName) } // GenerateAdminConnectionConfig calls GenerateAdminConnectionConfigWithSettings with no settings // overridden. func GenerateAdminConnectionConfig(context *clusterd.Context, cluster *ClusterInfo) (string, error) { return GenerateAdminConnectionConfigWithSettings(context, cluster, nil) } // GenerateAdminConnectionConfigWithSettings generates a Ceph config and keyring which will allow // the daemon to connect as an admin. Default config file settings can be overridden by specifying // some subset of settings. 
func GenerateAdminConnectionConfigWithSettings(context *clusterd.Context, cluster *ClusterInfo, settings *CephConfig) (string, error) { root := path.Join(context.ConfigDir, cluster.Name) keyringPath := path.Join(root, fmt.Sprintf("%s.keyring", client.AdminUsername)) err := writeKeyring(AdminKeyring(cluster), keyringPath) if err != nil { return "", errors.Wrapf(err, "failed to write admin keyring to %s", root) } // If this is an external cluster if cluster.IsInitializedExternalCred(false) { keyringPath := path.Join(root, fmt.Sprintf("%s.keyring", cluster.ExternalCred.Username)) err := writeKeyring(ExternalUserKeyring(cluster.ExternalCred.Username, cluster.ExternalCred.Secret), keyringPath) if err != nil { return "", errors.Wrapf(err, "failed to write keyring %q to %s", cluster.ExternalCred.Username, root) } } filePath, err := GenerateConfigFile(context, cluster, root, client.AdminUsername, keyringPath, settings, nil) if err != nil { return "", errors.Wrapf(err, "failed to write config to %s", root) } logger.Infof("generated admin config in %s", root) return filePath, nil } // GenerateConfigFile generates and writes a config file to disk. func GenerateConfigFile(context *clusterd.Context, cluster *ClusterInfo, pathRoot, user, keyringPath string, globalConfig *CephConfig, clientSettings map[string]string) (string, error) { // create the config directory if err := os.MkdirAll(pathRoot, 0744); err != nil { logger.Warningf("failed to create config directory at %q. %v", pathRoot, err) } configFile, err := createGlobalConfigFileSection(context, cluster, globalConfig) if err != nil { return "", errors.Wrapf(err, "failed to create global config section") } qualifiedUser := getQualifiedUser(user) if err := addClientConfigFileSection(configFile, qualifiedUser, keyringPath, clientSettings); err != nil { return "", errors.Wrapf(err, "failed to add admin client config section") } if cluster.IsInitializedExternalCred(false) { keyringPath = path.Join(path.Join(context.ConfigDir, cluster.Name), fmt.Sprintf("%s.keyring", cluster.ExternalCred.Username)) qualifiedUser := getQualifiedUser(cluster.ExternalCred.Username) if err := addClientConfigFileSection(configFile, qualifiedUser, keyringPath, clientSettings); err != nil { return "", errors.Wrap(err, "failed to add user client config section") } } // write the entire config to disk filePath := GetConfFilePath(pathRoot, cluster.Name) logger.Infof("writing config file %s", filePath) if err := configFile.SaveTo(filePath); err != nil { return "", errors.Wrapf(err, "failed to save config file %s", filePath) } return filePath, nil } // prepends "client." if a user namespace is not already specified func getQualifiedUser(user string) string { if strings.Index(user, ".") == -1 { return fmt.Sprintf("client.%s", user) } return user } // CreateDefaultCephConfig creates a default ceph config file. 
func CreateDefaultCephConfig(context *clusterd.Context, cluster *ClusterInfo) (*CephConfig, error) { cephVersionEnv := os.Getenv("ROOK_CEPH_VERSION") if cephVersionEnv != "" { v, err := cephver.ExtractCephVersion(cephVersionEnv) if err != nil { return nil, errors.Wrapf(err, "failed to extract ceph version") } cluster.CephVersion = *v } // extract a list of just the monitor names, which will populate the "mon initial members" // and "mon hosts" global config field monMembers, monHosts := PopulateMonHostMembers(cluster.Monitors) conf := &CephConfig{ GlobalConfig: &GlobalConfig{ FSID: cluster.FSID, MonMembers: strings.Join(monMembers, " "), MonHost: strings.Join(monHosts, ","), PublicAddr: context.NetworkInfo.PublicAddr, PublicNetwork: context.NetworkInfo.PublicNetwork, ClusterAddr: context.NetworkInfo.ClusterAddr, ClusterNetwork: context.NetworkInfo.ClusterNetwork, }, } return conf, nil } // create a config file with global settings configured, and return an ini file func createGlobalConfigFileSection(context *clusterd.Context, cluster *ClusterInfo, userConfig *CephConfig) (*ini.File, error) { var ceph *CephConfig if userConfig != nil { // use the user config since it was provided ceph = userConfig } else { var err error ceph, err = CreateDefaultCephConfig(context, cluster) if err != nil { return nil, errors.Wrapf(err, "failed to create default ceph config") } } configFile := ini.Empty() err := ini.ReflectFrom(configFile, ceph) return configFile, err } // add client config to the ini file func addClientConfigFileSection(configFile *ini.File, clientName, keyringPath string, settings map[string]string) error { s, err := configFile.NewSection(clientName) if err != nil { return err } if _, err := s.NewKey("keyring", keyringPath); err != nil { return err } for key, val := range settings { if _, err := s.NewKey(key, val); err != nil { return errors.Wrapf(err, "failed to add key %s", key) } } return nil } // PopulateMonHostMembers extracts a list of just the monitor names, which will populate the "mon initial members" // and "mon hosts" global config field func PopulateMonHostMembers(monitors map[string]*MonInfo) ([]string, []string) { monMembers := make([]string, len(monitors)) monHosts := make([]string, len(monitors)) i := 0 for _, monitor := range monitors { monMembers[i] = monitor.Name monIP := cephutil.GetIPFromEndpoint(monitor.Endpoint) // This tries to detect the current port if the mon already exists // This basically handles the transition between monitors running on 6790 to msgr2 // So whatever the previous monitor port was we keep it currentMonPort := cephutil.GetPortFromEndpoint(monitor.Endpoint) monPorts := [2]string{strconv.Itoa(int(Msgr2port)), strconv.Itoa(int(currentMonPort))} msgr2Endpoint := net.JoinHostPort(monIP, monPorts[0]) msgr1Endpoint := net.JoinHostPort(monIP, monPorts[1]) monHosts[i] = "[v2:" + msgr2Endpoint + ",v1:" + msgr1Endpoint + "]" i++ } return monMembers, monHosts }
[ "\"ROOK_CEPH_VERSION\"" ]
[]
[ "ROOK_CEPH_VERSION" ]
[]
["ROOK_CEPH_VERSION"]
go
1
0
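PopulateMonHostMembers in the Rook file above formats each "mon host" entry as a bracketed msgr2/msgr1 pair such as [v2:10.0.0.1:3300,v1:10.0.0.1:6789], keeping whatever port the monitor already used for the v1 endpoint. The standalone sketch below reproduces only that formatting step; it is an illustrative example rather than part of the dataset row, and the monHostEntry helper and monitor addresses are made-up sample data, not Rook identifiers.

package main

import (
	"fmt"
	"net"
	"strconv"
)

// msgr2Port matches the Msgr2port constant defined in the config file above.
const msgr2Port = 3300

// monHostEntry builds one "mon host" entry in the same shape Rook writes:
// [v2:<ip>:3300,v1:<ip>:<current port>].
func monHostEntry(ip string, currentPort int) string {
	msgr2 := net.JoinHostPort(ip, strconv.Itoa(msgr2Port))
	msgr1 := net.JoinHostPort(ip, strconv.Itoa(currentPort))
	return "[v2:" + msgr2 + ",v1:" + msgr1 + "]"
}

func main() {
	// Sample monitors only; a real cluster's endpoints come from ClusterInfo.Monitors.
	monitors := map[string]int{
		"10.0.0.1": 6789,
		"10.0.0.2": 6790,
	}
	for ip, port := range monitors {
		fmt.Println(monHostEntry(ip, port))
	}
}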
vendor/k8s.io/kubernetes/test/e2e/framework/util.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package framework import ( "bytes" "encoding/json" "errors" "fmt" "io" "io/ioutil" "math/rand" "net" "net/http" "net/url" "os" "os/exec" "path" "path/filepath" "regexp" goRuntime "runtime" "sort" "strconv" "strings" "sync" "syscall" "text/tabwriter" "time" "github.com/golang/glog" "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5" "k8s.io/kubernetes/pkg/api" apierrs "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/extensions" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" "k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/typed/discovery" "k8s.io/kubernetes/pkg/client/typed/dynamic" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/controller" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/runtime" sshutil "k8s.io/kubernetes/pkg/ssh" "k8s.io/kubernetes/pkg/types" uexec "k8s.io/kubernetes/pkg/util/exec" labelsutil "k8s.io/kubernetes/pkg/util/labels" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/system" "k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/wait" utilyaml "k8s.io/kubernetes/pkg/util/yaml" "k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" testutils "k8s.io/kubernetes/test/utils" "github.com/blang/semver" "golang.org/x/crypto/ssh" "golang.org/x/net/websocket" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" gomegatypes "github.com/onsi/gomega/types" ) const ( // How long to wait for the pod to be listable PodListTimeout = time.Minute // Initial pod start can be delayed O(minutes) by slow docker pulls // TODO: Make this 30 seconds once #4566 is resolved. PodStartTimeout = 5 * time.Minute // How long to wait for the pod to no longer be running podNoLongerRunningTimeout = 30 * time.Second // If there are any orphaned namespaces to clean up, this test is running // on a long lived cluster. A long wait here is preferably to spurious test // failures caused by leaked resources from a previous test run. NamespaceCleanupTimeout = 15 * time.Minute // Some pods can take much longer to get ready due to volume attach/detach latency. 
slowPodStartTimeout = 15 * time.Minute // How long to wait for a service endpoint to be resolvable. ServiceStartTimeout = 1 * time.Minute // How often to Poll pods, nodes and claims. Poll = 2 * time.Second // service accounts are provisioned after namespace creation // a service account is required to support pod creation in a namespace as part of admission control ServiceAccountProvisionTimeout = 2 * time.Minute // How long to try single API calls (like 'get' or 'list'). Used to prevent // transient failures from failing tests. // TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed. SingleCallTimeout = 5 * time.Minute // How long nodes have to be "ready" when a test begins. They should already // be "ready" before the test starts, so this is small. NodeReadyInitialTimeout = 20 * time.Second // How long pods have to be "ready" when a test begins. PodReadyBeforeTimeout = 5 * time.Minute // How long pods have to become scheduled onto nodes podScheduledBeforeTimeout = PodListTimeout + (20 * time.Second) podRespondingTimeout = 2 * time.Minute ServiceRespondingTimeout = 2 * time.Minute EndpointRegisterTimeout = time.Minute // How long claims have to become dynamically provisioned ClaimProvisionTimeout = 5 * time.Minute // When these values are updated, also update cmd/kubelet/app/options/options.go currentPodInfraContainerImageName = "gcr.io/google_containers/pause" currentPodInfraContainerImageVersion = "3.0" // How long each node is given during a process that restarts all nodes // before the test is considered failed. (Note that the total time to // restart all nodes will be this number times the number of nodes.) RestartPerNodeTimeout = 5 * time.Minute // How often to Poll the statues of a restart. RestartPoll = 20 * time.Second // How long a node is allowed to become "Ready" after it is restarted before // the test is considered failed. RestartNodeReadyAgainTimeout = 5 * time.Minute // How long a pod is allowed to become "running" and "ready" after a node // restart before test is considered failed. RestartPodReadyAgainTimeout = 5 * time.Minute // Number of times we want to retry Updates in case of conflict UpdateRetries = 5 // Number of objects that gc can delete in a second. // GC issues 2 requestes for single delete. gcThroughput = 10 // TODO(justinsb): Avoid hardcoding this. awsMasterIP = "172.20.0.9" // Default time to wait for nodes to become schedulable. // Set so high for scale tests. NodeSchedulableTimeout = 4 * time.Hour ) var ( // Label allocated to the image puller static pod that runs on each node // before e2es. ImagePullerLabels = map[string]string{"name": "e2e-image-puller"} // For parsing Kubectl version for version-skewed testing. gitVersionRegexp = regexp.MustCompile("GitVersion:\"(v.+?)\"") // Slice of regexps for names of pods that have to be running to consider a Node "healthy" requiredPerNodePods = []*regexp.Regexp{ regexp.MustCompile(".*kube-proxy.*"), regexp.MustCompile(".*fluentd-elasticsearch.*"), regexp.MustCompile(".*node-problem-detector.*"), } ) type Address struct { internalIP string externalIP string hostname string } // GetServerArchitecture fetches the architecture of the cluster's apiserver. func GetServerArchitecture(c clientset.Interface) string { arch := "" sVer, err := c.Discovery().ServerVersion() if err != nil || sVer.Platform == "" { // If we failed to get the server version for some reason, default to amd64. arch = "amd64" } else { // Split the platform string into OS and Arch separately. 
// The platform string may for example be "linux/amd64", "linux/arm" or "windows/amd64". osArchArray := strings.Split(sVer.Platform, "/") arch = osArchArray[1] } return arch } // GetPauseImageName fetches the pause image name for the same architecture as the apiserver. func GetPauseImageName(c clientset.Interface) string { return currentPodInfraContainerImageName + "-" + GetServerArchitecture(c) + ":" + currentPodInfraContainerImageVersion } // GetPauseImageNameForHostArch fetches the pause image name for the same architecture the test is running on. // TODO: move this function to the test/utils func GetPauseImageNameForHostArch() string { return currentPodInfraContainerImageName + "-" + goRuntime.GOARCH + ":" + currentPodInfraContainerImageVersion } // SubResource proxy should have been functional in v1.0.0, but SubResource // proxy via tunneling is known to be broken in v1.0. See // https://github.com/kubernetes/kubernetes/pull/15224#issuecomment-146769463 // // TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively // in v1.3). var SubResourcePodProxyVersion = version.MustParse("v1.1.0") var subResourceServiceAndNodeProxyVersion = version.MustParse("v1.2.0") func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) { subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery()) if err != nil { return nil, err } if subResourceProxyAvailable { return request.Resource("services").SubResource("proxy"), nil } return request.Prefix("proxy").Resource("services"), nil } // unique identifier of the e2e run var RunId = uuid.NewUUID() type CreateTestingNSFn func(baseName string, c clientset.Interface, labels map[string]string) (*api.Namespace, error) type ContainerFailures struct { status *api.ContainerStateTerminated Restarts int } func GetMasterHost() string { masterUrl, err := url.Parse(TestContext.Host) ExpectNoError(err) return masterUrl.Host } func nowStamp() string { return time.Now().Format(time.StampMilli) } func log(level string, format string, args ...interface{}) { fmt.Fprintf(GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...) } func Logf(format string, args ...interface{}) { log("INFO", format, args...) } func Failf(format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) log("INFO", msg) Fail(nowStamp()+": "+msg, 1) } func Skipf(format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) log("INFO", msg) Skip(nowStamp() + ": " + msg) } func SkipUnlessNodeCountIsAtLeast(minNodeCount int) { if TestContext.CloudConfig.NumNodes < minNodeCount { Skipf("Requires at least %d nodes (not %d)", minNodeCount, TestContext.CloudConfig.NumNodes) } } func SkipUnlessAtLeast(value int, minValue int, message string) { if value < minValue { Skipf(message) } } func SkipIfProviderIs(unsupportedProviders ...string) { if ProviderIs(unsupportedProviders...) { Skipf("Not supported for providers %v (found %s)", unsupportedProviders, TestContext.Provider) } } func SkipUnlessProviderIs(supportedProviders ...string) { if !ProviderIs(supportedProviders...) 
{ Skipf("Only supported for providers %v (not %s)", supportedProviders, TestContext.Provider) } } func SkipIfContainerRuntimeIs(runtimes ...string) { for _, runtime := range runtimes { if runtime == TestContext.ContainerRuntime { Skipf("Not supported under container runtime %s", runtime) } } } func ProviderIs(providers ...string) bool { for _, provider := range providers { if strings.ToLower(provider) == strings.ToLower(TestContext.Provider) { return true } } return false } func SkipUnlessServerVersionGTE(v semver.Version, c discovery.ServerVersionInterface) { gte, err := ServerVersionGTE(v, c) if err != nil { Failf("Failed to get server version: %v", err) } if !gte { Skipf("Not supported for server versions before %q", v) } } // Detects whether the federation namespace exists in the underlying cluster func SkipUnlessFederated(c clientset.Interface) { federationNS := os.Getenv("FEDERATION_NAMESPACE") if federationNS == "" { federationNS = "federation" } _, err := c.Core().Namespaces().Get(federationNS) if err != nil { if apierrs.IsNotFound(err) { Skipf("Could not find federation namespace %s: skipping federated test", federationNS) } else { Failf("Unexpected error getting namespace: %v", err) } } } func SkipIfMissingResource(clientPool dynamic.ClientPool, gvr unversioned.GroupVersionResource, namespace string) { dynamicClient, err := clientPool.ClientForGroupVersionResource(gvr) if err != nil { Failf("Unexpected error getting dynamic client for %v: %v", gvr.GroupVersion(), err) } apiResource := unversioned.APIResource{Name: gvr.Resource, Namespaced: true} _, err = dynamicClient.Resource(&apiResource, namespace).List(&v1.ListOptions{}) if err != nil { // not all resources support list, so we ignore those if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) { Skipf("Could not find %s resource, skipping test: %#v", gvr, err) } Failf("Unexpected error getting %v: %v", gvr, err) } } // ProvidersWithSSH are those providers where each node is accessible with SSH var ProvidersWithSSH = []string{"gce", "gke", "aws"} // providersWithMasterSSH are those providers where master node is accessible with SSH var providersWithMasterSSH = []string{"gce", "gke", "kubemark", "aws"} type podCondition func(pod *api.Pod) (bool, error) // logPodStates logs basic info of provided pods for debugging. func logPodStates(pods []api.Pod) { // Find maximum widths for pod, node, and phase strings for column printing. maxPodW, maxNodeW, maxPhaseW, maxGraceW := len("POD"), len("NODE"), len("PHASE"), len("GRACE") for i := range pods { pod := &pods[i] if len(pod.ObjectMeta.Name) > maxPodW { maxPodW = len(pod.ObjectMeta.Name) } if len(pod.Spec.NodeName) > maxNodeW { maxNodeW = len(pod.Spec.NodeName) } if len(pod.Status.Phase) > maxPhaseW { maxPhaseW = len(pod.Status.Phase) } } // Increase widths by one to separate by a single space. maxPodW++ maxNodeW++ maxPhaseW++ maxGraceW++ // Log pod info. * does space padding, - makes them left-aligned. Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s", maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS") for _, pod := range pods { grace := "" if pod.DeletionGracePeriodSeconds != nil { grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds) } Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s", maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions) } Logf("") // Final empty line helps for readability. 
} // errorBadPodsStates create error message of basic info of bad pods for debugging. func errorBadPodsStates(badPods []api.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string { errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout) // Pirnt bad pods info only if there are fewer than 10 bad pods if len(badPods) > 10 { return errStr + "There are too many bad pods. Please check log for details." } buf := bytes.NewBuffer(nil) w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS") for _, badPod := range badPods { grace := "" if badPod.DeletionGracePeriodSeconds != nil { grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds) } podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%s", badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions) fmt.Fprintln(w, podInfo) } w.Flush() return errStr + buf.String() } // WaitForPodsSuccess waits till all labels matching the given selector enter // the Success state. The caller is expected to only invoke this method once the // pods have been created. func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[string]string, timeout time.Duration) error { successPodSelector := labels.SelectorFromSet(successPodLabels) start, badPods, desiredPods := time.Now(), []api.Pod{}, 0 if wait.PollImmediate(30*time.Second, timeout, func() (bool, error) { podList, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: successPodSelector}) if err != nil { Logf("Error getting pods in namespace %q: %v", ns, err) return false, nil } if len(podList.Items) == 0 { Logf("Waiting for pods to enter Success, but no pods in %q match label %v", ns, successPodLabels) return true, nil } badPods = []api.Pod{} desiredPods = len(podList.Items) for _, pod := range podList.Items { if pod.Status.Phase != api.PodSucceeded { badPods = append(badPods, pod) } } successPods := len(podList.Items) - len(badPods) Logf("%d / %d pods in namespace %q are in Success state (%d seconds elapsed)", successPods, len(podList.Items), ns, int(time.Since(start).Seconds())) if len(badPods) == 0 { return true, nil } return false, nil }) != nil { logPodStates(badPods) LogPodsWithLabels(c, ns, successPodLabels, Logf) return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "SUCCESS", timeout)) } return nil } var ReadyReplicaVersion = version.MustParse("v1.4.0") // WaitForPodsRunningReady waits up to timeout to ensure that all pods in // namespace ns are either running and ready, or failed but controlled by a // controller. Also, it ensures that at least minPods are running and // ready. It has separate behavior from other 'wait for' pods functions in // that it requests the list of pods on every iteration. This is useful, for // example, in cluster startup, because the number of pods increases while // waiting. // If ignoreLabels is not empty, pods matching this selector are ignored and // this function waits for minPods to enter Running/Ready and for all pods // matching ignoreLabels to enter Success phase. Otherwise an error is returned // even if there are minPods pods, some of which are in Running/Ready // and some in Success. This is to allow the client to decide if "Success" // means "Ready" or not. 
func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, timeout time.Duration, ignoreLabels map[string]string) error { // This can be removed when we no longer have 1.3 servers running with upgrade tests. hasReadyReplicas, err := ServerVersionGTE(ReadyReplicaVersion, c.Discovery()) if err != nil { Logf("Error getting the server version: %v", err) return err } ignoreSelector := labels.SelectorFromSet(ignoreLabels) start := time.Now() Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready", timeout, minPods, ns) wg := sync.WaitGroup{} wg.Add(1) var waitForSuccessError error badPods := []api.Pod{} desiredPods := 0 go func() { waitForSuccessError = WaitForPodsSuccess(c, ns, ignoreLabels, timeout) wg.Done() }() if wait.PollImmediate(Poll, timeout, func() (bool, error) { // We get the new list of pods, replication controllers, and // replica sets in every iteration because more pods come // online during startup and we want to ensure they are also // checked. replicas, replicaOk := int32(0), int32(0) if hasReadyReplicas { rcList, err := c.Core().ReplicationControllers(ns).List(api.ListOptions{}) if err != nil { Logf("Error getting replication controllers in namespace '%s': %v", ns, err) return false, nil } for _, rc := range rcList.Items { replicas += rc.Spec.Replicas replicaOk += rc.Status.ReadyReplicas } rsList, err := c.Extensions().ReplicaSets(ns).List(api.ListOptions{}) if err != nil { Logf("Error getting replication sets in namespace %q: %v", ns, err) return false, nil } for _, rs := range rsList.Items { replicas += rs.Spec.Replicas replicaOk += rs.Status.ReadyReplicas } } podList, err := c.Core().Pods(ns).List(api.ListOptions{}) if err != nil { Logf("Error getting pods in namespace '%s': %v", ns, err) return false, nil } nOk := int32(0) badPods = []api.Pod{} desiredPods = len(podList.Items) for _, pod := range podList.Items { if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) { Logf("%v in state %v, ignoring", pod.Name, pod.Status.Phase) continue } if res, err := testutils.PodRunningReady(&pod); res && err == nil { nOk++ } else { if pod.Status.Phase != api.PodFailed { Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase) badPods = append(badPods, pod) } else if _, ok := pod.Annotations[api.CreatedByAnnotation]; !ok { Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name) badPods = append(badPods, pod) } //ignore failed pods that are controlled by some controller } } Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)", nOk, len(podList.Items), ns, int(time.Since(start).Seconds())) if hasReadyReplicas { Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk) } if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 { return true, nil } logPodStates(badPods) return false, nil }) != nil { return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout)) } wg.Wait() if waitForSuccessError != nil { return waitForSuccessError } return nil } func podFromManifest(filename string) (*api.Pod, error) { var pod api.Pod Logf("Parsing pod from %v", filename) data := ReadOrDie(filename) json, err := utilyaml.ToJSON(data) if err != nil { return nil, err } if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), json, &pod); err != nil { return nil, err } return &pod, nil } 
// Run a test container to try and contact the Kubernetes api-server from a pod, wait for it // to flip to Ready, log its output and delete it. func RunKubernetesServiceTestContainer(c clientset.Interface, ns string) { path := "test/images/clusterapi-tester/pod.yaml" p, err := podFromManifest(path) if err != nil { Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err) return } p.Namespace = ns if _, err := c.Core().Pods(ns).Create(p); err != nil { Logf("Failed to create %v: %v", p.Name, err) return } defer func() { if err := c.Core().Pods(ns).Delete(p.Name, nil); err != nil { Logf("Failed to delete pod %v: %v", p.Name, err) } }() timeout := 5 * time.Minute if err := waitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil { Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err) return } logs, err := GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name) if err != nil { Logf("Failed to retrieve logs from %v: %v", p.Name, err) } else { Logf("Output of clusterapi-tester:\n%v", logs) } } func kubectlLogPod(c clientset.Interface, pod api.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) { for _, container := range pod.Spec.Containers { if strings.Contains(container.Name, containerNameSubstr) { // Contains() matches all strings if substr is empty logs, err := GetPodLogs(c, pod.Namespace, pod.Name, container.Name) if err != nil { logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name) if err != nil { logFunc("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err) } } By(fmt.Sprintf("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName)) logFunc("%s : STARTLOG\n%s\nENDLOG for container %v:%v:%v", containerNameSubstr, logs, pod.Namespace, pod.Name, container.Name) } } } func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) { podList, err := c.Core().Pods(ns).List(api.ListOptions{}) if err != nil { logFunc("Error getting pods in namespace '%s': %v", ns, err) return } logFunc("Running kubectl logs on non-ready containers in %v", ns) for _, pod := range podList.Items { if res, err := testutils.PodRunningReady(&pod); !res || err != nil { kubectlLogPod(c, pod, "", Logf) } } } func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string, logFunc func(ftm string, args ...interface{})) { podList, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: labels.SelectorFromSet(match)}) if err != nil { logFunc("Error getting pods in namespace %q: %v", ns, err) return } logFunc("Running kubectl logs on pods with labels %v in %v", match, ns) for _, pod := range podList.Items { kubectlLogPod(c, pod, "", logFunc) } } func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[string]string, containerSubstr string, logFunc func(ftm string, args ...interface{})) { podList, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: labels.SelectorFromSet(match)}) if err != nil { Logf("Error getting pods in namespace %q: %v", ns, err) return } for _, pod := range podList.Items { kubectlLogPod(c, pod, containerSubstr, logFunc) } } // DeleteNamespaces deletes all namespaces that match the given delete and skip filters. // Filter is by simple strings.Contains; first skip filter, then delete filter. // Returns the list of deleted namespaces or an error. 
func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) { By("Deleting namespaces") nsList, err := c.Core().Namespaces().List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) var deleted []string var wg sync.WaitGroup OUTER: for _, item := range nsList.Items { if skipFilter != nil { for _, pattern := range skipFilter { if strings.Contains(item.Name, pattern) { continue OUTER } } } if deleteFilter != nil { var shouldDelete bool for _, pattern := range deleteFilter { if strings.Contains(item.Name, pattern) { shouldDelete = true break } } if !shouldDelete { continue OUTER } } wg.Add(1) deleted = append(deleted, item.Name) go func(nsName string) { defer wg.Done() defer GinkgoRecover() Expect(c.Core().Namespaces().Delete(nsName, nil)).To(Succeed()) Logf("namespace : %v api call to delete is complete ", nsName) }(item.Name) } wg.Wait() return deleted, nil } func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeout time.Duration) error { By("Waiting for namespaces to vanish") nsMap := map[string]bool{} for _, ns := range namespaces { nsMap[ns] = true } //Now POLL until all namespaces have been eradicated. return wait.Poll(2*time.Second, timeout, func() (bool, error) { nsList, err := c.Core().Namespaces().List(api.ListOptions{}) if err != nil { return false, err } for _, item := range nsList.Items { if _, ok := nsMap[item.Name]; ok { return false, nil } } return true, nil }) } func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error { w, err := c.Core().ServiceAccounts(ns).Watch(api.SingleObject(api.ObjectMeta{Name: serviceAccountName})) if err != nil { return err } _, err = watch.Until(timeout, w, client.ServiceAccountHasSecrets) return err } func waitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error { Logf("Waiting up to %[1]v for pod %[2]s status to be %[3]s", timeout, podName, desc) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { pod, err := c.Core().Pods(ns).Get(podName) if err != nil { if apierrs.IsNotFound(err) { Logf("Pod %q in namespace %q disappeared. Error: %v", podName, ns, err) return err } // Aligning this text makes it much more readable Logf("Get pod %[1]s in namespace '%[2]s' failed, ignoring for %[3]v. Error: %[4]v", podName, ns, Poll, err) continue } done, err := condition(pod) if done { return err } Logf("Waiting for pod %[1]s in namespace '%[2]s' status to be '%[3]s'"+ "(found phase: %[4]q, readiness: %[5]t) (%[6]v elapsed)", podName, ns, desc, pod.Status.Phase, testutils.PodReady(pod), time.Since(start)) } return fmt.Errorf("gave up waiting for pod '%s' to be '%s' after %v", podName, desc, timeout) } // WaitForMatchPodsCondition finds match pods based on the input ListOptions. 
// waits and checks if all match pods are in the given podCondition func WaitForMatchPodsCondition(c clientset.Interface, opts api.ListOptions, desc string, timeout time.Duration, condition podCondition) error { Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { pods, err := c.Core().Pods(api.NamespaceAll).List(opts) if err != nil { return err } conditionNotMatch := []string{} for _, pod := range pods.Items { done, err := condition(&pod) if done && err != nil { return fmt.Errorf("Unexpected error: %v", err) } if !done { conditionNotMatch = append(conditionNotMatch, format.Pod(&pod)) } } if len(conditionNotMatch) <= 0 { return err } Logf("%d pods are not %s", len(conditionNotMatch), desc) } return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout) } // WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned // the default service account is what is associated with pods when they do not specify a service account // as a result, pods are not able to be provisioned in a namespace until the service account is provisioned func WaitForDefaultServiceAccountInNamespace(c clientset.Interface, namespace string) error { return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout) } // WaitForFederationApiserverReady waits for the federation apiserver to be ready. // It tests the readiness by sending a GET request and expecting a non error response. func WaitForFederationApiserverReady(c *federation_release_1_5.Clientset) error { return wait.PollImmediate(time.Second, 1*time.Minute, func() (bool, error) { _, err := c.Federation().Clusters().List(v1.ListOptions{}) if err != nil { return false, nil } return true, nil }) } // WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first. func WaitForPersistentVolumePhase(phase api.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error { Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { pv, err := c.Core().PersistentVolumes().Get(pvName) if err != nil { Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err) continue } else { if pv.Status.Phase == phase { Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start)) return nil } else { Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase) } } } return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout) } // WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first. 
func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, timeout time.Duration) error { Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { pv, err := c.Core().PersistentVolumes().Get(pvName) if err == nil { Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start)) continue } else { if apierrs.IsNotFound(err) { Logf("PersistentVolume %s was removed", pvName) return nil } else { Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err) } } } return fmt.Errorf("PersistentVolume %s still exists within %v", pvName, timeout) } // WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first. func WaitForPersistentVolumeClaimPhase(phase api.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error { Logf("Waiting up to %v for PersistentVolumeClaim %s to have phase %s", timeout, pvcName, phase) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { pvc, err := c.Core().PersistentVolumeClaims(ns).Get(pvcName) if err != nil { Logf("Get persistent volume claim %s in failed, ignoring for %v: %v", pvcName, Poll, err) continue } else { if pvc.Status.Phase == phase { Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start)) return nil } else { Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase) } } } return fmt.Errorf("PersistentVolumeClaim %s not in phase %s within %v", pvcName, phase, timeout) } // CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name. // Please see NewFramework instead of using this directly. func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]string) (*api.Namespace, error) { if labels == nil { labels = map[string]string{} } labels["e2e-run"] = string(RunId) namespaceObj := &api.Namespace{ ObjectMeta: api.ObjectMeta{ GenerateName: fmt.Sprintf("e2e-tests-%v-", baseName), Namespace: "", Labels: labels, }, Status: api.NamespaceStatus{}, } // Be robust about making the namespace creation call. var got *api.Namespace if err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) { var err error got, err = c.Core().Namespaces().Create(namespaceObj) if err != nil { Logf("Unexpected error while creating namespace: %v", err) return false, nil } return true, nil }); err != nil { return nil, err } if TestContext.VerifyServiceAccount { if err := WaitForDefaultServiceAccountInNamespace(c, got.Name); err != nil { // Even if we fail to create serviceAccount in the namespace, // we have successfully create a namespace. // So, return the created namespace. return got, err } } return got, nil } // CheckTestingNSDeletedExcept checks whether all e2e based existing namespaces are in the Terminating state // and waits until they are finally deleted. It ignores namespace skip. func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error { // TODO: Since we don't have support for bulk resource deletion in the API, // while deleting a namespace we are deleting all objects from that namespace // one by one (one deletion == one API call). This basically exposes us to // throttling - currently controller-manager has a limit of max 20 QPS. 
// Once #10217 is implemented and used in namespace-controller, deleting all // object from a given namespace should be much faster and we will be able // to lower this timeout. // However, now Density test is producing ~26000 events and Load capacity test // is producing ~35000 events, thus assuming there are no other requests it will // take ~30 minutes to fully delete the namespace. Thus I'm setting it to 60 // minutes to avoid any timeouts here. timeout := 60 * time.Minute Logf("Waiting for terminating namespaces to be deleted...") for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) { namespaces, err := c.Core().Namespaces().List(api.ListOptions{}) if err != nil { Logf("Listing namespaces failed: %v", err) continue } terminating := 0 for _, ns := range namespaces.Items { if strings.HasPrefix(ns.ObjectMeta.Name, "e2e-tests-") && ns.ObjectMeta.Name != skip { if ns.Status.Phase == api.NamespaceActive { return fmt.Errorf("Namespace %s is active", ns.ObjectMeta.Name) } terminating++ } } if terminating == 0 { return nil } } return fmt.Errorf("Waiting for terminating namespaces to be deleted timed out") } // deleteNS deletes the provided namespace, waits for it to be completely deleted, and then checks // whether there are any pods remaining in a non-terminating state. func deleteNS(c clientset.Interface, clientPool dynamic.ClientPool, namespace string, timeout time.Duration) error { if err := c.Core().Namespaces().Delete(namespace, nil); err != nil { return err } // wait for namespace to delete or timeout. err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) { if _, err := c.Core().Namespaces().Get(namespace); err != nil { if apierrs.IsNotFound(err) { return true, nil } Logf("Error while waiting for namespace to be terminated: %v", err) return false, nil } return false, nil }) // verify there is no more remaining content in the namespace remainingContent, cerr := hasRemainingContent(c, clientPool, namespace) if cerr != nil { return cerr } // if content remains, let's dump information about the namespace, and system for flake debugging. remainingPods := 0 missingTimestamp := 0 if remainingContent { // log information about namespace, and set of namespaces in api server to help flake detection logNamespace(c, namespace) logNamespaces(c, namespace) // if we can, check if there were pods remaining with no timestamp. remainingPods, missingTimestamp, _ = countRemainingPods(c, namespace) } // a timeout waiting for namespace deletion happened! 
if err != nil { // some content remains in the namespace if remainingContent { // pods remain if remainingPods > 0 { // but they were all undergoing deletion (kubelet is probably culprit) if missingTimestamp == 0 { return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v, pods missing deletion timestamp: %v", namespace, err, remainingPods, missingTimestamp) } // pods remained, but were not undergoing deletion (namespace controller is probably culprit) return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v", namespace, err, remainingPods) } // other content remains (namespace controller is probably screwed up) return fmt.Errorf("namespace %v was not deleted with limit: %v, namespaced content other than pods remain", namespace, err) } // no remaining content, but namespace was not deleted (namespace controller is probably wedged) return fmt.Errorf("namespace %v was not deleted with limit: %v, namespace is empty but is not yet removed", namespace, err) } return nil } // logNamespaces logs the number of namespaces by phase // namespace is the namespace the test was operating against that failed to delete so it can be grepped in logs func logNamespaces(c clientset.Interface, namespace string) { namespaceList, err := c.Core().Namespaces().List(api.ListOptions{}) if err != nil { Logf("namespace: %v, unable to list namespaces: %v", namespace, err) return } numActive := 0 numTerminating := 0 for _, namespace := range namespaceList.Items { if namespace.Status.Phase == api.NamespaceActive { numActive++ } else { numTerminating++ } } Logf("namespace: %v, total namespaces: %v, active: %v, terminating: %v", namespace, len(namespaceList.Items), numActive, numTerminating) } // logNamespace logs detail about a namespace func logNamespace(c clientset.Interface, namespace string) { ns, err := c.Core().Namespaces().Get(namespace) if err != nil { if apierrs.IsNotFound(err) { Logf("namespace: %v no longer exists", namespace) return } Logf("namespace: %v, unable to get namespace due to error: %v", namespace, err) return } Logf("namespace: %v, DeletionTimetamp: %v, Finalizers: %v, Phase: %v", ns.Name, ns.DeletionTimestamp, ns.Spec.Finalizers, ns.Status.Phase) } // countRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp. func countRemainingPods(c clientset.Interface, namespace string) (int, int, error) { // check for remaining pods pods, err := c.Core().Pods(namespace).List(api.ListOptions{}) if err != nil { return 0, 0, err } // nothing remains! 
if len(pods.Items) == 0 { return 0, 0, nil } // stuff remains, log about it logPodStates(pods.Items) // check if there were any pods with missing deletion timestamp numPods := len(pods.Items) missingTimestamp := 0 for _, pod := range pods.Items { if pod.DeletionTimestamp == nil { missingTimestamp++ } } return numPods, missingTimestamp, nil } // hasRemainingContent checks if there is remaining content in the namespace via API discovery func hasRemainingContent(c clientset.Interface, clientPool dynamic.ClientPool, namespace string) (bool, error) { // some tests generate their own framework.Client rather than the default // TODO: ensure every test call has a configured clientPool if clientPool == nil { return false, nil } // find out what content is supported on the server groupVersionResources, err := c.Discovery().ServerPreferredNamespacedResources() if err != nil { return false, err } // TODO: temporary hack for https://github.com/kubernetes/kubernetes/issues/31798 ignoredResources := sets.NewString("bindings") contentRemaining := false // dump how many of resource type is on the server in a log. for _, gvr := range groupVersionResources { // get a client for this group version... dynamicClient, err := clientPool.ClientForGroupVersionResource(gvr) if err != nil { // not all resource types support list, so some errors here are normal depending on the resource type. Logf("namespace: %s, unable to get client - gvr: %v, error: %v", namespace, gvr, err) continue } // get the api resource apiResource := unversioned.APIResource{Name: gvr.Resource, Namespaced: true} // TODO: temporary hack for https://github.com/kubernetes/kubernetes/issues/31798 if ignoredResources.Has(apiResource.Name) { Logf("namespace: %s, resource: %s, ignored listing per whitelist", namespace, apiResource.Name) continue } obj, err := dynamicClient.Resource(&apiResource, namespace).List(&v1.ListOptions{}) if err != nil { // not all resources support list, so we ignore those if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) { continue } return false, err } unstructuredList, ok := obj.(*runtime.UnstructuredList) if !ok { return false, fmt.Errorf("namespace: %s, resource: %s, expected *runtime.UnstructuredList, got %#v", namespace, apiResource.Name, obj) } if len(unstructuredList.Items) > 0 { Logf("namespace: %s, resource: %s, items remaining: %v", namespace, apiResource.Name, len(unstructuredList.Items)) contentRemaining = true } } return contentRemaining, nil } func ContainerInitInvariant(older, newer runtime.Object) error { oldPod := older.(*api.Pod) newPod := newer.(*api.Pod) if len(oldPod.Spec.InitContainers) == 0 { return nil } if len(oldPod.Spec.InitContainers) != len(newPod.Spec.InitContainers) { return fmt.Errorf("init container list changed") } if oldPod.UID != newPod.UID { return fmt.Errorf("two different pods exist in the condition: %s vs %s", oldPod.UID, newPod.UID) } if err := initContainersInvariants(oldPod); err != nil { return err } if err := initContainersInvariants(newPod); err != nil { return err } oldInit, _, _ := podInitialized(oldPod) newInit, _, _ := podInitialized(newPod) if oldInit && !newInit { // TODO: we may in the future enable resetting PodInitialized = false if the kubelet needs to restart it // from scratch return fmt.Errorf("pod cannot be initialized and then regress to not being initialized") } return nil } func podInitialized(pod *api.Pod) (ok bool, failed bool, err error) { allInit := true initFailed := false for _, s := range 
pod.Status.InitContainerStatuses { switch { case initFailed && s.State.Waiting == nil: return allInit, initFailed, fmt.Errorf("container %s is after a failed container but isn't waiting", s.Name) case allInit && s.State.Waiting == nil: return allInit, initFailed, fmt.Errorf("container %s is after an initializing container but isn't waiting", s.Name) case s.State.Terminated == nil: allInit = false case s.State.Terminated.ExitCode != 0: allInit = false initFailed = true case !s.Ready: return allInit, initFailed, fmt.Errorf("container %s initialized but isn't marked as ready", s.Name) } } return allInit, initFailed, nil } func initContainersInvariants(pod *api.Pod) error { allInit, initFailed, err := podInitialized(pod) if err != nil { return err } if !allInit || initFailed { for _, s := range pod.Status.ContainerStatuses { if s.State.Waiting == nil || s.RestartCount != 0 { return fmt.Errorf("container %s is not waiting but initialization not complete", s.Name) } if s.State.Waiting.Reason != "PodInitializing" { return fmt.Errorf("container %s should have reason PodInitializing: %s", s.Name, s.State.Waiting.Reason) } } } _, c := api.GetPodCondition(&pod.Status, api.PodInitialized) if c == nil { return fmt.Errorf("pod does not have initialized condition") } if c.LastTransitionTime.IsZero() { return fmt.Errorf("PodInitialized condition should always have a transition time") } switch { case c.Status == api.ConditionUnknown: return fmt.Errorf("PodInitialized condition should never be Unknown") case c.Status == api.ConditionTrue && (initFailed || !allInit): return fmt.Errorf("PodInitialized condition was True but all not all containers initialized") case c.Status == api.ConditionFalse && (!initFailed && allInit): return fmt.Errorf("PodInitialized condition was False but all containers initialized") } return nil } type InvariantFunc func(older, newer runtime.Object) error func CheckInvariants(events []watch.Event, fns ...InvariantFunc) error { errs := sets.NewString() for i := range events { j := i + 1 if j >= len(events) { continue } for _, fn := range fns { if err := fn(events[i].Object, events[j].Object); err != nil { errs.Insert(err.Error()) } } } if errs.Len() > 0 { return fmt.Errorf("invariants violated:\n* %s", strings.Join(errs.List(), "\n* ")) } return nil } // Waits default amount of time (PodStartTimeout) for the specified pod to become running. // Returns an error if timeout occurs first, or pod goes in to failed state. func WaitForPodRunningInNamespace(c clientset.Interface, pod *api.Pod) error { // this short-cicuit is needed for cases when we pass a list of pods instead // of newly created pod (e.g. VerifyPods) which means we are getting already // running pod for which waiting does not make sense and will always fail if pod.Status.Phase == api.PodRunning { return nil } return waitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, pod.ResourceVersion, PodStartTimeout) } // Waits default amount of time (PodStartTimeout) for the specified pod to become running. // Returns an error if timeout occurs first, or pod goes in to failed state. func WaitForPodNameRunningInNamespace(c clientset.Interface, podName, namespace string) error { return waitTimeoutForPodRunningInNamespace(c, podName, namespace, "", PodStartTimeout) } // Waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running. // The resourceVersion is used when Watching object changes, it tells since when we care // about changes to the pod. 
Returns an error if timeout occurs first, or pod goes in to failed state. func waitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace, resourceVersion string) error { return waitTimeoutForPodRunningInNamespace(c, podName, namespace, resourceVersion, slowPodStartTimeout) } func waitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error { w, err := c.Core().Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) if err != nil { return err } _, err = watch.Until(timeout, w, client.PodRunning) return err } // Waits default amount of time (podNoLongerRunningTimeout) for the specified pod to stop running. // Returns an error if timeout occurs first. func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace, resourceVersion string) error { return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, resourceVersion, podNoLongerRunningTimeout) } func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error { w, err := c.Core().Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) if err != nil { return err } _, err = watch.Until(timeout, w, client.PodCompleted) return err } func waitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error { w, err := c.Core().Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) if err != nil { return err } _, err = watch.Until(timeout, w, client.PodRunningAndReady) return err } // WaitForPodNotPending returns an error if it took too long for the pod to go out of pending state. // The resourceVersion is used when Watching object changes, it tells since when we care // about changes to the pod. func WaitForPodNotPending(c clientset.Interface, ns, podName, resourceVersion string) error { w, err := c.Core().Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion})) if err != nil { return err } _, err = watch.Until(PodStartTimeout, w, client.PodNotPending) return err } // waitForPodTerminatedInNamespace returns an error if it took too long for the pod // to terminate or if the pod terminated with an unexpected reason. func waitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, namespace string) error { return waitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", PodStartTimeout, func(pod *api.Pod) (bool, error) { if pod.Status.Phase == api.PodFailed { if pod.Status.Reason == reason { return true, nil } else { return true, fmt.Errorf("Expected pod %v in namespace %v to be terminated with reason %v, got reason: %v", podName, namespace, reason, pod.Status.Reason) } } return false, nil }) } // waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long. 
func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName string, namespace string, timeout time.Duration) error { return waitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *api.Pod) (bool, error) { if pod.Spec.RestartPolicy == api.RestartPolicyAlways { return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName) } switch pod.Status.Phase { case api.PodSucceeded: By("Saw pod success") return true, nil case api.PodFailed: return true, fmt.Errorf("pod %q failed with status: %+v", podName, pod.Status) default: return false, nil } }) } // WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or until podStartupTimeout. func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error { return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, PodStartTimeout) } // WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or until slowPodStartupTimeout. func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error { return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout) } // waitForRCPodOnNode returns the pod from the given replication controller (described by rcName) which is scheduled on the given node. // In case of failure or too long waiting time, an error is returned. func waitForRCPodOnNode(c clientset.Interface, ns, rcName, node string) (*api.Pod, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName})) var p *api.Pod = nil err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) { Logf("Waiting for pod %s to appear on node %s", rcName, node) options := api.ListOptions{LabelSelector: label} pods, err := c.Core().Pods(ns).List(options) if err != nil { return false, err } for _, pod := range pods.Items { if pod.Spec.NodeName == node { Logf("Pod %s found on node %s", pod.Name, node) p = &pod return true, nil } } return false, nil }) return p, err } // WaitForRCToStabilize waits till the RC has a matching generation/replica count between spec and status. 
func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.Duration) error { options := api.ListOptions{FieldSelector: fields.Set{ "metadata.name": name, "metadata.namespace": ns, }.AsSelector()} w, err := c.Core().ReplicationControllers(ns).Watch(options) if err != nil { return err } _, err = watch.Until(timeout, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Deleted: return false, apierrs.NewNotFound(unversioned.GroupResource{Resource: "replicationcontrollers"}, "") } switch rc := event.Object.(type) { case *api.ReplicationController: if rc.Name == name && rc.Namespace == ns && rc.Generation <= rc.Status.ObservedGeneration && rc.Spec.Replicas == rc.Status.Replicas { return true, nil } Logf("Waiting for rc %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d", name, rc.Generation, rc.Status.ObservedGeneration, rc.Spec.Replicas, rc.Status.Replicas) } return false, nil }) return err } func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error { return wait.PollImmediate(interval, timeout, func() (bool, error) { Logf("Waiting for pod %s to disappear", podName) options := api.ListOptions{LabelSelector: label} pods, err := c.Core().Pods(ns).List(options) if err != nil { return false, err } found := false for _, pod := range pods.Items { if pod.Name == podName { Logf("Pod %s still exists", podName) found = true break } } if !found { Logf("Pod %s no longer exists", podName) return true, nil } return false, nil }) } // WaitForRCPodToDisappear returns nil if the pod from the given replication controller (described by rcName) no longer exists. // In case of failure or too long waiting time, an error is returned. func WaitForRCPodToDisappear(c clientset.Interface, ns, rcName, podName string) error { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName})) // NodeController evicts pod after 5 minutes, so we need timeout greater than that to observe effects. // The grace period must be set to 0 on the pod for it to be deleted during the partition. // Otherwise, it goes to the 'Terminating' state till the kubelet confirms deletion. return WaitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute) } // WaitForService waits until the service appears (exist == true), or disappears (exist == false) func WaitForService(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { _, err := c.Core().Services(namespace).Get(name) switch { case err == nil: if !exist { return false, nil } Logf("Service %s in namespace %s found.", name, namespace) return true, nil case apierrs.IsNotFound(err): if exist { return false, nil } Logf("Service %s in namespace %s disappeared.", name, namespace) return true, nil default: Logf("Get service %s in namespace %s failed: %v", name, namespace, err) return false, nil } }) if err != nil { stateMsg := map[bool]string{true: "to appear", false: "to disappear"} return fmt.Errorf("error waiting for service %s/%s %s: %v", namespace, name, stateMsg[exist], err) } return nil } //WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum. 
func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error { return wait.Poll(interval, timeout, func() (bool, error) { Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum) list, err := c.Core().Endpoints(namespace).List(api.ListOptions{}) if err != nil { return false, err } for _, e := range list.Items { if e.Name == serviceName && countEndpointsNum(&e) == expectNum { return true, nil } } return false, nil }) } func countEndpointsNum(e *api.Endpoints) int { num := 0 for _, sub := range e.Subsets { num += len(sub.Addresses) } return num } // WaitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false) func WaitForReplicationController(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { _, err := c.Core().ReplicationControllers(namespace).Get(name) if err != nil { Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err) return !exist, nil } else { Logf("ReplicationController %s in namespace %s found.", name, namespace) return exist, nil } }) if err != nil { stateMsg := map[bool]string{true: "to appear", false: "to disappear"} return fmt.Errorf("error waiting for ReplicationController %s/%s %s: %v", namespace, name, stateMsg[exist], err) } return nil } func WaitForEndpoint(c clientset.Interface, ns, name string) error { for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) { endpoint, err := c.Core().Endpoints(ns).Get(name) Expect(err).NotTo(HaveOccurred()) if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 { Logf("Endpoint %s/%s is not ready yet", ns, name) continue } else { return nil } } return fmt.Errorf("Failed to get endpoints for %s/%s", ns, name) } // Context for checking pods responses by issuing GETs to them (via the API // proxy) and verifying that they answer with ther own pod name. type podProxyResponseChecker struct { c clientset.Interface ns string label labels.Selector controllerName string respondName bool // Whether the pod should respond with its own name. pods *api.PodList } func PodProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *api.PodList) podProxyResponseChecker { return podProxyResponseChecker{c, ns, label, controllerName, respondName, pods} } // CheckAllResponses issues GETs to all pods in the context and verify they // reply with their own pod name. func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) { successes := 0 options := api.ListOptions{LabelSelector: r.label} currentPods, err := r.c.Core().Pods(r.ns).List(options) Expect(err).NotTo(HaveOccurred()) for i, pod := range r.pods.Items { // Check that the replica list remains unchanged, otherwise we have problems. if !isElementOf(pod.UID, currentPods) { return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods) } subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, r.c.Discovery()) if err != nil { return false, err } var body []byte if subResourceProxyAvailable { body, err = r.c.Core().RESTClient().Get(). Namespace(r.ns). Resource("pods"). SubResource("proxy"). Name(string(pod.Name)). Do(). 
Raw() } else { body, err = r.c.Core().RESTClient().Get(). Prefix("proxy"). Namespace(r.ns). Resource("pods"). Name(string(pod.Name)). Do(). Raw() } if err != nil { Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status) continue } // The response checker expects the pod's name unless !respondName, in // which case it just checks for a non-empty response. got := string(body) what := "" if r.respondName { what = "expected" want := pod.Name if got != want { Logf("Controller %s: Replica %d [%s] expected response %q but got %q", r.controllerName, i+1, pod.Name, want, got) continue } } else { what = "non-empty" if len(got) == 0 { Logf("Controller %s: Replica %d [%s] expected non-empty response", r.controllerName, i+1, pod.Name) continue } } successes++ Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far", r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items)) } if successes < len(r.pods.Items) { return false, nil } return true, nil } // ServerVersionGTE returns true if v is greater than or equal to the server // version. // // TODO(18726): This should be incorporated into client.VersionInterface. func ServerVersionGTE(v semver.Version, c discovery.ServerVersionInterface) (bool, error) { serverVersion, err := c.ServerVersion() if err != nil { return false, fmt.Errorf("Unable to get server version: %v", err) } sv, err := version.Parse(serverVersion.GitVersion) if err != nil { return false, fmt.Errorf("Unable to parse server version %q: %v", serverVersion.GitVersion, err) } return sv.GTE(v), nil } func SkipUnlessKubectlVersionGTE(v semver.Version) { gte, err := KubectlVersionGTE(v) if err != nil { Failf("Failed to get kubectl version: %v", err) } if !gte { Skipf("Not supported for kubectl versions before %q", v) } } // KubectlVersionGTE returns true if the kubectl version is greater than or // equal to v. func KubectlVersionGTE(v semver.Version) (bool, error) { kv, err := KubectlVersion() if err != nil { return false, err } return kv.GTE(v), nil } // KubectlVersion gets the version of kubectl that's currently being used (see // --kubectl-path in e2e.go to use an alternate kubectl). func KubectlVersion() (semver.Version, error) { output := RunKubectlOrDie("version", "--client") matches := gitVersionRegexp.FindStringSubmatch(output) if len(matches) != 2 { return semver.Version{}, fmt.Errorf("Could not find kubectl version in output %v", output) } // Don't use the full match, as it contains "GitVersion:\"" and a // trailing "\"". Just use the submatch. 
return version.Parse(matches[1]) } func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *api.PodList) error { By("trying to dial each unique pod") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) return wait.PollImmediate(Poll, podRespondingTimeout, PodProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses) } func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*api.PodList, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) return PodsCreatedByLabel(c, ns, name, replicas, label) } func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*api.PodList, error) { timeout := 2 * time.Minute for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { options := api.ListOptions{LabelSelector: label} // List the pods, making sure we observe all the replicas. pods, err := c.Core().Pods(ns).List(options) if err != nil { return nil, err } created := []api.Pod{} for _, pod := range pods.Items { if pod.DeletionTimestamp != nil { continue } created = append(created, pod) } Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas) if int32(len(created)) == replicas { pods.Items = created return pods, nil } } return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas) } func podsRunning(c clientset.Interface, pods *api.PodList) []error { // Wait for the pods to enter the running state. Waiting loops until the pods // are running so non-running pods cause a timeout for this test. By("ensuring each pod is running") e := []error{} error_chan := make(chan error) for _, pod := range pods.Items { go func(p api.Pod) { error_chan <- WaitForPodRunningInNamespace(c, &p) }(pod) } for range pods.Items { err := <-error_chan if err != nil { e = append(e, err) } } return e } func VerifyPods(c clientset.Interface, ns, name string, wantName bool, replicas int32) error { pods, err := PodsCreated(c, ns, name, replicas) if err != nil { return err } e := podsRunning(c, pods) if len(e) > 0 { return fmt.Errorf("failed to wait for pods running: %v", e) } err = PodsResponding(c, ns, name, wantName, pods) if err != nil { return fmt.Errorf("failed to wait for pods responding: %v", err) } return nil } func ServiceResponding(c clientset.Interface, ns, name string) error { By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name)) return wait.PollImmediate(Poll, ServiceRespondingTimeout, func() (done bool, err error) { proxyRequest, errProxy := GetServicesProxyRequest(c, c.Core().RESTClient().Get()) if errProxy != nil { Logf("Failed to get services proxy request: %v:", errProxy) return false, nil } body, err := proxyRequest.Namespace(ns). Name(name). Do(). 
Raw() if err != nil { Logf("Failed to GET from service %s: %v:", name, err) return false, nil } got := string(body) if len(got) == 0 { Logf("Service %s: expected non-empty response", name) return false, err // stop polling } Logf("Service %s: found nonempty answer: %s", name, got) return true, nil }) } func restclientConfig(kubeContext string) (*clientcmdapi.Config, error) { Logf(">>> kubeConfig: %s\n", TestContext.KubeConfig) if TestContext.KubeConfig == "" { return nil, fmt.Errorf("KubeConfig must be specified to load client config") } c, err := clientcmd.LoadFromFile(TestContext.KubeConfig) if err != nil { return nil, fmt.Errorf("error loading KubeConfig: %v", err.Error()) } if kubeContext != "" { Logf(">>> kubeContext: %s\n", kubeContext) c.CurrentContext = kubeContext } return c, nil } type ClientConfigGetter func() (*restclient.Config, error) func LoadConfig() (*restclient.Config, error) { if TestContext.NodeE2E { // This is a node e2e test, apply the node e2e configuration return &restclient.Config{Host: TestContext.Host}, nil } c, err := restclientConfig(TestContext.KubeContext) if err != nil { return nil, err } return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}).ClientConfig() } func LoadFederatedConfig(overrides *clientcmd.ConfigOverrides) (*restclient.Config, error) { c, err := restclientConfig(federatedKubeContext) if err != nil { return nil, fmt.Errorf("error creating federation client config: %v", err.Error()) } cfg, err := clientcmd.NewDefaultClientConfig(*c, overrides).ClientConfig() if cfg != nil { //TODO(colhom): this is only here because https://github.com/kubernetes/kubernetes/issues/25422 cfg.NegotiatedSerializer = api.Codecs } if err != nil { return cfg, fmt.Errorf("error creating federation client config: %v", err.Error()) } return cfg, nil } func LoadFederationClientset_1_5() (*federation_release_1_5.Clientset, error) { config, err := LoadFederatedConfig(&clientcmd.ConfigOverrides{}) if err != nil { return nil, err } c, err := federation_release_1_5.NewForConfig(config) if err != nil { return nil, fmt.Errorf("error creating federation clientset: %v", err.Error()) } return c, nil } func LoadInternalClientset() (*clientset.Clientset, error) { config, err := LoadConfig() if err != nil { return nil, fmt.Errorf("error creating client: %v", err.Error()) } return clientset.NewForConfig(config) } func LoadClientset() (*release_1_5.Clientset, error) { config, err := LoadConfig() if err != nil { return nil, fmt.Errorf("error creating client: %v", err.Error()) } return release_1_5.NewForConfig(config) } // randomSuffix provides a random string to append to pods,services,rcs. // TODO: Allow service names to have the same form as names // for pods and replication controllers so we don't // need to use such a function and can instead // use the UUID utility function. func randomSuffix() string { r := rand.New(rand.NewSource(time.Now().UnixNano())) return strconv.Itoa(r.Int() % 10000) } func ExpectNoError(err error, explain ...interface{}) { if err != nil { Logf("Unexpected error occurred: %v", err) } ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...) } func ExpectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) { var err error for i := 0; i < maxRetries; i++ { err = fn() if err == nil { return } Logf("(Attempt %d of %d) Unexpected error occurred: %v", i+1, maxRetries, err) } ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...) 
} // Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped. func Cleanup(filePath, ns string, selectors ...string) { By("using delete to clean up resources") var nsArg string if ns != "" { nsArg = fmt.Sprintf("--namespace=%s", ns) } RunKubectlOrDie("delete", "--grace-period=0", "-f", filePath, nsArg) AssertCleanup(ns, selectors...) } // Asserts that cleanup of a namespace wrt selectors occurred. func AssertCleanup(ns string, selectors ...string) { var nsArg string if ns != "" { nsArg = fmt.Sprintf("--namespace=%s", ns) } for _, selector := range selectors { resources := RunKubectlOrDie("get", "rc,svc", "-l", selector, "--no-headers", nsArg) if resources != "" { Failf("Resources left running after stop:\n%s", resources) } pods := RunKubectlOrDie("get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}") if pods != "" { Failf("Pods left unterminated after stop:\n%s", pods) } } } // validatorFn is the function which is individual tests will implement. // we may want it to return more than just an error, at some point. type validatorFn func(c clientset.Interface, podID string) error // ValidateController is a generic mechanism for testing RC's that are running. // It takes a container name, a test name, and a validator function which is plugged in by a specific test. // "containername": this is grepped for. // "containerImage" : this is the name of the image we expect to be launched. Not to confuse w/ images (kitten.jpg) which are validated. // "testname": which gets bubbled up to the logging/failure messages if errors happen. // "validator" function: This function is given a podID and a client, and it can do some specific validations that way. func ValidateController(c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) { getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}" // NB: kubectl adds the "exists" function to the standard template functions. // This lets us check to see if the "running" entry exists for each of the containers // we care about. Exists will never return an error and it's safe to check a chain of // things, any one of which may not exist. In the below template, all of info, // containername, and running might be nil, so the normal index function isn't very // helpful. // This template is unit-tested in kubectl, so if you change it, update the unit test. // You can read about the syntax here: http://golang.org/pkg/text/template/. getContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "%s") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`, containername) getImageTemplate := fmt.Sprintf(`--template={{if (exists . 
"status" "containerStatuses")}}{{range .status.containerStatuses}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername) By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector waitLoop: for start := time.Now(); time.Since(start) < PodStartTimeout; time.Sleep(5 * time.Second) { getPodsOutput := RunKubectlOrDie("get", "pods", "-o", "template", getPodsTemplate, "-l", testname, fmt.Sprintf("--namespace=%v", ns)) pods := strings.Fields(getPodsOutput) if numPods := len(pods); numPods != replicas { By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods)) continue } var runningPods []string for _, podID := range pods { running := RunKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, fmt.Sprintf("--namespace=%v", ns)) if running != "true" { Logf("%s is created but not running", podID) continue waitLoop } currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns)) if currentImage != containerImage { Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage) continue waitLoop } // Call the generic validator function here. // This might validate for example, that (1) getting a url works and (2) url is serving correct content. if err := validator(c, podID); err != nil { Logf("%s is running right image but validator function failed: %v", podID, err) continue waitLoop } Logf("%s is verified up and running", podID) runningPods = append(runningPods, podID) } // If we reach here, then all our checks passed. if len(runningPods) == replicas { return } } // Reaching here means that one of more checks failed multiple times. Assuming its not a race condition, something is broken. Failf("Timed out after %v seconds waiting for %s pods to reach valid state", PodStartTimeout.Seconds(), testname) } // KubectlCmd runs the kubectl executable through the wrapper script. func KubectlCmd(args ...string) *exec.Cmd { defaultArgs := []string{} // Reference a --server option so tests can run anywhere. if TestContext.Host != "" { defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+TestContext.Host) } if TestContext.KubeConfig != "" { defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig) // Reference the KubeContext if TestContext.KubeContext != "" { defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+TestContext.KubeContext) } } else { if TestContext.CertDir != "" { defaultArgs = append(defaultArgs, fmt.Sprintf("--certificate-authority=%s", filepath.Join(TestContext.CertDir, "ca.crt")), fmt.Sprintf("--client-certificate=%s", filepath.Join(TestContext.CertDir, "kubecfg.crt")), fmt.Sprintf("--client-key=%s", filepath.Join(TestContext.CertDir, "kubecfg.key"))) } } kubectlArgs := append(defaultArgs, args...) //We allow users to specify path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh" //and so on. cmd := exec.Command(TestContext.KubectlPath, kubectlArgs...) //caller will invoke this and wait on it. return cmd } // kubectlBuilder is used to build, customize and execute a kubectl Command. // Add more functions to customize the builder as needed. type kubectlBuilder struct { cmd *exec.Cmd timeout <-chan time.Time } func NewKubectlCommand(args ...string) *kubectlBuilder { b := new(kubectlBuilder) b.cmd = KubectlCmd(args...) 
return b } func (b *kubectlBuilder) WithEnv(env []string) *kubectlBuilder { b.cmd.Env = env return b } func (b *kubectlBuilder) WithTimeout(t <-chan time.Time) *kubectlBuilder { b.timeout = t return b } func (b kubectlBuilder) WithStdinData(data string) *kubectlBuilder { b.cmd.Stdin = strings.NewReader(data) return &b } func (b kubectlBuilder) WithStdinReader(reader io.Reader) *kubectlBuilder { b.cmd.Stdin = reader return &b } func (b kubectlBuilder) ExecOrDie() string { str, err := b.Exec() Logf("stdout: %q", str) // In case of i/o timeout error, try talking to the apiserver again after 2s before dying. // Note that we're still dying after retrying so that we can get visibility to triage it further. if isTimeout(err) { Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.") time.Sleep(2 * time.Second) retryStr, retryErr := RunKubectl("version") Logf("stdout: %q", retryStr) Logf("err: %v", retryErr) } Expect(err).NotTo(HaveOccurred()) return str } func isTimeout(err error) bool { switch err := err.(type) { case net.Error: if err.Timeout() { return true } case *url.Error: if err, ok := err.Err.(net.Error); ok && err.Timeout() { return true } } return false } func (b kubectlBuilder) Exec() (string, error) { var stdout, stderr bytes.Buffer cmd := b.cmd cmd.Stdout, cmd.Stderr = &stdout, &stderr Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately if err := cmd.Start(); err != nil { return "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err) } errCh := make(chan error, 1) go func() { errCh <- cmd.Wait() }() select { case err := <-errCh: if err != nil { var rc int = 127 if ee, ok := err.(*exec.ExitError); ok { rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus()) Logf("rc: %d", rc) } return "", uexec.CodeExitError{ Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err), Code: rc, } } case <-b.timeout: b.cmd.Process.Kill() return "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v\n", cmd, cmd.Stdout, cmd.Stderr) } Logf("stderr: %q", stderr.String()) return stdout.String(), nil } // RunKubectlOrDie is a convenience wrapper over kubectlBuilder func RunKubectlOrDie(args ...string) string { return NewKubectlCommand(args...).ExecOrDie() } // RunKubectl is a convenience wrapper over kubectlBuilder func RunKubectl(args ...string) (string, error) { return NewKubectlCommand(args...).Exec() } // RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin func RunKubectlOrDieInput(data string, args ...string) string { return NewKubectlCommand(args...).WithStdinData(data).ExecOrDie() } func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) { stdout, err = cmd.StdoutPipe() if err != nil { return } stderr, err = cmd.StderrPipe() if err != nil { return } Logf("Asynchronously running '%s %s'", cmd.Path, strings.Join(cmd.Args, " ")) err = cmd.Start() return } // Rough equivalent of ctrl+c for cleaning up processes. Intended to be run in defer. func TryKill(cmd *exec.Cmd) { if err := cmd.Process.Kill(); err != nil { Logf("ERROR failed to kill command %v!
The process may leak", cmd) } } // testContainerOutputMatcher runs the given pod in the given namespace and waits // for all of the containers in the podSpec to move into the 'Success' status, and tests // the specified container log against the given expected output using the given matcher. func (f *Framework) testContainerOutputMatcher(scenarioName string, pod *api.Pod, containerIndex int, expectedOutput []string, matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) { By(fmt.Sprintf("Creating a pod to test %v", scenarioName)) if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) { Failf("Invalid container index: %d", containerIndex) } ExpectNoError(f.MatchContainerOutput(pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher)) } // MatchContainerOutput creates a pod and waits for all it's containers to exit with success. // It then tests that the matcher with each expectedOutput matches the output of the specified container. func (f *Framework) MatchContainerOutput( pod *api.Pod, containerName string, expectedOutput []string, matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error { podClient := f.PodClient() ns := f.Namespace.Name createdPod := podClient.Create(pod) defer func() { By("delete the pod") podClient.DeleteSync(createdPod.Name, &api.DeleteOptions{}, podNoLongerRunningTimeout) }() // Wait for client pod to complete. if err := WaitForPodSuccessInNamespace(f.ClientSet, createdPod.Name, ns); err != nil { return fmt.Errorf("expected pod %q success: %v", pod.Name, err) } // Grab its logs. Get host first. podStatus, err := podClient.Get(createdPod.Name) if err != nil { return fmt.Errorf("failed to get pod status: %v", err) } Logf("Trying to get logs from node %s pod %s container %s: %v", podStatus.Spec.NodeName, podStatus.Name, containerName, err) // Sometimes the actual containers take a second to get started, try to get logs for 60s logs, err := GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName) if err != nil { Logf("Failed to get logs from node %q pod %q container %q. 
%v", podStatus.Spec.NodeName, podStatus.Name, containerName, err) return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err) } for _, expected := range expectedOutput { m := matcher(expected) matches, err := m.Match(logs) if err != nil { return fmt.Errorf("expected %q in container output: %v", expected, err) } else if !matches { return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs)) } } return nil } func RunDeployment(config testutils.DeploymentConfig) error { By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace)) config.NodeDumpFunc = DumpNodeDebugInfo config.ContainerDumpFunc = LogFailedContainers return testutils.RunDeployment(config) } func RunReplicaSet(config testutils.ReplicaSetConfig) error { By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace)) config.NodeDumpFunc = DumpNodeDebugInfo config.ContainerDumpFunc = LogFailedContainers return testutils.RunReplicaSet(config) } func RunRC(config testutils.RCConfig) error { By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace)) config.NodeDumpFunc = DumpNodeDebugInfo config.ContainerDumpFunc = LogFailedContainers return testutils.RunRC(config) } type EventsLister func(opts v1.ListOptions, ns string) (*v1.EventList, error) func DumpEventsInNamespace(eventsLister EventsLister, namespace string) { By(fmt.Sprintf("Collecting events from namespace %q.", namespace)) events, err := eventsLister(v1.ListOptions{}, namespace) Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("Found %d events.", len(events.Items))) // Sort events by their first timestamp sortedEvents := events.Items if len(sortedEvents) > 1 { sort.Sort(byFirstTimestamp(sortedEvents)) } for _, e := range sortedEvents { Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message) } // Note that we don't wait for any Cleanup to propagate, which means // that if you delete a bunch of pods right before ending your test, // you may or may not see the killing/deletion/Cleanup events. } func DumpAllNamespaceInfo(c clientset.Interface, cs *release_1_5.Clientset, namespace string) { DumpEventsInNamespace(func(opts v1.ListOptions, ns string) (*v1.EventList, error) { return cs.Core().Events(ns).List(opts) }, namespace) // If cluster is large, then the following logs are basically useless, because: // 1. it takes tens of minutes or hours to grab all of them // 2. there are so many of them that working with them are mostly impossible // So we dump them only if the cluster is relatively small. maxNodesForDump := 20 if nodes, err := c.Core().Nodes().List(api.ListOptions{}); err == nil { if len(nodes.Items) <= maxNodesForDump { dumpAllPodInfo(c) dumpAllNodeInfo(c) } else { Logf("skipping dumping cluster info - cluster too large") } } else { Logf("unable to fetch node list: %v", err) } } // byFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker. 
type byFirstTimestamp []v1.Event func (o byFirstTimestamp) Len() int { return len(o) } func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } func (o byFirstTimestamp) Less(i, j int) bool { if o[i].FirstTimestamp.Equal(o[j].FirstTimestamp) { return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name } return o[i].FirstTimestamp.Before(o[j].FirstTimestamp) } func dumpAllPodInfo(c clientset.Interface) { pods, err := c.Core().Pods("").List(api.ListOptions{}) if err != nil { Logf("unable to fetch pod debug info: %v", err) } logPodStates(pods.Items) } func dumpAllNodeInfo(c clientset.Interface) { // It should be OK to list unschedulable Nodes here. nodes, err := c.Core().Nodes().List(api.ListOptions{}) if err != nil { Logf("unable to fetch node list: %v", err) return } names := make([]string, len(nodes.Items)) for ix := range nodes.Items { names[ix] = nodes.Items[ix].Name } DumpNodeDebugInfo(c, names, Logf) } func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) { for _, n := range nodeNames { logFunc("\nLogging node info for node %v", n) node, err := c.Core().Nodes().Get(n) if err != nil { logFunc("Error getting node info %v", err) } logFunc("Node Info: %v", node) logFunc("\nLogging kubelet events for node %v", n) for _, e := range getNodeEvents(c, n) { logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v", e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject) } logFunc("\nLogging pods the kubelet thinks is on node %v", n) podList, err := GetKubeletPods(c, n) if err != nil { logFunc("Unable to retrieve kubelet pods for node %v", n) continue } for _, p := range podList.Items { logFunc("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses)) for _, c := range p.Status.InitContainerStatuses { logFunc("\tInit container %v ready: %v, restart count %v", c.Name, c.Ready, c.RestartCount) } for _, c := range p.Status.ContainerStatuses { logFunc("\tContainer %v ready: %v, restart count %v", c.Name, c.Ready, c.RestartCount) } } HighLatencyKubeletOperations(c, 10*time.Second, n, logFunc) // TODO: Log node resource info } } // logNodeEvents logs kubelet events from the given node. This includes kubelet // restart and node unhealthy events. Note that listing events like this will mess // with latency metrics, beware of calling it during a test. func getNodeEvents(c clientset.Interface, nodeName string) []api.Event { selector := fields.Set{ "involvedObject.kind": "Node", "involvedObject.name": nodeName, "involvedObject.namespace": api.NamespaceAll, "source": "kubelet", }.AsSelector() options := api.ListOptions{FieldSelector: selector} events, err := c.Core().Events(api.NamespaceSystem).List(options) if err != nil { Logf("Unexpected error retrieving node events %v", err) return []api.Event{} } return events.Items } // waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries. 
func waitListSchedulableNodesOrDie(c clientset.Interface) *api.NodeList { var nodes *api.NodeList var err error if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) { nodes, err = c.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector()}) return err == nil, nil }) != nil { ExpectNoError(err, "Timed out while listing nodes for e2e cluster.") } return nodes } // Node is schedulable if: // 1) doesn't have "unschedulable" field set // 2) it's Ready condition is set to true // 3) doesn't have NetworkUnavailable condition set to true func isNodeSchedulable(node *api.Node) bool { nodeReady := IsNodeConditionSetAsExpected(node, api.NodeReady, true) networkReady := IsNodeConditionUnset(node, api.NodeNetworkUnavailable) || IsNodeConditionSetAsExpectedSilent(node, api.NodeNetworkUnavailable, false) return !node.Spec.Unschedulable && nodeReady && networkReady } // Test whether a fake pod can be scheduled on "node", given its current taints. func isNodeUntainted(node *api.Node) bool { fakePod := &api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), }, ObjectMeta: api.ObjectMeta{ Name: "fake-not-scheduled", Namespace: "fake-not-scheduled", }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: "fake-not-scheduled", Image: "fake-not-scheduled", }, }, }, } nodeInfo := schedulercache.NewNodeInfo() nodeInfo.SetNode(node) fit, _, err := predicates.PodToleratesNodeTaints(fakePod, nil, nodeInfo) if err != nil { Failf("Can't test predicates for node %s: %v", node.Name, err) return false } return fit } // GetReadySchedulableNodesOrDie addresses the common use case of getting nodes you can do work on. // 1) Needs to be schedulable. // 2) Needs to be ready. // If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely. func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *api.NodeList) { nodes = waitListSchedulableNodesOrDie(c) // previous tests may have cause failures of some nodes. Let's skip // 'Not Ready' nodes, just in case (there is no need to fail the test). FilterNodes(nodes, func(node api.Node) bool { return isNodeSchedulable(&node) && isNodeUntainted(&node) }) return nodes } func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error { Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes) var notSchedulable []*api.Node return wait.PollImmediate(30*time.Second, timeout, func() (bool, error) { notSchedulable = nil opts := api.ListOptions{ ResourceVersion: "0", FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector(), } nodes, err := c.Core().Nodes().List(opts) if err != nil { Logf("Unexpected error listing nodes: %v", err) // Ignore the error here - it will be retried. return false, nil } for i := range nodes.Items { node := &nodes.Items[i] if !isNodeSchedulable(node) { notSchedulable = append(notSchedulable, node) } } // Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready, // to make it possible e.g. for incorrect deployment of some small percentage // of nodes (which we allow in cluster validation). Some nodes that are not // provisioned correctly at startup will never become ready (e.g. when something // won't install correctly), so we can't expect them to be ready at any point. // // However, we only allow non-ready nodes with some specific reasons. 
if len(notSchedulable) > 0 { Logf("Unschedulable nodes:") for i := range notSchedulable { Logf("-> %s Ready=%t Network=%t", notSchedulable[i].Name, IsNodeConditionSetAsExpected(notSchedulable[i], api.NodeReady, true), IsNodeConditionSetAsExpected(notSchedulable[i], api.NodeNetworkUnavailable, false)) } } if len(notSchedulable) > TestContext.AllowedNotReadyNodes { return false, nil } return allowedNotReadyReasons(notSchedulable), nil }) } func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) { ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue})) } func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) { By("verifying the node has the label " + labelKey + " " + labelValue) node, err := c.Core().Nodes().Get(nodeName) ExpectNoError(err) Expect(node.Labels[labelKey]).To(Equal(labelValue)) } // RemoveLabelOffNode is for cleaning up labels temporarily added to node, // won't fail if target label doesn't exist or has been removed. func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) { By("removing the label " + labelKey + " off the node " + nodeName) ExpectNoError(testutils.RemoveLabelOffNode(c, nodeName, []string{labelKey})) By("verifying the node doesn't have the label " + labelKey) ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey})) } func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint api.Taint) { for attempt := 0; attempt < UpdateRetries; attempt++ { node, err := c.Core().Nodes().Get(nodeName) ExpectNoError(err) nodeTaints, err := api.GetTaintsFromNodeAnnotations(node.Annotations) ExpectNoError(err) var newTaints []api.Taint updated := false for _, existingTaint := range nodeTaints { if taint.MatchTaint(existingTaint) { newTaints = append(newTaints, taint) updated = true continue } newTaints = append(newTaints, existingTaint) } if !updated { newTaints = append(newTaints, taint) } taintsData, err := json.Marshal(newTaints) ExpectNoError(err) if node.Annotations == nil { node.Annotations = make(map[string]string) } node.Annotations[api.TaintsAnnotationKey] = string(taintsData) _, err = c.Core().Nodes().Update(node) if err != nil { if !apierrs.IsConflict(err) { ExpectNoError(err) } else { Logf("Conflict when trying to add/update taint %v to %v", taint, nodeName) } } else { break } time.Sleep(100 * time.Millisecond) } } func taintExists(taints []api.Taint, taintToFind api.Taint) bool { for _, taint := range taints { if taint.MatchTaint(taintToFind) { return true } } return false } func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint api.Taint) { By("verifying the node has the taint " + taint.ToString()) node, err := c.Core().Nodes().Get(nodeName) ExpectNoError(err) nodeTaints, err := api.GetTaintsFromNodeAnnotations(node.Annotations) ExpectNoError(err) if len(nodeTaints) == 0 || !taintExists(nodeTaints, taint) { Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName) } } func deleteTaint(oldTaints []api.Taint, taintToDelete api.Taint) ([]api.Taint, error) { newTaints := []api.Taint{} found := false for _, oldTaint := range oldTaints { if oldTaint.MatchTaint(taintToDelete) { found = true continue } newTaints = append(newTaints, oldTaint) } if !found { return nil, fmt.Errorf("taint %s not found.", taintToDelete.ToString()) } return newTaints, nil } // RemoveTaintOffNode is for cleaning up taints temporarily added to node, // won't fail if target taint doesn't
exist or has been removed. func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint api.Taint) { By("removing the taint " + taint.ToString() + " off the node " + nodeName) for attempt := 0; attempt < UpdateRetries; attempt++ { node, err := c.Core().Nodes().Get(nodeName) ExpectNoError(err) nodeTaints, err := api.GetTaintsFromNodeAnnotations(node.Annotations) ExpectNoError(err) if len(nodeTaints) == 0 { return } if !taintExists(nodeTaints, taint) { return } newTaints, err := deleteTaint(nodeTaints, taint) ExpectNoError(err) if len(newTaints) == 0 { delete(node.Annotations, api.TaintsAnnotationKey) } else { taintsData, err := json.Marshal(newTaints) ExpectNoError(err) node.Annotations[api.TaintsAnnotationKey] = string(taintsData) } _, err = c.Core().Nodes().Update(node) if err != nil { if !apierrs.IsConflict(err) { ExpectNoError(err) } else { Logf("Conflict when trying to add/update taint %s to node %v", taint.ToString(), nodeName) } } else { break } time.Sleep(100 * time.Millisecond) } nodeUpdated, err := c.Core().Nodes().Get(nodeName) ExpectNoError(err) By("verifying the node doesn't have the taint " + taint.ToString()) taintsGot, err := api.GetTaintsFromNodeAnnotations(nodeUpdated.Annotations) ExpectNoError(err) if taintExists(taintsGot, taint) { Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName) } } func ScaleRC(clientset clientset.Interface, ns, name string, size uint, wait bool) error { By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size)) scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset) if err != nil { return err } waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute) waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute) if err = scaler.Scale(ns, name, size, nil, waitForScale, waitForReplicas); err != nil { return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err) } if !wait { return nil } return WaitForRCPodsRunning(clientset, ns, name) } // Wait up to 10 minutes for pods to become Running. 
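// ScaleRC above uses this as its post-scale check; a direct call is also possible
// (the RC name is a placeholder):
//
//	if err := WaitForRCPodsRunning(c, ns, "my-rc"); err != nil {
//		Failf("pods of RC my-rc never became Running: %v", err)
//	}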
func WaitForRCPodsRunning(c clientset.Interface, ns, rcName string) error { rc, err := c.Core().ReplicationControllers(ns).Get(rcName) if err != nil { return err } selector := labels.SelectorFromSet(labels.Set(rc.Spec.Selector)) err = testutils.WaitForPodsWithLabelRunning(c, ns, selector) if err != nil { return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", rcName, err) } return nil } func ScaleDeployment(clientset clientset.Interface, ns, name string, size uint, wait bool) error { By(fmt.Sprintf("Scaling Deployment %s in namespace %s to %d", name, ns, size)) scaler, err := kubectl.ScalerFor(extensions.Kind("Deployment"), clientset) if err != nil { return err } waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute) waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute) if err = scaler.Scale(ns, name, size, nil, waitForScale, waitForReplicas); err != nil { return fmt.Errorf("error while scaling Deployment %s to %d replicas: %v", name, size, err) } if !wait { return nil } return WaitForDeploymentPodsRunning(clientset, ns, name) } func WaitForDeploymentPodsRunning(c clientset.Interface, ns, name string) error { deployment, err := c.Extensions().Deployments(ns).Get(name) if err != nil { return err } selector := labels.SelectorFromSet(labels.Set(deployment.Spec.Selector.MatchLabels)) err = testutils.WaitForPodsWithLabelRunning(c, ns, selector) if err != nil { return fmt.Errorf("Error while waiting for Deployment %s pods to be running: %v", name, err) } return nil } // Returns true if all the specified pods are scheduled, else returns false. func podsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (bool, error) { PodStore := testutils.NewPodStore(c, ns, label, fields.Everything()) defer PodStore.Stop() pods := PodStore.List() if len(pods) == 0 { return false, nil } for _, pod := range pods { if pod.Spec.NodeName == "" { return false, nil } } return true, nil } // Wait for all matching pods to become scheduled and at least one // matching pod exists. Return the list of matching pods. 
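// Illustrative usage, building the selector the same way other helpers in this file do
// (the label value is a placeholder):
//
//	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "my-rc"}))
//	pods, err := WaitForPodsWithLabelScheduled(c, ns, label)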
func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *api.PodList, err error) { err = wait.PollImmediate(Poll, podScheduledBeforeTimeout, func() (bool, error) { pods, err = WaitForPodsWithLabel(c, ns, label) if err != nil { return false, err } for _, pod := range pods.Items { if pod.Spec.NodeName == "" { return false, nil } } return true, nil }) return pods, err } // Wait up to PodListTimeout for getting pods with certain label func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *api.PodList, err error) { for t := time.Now(); time.Since(t) < PodListTimeout; time.Sleep(Poll) { options := api.ListOptions{LabelSelector: label} pods, err = c.Core().Pods(ns).List(options) Expect(err).NotTo(HaveOccurred()) if len(pods.Items) > 0 { break } } if pods == nil || len(pods.Items) == 0 { err = fmt.Errorf("Timeout while waiting for pods with label %v", label) } return } // DeleteRCAndPods a Replication Controller and all pods it spawned func DeleteRCAndPods(clientset clientset.Interface, ns, name string) error { By(fmt.Sprintf("deleting replication controller %s in namespace %s", name, ns)) rc, err := clientset.Core().ReplicationControllers(ns).Get(name) if err != nil { if apierrs.IsNotFound(err) { Logf("RC %s was already deleted: %v", name, err) return nil } return err } reaper, err := kubectl.ReaperForReplicationController(clientset.Core(), 10*time.Minute) if err != nil { if apierrs.IsNotFound(err) { Logf("RC %s was already deleted: %v", name, err) return nil } return err } ps, err := podStoreForRC(clientset, rc) if err != nil { return err } defer ps.Stop() startTime := time.Now() err = reaper.Stop(ns, name, 0, nil) if apierrs.IsNotFound(err) { Logf("RC %s was already deleted: %v", name, err) return nil } if err != nil { return fmt.Errorf("error while stopping RC: %s: %v", name, err) } deleteRCTime := time.Now().Sub(startTime) Logf("Deleting RC %s took: %v", name, deleteRCTime) err = waitForPodsInactive(ps, 10*time.Millisecond, 10*time.Minute) if err != nil { return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err) } terminatePodTime := time.Now().Sub(startTime) - deleteRCTime Logf("Terminating RC %s pods took: %v", name, terminatePodTime) // this is to relieve namespace controller's pressure when deleting the // namespace after a test. err = waitForPodsGone(ps, 10*time.Second, 10*time.Minute) if err != nil { return fmt.Errorf("error while waiting for pods gone %s: %v", name, err) } return nil } // DeleteRCAndWaitForGC deletes only the Replication Controller and waits for GC to delete the pods. 
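// Unlike DeleteRCAndPods above, which reaps the pods itself, this variant only deletes the
// RC (with OrphanDependents set to false) and then watches the pods disappear, so it also
// exercises the garbage collector. Illustrative call (the RC name is a placeholder):
//
//	ExpectNoError(DeleteRCAndWaitForGC(c, ns, "my-rc"))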
func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error { By(fmt.Sprintf("deleting replication controller %s in namespace %s, will wait for the garbage collector to delete the pods", name, ns)) rc, err := c.Core().ReplicationControllers(ns).Get(name) if err != nil { if apierrs.IsNotFound(err) { Logf("RC %s was already deleted: %v", name, err) return nil } return err } ps, err := podStoreForRC(c, rc) if err != nil { return err } defer ps.Stop() startTime := time.Now() falseVar := false deleteOption := &api.DeleteOptions{OrphanDependents: &falseVar} err = c.Core().ReplicationControllers(ns).Delete(name, deleteOption) if err != nil && apierrs.IsNotFound(err) { Logf("RC %s was already deleted: %v", name, err) return nil } if err != nil { return err } deleteRCTime := time.Now().Sub(startTime) Logf("Deleting RC %s took: %v", name, deleteRCTime) var interval, timeout time.Duration switch { case rc.Spec.Replicas < 100: interval = 100 * time.Millisecond case rc.Spec.Replicas < 1000: interval = 1 * time.Second default: interval = 10 * time.Second } if rc.Spec.Replicas < 5000 { timeout = 10 * time.Minute } else { timeout = time.Duration(rc.Spec.Replicas/gcThroughput) * time.Second // gcThroughput is pretty strict now, add a bit more to it timeout = timeout + 3*time.Minute } err = waitForPodsInactive(ps, interval, timeout) if err != nil { return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err) } terminatePodTime := time.Now().Sub(startTime) - deleteRCTime Logf("Terminating RC %s pods took: %v", name, terminatePodTime) err = waitForPodsGone(ps, interval, 10*time.Minute) if err != nil { return fmt.Errorf("error while waiting for pods gone %s: %v", name, err) } return nil } // podStoreForRC creates a PodStore that monitors pods belonging to the rc. It // waits until the reflector does a List() before returning. func podStoreForRC(c clientset.Interface, rc *api.ReplicationController) (*testutils.PodStore, error) { labels := labels.SelectorFromSet(rc.Spec.Selector) ps := testutils.NewPodStore(c, rc.Namespace, labels, fields.Everything()) err := wait.Poll(1*time.Second, 2*time.Minute, func() (bool, error) { if len(ps.Reflector.LastSyncResourceVersion()) != 0 { return true, nil } return false, nil }) return ps, err } // waitForPodsInactive waits until there are no active pods left in the PodStore. // This is to make a fair comparison of deletion time between DeleteRCAndPods // and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas // when the pod is inactive. func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error { return wait.PollImmediate(interval, timeout, func() (bool, error) { pods := ps.List() for _, pod := range pods { if controller.IsPodActive(pod) { return false, nil } } return true, nil }) } // waitForPodsGone waits until there are no pods left in the PodStore.
func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error { return wait.PollImmediate(interval, timeout, func() (bool, error) { if pods := ps.List(); len(pods) == 0 { return true, nil } return false, nil }) } // Delete a ReplicaSet and all pods it spawned func DeleteReplicaSet(clientset clientset.Interface, ns, name string) error { By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns)) rc, err := clientset.Extensions().ReplicaSets(ns).Get(name) if err != nil { if apierrs.IsNotFound(err) { Logf("ReplicaSet %s was already deleted: %v", name, err) return nil } return err } reaper, err := kubectl.ReaperFor(extensions.Kind("ReplicaSet"), clientset) if err != nil { if apierrs.IsNotFound(err) { Logf("ReplicaSet %s was already deleted: %v", name, err) return nil } return err } startTime := time.Now() err = reaper.Stop(ns, name, 0, nil) if apierrs.IsNotFound(err) { Logf("ReplicaSet %s was already deleted: %v", name, err) return nil } deleteRSTime := time.Now().Sub(startTime) Logf("Deleting RS %s took: %v", name, deleteRSTime) if err == nil { err = waitForReplicaSetPodsGone(clientset, rc) } terminatePodTime := time.Now().Sub(startTime) - deleteRSTime Logf("Terminating ReplicaSet %s pods took: %v", name, terminatePodTime) return err } // waitForReplicaSetPodsGone waits until there are no pods reported under a // ReplicaSet selector (because the pods have completed termination). func waitForReplicaSetPodsGone(c clientset.Interface, rs *extensions.ReplicaSet) error { return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) { selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector) ExpectNoError(err) options := api.ListOptions{LabelSelector: selector} if pods, err := c.Core().Pods(rs.Namespace).List(options); err == nil && len(pods.Items) == 0 { return true, nil } return false, nil }) } // Waits for the deployment status to become valid (i.e. max unavailable and max surge aren't violated anymore). // Note that the status should stay valid at all times unless shortly after a scaling event or the deployment is just created. // To verify that the deployment status is valid and wait for the rollout to finish, use WaitForDeploymentStatus instead. func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment) error { var ( oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet newRS *extensions.ReplicaSet deployment *extensions.Deployment reason string ) err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) { var err error deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name) if err != nil { return false, err } oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c) if err != nil { return false, err } if newRS == nil { // New RC hasn't been created yet. 
reason = "new replica set hasn't been created yet" Logf(reason) return false, nil } allRSs = append(oldRSs, newRS) // The old/new ReplicaSets need to contain the pod-template-hash label for i := range allRSs { if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) { reason = "all replica sets need to contain the pod-template-hash label" Logf(reason) return false, nil } } totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs) maxCreated := deployment.Spec.Replicas + deploymentutil.MaxSurge(*deployment) if totalCreated > maxCreated { reason = fmt.Sprintf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated) Logf(reason) return false, nil } minAvailable := deploymentutil.MinAvailable(deployment) if deployment.Status.AvailableReplicas < minAvailable { reason = fmt.Sprintf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable) Logf(reason) return false, nil } // When the deployment status and its underlying resources reach the desired state, we're done if deployment.Status.Replicas == deployment.Spec.Replicas && deployment.Status.UpdatedReplicas == deployment.Spec.Replicas && deployment.Status.AvailableReplicas == deployment.Spec.Replicas { return true, nil } reason = fmt.Sprintf("deployment status: %#v", deployment.Status) Logf(reason) return false, nil }) if err == wait.ErrWaitTimeout { logReplicaSetsOfDeployment(deployment, allOldRSs, newRS) logPodsOfDeployment(c, deployment) err = fmt.Errorf("%s", reason) } if err != nil { return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err) } return nil } // Waits for the deployment to reach desired state. // Returns an error if the deployment's rolling update strategy (max unavailable or max surge) is broken at any times. func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) error { var ( oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet newRS *extensions.ReplicaSet deployment *extensions.Deployment ) err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) { var err error deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name) if err != nil { return false, err } oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c) if err != nil { return false, err } if newRS == nil { // New RS hasn't been created yet. 
return false, nil } allRSs = append(oldRSs, newRS) // The old/new ReplicaSets need to contain the pod-template-hash label for i := range allRSs { if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) { return false, nil } } totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs) maxCreated := deployment.Spec.Replicas + deploymentutil.MaxSurge(*deployment) if totalCreated > maxCreated { logReplicaSetsOfDeployment(deployment, allOldRSs, newRS) logPodsOfDeployment(c, deployment) return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated) } minAvailable := deploymentutil.MinAvailable(deployment) if deployment.Status.AvailableReplicas < minAvailable { logReplicaSetsOfDeployment(deployment, allOldRSs, newRS) logPodsOfDeployment(c, deployment) return false, fmt.Errorf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable) } // When the deployment status and its underlying resources reach the desired state, we're done if deployment.Status.Replicas == deployment.Spec.Replicas && deployment.Status.UpdatedReplicas == deployment.Spec.Replicas { return true, nil } return false, nil }) if err == wait.ErrWaitTimeout { logReplicaSetsOfDeployment(deployment, allOldRSs, newRS) logPodsOfDeployment(c, deployment) } if err != nil { return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err) } return nil } // WaitForDeploymentUpdatedReplicasLTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updatedReplicas. func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int, desiredGeneration int64) error { err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) { deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) if err != nil { return false, err } if deployment.Status.ObservedGeneration >= desiredGeneration && deployment.Status.UpdatedReplicas >= int32(minUpdatedReplicas) { return true, nil } return false, nil }) if err != nil { return fmt.Errorf("error waiting for deployment %s to have at least %d updatedReplicas: %v", deploymentName, minUpdatedReplicas, err) } return nil } // WaitForDeploymentRollbackCleared waits until the given deployment has either started rolling back or no longer needs to roll back. // Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early. func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error { err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) { deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) if err != nil { return false, err } // Rollback not set or is kicked off if deployment.Spec.RollbackTo == nil { return true, nil } return false, nil }) if err != nil { return fmt.Errorf("error waiting for deployment %s rollbackTo to be cleared: %v", deploymentName, err) } return nil } // WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image. // Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early.
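// Illustrative usage; the deployment name, revision and image are placeholders:
//
//	err := WaitForDeploymentRevisionAndImage(c, ns, "my-deployment", "2", "my-image:v2")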
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error { var deployment *extensions.Deployment var newRS *extensions.ReplicaSet err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) { var err error deployment, err = c.Extensions().Deployments(ns).Get(deploymentName) if err != nil { return false, err } // The new ReplicaSet needs to be non-nil and contain the pod-template-hash label newRS, err = deploymentutil.GetNewReplicaSet(deployment, c) if err != nil || newRS == nil || !labelsutil.SelectorHasLabel(newRS.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) { return false, err } // Check revision of this deployment, and of the new replica set of this deployment if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision || newRS.Annotations == nil || newRS.Annotations[deploymentutil.RevisionAnnotation] != revision || deployment.Spec.Template.Spec.Containers[0].Image != image || newRS.Spec.Template.Spec.Containers[0].Image != image { return false, nil } return true, nil }) if err == wait.ErrWaitTimeout { logReplicaSetsOfDeployment(deployment, nil, newRS) } if newRS == nil { return fmt.Errorf("deployment %s failed to create new RS: %v", deploymentName, err) } if err != nil { return fmt.Errorf("error waiting for deployment %s (got %s / %s) and new RS %s (got %s / %s) revision and image to match expectation (expected %s / %s): %v", deploymentName, deployment.Annotations[deploymentutil.RevisionAnnotation], deployment.Spec.Template.Spec.Containers[0].Image, newRS.Name, newRS.Annotations[deploymentutil.RevisionAnnotation], newRS.Spec.Template.Spec.Containers[0].Image, revision, image, err) } return nil } func WaitForOverlappingAnnotationMatch(c clientset.Interface, ns, deploymentName, expected string) error { return wait.Poll(Poll, 1*time.Minute, func() (bool, error) { deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) if err != nil { return false, err } if deployment.Annotations[deploymentutil.OverlapAnnotation] == expected { return true, nil } return false, nil }) } // CheckNewRSAnnotations check if the new RS's annotation is as expected func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error { deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) if err != nil { return err } newRS, err := deploymentutil.GetNewReplicaSet(deployment, c) if err != nil { return err } for k, v := range expectedAnnotations { // Skip checking revision annotations if k != deploymentutil.RevisionAnnotation && v != newRS.Annotations[k] { return fmt.Errorf("Expected new RS annotations = %+v, got %+v", expectedAnnotations, newRS.Annotations) } } return nil } func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) options := api.ListOptions{LabelSelector: label} return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { pods, err := c.Core().Pods(ns).List(options) if err != nil { return false, nil } for _, pod := range pods.Items { if !deploymentutil.IsPodAvailable(&pod, int32(minReadySeconds), time.Now()) { return false, nil } } return true, nil }) } // Waits for the deployment to clean up old rcs. 
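// That is, it polls until exactly desiredRSNum old ReplicaSets remain. Illustrative call
// (the deployment name is a placeholder):
//
//	ExpectNoError(WaitForDeploymentOldRSsNum(c, ns, "my-deployment", 1))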
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error { return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) if err != nil { return false, err } _, oldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c) if err != nil { return false, err } return len(oldRSs) == desiredRSNum, nil }) } func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) { Logf("Deployment: %+v. Selector = %+v", *deployment, deployment.Spec.Selector) for i := range allOldRSs { Logf("All old ReplicaSets (%d/%d) of deployment %s: %+v. Selector = %+v", i+1, len(allOldRSs), deployment.Name, *allOldRSs[i], allOldRSs[i].Spec.Selector) } if newRS != nil { Logf("New ReplicaSet of deployment %s: %+v. Selector = %+v", deployment.Name, *newRS, newRS.Spec.Selector) } else { Logf("New ReplicaSet of deployment %s is nil.", deployment.Name) } } func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error { return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { return c.Extensions().Deployments(ns).Get(deploymentName) }, desiredGeneration, Poll, 1*time.Minute) } func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType extensions.DeploymentConditionType) error { var conditions []extensions.DeploymentCondition pollErr := wait.PollImmediate(time.Second, 1*time.Minute, func() (bool, error) { deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) if err != nil { return false, err } conditions = deployment.Status.Conditions cond := deploymentutil.GetDeploymentCondition(deployment.Status, condType) return cond != nil && cond.Reason == reason, nil }) if pollErr == wait.ErrWaitTimeout { pollErr = fmt.Errorf("deployment %q never updated with the desired condition and reason: %v", deploymentName, conditions) } return pollErr } func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment) { minReadySeconds := deployment.Spec.MinReadySeconds podList, err := deploymentutil.ListPods(deployment, func(namespace string, options api.ListOptions) (*api.PodList, error) { return c.Core().Pods(namespace).List(options) }) if err != nil { Logf("Failed to list pods of deployment %s: %v", deployment.Name, err) return } if err == nil { for _, pod := range podList.Items { availability := "not available" if deploymentutil.IsPodAvailable(&pod, minReadySeconds, time.Now()) { availability = "available" } Logf("Pod %s is %s: %+v", pod.Name, availability, pod) } } } // Waits for the number of events on the given object to reach a desired count. func WaitForEvents(c clientset.Interface, ns string, objOrRef runtime.Object, desiredEventsCount int) error { return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { events, err := c.Core().Events(ns).Search(objOrRef) if err != nil { return false, fmt.Errorf("error in listing events: %s", err) } eventsCount := len(events.Items) if eventsCount == desiredEventsCount { return true, nil } if eventsCount < desiredEventsCount { return false, nil } // Number of events has exceeded the desired count. return false, fmt.Errorf("number of events has exceeded the desired count, eventsCount: %d, desiredCount: %d", eventsCount, desiredEventsCount) }) } // Waits for the number of events on the given object to be at least a desired count. 
func WaitForPartialEvents(c clientset.Interface, ns string, objOrRef runtime.Object, atLeastEventsCount int) error { return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { events, err := c.Core().Events(ns).Search(objOrRef) if err != nil { return false, fmt.Errorf("error in listing events: %s", err) } eventsCount := len(events.Items) if eventsCount >= atLeastEventsCount { return true, nil } return false, nil }) } type updateDeploymentFunc func(d *extensions.Deployment) func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) { deployments := c.Extensions().Deployments(namespace) var updateErr error pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { if deployment, err = deployments.Get(name); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. applyUpdate(deployment) if deployment, err = deployments.Update(deployment); err == nil { Logf("Updating deployment %s", name) return true, nil } updateErr = err return false, nil }) if pollErr == wait.ErrWaitTimeout { pollErr = fmt.Errorf("couldn't apply the provided updated to deployment %q: %v", name, updateErr) } return deployment, pollErr } type updateRsFunc func(d *extensions.ReplicaSet) func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateRsFunc) (*extensions.ReplicaSet, error) { var rs *extensions.ReplicaSet var updateErr error pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) { var err error if rs, err = c.Extensions().ReplicaSets(namespace).Get(name); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. applyUpdate(rs) if rs, err = c.Extensions().ReplicaSets(namespace).Update(rs); err == nil { Logf("Updating replica set %q", name) return true, nil } updateErr = err return false, nil }) if pollErr == wait.ErrWaitTimeout { pollErr = fmt.Errorf("couldn't apply the provided updated to replicaset %q: %v", name, updateErr) } return rs, pollErr } type updateRcFunc func(d *api.ReplicationController) func UpdateReplicationControllerWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateRcFunc) (*api.ReplicationController, error) { var rc *api.ReplicationController var updateErr error pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) { var err error if rc, err = c.Core().ReplicationControllers(namespace).Get(name); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. applyUpdate(rc) if rc, err = c.Core().ReplicationControllers(namespace).Update(rc); err == nil { Logf("Updating replication controller %q", name) return true, nil } updateErr = err return false, nil }) if pollErr == wait.ErrWaitTimeout { pollErr = fmt.Errorf("couldn't apply the provided updated to rc %q: %v", name, updateErr) } return rc, pollErr } type updateStatefulSetFunc func(*apps.StatefulSet) func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *apps.StatefulSet, err error) { statefulSets := c.Apps().StatefulSets(namespace) var updateErr error pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { if statefulSet, err = statefulSets.Get(name); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. 
applyUpdate(statefulSet) if statefulSet, err = statefulSets.Update(statefulSet); err == nil { Logf("Updating stateful set %s", name) return true, nil } updateErr = err return false, nil }) if pollErr == wait.ErrWaitTimeout { pollErr = fmt.Errorf("couldn't apply the provided updated to stateful set %q: %v", name, updateErr) } return statefulSet, pollErr } type updateJobFunc func(*batch.Job) func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateJobFunc) (job *batch.Job, err error) { jobs := c.Batch().Jobs(namespace) var updateErr error pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) { if job, err = jobs.Get(name); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. applyUpdate(job) if job, err = jobs.Update(job); err == nil { Logf("Updating job %s", name) return true, nil } updateErr = err return false, nil }) if pollErr == wait.ErrWaitTimeout { pollErr = fmt.Errorf("couldn't apply the provided updated to job %q: %v", name, updateErr) } return job, pollErr } // NodeAddresses returns the first address of the given type of each node. func NodeAddresses(nodelist *api.NodeList, addrType api.NodeAddressType) []string { hosts := []string{} for _, n := range nodelist.Items { for _, addr := range n.Status.Addresses { // Use the first external IP address we find on the node, and // use at most one per node. // TODO(roberthbailey): Use the "preferred" address for the node, once // such a thing is defined (#2462). if addr.Type == addrType { hosts = append(hosts, addr.Address) break } } } return hosts } // NodeSSHHosts returns SSH-able host names for all schedulable nodes - this excludes master node. // It returns an error if it can't find an external IP for every node, though it still returns all // hosts that it found in that case. func NodeSSHHosts(c clientset.Interface) ([]string, error) { nodelist := waitListSchedulableNodesOrDie(c) // TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462). hosts := NodeAddresses(nodelist, api.NodeExternalIP) // Error if any node didn't have an external IP. if len(hosts) != len(nodelist.Items) { return hosts, fmt.Errorf( "only found %d external IPs on nodes, but found %d nodes. Nodelist: %v", len(hosts), len(nodelist.Items), nodelist) } sshHosts := make([]string, 0, len(hosts)) for _, h := range hosts { sshHosts = append(sshHosts, net.JoinHostPort(h, "22")) } return sshHosts, nil } type SSHResult struct { User string Host string Cmd string Stdout string Stderr string Code int } // SSH synchronously SSHs to a node running on provider and runs cmd. If there // is no error performing the SSH, the stdout, stderr, and exit code are // returned. func SSH(cmd, host, provider string) (SSHResult, error) { result := SSHResult{Host: host, Cmd: cmd} // Get a signer for the provider. signer, err := GetSigner(provider) if err != nil { return result, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err) } // RunSSHCommand will default to Getenv("USER") if user == "", but we're // defaulting here as well for logging clarity. 
result.User = os.Getenv("KUBE_SSH_USER") if result.User == "" { result.User = os.Getenv("USER") } stdout, stderr, code, err := sshutil.RunSSHCommand(cmd, result.User, host, signer) result.Stdout = stdout result.Stderr = stderr result.Code = code return result, err } func LogSSHResult(result SSHResult) { remote := fmt.Sprintf("%s@%s", result.User, result.Host) Logf("ssh %s: command: %s", remote, result.Cmd) Logf("ssh %s: stdout: %q", remote, result.Stdout) Logf("ssh %s: stderr: %q", remote, result.Stderr) Logf("ssh %s: exit code: %d", remote, result.Code) } func IssueSSHCommandWithResult(cmd, provider string, node *api.Node) (*SSHResult, error) { Logf("Getting external IP address for %s", node.Name) host := "" for _, a := range node.Status.Addresses { if a.Type == api.NodeExternalIP { host = a.Address + ":22" break } } if host == "" { return nil, fmt.Errorf("couldn't find external IP address for node %s", node.Name) } Logf("SSH %q on %s(%s)", cmd, node.Name, host) result, err := SSH(cmd, host, provider) LogSSHResult(result) if result.Code != 0 || err != nil { return nil, fmt.Errorf("failed running %q: %v (exit code %d)", cmd, err, result.Code) } return &result, nil } func IssueSSHCommand(cmd, provider string, node *api.Node) error { _, err := IssueSSHCommandWithResult(cmd, provider, node) if err != nil { return err } return nil } // NewHostExecPodSpec returns the pod spec of hostexec pod func NewHostExecPodSpec(ns, name string) *api.Pod { pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: name, Namespace: ns, }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: "hostexec", Image: "gcr.io/google_containers/hostexec:1.2", ImagePullPolicy: api.PullIfNotPresent, }, }, SecurityContext: &api.PodSecurityContext{ HostNetwork: true, }, }, } return pod } // RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec` // inside of a shell. func RunHostCmd(ns, name, cmd string) (string, error) { return RunKubectl("exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-c", cmd) } // RunHostCmdOrDie calls RunHostCmd and dies on error. func RunHostCmdOrDie(ns, name, cmd string) string { stdout, err := RunHostCmd(ns, name, cmd) Logf("stdout: %v", stdout) ExpectNoError(err) return stdout } // LaunchHostExecPod launches a hostexec pod in the given namespace and waits // until it's Running func LaunchHostExecPod(client clientset.Interface, ns, name string) *api.Pod { hostExecPod := NewHostExecPodSpec(ns, name) pod, err := client.Core().Pods(ns).Create(hostExecPod) ExpectNoError(err) err = WaitForPodRunningInNamespace(client, pod) ExpectNoError(err) return pod } // GetSigner returns an ssh.Signer for the provider ("gce", etc.) that can be // used to SSH to their nodes. func GetSigner(provider string) (ssh.Signer, error) { // Get the directory in which SSH keys are located. keydir := filepath.Join(os.Getenv("HOME"), ".ssh") // Select the key itself to use. When implementing more providers here, // please also add them to any SSH tests that are disabled because of signer // support. keyfile := "" switch provider { case "gce", "gke", "kubemark": keyfile = "google_compute_engine" case "aws": // If there is an env. variable override, use that. 
aws_keyfile := os.Getenv("AWS_SSH_KEY") if len(aws_keyfile) != 0 { return sshutil.MakePrivateKeySignerFromFile(aws_keyfile) } // Otherwise revert to home dir keyfile = "kube_aws_rsa" case "vagrant": keyfile := os.Getenv("VAGRANT_SSH_KEY") if len(keyfile) != 0 { return sshutil.MakePrivateKeySignerFromFile(keyfile) } return nil, fmt.Errorf("VAGRANT_SSH_KEY env variable should be provided") default: return nil, fmt.Errorf("GetSigner(...) not implemented for %s", provider) } key := filepath.Join(keydir, keyfile) return sshutil.MakePrivateKeySignerFromFile(key) } // CheckPodsRunningReady returns whether all pods whose names are listed in // podNames in namespace ns are running and ready, using c and waiting at most // timeout. func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool { return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready") } // CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are // listed in podNames in namespace ns are running and ready, or succeeded; use // c and waiting at most timeout. func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool { return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded") } // CheckPodsCondition returns whether all pods whose names are listed in podNames // in namespace ns are in the condition, using c and waiting at most timeout. func CheckPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool { np := len(podNames) Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames) result := make(chan bool, len(podNames)) for ix := range podNames { // Launch off pod readiness checkers. go func(name string) { err := waitForPodCondition(c, ns, name, desc, timeout, condition) result <- err == nil }(podNames[ix]) } // Wait for them all to finish. success := true // TODO(a-robinson): Change to `for range` syntax and remove logging once we // support only Go >= 1.4. for _, podName := range podNames { if !<-result { Logf("Pod %[1]s failed to be %[2]s.", podName, desc) success = false } } Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames) return success } // WaitForNodeToBeReady returns whether node name is ready within timeout. func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool { return WaitForNodeToBe(c, name, api.NodeReady, true, timeout) } // WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the // readiness condition is anything but ready, e.g false or unknown) within // timeout. func WaitForNodeToBeNotReady(c clientset.Interface, name string, timeout time.Duration) bool { return WaitForNodeToBe(c, name, api.NodeReady, false, timeout) } func isNodeConditionSetAsExpected(node *api.Node, conditionType api.NodeConditionType, wantTrue, silent bool) bool { // Check the node readiness condition (logging all). for _, cond := range node.Status.Conditions { // Ensure that the condition type and the status matches as desired. if cond.Type == conditionType { if (cond.Status == api.ConditionTrue) == wantTrue { return true } else { if !silent { Logf("Condition %s of node %s is %v instead of %t. 
Reason: %v, message: %v", conditionType, node.Name, cond.Status == api.ConditionTrue, wantTrue, cond.Reason, cond.Message) } return false } } } if !silent { Logf("Couldn't find condition %v on node %v", conditionType, node.Name) } return false } func IsNodeConditionSetAsExpected(node *api.Node, conditionType api.NodeConditionType, wantTrue bool) bool { return isNodeConditionSetAsExpected(node, conditionType, wantTrue, false) } func IsNodeConditionSetAsExpectedSilent(node *api.Node, conditionType api.NodeConditionType, wantTrue bool) bool { return isNodeConditionSetAsExpected(node, conditionType, wantTrue, true) } func IsNodeConditionUnset(node *api.Node, conditionType api.NodeConditionType) bool { for _, cond := range node.Status.Conditions { if cond.Type == conditionType { return false } } return true } // WaitForNodeToBe returns whether node "name's" condition state matches wantTrue // within timeout. If wantTrue is true, it will ensure the node condition status // is ConditionTrue; if it's false, it ensures the node condition is in any state // other than ConditionTrue (e.g. not true or unknown). func WaitForNodeToBe(c clientset.Interface, name string, conditionType api.NodeConditionType, wantTrue bool, timeout time.Duration) bool { Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { node, err := c.Core().Nodes().Get(name) if err != nil { Logf("Couldn't get node %s", name) continue } if IsNodeConditionSetAsExpected(node, conditionType, wantTrue) { return true } } Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout) return false } // Checks whether not-ready nodes can be ignored while checking if all nodes are // ready (we allow e.g. for incorrect provisioning of some small percentage of nodes // while validating cluster, and those nodes may never become healthy). // Currently we allow only for: // - not present CNI plugins on node // TODO: we should extend it for other reasons. func allowedNotReadyReasons(nodes []*api.Node) bool { for _, node := range nodes { index, condition := api.GetNodeCondition(&node.Status, api.NodeReady) if index == -1 || !strings.Contains(condition.Message, "could not locate kubenet required CNI plugins") { return false } } return true } // Checks whether all registered nodes are ready. // TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy, // and figure out how to do it in a configurable way, as we can't expect all setups to run // default test add-ons. func AllNodesReady(c clientset.Interface, timeout time.Duration) error { Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, TestContext.AllowedNotReadyNodes) var notReady []*api.Node err := wait.PollImmediate(Poll, timeout, func() (bool, error) { notReady = nil // It should be OK to list unschedulable Nodes here. nodes, err := c.Core().Nodes().List(api.ListOptions{}) if err != nil { return false, err } for i := range nodes.Items { node := &nodes.Items[i] if !IsNodeConditionSetAsExpected(node, api.NodeReady, true) { notReady = append(notReady, node) } } // Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready, // to make it possible e.g. for incorrect deployment of some small percentage // of nodes (which we allow in cluster validation). Some nodes that are not // provisioned correctly at startup will never become ready (e.g. 
when something // won't install correctly), so we can't expect them to be ready at any point. // // However, we only allow non-ready nodes with some specific reasons. if len(notReady) > TestContext.AllowedNotReadyNodes { return false, nil } return allowedNotReadyReasons(notReady), nil }) if err != nil && err != wait.ErrWaitTimeout { return err } if len(notReady) > TestContext.AllowedNotReadyNodes || !allowedNotReadyReasons(notReady) { return fmt.Errorf("Not ready nodes: %#v", notReady) } return nil } // checks whether all registered nodes are ready and all required Pods are running on them. func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error { Logf("Waiting up to %v for all nodes to be ready", timeout) var notReady []api.Node var missingPodsPerNode map[string][]string err := wait.PollImmediate(Poll, timeout, func() (bool, error) { notReady = nil // It should be OK to list unschedulable Nodes here. nodes, err := c.Core().Nodes().List(api.ListOptions{ResourceVersion: "0"}) if err != nil { return false, err } for _, node := range nodes.Items { if !IsNodeConditionSetAsExpected(&node, api.NodeReady, true) { notReady = append(notReady, node) } } pods, err := c.Core().Pods(api.NamespaceAll).List(api.ListOptions{ResourceVersion: "0"}) if err != nil { return false, err } systemPodsPerNode := make(map[string][]string) for _, pod := range pods.Items { if pod.Namespace == api.NamespaceSystem && pod.Status.Phase == api.PodRunning { if pod.Spec.NodeName != "" { systemPodsPerNode[pod.Spec.NodeName] = append(systemPodsPerNode[pod.Spec.NodeName], pod.Name) } } } missingPodsPerNode = make(map[string][]string) for _, node := range nodes.Items { if !system.IsMasterNode(&node) { for _, requiredPod := range requiredPerNodePods { foundRequired := false for _, presentPod := range systemPodsPerNode[node.Name] { if requiredPod.MatchString(presentPod) { foundRequired = true break } } if !foundRequired { missingPodsPerNode[node.Name] = append(missingPodsPerNode[node.Name], requiredPod.String()) } } } } return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil }) if err != nil && err != wait.ErrWaitTimeout { return err } if len(notReady) > 0 { return fmt.Errorf("Not ready nodes: %v", notReady) } if len(missingPodsPerNode) > 0 { return fmt.Errorf("Not running system Pods: %v", missingPodsPerNode) } return nil } // Filters nodes in NodeList in place, removing nodes that do not // satisfy the given condition // TODO: consider merging with pkg/client/cache.NodeLister func FilterNodes(nodeList *api.NodeList, fn func(node api.Node) bool) { var l []api.Node for _, node := range nodeList.Items { if fn(node) { l = append(l, node) } } nodeList.Items = l } // ParseKVLines parses output that looks like lines containing "<key>: <val>" // and returns <val> if <key> is found. Otherwise, it returns the empty string. func ParseKVLines(output, key string) string { delim := ":" key = key + delim for _, line := range strings.Split(output, "\n") { pieces := strings.SplitAfterN(line, delim, 2) if len(pieces) != 2 { continue } k, v := pieces[0], pieces[1] if k == key { return strings.TrimSpace(v) } } return "" } func RestartKubeProxy(host string) error { // TODO: Make it work for all providers. 
if !ProviderIs("gce", "gke", "aws") { return fmt.Errorf("unsupported provider: %s", TestContext.Provider) } // kubelet will restart the kube-proxy since it's running in a static pod Logf("Killing kube-proxy on node %v", host) result, err := SSH("sudo pkill kube-proxy", host, TestContext.Provider) if err != nil || result.Code != 0 { LogSSHResult(result) return fmt.Errorf("couldn't restart kube-proxy: %v", err) } // wait for kube-proxy to come back up sshCmd := "sudo /bin/sh -c 'pgrep kube-proxy | wc -l'" err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { Logf("Waiting for kubeproxy to come back up with %v on %v", sshCmd, host) result, err := SSH(sshCmd, host, TestContext.Provider) if err != nil { return false, err } if result.Code != 0 { LogSSHResult(result) return false, fmt.Errorf("failed to run command, exited %d", result.Code) } if result.Stdout == "0\n" { return false, nil } Logf("kube-proxy is back up.") return true, nil }) if err != nil { return fmt.Errorf("kube-proxy didn't recover: %v", err) } return nil } func RestartApiserver(c discovery.ServerVersionInterface) error { // TODO: Make it work for all providers. if !ProviderIs("gce", "gke", "aws") { return fmt.Errorf("unsupported provider: %s", TestContext.Provider) } if ProviderIs("gce", "aws") { return sshRestartMaster() } // GKE doesn't allow ssh access, so use a same-version master // upgrade to teardown/recreate master. v, err := c.ServerVersion() if err != nil { return err } return masterUpgradeGKE(v.GitVersion[1:]) // strip leading 'v' } func sshRestartMaster() error { if !ProviderIs("gce", "aws") { return fmt.Errorf("unsupported provider: %s", TestContext.Provider) } var command string if ProviderIs("gce") { command = "sudo docker ps | grep /kube-apiserver | cut -d ' ' -f 1 | xargs sudo docker kill" } else { command = "sudo /etc/init.d/kube-apiserver restart" } Logf("Restarting master via ssh, running: %v", command) result, err := SSH(command, GetMasterHost()+":22", TestContext.Provider) if err != nil || result.Code != 0 { LogSSHResult(result) return fmt.Errorf("couldn't restart apiserver: %v", err) } return nil } func WaitForApiserverUp(c clientset.Interface) error { for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { body, err := c.Core().RESTClient().Get().AbsPath("/healthz").Do().Raw() if err == nil && string(body) == "ok" { return nil } } return fmt.Errorf("waiting for apiserver timed out") } // WaitForClusterSize waits until the cluster has desired size and there is no not-ready nodes in it. // By cluster size we mean number of Nodes excluding Master Node. func WaitForClusterSize(c clientset.Interface, size int, timeout time.Duration) error { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { nodes, err := c.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector()}) if err != nil { Logf("Failed to list nodes: %v", err) continue } numNodes := len(nodes.Items) // Filter out not-ready nodes. 
FilterNodes(nodes, func(node api.Node) bool { return IsNodeConditionSetAsExpected(&node, api.NodeReady, true) }) numReady := len(nodes.Items) if numNodes == size && numReady == size { Logf("Cluster has reached the desired size %d", size) return nil } Logf("Waiting for cluster size %d, current size %d, not ready nodes %d", size, numNodes, numNodes-numReady) } return fmt.Errorf("timeout waiting %v for cluster size to be %d", timeout, size) } func GenerateMasterRegexp(prefix string) string { return prefix + "(-...)?" } // waitForMasters waits until the cluster has the desired number of ready masters in it. func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { nodes, err := c.Core().Nodes().List(api.ListOptions{}) if err != nil { Logf("Failed to list nodes: %v", err) continue } // Filter out nodes that are not master replicas FilterNodes(nodes, func(node api.Node) bool { res, err := regexp.Match(GenerateMasterRegexp(masterPrefix), ([]byte)(node.Name)) if err != nil { Logf("Failed to match regexp to node name: %v", err) return false } return res }) numNodes := len(nodes.Items) // Filter out not-ready nodes. FilterNodes(nodes, func(node api.Node) bool { return IsNodeConditionSetAsExpected(&node, api.NodeReady, true) }) numReady := len(nodes.Items) if numNodes == size && numReady == size { Logf("Cluster has reached the desired number of masters %d", size) return nil } Logf("Waiting for the number of masters %d, current %d, not ready master nodes %d", size, numNodes, numNodes-numReady) } return fmt.Errorf("timeout waiting %v for the number of masters to be %d", timeout, size) } // GetHostExternalAddress gets the node for a pod and returns the first External // address. Returns an error if the node the pod is on doesn't have an External // address. func GetHostExternalAddress(client clientset.Interface, p *api.Pod) (externalAddress string, err error) { node, err := client.Core().Nodes().Get(p.Spec.NodeName) if err != nil { return "", err } for _, address := range node.Status.Addresses { if address.Type == api.NodeExternalIP { if address.Address != "" { externalAddress = address.Address break } } } if externalAddress == "" { err = fmt.Errorf("No external address for pod %v on node %v", p.Name, p.Spec.NodeName) } return } type extractRT struct { http.Header } func (rt *extractRT) RoundTrip(req *http.Request) (*http.Response, error) { rt.Header = req.Header return &http.Response{}, nil } // headersForConfig extracts any http client logic necessary for the provided // config. func headersForConfig(c *restclient.Config) (http.Header, error) { extract := &extractRT{} rt, err := restclient.HTTPWrappersForConfig(c, extract) if err != nil { return nil, err } if _, err := rt.RoundTrip(&http.Request{}); err != nil { return nil, err } return extract.Header, nil } // OpenWebSocketForURL constructs a websocket connection to the provided URL, using the client // config, with the specified protocols. 
func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []string) (*websocket.Conn, error) { tlsConfig, err := restclient.TLSConfigFor(config) if err != nil { return nil, fmt.Errorf("failed to create tls config: %v", err) } if tlsConfig != nil { url.Scheme = "wss" if !strings.Contains(url.Host, ":") { url.Host += ":443" } } else { url.Scheme = "ws" if !strings.Contains(url.Host, ":") { url.Host += ":80" } } headers, err := headersForConfig(config) if err != nil { return nil, fmt.Errorf("failed to load http headers: %v", err) } cfg, err := websocket.NewConfig(url.String(), "http://localhost") if err != nil { return nil, fmt.Errorf("failed to create websocket config: %v", err) } cfg.Header = headers cfg.TlsConfig = tlsConfig cfg.Protocol = protocols return websocket.DialConfig(cfg) } // getIngressAddress returns the ips/hostnames associated with the Ingress. func getIngressAddress(client clientset.Interface, ns, name string) ([]string, error) { ing, err := client.Extensions().Ingresses(ns).Get(name) if err != nil { return nil, err } addresses := []string{} for _, a := range ing.Status.LoadBalancer.Ingress { if a.IP != "" { addresses = append(addresses, a.IP) } if a.Hostname != "" { addresses = append(addresses, a.Hostname) } } return addresses, nil } // WaitForIngressAddress waits for the Ingress to acquire an address. func WaitForIngressAddress(c clientset.Interface, ns, ingName string, timeout time.Duration) (string, error) { var address string err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) { ipOrNameList, err := getIngressAddress(c, ns, ingName) if err != nil || len(ipOrNameList) == 0 { Logf("Waiting for Ingress %v to acquire IP, error %v", ingName, err) return false, nil } address = ipOrNameList[0] return true, nil }) return address, err } // Looks for the given string in the log of a specific pod container func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) { return LookForString(expectedString, timeout, func() string { return RunKubectlOrDie("logs", podName, container, fmt.Sprintf("--namespace=%v", ns)) }) } // Looks for the given string in a file in a specific pod container func LookForStringInFile(ns, podName, container, file, expectedString string, timeout time.Duration) (result string, err error) { return LookForString(expectedString, timeout, func() string { return RunKubectlOrDie("exec", podName, "-c", container, fmt.Sprintf("--namespace=%v", ns), "--", "cat", file) }) } // Looks for the given string in the output of a command executed in a specific pod container func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) { return LookForString(expectedString, timeout, func() string { // use the first container args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"} args = append(args, command...) return RunKubectlOrDie(args...) }) } // Looks for the given string in the output of fn, repeatedly calling fn until // the timeout is reached or the string is found. Returns last log and possibly // error if the string was not found. 
func LookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) { for t := time.Now(); time.Since(t) < timeout; time.Sleep(Poll) { result = fn() if strings.Contains(result, expectedString) { return } } err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result) return } // getSvcNodePort returns the node port for the given service:port. func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) { svc, err := client.Core().Services(ns).Get(name) if err != nil { return 0, err } for _, p := range svc.Spec.Ports { if p.Port == int32(svcPort) { if p.NodePort != 0 { return int(p.NodePort), nil } } } return 0, fmt.Errorf( "No node port found for service %v, port %v", name, svcPort) } // GetNodePortURL returns the url to a nodeport Service. func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (string, error) { nodePort, err := getSvcNodePort(client, ns, name, svcPort) if err != nil { return "", err } // This list of nodes must not include the master, which is marked // unschedulable, since the master doesn't run kube-proxy. Without // kube-proxy NodePorts won't work. var nodes *api.NodeList if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) { nodes, err = client.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector()}) return err == nil, nil }) != nil { return "", err } if len(nodes.Items) == 0 { return "", fmt.Errorf("Unable to list nodes in cluster.") } for _, node := range nodes.Items { for _, address := range node.Status.Addresses { if address.Type == api.NodeExternalIP { if address.Address != "" { return fmt.Sprintf("http://%v:%v", address.Address, nodePort), nil } } } } return "", fmt.Errorf("Failed to find external address for service %v", name) } // ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till // none are running, otherwise it does what a synchronous scale operation would do. func ScaleRCByLabels(clientset clientset.Interface, ns string, l map[string]string, replicas uint) error { listOpts := api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l))} rcs, err := clientset.Core().ReplicationControllers(ns).List(listOpts) if err != nil { return err } if len(rcs.Items) == 0 { return fmt.Errorf("RC with labels %v not found in ns %v", l, ns) } Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas) for _, labelRC := range rcs.Items { name := labelRC.Name if err := ScaleRC(clientset, ns, name, replicas, false); err != nil { return err } rc, err := clientset.Core().ReplicationControllers(ns).Get(name) if err != nil { return err } if replicas == 0 { ps, err := podStoreForRC(clientset, rc) if err != nil { return err } defer ps.Stop() if err = waitForPodsGone(ps, 10*time.Second, 10*time.Minute); err != nil { return fmt.Errorf("error while waiting for pods gone %s: %v", name, err) } } else { if err := testutils.WaitForPodsWithLabelRunning( clientset, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector))); err != nil { return err } } } return nil } // TODO(random-liu): Change this to be a member function of the framework. 
func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { return getPodLogsInternal(c, namespace, podName, containerName, false) } func getPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { return getPodLogsInternal(c, namespace, podName, containerName, true) } // utility function for gomega Eventually func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool) (string, error) { logs, err := c.Core().RESTClient().Get(). Resource("pods"). Namespace(namespace). Name(podName).SubResource("log"). Param("container", containerName). Param("previous", strconv.FormatBool(previous)). Do(). Raw() if err != nil { return "", err } if err == nil && strings.Contains(string(logs), "Internal Error") { return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q.", string(logs)) } return string(logs), err } // EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created // are actually cleaned up. Currently only implemented for GCE/GKE. func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error { if TestContext.Provider == "gce" || TestContext.Provider == "gke" { return ensureGCELoadBalancerResourcesDeleted(ip, portRange) } return nil } func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error { gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud) if !ok { return fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider) } project := TestContext.CloudConfig.ProjectID region, err := gcecloud.GetGCERegion(TestContext.CloudConfig.Zone) if err != nil { return fmt.Errorf("could not get region for zone %q: %v", TestContext.CloudConfig.Zone, err) } return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) { service := gceCloud.GetComputeService() list, err := service.ForwardingRules.List(project, region).Do() if err != nil { return false, err } for ix := range list.Items { item := list.Items[ix] if item.PortRange == portRange && item.IPAddress == ip { Logf("found a load balancer: %v", item) return false, nil } } return true, nil }) } // The following helper functions can block/unblock network from source // host to destination host by manipulating iptable rules. // This function assumes it can ssh to the source host. // // Caution: // Recommend to input IP instead of hostnames. Using hostnames will cause iptables to // do a DNS lookup to resolve the name to an IP address, which will // slow down the test and cause it to fail if DNS is absent or broken. // // Suggested usage pattern: // func foo() { // ... // defer UnblockNetwork(from, to) // BlockNetwork(from, to) // ... // } // func BlockNetwork(from string, to string) { Logf("block network traffic from %s to %s", from, to) iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to) dropCmd := fmt.Sprintf("sudo iptables --insert %s", iptablesRule) if result, err := SSH(dropCmd, from, TestContext.Provider); result.Code != 0 || err != nil { LogSSHResult(result) Failf("Unexpected error: %v", err) } } func UnblockNetwork(from string, to string) { Logf("Unblock network traffic from %s to %s", from, to) iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to) undropCmd := fmt.Sprintf("sudo iptables --delete %s", iptablesRule) // Undrop command may fail if the rule has never been created. // In such case we just lose 30 seconds, but the cluster is healthy. 
// But if the rule had been created and removing it failed, the node is broken and // not coming back. Subsequent tests will run or fewer nodes (some of the tests // may fail). Manual intervention is required in such case (recreating the // cluster solves the problem too). err := wait.Poll(time.Millisecond*100, time.Second*30, func() (bool, error) { result, err := SSH(undropCmd, from, TestContext.Provider) if result.Code == 0 && err == nil { return true, nil } LogSSHResult(result) if err != nil { Logf("Unexpected error: %v", err) } return false, nil }) if err != nil { Failf("Failed to remove the iptable REJECT rule. Manual intervention is "+ "required on host %s: remove rule %s, if exists", from, iptablesRule) } } func isElementOf(podUID types.UID, pods *api.PodList) bool { for _, pod := range pods.Items { if pod.UID == podUID { return true } } return false } func CheckRSHashLabel(rs *extensions.ReplicaSet) error { if len(rs.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 || len(rs.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 || len(rs.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 { return fmt.Errorf("unexpected RS missing required pod-hash-template: %+v, selector = %+v, template = %+v", rs, rs.Spec.Selector, rs.Spec.Template) } return nil } func CheckPodHashLabel(pods *api.PodList) error { invalidPod := "" for _, pod := range pods.Items { if len(pod.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 { if len(invalidPod) == 0 { invalidPod = "unexpected pods missing required pod-hash-template:" } invalidPod = fmt.Sprintf("%s %+v;", invalidPod, pod) } } if len(invalidPod) > 0 { return fmt.Errorf("%s", invalidPod) } return nil } // timeout for proxy requests. const proxyTimeout = 2 * time.Minute // NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client. func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.Result, error) { // proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call. // This will leak a goroutine if proxy hangs. #22165 subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery()) if err != nil { return restclient.Result{}, err } var result restclient.Result finished := make(chan struct{}) go func() { if subResourceProxyAvailable { result = c.Core().RESTClient().Get(). Resource("nodes"). SubResource("proxy"). Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)). Suffix(endpoint). Do() } else { result = c.Core().RESTClient().Get(). Prefix("proxy"). Resource("nodes"). Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)). Suffix(endpoint). Do() } finished <- struct{}{} }() select { case <-finished: return result, nil case <-time.After(proxyTimeout): return restclient.Result{}, nil } } // GetKubeletPods retrieves the list of pods on the kubelet func GetKubeletPods(c clientset.Interface, node string) (*api.PodList, error) { return getKubeletPods(c, node, "pods") } // GetKubeletRunningPods retrieves the list of running pods on the kubelet. The pods // includes necessary information (e.g., UID, name, namespace for // pods/containers), but do not contain the full spec. 
func GetKubeletRunningPods(c clientset.Interface, node string) (*api.PodList, error) { return getKubeletPods(c, node, "runningpods") } func getKubeletPods(c clientset.Interface, node, resource string) (*api.PodList, error) { result := &api.PodList{} client, err := NodeProxyRequest(c, node, resource) if err != nil { return &api.PodList{}, err } if err = client.Into(result); err != nil { return &api.PodList{}, err } return result, nil } // LaunchWebserverPod launches a pod serving http on port 8080 to act // as the target for networking connectivity checks. The ip address // of the created pod will be returned if the pod is launched // successfully. func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) { containerName := fmt.Sprintf("%s-container", podName) port := 8080 pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: podName, }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: containerName, Image: "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab", Env: []api.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}}, Ports: []api.ContainerPort{{ContainerPort: int32(port)}}, }, }, NodeName: nodeName, RestartPolicy: api.RestartPolicyNever, }, } podClient := f.ClientSet.Core().Pods(f.Namespace.Name) _, err := podClient.Create(pod) ExpectNoError(err) ExpectNoError(f.WaitForPodRunning(podName)) createdPod, err := podClient.Get(podName) ExpectNoError(err) ip = fmt.Sprintf("%s:%d", createdPod.Status.PodIP, port) Logf("Target pod IP:port is %s", ip) return } // CheckConnectivityToHost launches a pod running wget on the // specified node to test connectivity to the specified host. An // error will be returned if the host is not reachable from the pod. func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, timeout int) error { contName := fmt.Sprintf("%s-container", podName) pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: podName, }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: contName, Image: "gcr.io/google_containers/busybox:1.24", Command: []string{"wget", fmt.Sprintf("--timeout=%d", timeout), "-s", host}, }, }, NodeName: nodeName, RestartPolicy: api.RestartPolicyNever, }, } podClient := f.ClientSet.Core().Pods(f.Namespace.Name) _, err := podClient.Create(pod) if err != nil { return err } err = WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name) if err != nil { logs, logErr := GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, contName) if logErr != nil { Logf("Warning: Failed to get logs from pod %q: %v", pod.Name, logErr) } else { Logf("pod %s/%s \"wget\" logs:\n%s", f.Namespace.Name, pod.Name, logs) } } return err } // CoreDump SSHs to the master and all nodes and dumps their logs into dir. // It shells out to cluster/log-dump.sh to accomplish this. 
func CoreDump(dir string) { cmd := exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump.sh"), dir) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { Logf("Error running cluster/log-dump.sh: %v", err) } } func UpdatePodWithRetries(client clientset.Interface, ns, name string, update func(*api.Pod)) (*api.Pod, error) { for i := 0; i < 3; i++ { pod, err := client.Core().Pods(ns).Get(name) if err != nil { return nil, fmt.Errorf("Failed to get pod %q: %v", name, err) } update(pod) pod, err = client.Core().Pods(ns).Update(pod) if err == nil { return pod, nil } if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) { return nil, fmt.Errorf("Failed to update pod %q: %v", name, err) } } return nil, fmt.Errorf("Too many retries updating Pod %q", name) } func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*api.Pod, error) { pods, err := c.Core().Pods(ns).List(api.ListOptions{}) if err != nil { return []*api.Pod{}, err } ignoreSelector := labels.SelectorFromSet(ignoreLabels) filtered := []*api.Pod{} for _, p := range pods.Items { if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(p.Labels)) { continue } filtered = append(filtered, &p) } return filtered, nil } // RunCmd runs cmd using args and returns its stdout and stderr. It also outputs // cmd's stdout and stderr to their respective OS streams. func RunCmd(command string, args ...string) (string, string, error) { Logf("Running %s %v", command, args) var bout, berr bytes.Buffer cmd := exec.Command(command, args...) // We also output to the OS stdout/stderr to aid in debugging in case cmd // hangs and never returns before the test gets killed. // // This creates some ugly output because gcloud doesn't always provide // newlines. cmd.Stdout = io.MultiWriter(os.Stdout, &bout) cmd.Stderr = io.MultiWriter(os.Stderr, &berr) err := cmd.Run() stdout, stderr := bout.String(), berr.String() if err != nil { return "", "", fmt.Errorf("error running %s %v; got error %v, stdout %q, stderr %q", command, args, err, stdout, stderr) } return stdout, stderr, nil } // retryCmd runs cmd using args and retries it for up to SingleCallTimeout if // it returns an error. It returns stdout and stderr. func retryCmd(command string, args ...string) (string, string, error) { var err error stdout, stderr := "", "" wait.Poll(Poll, SingleCallTimeout, func() (bool, error) { stdout, stderr, err = RunCmd(command, args...) if err != nil { Logf("Got %v", err) return false, nil } return true, nil }) return stdout, stderr, err } // GetPodsScheduled returns a number of currently scheduled and not scheduled Pods. func GetPodsScheduled(masterNodes sets.String, pods *api.PodList) (scheduledPods, notScheduledPods []api.Pod) { for _, pod := range pods.Items { if !masterNodes.Has(pod.Spec.NodeName) { if pod.Spec.NodeName != "" { _, scheduledCondition := api.GetPodCondition(&pod.Status, api.PodScheduled) Expect(scheduledCondition != nil).To(Equal(true)) Expect(scheduledCondition.Status).To(Equal(api.ConditionTrue)) scheduledPods = append(scheduledPods, pod) } else { _, scheduledCondition := api.GetPodCondition(&pod.Status, api.PodScheduled) Expect(scheduledCondition != nil).To(Equal(true)) Expect(scheduledCondition.Status).To(Equal(api.ConditionFalse)) if scheduledCondition.Reason == "Unschedulable" { notScheduledPods = append(notScheduledPods, pod) } } } } return } // WaitForStableCluster waits until all existing pods are scheduled and returns their amount. 
func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int { timeout := 10 * time.Minute startTime := time.Now() allPods, err := c.Core().Pods(api.NamespaceAll).List(api.ListOptions{}) ExpectNoError(err) // API server returns also Pods that succeeded. We need to filter them out. currentPods := make([]api.Pod, 0, len(allPods.Items)) for _, pod := range allPods.Items { if pod.Status.Phase != api.PodSucceeded && pod.Status.Phase != api.PodFailed { currentPods = append(currentPods, pod) } } allPods.Items = currentPods scheduledPods, currentlyNotScheduledPods := GetPodsScheduled(masterNodes, allPods) for len(currentlyNotScheduledPods) != 0 { time.Sleep(2 * time.Second) allPods, err := c.Core().Pods(api.NamespaceAll).List(api.ListOptions{}) ExpectNoError(err) scheduledPods, currentlyNotScheduledPods = GetPodsScheduled(masterNodes, allPods) if startTime.Add(timeout).Before(time.Now()) { Failf("Timed out after %v waiting for stable cluster.", timeout) break } } return len(scheduledPods) } // GetMasterAndWorkerNodesOrDie will return a list masters and schedulable worker nodes func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *api.NodeList) { nodes := &api.NodeList{} masters := sets.NewString() all, _ := c.Core().Nodes().List(api.ListOptions{}) for _, n := range all.Items { if system.IsMasterNode(&n) { masters.Insert(n.Name) } else if isNodeSchedulable(&n) && isNodeUntainted(&n) { nodes.Items = append(nodes.Items, n) } } return masters, nodes } func CreateFileForGoBinData(gobindataPath, outputFilename string) error { data := ReadOrDie(gobindataPath) if len(data) == 0 { return fmt.Errorf("Failed to read gobindata from %v", gobindataPath) } fullPath := filepath.Join(TestContext.OutputDir, outputFilename) err := os.MkdirAll(filepath.Dir(fullPath), 0777) if err != nil { return fmt.Errorf("Error while creating directory %v: %v", filepath.Dir(fullPath), err) } err = ioutil.WriteFile(fullPath, data, 0644) if err != nil { return fmt.Errorf("Error while trying to write to file %v: %v", fullPath, err) } return nil } func ListNamespaceEvents(c clientset.Interface, ns string) error { ls, err := c.Core().Events(ns).List(api.ListOptions{}) if err != nil { return err } for _, event := range ls.Items { glog.Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message) } return nil } // E2ETestNodePreparer implements testutils.TestNodePreparer interface, which is used // to create/modify Nodes before running a test. type E2ETestNodePreparer struct { client clientset.Interface // Specifies how many nodes should be modified using the given strategy. // Only one strategy can be applied to a single Node, so there needs to // be at least <sum_of_keys> Nodes in the cluster. countToStrategy []testutils.CountToStrategy nodeToAppliedStrategy map[string]testutils.PrepareNodeStrategy } func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy) testutils.TestNodePreparer { return &E2ETestNodePreparer{ client: client, countToStrategy: countToStrategy, nodeToAppliedStrategy: make(map[string]testutils.PrepareNodeStrategy), } } func (p *E2ETestNodePreparer) PrepareNodes() error { nodes := GetReadySchedulableNodesOrDie(p.client) numTemplates := 0 for k := range p.countToStrategy { numTemplates += k } if numTemplates > len(nodes.Items) { return fmt.Errorf("Can't prepare Nodes. 
Got more templates than existing Nodes.") } index := 0 sum := 0 for _, v := range p.countToStrategy { sum += v.Count for ; index < sum; index++ { if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil { glog.Errorf("Aborting node preparation: %v", err) return err } p.nodeToAppliedStrategy[nodes.Items[index].Name] = v.Strategy } } return nil } func (p *E2ETestNodePreparer) CleanupNodes() error { var encounteredError error nodes := GetReadySchedulableNodesOrDie(p.client) for i := range nodes.Items { var err error name := nodes.Items[i].Name strategy, found := p.nodeToAppliedStrategy[name] if found { if err = testutils.DoCleanupNode(p.client, name, strategy); err != nil { glog.Errorf("Skipping cleanup of Node: failed update of %v: %v", name, err) encounteredError = err } } } return encounteredError } func CleanupGCEResources(loadBalancerName string) (err error) { gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud) if !ok { return fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider) } gceCloud.DeleteFirewall(loadBalancerName) gceCloud.DeleteForwardingRule(loadBalancerName) gceCloud.DeleteGlobalStaticIP(loadBalancerName) hc, _ := gceCloud.GetHttpHealthCheck(loadBalancerName) gceCloud.DeleteTargetPool(loadBalancerName, hc) return nil } // getMaster populates the externalIP, internalIP and hostname fields of the master. // If any of these is unavailable, it is set to "". func getMaster(c clientset.Interface) Address { master := Address{} // Populate the internal IP. eps, err := c.Core().Endpoints(api.NamespaceDefault).Get("kubernetes") if err != nil { Failf("Failed to get kubernetes endpoints: %v", err) } if len(eps.Subsets) != 1 || len(eps.Subsets[0].Addresses) != 1 { Failf("There are more than 1 endpoints for kubernetes service: %+v", eps) } master.internalIP = eps.Subsets[0].Addresses[0].IP // Populate the external IP/hostname. url, err := url.Parse(TestContext.Host) if err != nil { Failf("Failed to parse hostname: %v", err) } if net.ParseIP(url.Host) != nil { // TODO: Check that it is external IP (not having a reserved IP address as per RFC1918). master.externalIP = url.Host } else { master.hostname = url.Host } return master } // GetMasterAddress returns the hostname/external IP/internal IP as appropriate for e2e tests on a particular provider // which is the address of the interface used for communication with the kubelet. func GetMasterAddress(c clientset.Interface) string { master := getMaster(c) switch TestContext.Provider { case "gce", "gke": return master.externalIP case "aws": return awsMasterIP default: Failf("This test is not supported for provider %s and should be disabled", TestContext.Provider) } return "" } // GetNodeExternalIP returns node external IP concatenated with port 22 for ssh // e.g. 1.2.3.4:22 func GetNodeExternalIP(node *api.Node) string { Logf("Getting external IP address for %s", node.Name) host := "" for _, a := range node.Status.Addresses { if a.Type == api.NodeExternalIP { host = a.Address + ":22" break } } if host == "" { Failf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses) } return host }
[ "\"FEDERATION_NAMESPACE\"", "\"KUBE_SSH_USER\"", "\"USER\"", "\"HOME\"", "\"AWS_SSH_KEY\"", "\"VAGRANT_SSH_KEY\"" ]
[]
[ "VAGRANT_SSH_KEY", "AWS_SSH_KEY", "KUBE_SSH_USER", "USER", "FEDERATION_NAMESPACE", "HOME" ]
[]
["VAGRANT_SSH_KEY", "AWS_SSH_KEY", "KUBE_SSH_USER", "USER", "FEDERATION_NAMESPACE", "HOME"]
go
6
0
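Most of the environment variables listed for the record above (AWS_SSH_KEY, VAGRANT_SSH_KEY, HOME, KUBE_SSH_USER, USER) feed the SSH plumbing in that file, most visibly the provider switch inside GetSigner. As a minimal standalone sketch of just that lookup pattern — resolveSSHKeyPath is a hypothetical helper name, and it stops at resolving a key path rather than building an ssh.Signer the way the real function does — it might look like this:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveSSHKeyPath mirrors the provider switch in GetSigner above:
// an environment-variable override wins, otherwise a well-known key
// file under $HOME/.ssh is used.
func resolveSSHKeyPath(provider string) (string, error) {
	keydir := filepath.Join(os.Getenv("HOME"), ".ssh")
	switch provider {
	case "gce", "gke", "kubemark":
		return filepath.Join(keydir, "google_compute_engine"), nil
	case "aws":
		// Explicit override via AWS_SSH_KEY takes precedence.
		if keyfile := os.Getenv("AWS_SSH_KEY"); keyfile != "" {
			return keyfile, nil
		}
		return filepath.Join(keydir, "kube_aws_rsa"), nil
	case "vagrant":
		// Vagrant has no conventional fallback; the variable is required.
		if keyfile := os.Getenv("VAGRANT_SSH_KEY"); keyfile != "" {
			return keyfile, nil
		}
		return "", fmt.Errorf("VAGRANT_SSH_KEY env variable should be provided")
	default:
		return "", fmt.Errorf("no SSH key mapping for provider %q", provider)
	}
}

func main() {
	path, err := resolveSSHKeyPath("aws")
	fmt.Println(path, err)
}

The design point the original code illustrates is that an explicit env-var override always wins over the conventional key location under $HOME/.ssh.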
locustfile.py
# Copyright 2015-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.

import os
import string
import random

from locust import HttpLocust, TaskSet, task


class MyTaskSet(TaskSet):
    @task(1000)
    def index(self):
        response = self.client.get("/")

    # This task will run 15 times for every 1000 runs of the above task
    # @task(15)
    # def about(self):
    #     self.client.get("/blog")

    # This task will run once for every 1000 runs of the above task
    # @task(1)
    # def about(self):
    #     id = id_generator()
    #     self.client.post("/signup", {"email": "[email protected]", "name": "Test"})


class MyLocust(HttpLocust):
    host = os.getenv('TARGET_URL', "http://localhost")
    task_set = MyTaskSet
    min_wait = 90
    max_wait = 100
[]
[]
[ "TARGET_URL" ]
[]
["TARGET_URL"]
python
1
0
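A minimal sketch of the TARGET_URL fallback used by MyLocust in the locustfile.py record above; the staging URL below is an illustrative assumption, not a value from the record.

import os

# Same lookup MyLocust performs: TARGET_URL wins, otherwise http://localhost.
os.environ.setdefault("TARGET_URL", "http://staging.example.invalid")  # illustrative only
print(os.getenv("TARGET_URL", "http://localhost"))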
x/gov/handler.go
package gov import ( sdk "github.com/soominhyunwoo/chain-sdk/types" sdkerrors "github.com/soominhyunwoo/chain-sdk/types/errors" "github.com/soominhyunwoo/chain-sdk/x/gov/keeper" "github.com/soominhyunwoo/chain-sdk/x/gov/types" ) // NewHandler creates an sdk.Handler for all the gov type messages func NewHandler(k keeper.Keeper) sdk.Handler { msgServer := keeper.NewMsgServerImpl(k) return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { ctx = ctx.WithEventManager(sdk.NewEventManager()) switch msg := msg.(type) { case *types.MsgDeposit: res, err := msgServer.Deposit(sdk.WrapSDKContext(ctx), msg) return sdk.WrapServiceResult(ctx, res, err) case *types.MsgSubmitProposal: res, err := msgServer.SubmitProposal(sdk.WrapSDKContext(ctx), msg) return sdk.WrapServiceResult(ctx, res, err) case *types.MsgVote: res, err := msgServer.Vote(sdk.WrapSDKContext(ctx), msg) return sdk.WrapServiceResult(ctx, res, err) case *types.MsgVoteWeighted: res, err := msgServer.VoteWeighted(sdk.WrapSDKContext(ctx), msg) return sdk.WrapServiceResult(ctx, res, err) default: return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized %s message type: %T", types.ModuleName, msg) } } }
[]
[]
[]
[]
[]
go
0
0
tests/unit/test_device.py
__author__ = "Rick Sherman, Nitin Kumar" __credits__ = "Jeremy Schulman" import unittest2 as unittest from nose.plugins.attrib import attr from mock import MagicMock, patch, mock_open import os from lxml import etree from ncclient.manager import Manager, make_device_handler from ncclient.transport import SSHSession import ncclient.transport.errors as NcErrors from ncclient.operations import RPCError, TimeoutExpiredError from jnpr.junos.facts.swver import version_info from jnpr.junos import Device from jnpr.junos.exception import RpcError from jnpr.junos import exception as EzErrors facts = {'domain': None, 'hostname': 'firefly', 'ifd_style': 'CLASSIC', 'version_info': version_info('12.1X46-D15.3'), '2RE': False, 'serialnumber': 'aaf5fe5f9b88', 'fqdn': 'firefly', 'virtual': True, 'switch_style': 'NONE', 'version': '12.1X46-D15.3', 'HOME': '/cf/var/home/rick', 'srx_cluster': False, 'model': 'FIREFLY-PERIMETER', 'RE0': {'status': 'Testing', 'last_reboot_reason': 'Router rebooted after a ' 'normal shutdown.', 'model': 'FIREFLY-PERIMETER RE', 'up_time': '6 hours, 29 minutes, 30 seconds'}, 'vc_capable': False, 'personality': 'SRX_BRANCH'} @attr('unit') class Test_MyTemplateLoader(unittest.TestCase): def setUp(self): from jnpr.junos.device import _MyTemplateLoader self.template_loader = _MyTemplateLoader() @patch('__builtin__.filter') def test_temp_load_get_source_filter_false(self, filter_mock): filter_mock.return_value = False try: self.template_loader.get_source(None, None) except Exception as ex: import jinja2 self.assertEqual(type(ex), jinja2.exceptions.TemplateNotFound) @patch('jnpr.junos.device.os.path') def test_temp_load_get_source_filter_true(self, os_path_mock): # cant use @patch here as with statement will have exit m = mock_open() with patch('__builtin__.file', m, create=True): self.template_loader.get_source(None, None) @attr('unit') class TestDevice(unittest.TestCase): @patch('ncclient.manager.connect') def setUp(self, mock_connect): mock_connect.side_effect = self._mock_manager self.dev = Device(host='1.1.1.1', user='rick', password='password123', gather_facts=False) self.dev.open() @patch('ncclient.operations.session.CloseSession.request') def tearDown(self, mock_session): self.dev.close() @patch('jnpr.junos.device.netconf_ssh') def test_device_ConnectAuthError(self, mock_manager): mock_manager.connect.side_effect = NcErrors.AuthenticationError self.assertRaises(EzErrors.ConnectAuthError, self.dev.open) @patch('jnpr.junos.device.netconf_ssh') def test_device_ConnectRefusedError(self, mock_manager): mock_manager.connect.side_effect = NcErrors.SSHError self.assertRaises(EzErrors.ConnectRefusedError, self.dev.open) @patch('jnpr.junos.device.netconf_ssh') @patch('jnpr.junos.device.datetime') def test_device_ConnectTimeoutError(self, mock_datetime, mock_manager): mock_manager.connect.side_effect = NcErrors.SSHError("Could not open socket to 1.1.1.1:830") from datetime import timedelta, datetime currenttime = datetime.now() mock_datetime.datetime.now.side_effect = [currenttime, currenttime + timedelta(minutes=4)] self.assertRaises(EzErrors.ConnectTimeoutError, self.dev.open) @patch('jnpr.junos.device.netconf_ssh') @patch('jnpr.junos.device.datetime') def test_device_diff_err_message(self, mock_datetime, mock_manager): NcErrors.SSHError.message = 'why are you trying :)' mock_manager.connect.side_effect = NcErrors.SSHError from datetime import timedelta, datetime currenttime = datetime.now() mock_datetime.datetime.now.side_effect = [currenttime, currenttime + timedelta(minutes=4)] 
self.assertRaises(EzErrors.ConnectError, self.dev.open) @patch('jnpr.junos.device.netconf_ssh') def test_device_ConnectUnknownHostError(self, mock_manager): import socket mock_manager.connect.side_effect = socket.gaierror self.assertRaises(EzErrors.ConnectUnknownHostError, self.dev.open) @patch('jnpr.junos.device.netconf_ssh') def test_device_other_error(self, mock_manager): mock_manager.connect.side_effect = TypeError self.assertRaises(EzErrors.ConnectError, self.dev.open) def test_device_probe_error(self): mock_probe = MagicMock() mock_probe.return_value = None self.dev.probe = mock_probe def fn(): self.dev.open(auto_probe=1) self.assertRaises(EzErrors.ProbeError, fn) def test_device_property_logfile_isinstance(self): mock = MagicMock() with patch('__builtin__.open', mock): with patch('__builtin__.file', MagicMock): handle = open('filename', 'r') self.dev.logfile = handle self.assertEqual(self.dev.logfile, handle) def test_device_host_mand_param(self): self.assertRaises(ValueError, Device, user='rick', password='password123', gather_facts=False) def test_device_property_logfile_close(self): self.dev._logfile = MagicMock() self.dev._logfile.close.return_value = 0 self.dev.logfile = None self.assertFalse(self.dev._logfile) def test_device_property_logfile_exception(self): try: self.dev.logfile = True except Exception as ex: self.assertEqual(type(ex), ValueError) def test_device_repr(self): localdev = Device(host='1.1.1.1', user='rick', password='password123', gather_facts=False) self.assertEqual(repr(localdev), 'Device(1.1.1.1)') def test_device_local(self): Device.ON_JUNOS = True localdev = Device() self.assertEqual(localdev._hostname, 'localhost') @patch('jnpr.junos.device.os') @patch('__builtin__.open') @patch('paramiko.config.SSHConfig.lookup') def test_device__sshconf_lkup(self, os_mock, open_mock, mock_paramiko): os_mock.path.exists.return_value = True self.dev._sshconf_lkup() mock_paramiko.assert_called_any() @patch('jnpr.junos.device.os') @patch('__builtin__.open') @patch('paramiko.config.SSHConfig.lookup') def test_device__sshconf_lkup_def(self, os_mock, open_mock, mock_paramiko): os_mock.path.exists.return_value = True self.dev._ssh_config = '/home/rsherman/.ssh/config' self.dev._sshconf_lkup() mock_paramiko.assert_called_any() @patch('os.getenv') def test_device__sshconf_lkup_path_not_exists(self, mock_env): mock_env.return_value = '/home/test' self.assertEqual(self.dev._sshconf_lkup(), None) @patch('os.getenv') def test_device__sshconf_lkup_home_not_defined(self, mock_env): mock_env.return_value = None self.assertEqual(self.dev._sshconf_lkup(), None) mock_env.assert_called_with('HOME') @patch('ncclient.manager.connect') @patch('jnpr.junos.Device.execute') def test_device_open(self, mock_connect, mock_execute): with patch('jnpr.junos.utils.fs.FS.cat') as mock_cat: mock_cat.return_value = """ domain jls.net """ mock_connect.side_effect = self._mock_manager mock_execute.side_effect = self._mock_manager self.dev2 = Device(host='2.2.2.2', user='rick', password='password123') self.dev2.open() self.assertEqual(self.dev2.connected, True) @patch('jnpr.junos.Device.execute') def test_device_facts(self, mock_execute): with patch('jnpr.junos.utils.fs.FS.cat') as mock_cat: mock_execute.side_effect = self._mock_manager mock_cat.return_value = """ domain jls.net """ self.dev.facts_refresh() assert self.dev.facts['version'] == facts['version'] def test_device_hostname(self): self.assertEqual(self.dev.hostname, '1.1.1.1') def test_device_user(self): self.assertEqual(self.dev.user, 'rick') def 
test_device_get_password(self): self.assertEqual(self.dev.password, None) def test_device_set_password(self): self.dev.password = 'secret' self.assertEqual(self.dev._password, 'secret') def test_device_get_timeout(self): self.assertEqual(self.dev.timeout, 30) def test_device_set_timeout(self): self.dev.timeout = 10 self.assertEqual(self.dev.timeout, 10) def test_device_manages(self): self.assertEqual(self.dev.manages, [], 'By default manages will be empty list') @patch('ncclient.manager.connect') @patch('jnpr.junos.Device.execute') def test_device_open_normalize(self, mock_connect, mock_execute): mock_connect.side_effect = self._mock_manager self.dev2 = Device(host='2.2.2.2', user='rick', password='password123') self.dev2.open(gather_facts=False, normalize=True) self.assertEqual(self.dev2.transform, self.dev2._norm_transform) def test_device_set_facts_exception(self): try: self.dev.facts = 'test' except RuntimeError as ex: self.assertEqual(RuntimeError, type(ex)) @patch('jnpr.junos.Device.execute') def test_device_cli(self, mock_execute): mock_execute.side_effect = self._mock_manager self.assertEqual(self.dev.cli('show cli directory').tag, 'cli') @patch('jnpr.junos.Device.execute') def test_device_cli_conf_info(self, mock_execute): mock_execute.side_effect = self._mock_manager self.assertTrue('ge-0/0/0' in self.dev.cli('show configuration')) @patch('jnpr.junos.Device.execute') def test_device_cli_output(self, mock_execute): mock_execute.side_effect = self._mock_manager self.assertTrue('Alarm' in self.dev.cli('show system alarms')) @patch('jnpr.junos.Device.execute') def test_device_cli_rpc(self, mock_execute): mock_execute.side_effect = self._mock_manager self.assertEqual(self.dev.cli('show system uptime | display xml rpc') .tag, 'get-system-uptime-information') def test_device_cli_exception(self): self.dev.rpc.cli = MagicMock(side_effect=AttributeError) val = self.dev.cli('show version') self.assertEqual(val, 'invalid command: show version') @patch('jnpr.junos.Device.execute') def test_device_display_xml_rpc(self, mock_execute): mock_execute.side_effect = self._mock_manager self.assertEqual(self.dev.display_xml_rpc('show system uptime ').tag, 'get-system-uptime-information') @patch('jnpr.junos.Device.execute') def test_device_display_xml_rpc_text(self, mock_execute): mock_execute.side_effect = self._mock_manager self.assertIn('<get-system-uptime-information>', self.dev.display_xml_rpc('show system uptime ', format='text')) @patch('jnpr.junos.Device.execute') def test_device_display_xml_exception(self, mock_execute): mock_execute.side_effect = self._mock_manager self.assertEqual(self.dev.display_xml_rpc('show foo'), 'invalid command: show foo| display xml rpc') def test_device_execute(self): self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager) self.assertEqual(self.dev.execute('<get-system-core-dumps/>').tag, 'directory-list') def test_device_execute_topy(self): self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager) self.assertEqual(self.dev.execute('<get-system-core-dumps/>', to_py=self._do_nothing), 'Nothing') # This test is for the commented out rpc-error code # def test_device_execute_exception(self): # self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager) # self.assertRaises(RpcError, self.dev.execute, # '<load-configuration-error/>') def test_device_execute_unknown_exception(self): class MyException(Exception): pass self.dev._conn.rpc = MagicMock(side_effect=MyException) self.assertRaises(MyException, self.dev.execute, '<get-software-information/>') def 
test_device_execute_rpc_error(self): self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager) self.assertRaises(RpcError, self.dev.rpc.get_rpc_error) def test_device_execute_permission_error(self): self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager) self.assertRaises(EzErrors.PermissionError, self.dev.rpc.get_permission_denied) def test_device_execute_index_error(self): self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager) self.assertTrue(self.dev.rpc.get_index_error()) def test_device_execute_ValueError(self): self.assertRaises(ValueError, self.dev.execute, None) def test_device_execute_unopened(self): self.dev.connected = False self.assertRaises(EzErrors.ConnectClosedError, self.dev.execute, None) def test_device_execute_timeout(self): self.dev._conn.rpc = MagicMock(side_effect=TimeoutExpiredError) self.assertRaises(EzErrors.RpcTimeoutError, self.dev.rpc.get_rpc_timeout) def test_device_execute_closed(self): self.dev._conn.rpc = MagicMock(side_effect=NcErrors.TransportError) self.assertRaises(EzErrors.ConnectClosedError, self.dev.rpc.get_rpc_close) self.assertFalse(self.dev.connected) def test_device_rpcmeta(self): self.assertEqual(self.dev.rpc.get_software_information.func_doc, 'get-software-information') def test_device_probe_timeout_zero(self): with patch('jnpr.junos.device.socket'): self.assertFalse(self.dev.probe(0)) def test_device_probe_timeout_gt_zero(self): with patch('jnpr.junos.device.socket'): self.assertTrue(self.dev.probe(1), 'probe fn is not working for' ' timeout greater than zero') def test_device_probe_timeout_exception(self): with patch('jnpr.junos.device.socket') as mock_socket: with patch('jnpr.junos.device.time.sleep') as mock_time: mock_socket.socket.return_value.close.side_effect \ = RuntimeError mock_time.return_value = None self.assertFalse(self.dev.probe(.01)) def test_device_bind_varg(self): self.dev.bind() mock = MagicMock() mock.__name__ = 'magic_mock' self.dev.bind(mock) self.assertEqual(self.dev.magic_mock.__name__, 'magic_mock') def test_device_bind_kvarg(self): self.dev.bind() mock = MagicMock() mock.return_value = 'Test' self.dev.bind(kw=mock) self.assertEqual(self.dev.kw, 'Test') def test_device_bind_varg_exception(self): def varg(): self.dev.bind() mock = MagicMock() mock.__name__ = 'magic mock' # for *args self.dev.bind(mock) self.dev.bind(mock) self.assertRaises(ValueError, varg) def test_device_bind_kvarg_exception(self): def kve(): self.dev.bind() mock = MagicMock() mock.__name__ = 'magic mock' # for **kwargs self.dev.bind(kw=mock) self.dev.bind(kw=mock) self.assertRaises(ValueError, kve) def test_device_template(self): # Try to load the template relative to module base try: template = self.dev.Template('tests/unit/templates/config-example.xml') except: # Try to load the template relative to test base try: template = self.dev.Template('templates/config-example.xml') except: raise self.assertEqual(template.render({'host_name': '1', 'domain_name': '2'}), 'system {\n host-name 1;\n domain-name 2;\n}') def test_device_close(self): def close_conn(): self.dev.connected = False self.dev.close = MagicMock(name='close') self.dev.close.side_effect = close_conn self.dev.close() self.assertEqual(self.dev.connected, False) def _read_file(self, fname): from ncclient.xml_ import NCElement fpath = os.path.join(os.path.dirname(__file__), 'rpc-reply', fname) foo = open(fpath).read() if fname == 'get-rpc-error.xml': # Raise ncclient exception for error raise RPCError(etree.XML(foo)) elif fname == 'get-permission-denied.xml': # Raise 
ncclient exception for error raise RPCError(etree.XML(foo)) elif (fname == 'get-index-error.xml' or fname == 'get-system-core-dumps.xml' or fname == 'load-configuration-error.xml'): rpc_reply = NCElement(foo, self.dev._conn._device_handler .transform_reply()) elif (fname == 'show-configuration.xml' or fname == 'show-system-alarms.xml'): rpc_reply = NCElement(foo, self.dev._conn._device_handler .transform_reply())._NCElement__doc else: rpc_reply = NCElement(foo, self.dev._conn._device_handler .transform_reply())._NCElement__doc[0] return rpc_reply def _mock_manager(self, *args, **kwargs): if kwargs: device_params = kwargs['device_params'] device_handler = make_device_handler(device_params) session = SSHSession(device_handler) return Manager(session, device_handler) elif args: if args[0].tag == 'command': if args[0].text == 'show cli directory': return self._read_file('show-cli-directory.xml') elif args[0].text == 'show configuration': return self._read_file('show-configuration.xml') elif args[0].text == 'show system alarms': return self._read_file('show-system-alarms.xml') elif args[0].text == 'show system uptime | display xml rpc': return self._read_file('show-system-uptime-rpc.xml') else: raise RpcError else: return self._read_file(args[0].tag + '.xml') def _do_nothing(self, *args, **kwargs): return 'Nothing'
[]
[]
[]
[]
[]
python
0
0
pkg/config/config.go
package config import ( "os" "path/filepath" "runtime" "strings" ) const ( folderName = ".okteto" ) // VersionString the version of the cli var VersionString string // Config holds all the configuration values. type Config struct { // HomePath is the path of the base folder for all the Okteto files HomePath string // ManifestFileName is the name of the manifest file ManifestFileName string } //GetBinaryName returns the name of the binary func GetBinaryName() string { return filepath.Base(GetBinaryFullPath()) } //GetBinaryFullPath returns the name of the binary func GetBinaryFullPath() string { return os.Args[0] } // GetHome returns the path of the folder func GetHome() string { home := getHomeDir() home = filepath.Join(home, folderName) if err := os.MkdirAll(home, 0700); err != nil { panic("failed to create the okteto directory") } return home } // GetDeploymentHome returns the path of the folder func GetDeploymentHome(namespace, name string) string { home := getHomeDir() home = filepath.Join(home, folderName, namespace, name) if err := os.MkdirAll(home, 0700); err != nil { panic("failed to create the okteto deployment directory") } return home } // GetStateFile returns the path to the state file func GetStateFile(namespace, name string) string { return filepath.Join(GetDeploymentHome(namespace, name), "okteto.state") } // GetHomeDir returns the OS home dir func getHomeDir() string { home := os.Getenv("HOME") if runtime.GOOS == "windows" { home = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") if home == "" { home = os.Getenv("USERPROFILE") } } return home } // GetKubeConfigFile returns the path to the kubeconfig file, taking the KUBECONFIG env var into consideration func GetKubeConfigFile() string { home := getHomeDir() kubeconfig := filepath.Join(home, ".kube", "config") kubeconfigEnv := os.Getenv("KUBECONFIG") if len(kubeconfigEnv) > 0 { kubeconfig = splitKubeConfigEnv(kubeconfigEnv) } return kubeconfig } func splitKubeConfigEnv(value string) string { if runtime.GOOS == "windows" { return strings.Split(value, ";")[0] } return strings.Split(value, ":")[0] }
[ "\"HOME\"", "\"HOMEDRIVE\"", "\"HOMEPATH\"", "\"USERPROFILE\"", "\"KUBECONFIG\"" ]
[]
[ "HOMEPATH", "KUBECONFIG", "HOMEDRIVE", "USERPROFILE", "HOME" ]
[]
["HOMEPATH", "KUBECONFIG", "HOMEDRIVE", "USERPROFILE", "HOME"]
go
5
0
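For readers reproducing the pkg/config/config.go lookup outside Go, a hedged Python sketch of getHomeDir's precedence (HOME, then HOMEDRIVE+HOMEPATH, then USERPROFILE on Windows); it mirrors the record's logic and introduces no new variable names.

import os
import platform

def get_home_dir() -> str:
    # HOME first; on Windows the record overwrites it with HOMEDRIVE+HOMEPATH,
    # falling back to USERPROFILE when that pair is empty.
    home = os.environ.get("HOME", "")
    if platform.system() == "Windows":
        home = os.environ.get("HOMEDRIVE", "") + os.environ.get("HOMEPATH", "")
        if not home:
            home = os.environ.get("USERPROFILE", "")
    return home

print(get_home_dir())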
minimalrestipy/wsgi.py
""" WSGI config for minimalrestipy project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'minimalrestipy.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
server/YPHS/mydatabase.py
import sqlite3 from string import Template from datetime import datetime from ftplib import FTP import requests import os usr = os.environ['ftpusr'] psw = os.environ['ftppsw'] def get_current_time(): site=requests.get("https://worldtimeapi.org/api/timezone/Asia/Taipei") data=site.json() day=datetime.fromisoformat(data["datetime"]) return day.strftime('%Y/%m/%d') def remote_connect(server, file_name, usr, psw): server.set_debuglevel(0) server.connect("203.72.178.240") server.login(usr, psw) server.cwd("./database") with open(f"./{file_name}", "wb") as w: server.retrbinary(f'RETR ./{file_name}', w.write) server.quit() def remote_upload(server, file_name, usr, psw): server.set_debuglevel(2) server.connect("203.72.178.240") server.login(usr, psw) server.cwd("./database") with open(f"./{file_name}", "rb") as r: server.storbinary(f"STOR ./{file_name}", r) server.quit() class database: def __init__(self, name): global usr, psw, dt2 self.name = name self.server = FTP() try: remote_connect(self.server, name, usr, psw) except: pass self.db = sqlite3.connect(name) def __del__(self): global usr, psw self.db.commit() self.db.close() remote_upload(self.server, self.name, usr, psw) def create_table(self, table_name): self.db.cursor() self.db.execute(Template( "CREATE TABLE $name(id INTEGER PRIMARY KEY AUTOINCREMENT,type TEXT,day TEXT,subject TEXT,content TEXT)").substitute(name=table_name)) def insert(self, table_name, subject, type_, content): self.db.execute(Template("INSERT INTO $name(type , day , subject , content ) VALUES(\"$type\" , \"$dat\" , \"$subject\" , \"$txt\" )").substitute( name=table_name, dat=get_current_time(), type=type_, subject=subject, txt=content)) self.db.commit() def select(self, table_name, date): results = self.db.execute(Template( "SELECT * FROM $name WHERE day=\"$day\"").substitute(name=table_name, day=date)) return results.fetchall() def select_by_id(self, table_name, id): result = self.db.execute(Template( "SELECT * FROM $name WHERE id=\"$no\"").substitute(name=table_name, no=id)).fetchone() return result def update(self, table_name, id, content): self.db.execute(Template("UPDATE $name SET content = \"$content\" WHERE id = \"$id\"").substitute( name=table_name, id=id, content=content)) self.db.commit() def delete(self, table_name, id): self.db.execute(Template("DELETE FROM $name WHERE id=\"$id\"").substitute( name=table_name, id=id)) self.db.commit()
[]
[]
[ "ftppsw", "ftpusr" ]
[]
["ftppsw", "ftpusr"]
python
2
0
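The mydatabase.py record above interpolates values straight into SQL via string.Template; as a hedged alternative sketch (not the author's code), sqlite3 parameter binding handles quoting for the value columns. Table names still cannot be bound, so the "notes" table here is a hypothetical, caller-validated name.

import sqlite3

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE notes(id INTEGER PRIMARY KEY AUTOINCREMENT,"
           " type TEXT, day TEXT, subject TEXT, content TEXT)")
# "?" placeholders replace the Template substitution for the value fields.
db.execute("INSERT INTO notes(type, day, subject, content) VALUES(?, ?, ?, ?)",
           ("memo", "2020/01/01", "greeting", "hello"))
db.commit()
print(db.execute("SELECT * FROM notes WHERE day = ?", ("2020/01/01",)).fetchall())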
src/autoscaler/api/publicapiserver/public_api_server.go
package publicapiserver import ( "fmt" "net/http" "os" "autoscaler/api" "autoscaler/api/config" "autoscaler/cf" "autoscaler/db" "autoscaler/healthendpoint" "autoscaler/ratelimiter" "autoscaler/routes" "code.cloudfoundry.org/cfhttp" "code.cloudfoundry.org/lager" "github.com/gorilla/mux" "github.com/tedsuo/ifrit" "github.com/tedsuo/ifrit/http_server" ) type VarsFunc func(w http.ResponseWriter, r *http.Request, vars map[string]string) func (vh VarsFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) vh(w, r, vars) } func NewPublicApiServer(logger lager.Logger, conf *config.Config, policydb db.PolicyDB, checkBindingFunc api.CheckBindingFunc, cfclient cf.CFClient, httpStatusCollector healthendpoint.HTTPStatusCollector, rateLimiter ratelimiter.Limiter, bindingdb db.BindingDB) (ifrit.Runner, error) { pah := NewPublicApiHandler(logger, conf, policydb, bindingdb) mw := NewMiddleware(logger, cfclient, checkBindingFunc) rateLimiterMiddleware := ratelimiter.NewRateLimiterMiddleware("appId", rateLimiter, logger.Session("api-ratelimiter-middleware")) httpStatusCollectMiddleware := healthendpoint.NewHTTPStatusCollectMiddleware(httpStatusCollector) r := routes.ApiOpenRoutes() r.Use(httpStatusCollectMiddleware.Collect) r.Get(routes.PublicApiInfoRouteName).Handler(VarsFunc(pah.GetApiInfo)) r.Get(routes.PublicApiHealthRouteName).Handler(VarsFunc(pah.GetHealth)) rp := routes.ApiRoutes() rp.Use(rateLimiterMiddleware.CheckRateLimit) rp.Use(mw.Oauth) rp.Use(httpStatusCollectMiddleware.Collect) rp.Get(routes.PublicApiScalingHistoryRouteName).Handler(VarsFunc(pah.GetScalingHistories)) rp.Get(routes.PublicApiMetricsHistoryRouteName).Handler(VarsFunc(pah.GetInstanceMetricsHistories)) rp.Get(routes.PublicApiAggregatedMetricsHistoryRouteName).Handler(VarsFunc(pah.GetAggregatedMetricsHistories)) rpolicy := routes.ApiPolicyRoutes() rpolicy.Use(rateLimiterMiddleware.CheckRateLimit) rpolicy.Use(mw.Oauth) if !conf.UseBuildInMode { rpolicy.Use(mw.CheckServiceBinding) } rpolicy.Use(httpStatusCollectMiddleware.Collect) rpolicy.Get(routes.PublicApiGetPolicyRouteName).Handler(VarsFunc(pah.GetScalingPolicy)) rpolicy.Get(routes.PublicApiAttachPolicyRouteName).Handler(VarsFunc(pah.AttachScalingPolicy)) rpolicy.Get(routes.PublicApiDetachPolicyRouteName).Handler(VarsFunc(pah.DetachScalingPolicy)) rcredential := routes.ApiCredentialRoutes() rcredential.Use(rateLimiterMiddleware.CheckRateLimit) if !conf.UseBuildInMode { rcredential.Use(mw.RejectCredentialOperationInServiceOffering) } rcredential.Use(mw.Oauth) rcredential.Use(httpStatusCollectMiddleware.Collect) rcredential.Get(routes.PublicApiCreateCredentialRouteName).Handler(VarsFunc(pah.CreateCredential)) rcredential.Get(routes.PublicApiDeleteCredentialRouteName).Handler(VarsFunc(pah.DeleteCredential)) var addr string if os.Getenv("APP_AUTOSCALER_TEST_RUN") == "true" { addr = fmt.Sprintf("localhost:%d", conf.PublicApiServer.Port) } else { addr = fmt.Sprintf("0.0.0.0:%d", conf.PublicApiServer.Port) } var runner ifrit.Runner if (conf.PublicApiServer.TLS.KeyFile == "") || (conf.PublicApiServer.TLS.CertFile == "") { logger.Info("creating-public-api-http-server") runner = http_server.New(addr, r) } else { logger.Info("creating-public-api-https-server") tlsConfig, err := cfhttp.NewTLSConfig(conf.PublicApiServer.TLS.CertFile, conf.PublicApiServer.TLS.KeyFile, conf.PublicApiServer.TLS.CACertFile) if err != nil { logger.Error("failed-new-server-new-tls-config", err, lager.Data{"tls": conf.PublicApiServer.TLS}) return nil, err } runner = 
http_server.NewTLSServer(addr, r, tlsConfig) } logger.Info("public-api-http-server-created", lager.Data{"serverConfig": conf.PublicApiServer}) return runner, nil }
[ "\"APP_AUTOSCALER_TEST_RUN\"" ]
[]
[ "APP_AUTOSCALER_TEST_RUN" ]
[]
["APP_AUTOSCALER_TEST_RUN"]
go
1
0
core/deployment/src/main/java/io/quarkus/deployment/IsDockerWorking.java
package io.quarkus.deployment; import java.io.BufferedReader; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.lang.reflect.InvocationTargetException; import java.net.Socket; import java.net.URI; import java.net.URISyntaxException; import java.util.List; import java.util.function.BooleanSupplier; import java.util.function.Function; import java.util.function.Supplier; import org.jboss.logging.Logger; import io.quarkus.deployment.util.ExecUtil; public class IsDockerWorking implements BooleanSupplier { private static final Logger LOGGER = Logger.getLogger(IsDockerWorking.class.getName()); private final List<Strategy> strategies; public IsDockerWorking() { this(false); } public IsDockerWorking(boolean silent) { this.strategies = List.of(new TestContainersStrategy(silent), new DockerHostStrategy(), new DockerBinaryStrategy(silent)); } @Override public boolean getAsBoolean() { for (Strategy strategy : strategies) { Result result = strategy.get(); if (result == Result.AVAILABLE) { return true; } } return false; } public static class IsDockerRunningSilent extends IsDockerWorking { public IsDockerRunningSilent() { super(true); } } private interface Strategy extends Supplier<Result> { } /** * Delegates the check to testcontainers (if the latter is on the classpath) */ private static class TestContainersStrategy implements Strategy { private final boolean silent; private TestContainersStrategy(boolean silent) { this.silent = silent; } @Override public Result get() { try { Class<?> dockerClientFactoryClass = Thread.currentThread().getContextClassLoader() .loadClass("org.testcontainers.DockerClientFactory"); Object dockerClientFactoryInstance = dockerClientFactoryClass.getMethod("instance").invoke(null); boolean isAvailable = (boolean) dockerClientFactoryClass.getMethod("isDockerAvailable") .invoke(dockerClientFactoryInstance); return isAvailable ? Result.AVAILABLE : Result.UNAVAILABLE; } catch (ClassNotFoundException | NoSuchMethodException | InvocationTargetException | IllegalAccessException e) { if (!silent) { LOGGER.debug("Unable to use testcontainers to determine if Docker is working", e); } return Result.UNKNOWN; } } } /** * Detection using a remote host socket * We don't want to pull in the docker API here, so we just see if the DOCKER_HOST is set * and if we can connect to it. * We can't actually verify it is docker listening on the other end. * Furthermore, this does not support Unix Sockets */ private static class DockerHostStrategy implements Strategy { @Override public Result get() { String dockerHost = System.getenv("DOCKER_HOST"); if (dockerHost != null && !dockerHost.startsWith("unix:")) { try { URI url = new URI(dockerHost); try (Socket s = new Socket(url.getHost(), url.getPort())) { return Result.AVAILABLE; } catch (IOException e) { LOGGER.warnf( "Unable to connect to DOCKER_HOST URI %s, make sure docker is running on the specified host", dockerHost); } } catch (URISyntaxException e) { LOGGER.warnf("Unable to parse DOCKER_HOST URI %s, it will be ignored for working docker detection", dockerHost); } } return Result.UNKNOWN; } } private static class DockerBinaryStrategy implements Strategy { private final boolean silent; private DockerBinaryStrategy(boolean silent) { this.silent = silent; } @Override public Result get() { try { if (!ExecUtil.execSilent("docker", "-v")) { LOGGER.warn("'docker -v' returned an error code. 
Make sure your Docker binary is correct"); return Result.UNKNOWN; } } catch (Exception e) { LOGGER.warnf("No Docker binary found or general error: %s", e); return Result.UNKNOWN; } try { OutputFilter filter = new OutputFilter(); if (ExecUtil.exec(new File("."), filter, "docker", "version", "--format", "'{{.Server.Version}}'")) { LOGGER.debugf("Docker daemon found. Version: %s", filter.getOutput()); return Result.AVAILABLE; } else { if (!silent) { LOGGER.warn("Could not determine version of Docker daemon"); } return Result.UNAVAILABLE; } } catch (Exception e) { LOGGER.warn("Unexpected error occurred while determining Docker daemon version", e); return Result.UNKNOWN; } } public static class OutputFilter implements Function<InputStream, Runnable> { private final StringBuilder builder = new StringBuilder(); @Override public Runnable apply(InputStream is) { return () -> { try (InputStreamReader isr = new InputStreamReader(is); BufferedReader reader = new BufferedReader(isr)) { for (String line = reader.readLine(); line != null; line = reader.readLine()) { builder.append(line); } } catch (IOException e) { throw new RuntimeException("Error reading stream.", e); } }; } public String getOutput() { return builder.toString(); } } } private enum Result { AVAILABLE, UNAVAILABLE, UNKNOWN } }
[ "\"DOCKER_HOST\"" ]
[]
[ "DOCKER_HOST" ]
[]
["DOCKER_HOST"]
java
1
0
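A hedged Python sketch of the DockerHostStrategy check in the IsDockerWorking.java record above: only a non-unix DOCKER_HOST is probed with a plain TCP connect, and nothing verifies that Docker itself is listening on the other end.

import os
import socket
from urllib.parse import urlparse

def docker_host_reachable(timeout: float = 2.0) -> bool:
    docker_host = os.environ.get("DOCKER_HOST", "")
    if not docker_host or docker_host.startswith("unix:"):
        return False  # the record treats these cases as "unknown"
    url = urlparse(docker_host)
    if not url.hostname or url.port is None:
        return False
    try:
        with socket.create_connection((url.hostname, url.port), timeout=timeout):
            return True
    except OSError:
        return False

print(docker_host_reachable())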
userbot/__init__.py
import os import sys from telethon.sessions import StringSession from telethon import TelegramClient from var import Var os.system("pip install pySmartDL") os.system("pip install sqlalchemy==1.3.23") from pylast import LastFMNetwork, md5 from logging import basicConfig, getLogger, INFO, DEBUG from distutils.util import strtobool as sb from pySmartDL import SmartDL from dotenv import load_dotenv import asyncio import pylast from requests import get import time Lastupdate = time.time() os.system("pip install --upgrade pip") if Var.STRING_SESSION: session_name = str(Var.STRING_SESSION) bot = TelegramClient(StringSession(session_name), Var.APP_ID, Var.API_HASH) else: session_name = "startup" bot = TelegramClient(session_name, Var.APP_ID, Var.API_HASH) CMD_LIST = {} # for later purposes CMD_HELP = {} INT_PLUG = "" LOAD_PLUG = {} # PaperPlaneExtended Support Vars ENV = os.environ.get("ENV", False) """ PPE initialization. """ from logging import basicConfig, getLogger, INFO, DEBUG from distutils.util import strtobool as sb import asyncio import pylast from pySmartDL import SmartDL from requests import get # Bot Logs setup: if bool(ENV): CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False")) if CONSOLE_LOGGER_VERBOSE: basicConfig( format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=DEBUG, ) else: basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=INFO) LOGS = getLogger(__name__) # Check if the config was edited by using the already used variable. # Basically, its the 'virginity check' for the config file ;) CONFIG_CHECK = os.environ.get( "___________PLOX_______REMOVE_____THIS_____LINE__________", None) if CONFIG_CHECK: LOGS.info( "Please remove the line mentioned in the first hashtag from the config.env file" ) quit(1) # Logging channel/group configuration. BOTLOG_CHATID = os.environ.get("BOTLOG_CHATID", None) try: BOTLOG_CHATID = int(BOTLOG_CHATID) except: pass # Userbot logging feature switch. BOTLOG = sb(os.environ.get("BOTLOG", "True")) LOGSPAMMER = sb(os.environ.get("LOGSPAMMER", "True")) # Bleep Blop, this is a bot ;) PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN", "True")) # Console verbose logging CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False")) # SQL Database URI DB_URI = os.environ.get("DATABASE_URL", None) # OCR API key OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY", None) # remove.bg API key REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY", None) # Chrome Driver and Headless Google Chrome Binaries CHROME_DRIVER = os.environ.get("CHROME_DRIVER", None) GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN", None) # OpenWeatherMap API Key OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID", None) # Anti Spambot Config ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT", "False")) ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT", "False")) # FedBan Premium Module F_BAN_LOGGER_GROUP = os.environ.get("F_BAN_LOGGER_GROUP", None) # Cbutton PRIVATE_CHANNEL_BOT_API_ID = os.environ.get("PRIVATE_CHANNEL_BOT_API_ID", None) # Heroku Credentials for updater. 
HEROKU_MEMEZ = sb(os.environ.get("HEROKU_MEMEZ", "False")) HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME", None) HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY", None) # Youtube API key YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", None) # Default .alive name ALIVE_NAME = os.environ.get("ALIVE_NAME", None) AUTONAME = os.environ.get("AUTONAME", None) #Autobio AUTO_BIO = os.environ.get("AUTO_BIO", None) # Time & Date - Country and Time Zone COUNTRY = str(os.environ.get("COUNTRY", "India")) TZ_NUMBER = int(os.environ.get("TZ_NUMBER", 1)) FBAN_REASON = os.environ.get("FBAN_REASON", None) FBAN_USER = os.environ.get("FBAN_USER", None) # Clean Welcome # Clean Welcome CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME", "True")) # Custom Module CUSTOM_PMPERMIT = os.environ.get("CUSTOM_PMPERMIT", None) CUSTOM_STICKER_PACK_NAME = os.environ.get("CUSTOM_STICKER_PACK_NAME", None) CUSTOM_ANIMATED_PACK_NAME = os.environ.get("CUSTOM_ANIMATED_PACK_NAME", None) # Pm Permit Img PMPERMIT_PIC = os.environ.get("PMPERMIT_PIC", None) # Gban USER_IS = os.environ.get("USER_IS", None) # Last.fm Module BIO_PREFIX = os.environ.get("BIO_PREFIX", None) DEFAULT_BIO = os.environ.get("DEFAULT_BIO", None) LASTFM_API = os.environ.get("LASTFM_API", None) LASTFM_SECRET = os.environ.get("LASTFM_SECRET", None) LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME", None) LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD", None) LASTFM_PASS = pylast.md5(LASTFM_PASSWORD_PLAIN) if not LASTFM_USERNAME == "None": lastfm = pylast.LastFMNetwork(api_key=LASTFM_API, api_secret=LASTFM_SECRET, username=LASTFM_USERNAME, password_hash=LASTFM_PASS) else: lastfm = None # Google Drive Module G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID", None) G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET", None) G_DRIVE_AUTH_TOKEN_DATA = os.environ.get("G_DRIVE_AUTH_TOKEN_DATA", None) GDRIVE_FOLDER_ID = os.environ.get("GDRIVE_FOLDER_ID", None) TEMP_DOWNLOAD_DIRECTORY = os.environ.get("TEMP_DOWNLOAD_DIRECTORY", "./downloads") else: # Put your ppe vars here if you are using local hosting PLACEHOLDER = None # Setting Up CloudMail.ru and MEGA.nz extractor binaries, # and giving them correct perms to work properly. if not os.path.exists('bin'): os.mkdir('bin') binaries = { "https://raw.githubusercontent.com/yshalsager/megadown/master/megadown": "bin/megadown", "https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py": "bin/cmrudl" } for binary, path in binaries.items(): downloader = SmartDL(binary, path, progress_bar=False) downloader.start() os.chmod(path, 0o755) # Global Variables COUNT_MSG = 0 USERS = {} COUNT_PM = {} LASTMSG = {} CMD_HELP = {} SUDO_LIST = {} ISAFK = False AFKREASON = None
[]
[]
[ "TEMP_DOWNLOAD_DIRECTORY", "GOOGLE_CHROME_BIN", "G_DRIVE_CLIENT_SECRET", "COUNTRY", "LASTFM_API", "CUSTOM_STICKER_PACK_NAME", "ANTI_SPAMBOT_SHOUT", "FBAN_REASON", "OCR_SPACE_API_KEY", "AUTONAME", "BIO_PREFIX", "FBAN_USER", "LOGSPAMMER", "TZ_NUMBER", "LASTFM_PASSWORD", "ENV", "DATABASE_URL", "GDRIVE_FOLDER_ID", "HEROKU_APP_NAME", "PRIVATE_CHANNEL_BOT_API_ID", "___________PLOX_______REMOVE_____THIS_____LINE__________", "CUSTOM_PMPERMIT", "HEROKU_API_KEY", "PMPERMIT_PIC", "CHROME_DRIVER", "YOUTUBE_API_KEY", "HEROKU_MEMEZ", "LASTFM_USERNAME", "G_DRIVE_CLIENT_ID", "PM_AUTO_BAN", "DEFAULT_BIO", "ANTI_SPAMBOT", "OPEN_WEATHER_MAP_APPID", "LASTFM_SECRET", "G_DRIVE_AUTH_TOKEN_DATA", "F_BAN_LOGGER_GROUP", "AUTO_BIO", "CONSOLE_LOGGER_VERBOSE", "USER_IS", "ALIVE_NAME", "BOTLOG_CHATID", "CUSTOM_ANIMATED_PACK_NAME", "CLEAN_WELCOME", "REM_BG_API_KEY", "BOTLOG" ]
[]
["TEMP_DOWNLOAD_DIRECTORY", "GOOGLE_CHROME_BIN", "G_DRIVE_CLIENT_SECRET", "COUNTRY", "LASTFM_API", "CUSTOM_STICKER_PACK_NAME", "ANTI_SPAMBOT_SHOUT", "FBAN_REASON", "OCR_SPACE_API_KEY", "AUTONAME", "BIO_PREFIX", "FBAN_USER", "LOGSPAMMER", "TZ_NUMBER", "LASTFM_PASSWORD", "ENV", "DATABASE_URL", "GDRIVE_FOLDER_ID", "HEROKU_APP_NAME", "PRIVATE_CHANNEL_BOT_API_ID", "___________PLOX_______REMOVE_____THIS_____LINE__________", "CUSTOM_PMPERMIT", "HEROKU_API_KEY", "PMPERMIT_PIC", "CHROME_DRIVER", "YOUTUBE_API_KEY", "HEROKU_MEMEZ", "LASTFM_USERNAME", "G_DRIVE_CLIENT_ID", "PM_AUTO_BAN", "DEFAULT_BIO", "ANTI_SPAMBOT", "OPEN_WEATHER_MAP_APPID", "LASTFM_SECRET", "G_DRIVE_AUTH_TOKEN_DATA", "F_BAN_LOGGER_GROUP", "AUTO_BIO", "CONSOLE_LOGGER_VERBOSE", "USER_IS", "ALIVE_NAME", "BOTLOG_CHATID", "CUSTOM_ANIMATED_PACK_NAME", "CLEAN_WELCOME", "REM_BG_API_KEY", "BOTLOG"]
python
45
0
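The userbot/__init__.py record above parses its many boolean flags with distutils' strtobool; a hedged equivalent that avoids the distutils dependency (removed in recent Python releases) could look like this, with BOTLOG set to an illustrative value.

import os

def env_flag(name: str, default: str = "False") -> bool:
    # Accepts the usual truthy spellings the record relies on.
    return os.environ.get(name, default).strip().lower() in ("1", "true", "yes", "y", "on")

os.environ.setdefault("BOTLOG", "True")  # illustrative value only
print(env_flag("BOTLOG"), env_flag("HEROKU_MEMEZ"))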
symmetric/logging.py
""" A module to set up the logging configuration of symmetric. """ import os import logging.config import symmetric.constants # Logging configuration logging.config.dictConfig({ "version": 1, "formatters": { "console": { "format": "[%(asctime)s] [%(levelname)s] %(module)s: %(message)s" }, "file": { "format": ("[%(asctime)s] [%(levelname)s] %(pathname)s - " "line %(lineno)d: \n%(message)s\n") } }, "handlers": { "console": { "class": "logging.StreamHandler", "stream": "ext://sys.stderr", "formatter": "console" }, "file": { "class": "logging.FileHandler", "filename": os.getenv( "LOG_FILE", default=symmetric.constants.LOG_FILE_NAME ), "formatter": "file" } }, "root": { "level": "INFO", "handlers": ["console", "file"] } })
[]
[]
[ "LOG_FILE" ]
[]
["LOG_FILE"]
python
1
0
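Because the symmetric/logging.py record resolves LOG_FILE inside dictConfig at import time, the variable must be set before the module is imported; a self-contained sketch of the same lookup, with "app.log" standing in for symmetric.constants.LOG_FILE_NAME.

import logging.config
import os

os.environ.setdefault("LOG_FILE", "override.log")  # illustrative override
logging.config.dictConfig({
    "version": 1,
    "handlers": {
        "file": {
            "class": "logging.FileHandler",
            "filename": os.getenv("LOG_FILE", "app.log"),
        },
    },
    "root": {"level": "INFO", "handlers": ["file"]},
})
logging.getLogger(__name__).info("written to the LOG_FILE target")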
scripts/liveness_probe/main.go
// Package main is a simple script for our CI/CD workflow // that ensures our sidecar proxy is running before proceeding package main import ( "context" "errors" "fmt" "net" "net/http" "os" "time" ) var goproxy = os.Getenv("GOPROXY") func main() { timeout := time.After(time.Minute) for { select { case <-timeout: fmt.Println("liveness probe timed out") os.Exit(1) default: } isLive, err := probe() if err != nil { shouldPrintErr := true // connection-refused errors are expected, don't print them var opErr *net.OpError if errors.As(err, &opErr) && opErr.Op == "read" { shouldPrintErr = false } if shouldPrintErr { fmt.Println(err) } } if isLive { fmt.Println("proxy is live") return } time.Sleep(time.Second) } } func probe() (bool, error) { req, err := http.NewRequest(http.MethodGet, goproxy, nil) if err != nil { return false, err } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() req = req.WithContext(ctx) resp, err := http.DefaultClient.Do(req) if err != nil { return false, err } return resp.StatusCode == http.StatusOK, nil }
[ "\"GOPROXY\"" ]
[]
[ "GOPROXY" ]
[]
["GOPROXY"]
go
1
0
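The scripts/liveness_probe/main.go record polls the URL in GOPROXY until it answers 200; a hedged single-shot Python version of that probe (the retry loop and the one-minute deadline are omitted).

import os
import urllib.request

def probe(timeout: float = 5.0) -> bool:
    url = os.environ.get("GOPROXY", "")
    if not url:
        return False
    try:
        with urllib.request.urlopen(url, timeout=timeout) as resp:
            return resp.status == 200
    except OSError:
        return False

print(probe())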
Configuration/EventContent/test/fragments.py
import FWCore.ParameterSet.Config as cms import os import pickle def _yellow(string): return '%s%s%s' %('\033[1;33m',string,'\033[1;0m') def include(includes_set): """ It takes a string or a list of strings and returns a list of FWCore.ParameterSet.parseConfig._ConfigReturn objects. In the package directory it creates ASCII files in which the objects are coded. If the files exist already it symply loads them. """ func_id='[fragments.include]' #packagedir=os.environ["CMSSW_BASE"]+"/src/Configuration/PyReleaseValidation/data/" packagedir='./' #Trasform the includes_set in a list if not isinstance(includes_set,list): includes_set=[includes_set] object_list=[] for cf_file_name in includes_set: pkl_file_name=packagedir+os.path.basename(cf_file_name)[:-4]+".pkl" cf_file_fullpath="" # Check the paths of the cffs for path in os.environ["CMSSW_SEARCH_PATH"].split(":"): cf_file_fullpath=path+"/"+cf_file_name if os.path.exists(cf_file_fullpath): break pkl_file_exists=os.path.exists(pkl_file_name) # Check the dates of teh cff and the corresponding pickle cff_age=0 pkl_age=0 if pkl_file_exists: cff_age=os.path.getctime(cf_file_fullpath) pkl_age=os.path.getctime(pkl_file_name) if cff_age>pkl_age: print _yellow(func_id)+" Pickle object older than file ..." if not pkl_file_exists or cff_age>pkl_age: obj=cms.include(cf_file_name) file=open(pkl_file_name,"w") pickle.dump(obj,file) file.close() print _yellow(func_id)+" Pickle object for "+cf_file_fullpath+" dumped as "+pkl_file_name+"..." # load the pkl files. file=open(pkl_file_name,"r") object_list.append(pickle.load(file)) file.close() print _yellow(func_id)+" Pickle object for "+cf_file_fullpath+" loaded ..." return object_list
[]
[]
[ "CMSSW_BASE", "CMSSW_SEARCH_PATH" ]
[]
["CMSSW_BASE", "CMSSW_SEARCH_PATH"]
python
2
0
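A hedged sketch of the CMSSW_SEARCH_PATH walk that the Configuration/EventContent/test/fragments.py record performs when locating a cff file; the relative path passed in is a made-up example.

import os

def find_in_search_path(rel_path: str):
    # The record splits CMSSW_SEARCH_PATH on ":" and takes the first existing hit.
    for base in os.environ.get("CMSSW_SEARCH_PATH", "").split(":"):
        candidate = os.path.join(base, rel_path)
        if os.path.exists(candidate):
            return candidate
    return None

print(find_in_search_path("Configuration/EventContent/data/example.cff"))  # hypothetical path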
sfj/usr/local/sbin/sfjmakej2p.py
# -*- coding: utf-8 -*- import xlrd from collections import OrderedDict import simplejson as json import sys import traceback import os import pwd import grp import shutil dbginfo = 1 dbgdetail = 2 dbg = os.environ.get("DBG") confdir=os.environ.get("CONFDIR") gitrepodir=os.environ.get("GITREPODIR") homedir=os.environ.get("HOME") user=os.environ.get("USER") vpcnetfile=os.environ.get("vpcnetfile") aclfile=os.environ.get("aclfile") hostsfile=os.environ.get("hostsfile") eipfile=os.environ.get("eipfile") sfjpara=os.environ.get("sfjpara") tfcmd=os.environ.get("tfcmd") parafile="ParamaterSeet.json" appconffile="appconf.json" zbxserv=os.environ.get("ZBXHOST") def sfjmakej2p(sysname,parafile,dbg=0): try: currentdir = os.getcwd() varsdir="%s/%s" % (gitrepodir,sysname) varsfile="%s/%s" % (varsdir,sfjpara) if os.path.exists(varsdir) is False: os.makedirs(varsdir) appfile="%s/%s" % (confdir,appconffile) appdata = OrderedDict() with open(appfile, 'r') as f: appdata = json.load(f) f.close() parafile="%s/%s/%s_%s" % (gitrepodir,sysname,sysname,parafile) paradata = OrderedDict() with open(parafile, 'r') as f: paradata = json.load(f) f.close() if dbg >= dbgdetail: print json.dumps(paradata, indent=2) # if sysname != paradata['system']['0']['sysname']: print "Different SystemName : %s : %s" % sysname,paradata['system'][0]['sysname'] exit( 1 ) # パラメータファイルの作成(sfjpara.tfvars) f = open(varsfile, "w") for cloudno in paradata['cloud']: pemfile="%s/%s" % (confdir,paradata['cloud'][cloudno]['privatekeyfile']) gitpemfile="%s/%s/%s" % (gitrepodir,sysname,paradata['cloud'][cloudno]['privatekeyfile']) sfjuser=paradata['cloud'][cloudno]['privatekeyfile'].replace(".pem","" ) accountid="%s" % (paradata['cloud'][cloudno]['accountid']) f.write( "aws_access_key=\"%s\"\n" % paradata['cloud'][cloudno]['accesskey'] ) f.write( "aws_secret_key=\"%s\"\n" % paradata['cloud'][cloudno]['secretkey'] ) f.write( "reagion=\"%s\"\n" % paradata['cloud'][cloudno]['reagion'] ) f.write( "ansible_ssh_user=\"%s\"\n" % accountid ) f.write( "privatekeyfile=\"%s\"\n" % paradata['cloud'][cloudno]['privatekeyfile'] ) f.write( "private_key_name=\"%s\"\n" % sfjuser) f.write( "\n" ) f.write( "network0=\"%s\"\n" % paradata['network']['0']['network'] ) f.write( "netmask0=\"%d\"\n" % int(paradata['network']['0']['netmask']) ) f.write( "\n" ) for hostno in paradata['hosts']: no = int(hostno) + 1 f.write( "hostname=\"%s\"\n" % paradata['hosts'][hostno]['hostname'] ) f.write( "instance_type%d=\"%s\"\n" % (no,paradata['hosts'][hostno]['vmtype']) ) f.write( "hostip%d=\"%s\"\n" % (no,paradata['hosts'][hostno]['hostip']) ) f.write( "amitype%d=\"%s\"\n" % (no,paradata['hosts'][hostno]['ostype']) ) f.write( "\n" ) f.close() # pem ファイルのコピー shutil.copy(pemfile, varsdir) uid = pwd.getpwnam(user).pw_uid gid = grp.getgrnam(user).gr_gid os.chown(gitpemfile, uid, gid) os.chmod(gitpemfile,0600) # vpcnet.tf ファイルのコピー f = open(vpcnetfile, "r") vpcfiledata = f.read() f.close() tagname="%s" % ( sysname ) vpcdata = vpcfiledata.replace("SFJTAGS",tagname) dirs, files = os.path.split(vpcnetfile) vpcfile="%s/%s" % (varsdir,files) f = open(vpcfile, "w") f.write(vpcdata) f.close() # acl.tf ファイルのコピー f = open(aclfile, "r") aclfiledata = f.read() f.close() tagname="%s" % ( sysname ) acldata = aclfiledata.replace("SFJTAGS",tagname) dirs, files = os.path.split(aclfile) aclnewfile="%s/%s" % (varsdir,files) f = open(aclnewfile, "w") f.write(acldata) f.close() # hostN.tf ファイルのコピーと作成 f = open(hostsfile, "r") hostsfiledata = f.read() f.close() f = open(eipfile, "r") eipdfileata = f.read() 
f.close() for hostno in paradata['hosts']: no = int(hostno) + 1 # SFJPARAをnoに置換 hostsdata = hostsfiledata.replace("SFJPARA",str(no)) tagname="%s" % ( sysname ) hostsdata = hostsdata.replace("SFJTAGS",tagname) tagname="%s-%s" % ( sysname, paradata['hosts'][hostno]['hostname'] ) hostsdata = hostsdata.replace("SFJTAGHOST",tagname) hostfile="%s/host%d.tf" % (varsdir,no) f = open(hostfile, "w") f.write(hostsdata) eipflag=paradata['hosts'][hostno]['gip'].encode('utf-8') if eipflag == '有' : tagname="%s" % ( sysname ) eipdata = eipdfileata.replace("SFJPARA",str(no)) eipdata = eipdata.replace("SFJTAGS",tagname) f.write(eipdata) f.close() sshdir="%s/.ssh" % (homedir) if os.path.exists(sshdir) is False: os.makedirs(sshdir) sfile="%s/config" % (sshdir) sf = open(sfile, 'w') gwname=paradata['hosts']['0']['hostname'].encode('utf-8') for hostno in paradata['hosts']: if paradata['hosts'][hostno]['role'].encode('utf-8') == "踏み台": gwname=paradata['hosts'][hostno]['hostname'].encode('utf-8') sf.write("host %s\n" % gwname) sf.write(" HostName PUBIP\n") sf.write(" User %s\n" % accountid ) sf.write(" StrictHostKeyChecking no\n") sf.write(" IdentityFile %s\n" % gitpemfile ) sf.write("\n" ) for hostno in paradata['hosts']: sf.write("Host %s-%s\n" % (sysname,paradata['hosts'][hostno]['hostname']) ) sf.write(" HostName %s\n" % paradata['hosts'][hostno]['hostip'] ) sf.write(" User %s\n" % accountid ) sf.write(" StrictHostKeyChecking no\n") sf.write(" IdentityFile %s\n" % gitpemfile ) sf.write(" ProxyCommand ssh -W %%h:%%p %s\n" % gwname) sf.write("\n" ) sf.close() # hostsとymlファイルの作成 for hostno in paradata['hosts']: monflag=0 # ymlファイルの作成 yfile="%s/%s/%s-%s.yml" % (gitrepodir,sysname,sysname,paradata['hosts'][hostno]['hostname']) yf = open(yfile, 'w') yf.write("- hosts: appl-%s\n" % paradata['hosts'][hostno]['hostname']) yf.write(" become: yes\n") yf.write(" roles:\n") for applno in appdata['rolelist']: appl=appdata['rolelist'][applno]['appname'].encode('utf-8') roleinfo=appdata['rolelist'][applno]['roleinfo'].encode('utf-8') monitor=paradata['hosts'][hostno]['monitorname'] if monitor != '' and appl == 'ZabbixAgent' : yf.write(" - role: %s\n" % roleinfo ) monflag=1 elif roleinfo != '' : item=paradata['hosts'][hostno].get(appl,"None") if item != 'None' : item=item.encode('utf-8') if item == '有' : yf.write(" - role: %s\n" % roleinfo ) yf.write("\n" ) if monflag == 1: yf.write("- hosts: appl-zbx\n") yf.write(" connection: local\n") yf.write(" become: yes\n") yf.write(" roles:\n") yf.write(" - role: RgistAgent\n") yf.write("\n" ) yf.close() # hostsファイルの作成 hfile="%s/%s/%s-%s.host" % (gitrepodir,sysname,sysname,paradata['hosts'][hostno]['hostname']) hf = open(hfile, 'w') hf.write("[appl-%s]\n" % paradata['hosts'][hostno]['hostname']) hf.write("%s-%s\n" % (sysname,paradata['hosts'][hostno]['hostname']) ) hf.write("\n") hf.write("[appl-%s:vars]\n" % paradata['hosts'][hostno]['hostname']) hf.write("ansible_ssh_user=%s\n" % paradata['cloud']['0']['accountid']) hf.write("ansible_ssh_private_key_file=%s\n" % gitpemfile ) hf.write("ansible_server_ip=%s\n" % zbxserv ) hf.write("\n") if monflag == 1: hf.write("[appl-zbx]\n") hf.write("localhost\n") hf.write("\n") hf.close() os.chdir(currentdir) exit( 0 ) except Exception as e: os.chdir(currentdir) print '=== エラー内容 ===' print 'type:' + str(type(e)) print traceback.format_exc() if __name__ == "__main__": argv = sys.argv argc = len(argv) if argc <= 1: print "NEED PROJECTNAME" exit(1) sysname=argv[1] if argc > 2: parafile=argv[2] if argc > 3: dbg=int(argv[3]) 
sfjmakej2p(argv[1],parafile,dbg) ###############################################################
[]
[]
[ "vpcnetfile", "eipfile", "aclfile", "USER", "CONFDIR", "tfcmd", "GITREPODIR", "ZBXHOST", "hostsfile", "sfjpara", "HOME", "DBG" ]
[]
["vpcnetfile", "eipfile", "aclfile", "USER", "CONFDIR", "tfcmd", "GITREPODIR", "ZBXHOST", "hostsfile", "sfjpara", "HOME", "DBG"]
python
12
0
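The sfj/usr/local/sbin/sfjmakej2p.py record reads its dozen variables at import time and only fails later with None-valued paths; a hedged pre-flight check one could run beforehand. The required list is taken from the record, treating DBG and tfcmd as optional.

import os
import sys

REQUIRED = ["CONFDIR", "GITREPODIR", "HOME", "USER", "ZBXHOST",
            "vpcnetfile", "aclfile", "hostsfile", "eipfile", "sfjpara"]

missing = [name for name in REQUIRED if not os.environ.get(name)]
if missing:
    sys.exit("missing environment variables: " + ", ".join(missing))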
tests/test_channel_methods.py
import sys import os sys.path.append('../') import unittest import requests from youtube_api import YoutubeDataApi import youtube_api.youtube_api_utils as utils class TestVideo(unittest.TestCase): @classmethod def setUpClass(cls): cls.key = os.environ.get('YT_KEY') cls.yt = YoutubeDataApi(cls.key) cls.channel_id = 'UC3XTzVzaHQEd30rQbuvCtTQ' cls.channel_title = 'LastWeekTonight' def test_channel_id(self): '''written by Megan Brown on 11/30/2018''' resp = self.yt.get_channel_id_from_user(self.channel_title) self.assertEqual(resp, self.channel_id) if __name__ == '__main__': unittest.main()
[]
[]
[ "YT_KEY" ]
[]
["YT_KEY"]
python
1
0
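When YT_KEY is unset, the tests/test_channel_methods.py record still constructs YoutubeDataApi with a None key; an illustrative guard (not part of the record) skips instead, using unittest.skipUnless.

import os
import unittest

@unittest.skipUnless(os.environ.get("YT_KEY"), "YT_KEY not set")
class TestVideoWithKey(unittest.TestCase):
    def test_key_present(self):
        self.assertTrue(os.environ["YT_KEY"])

if __name__ == "__main__":
    unittest.main()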
cmd/abapEnvironmentRunATCCheck_generated.go
// Code generated by piper's step-generator. DO NOT EDIT. package cmd import ( "fmt" "os" "time" "github.com/SAP/jenkins-library/pkg/config" "github.com/SAP/jenkins-library/pkg/log" "github.com/SAP/jenkins-library/pkg/splunk" "github.com/SAP/jenkins-library/pkg/telemetry" "github.com/SAP/jenkins-library/pkg/validation" "github.com/spf13/cobra" ) type abapEnvironmentRunATCCheckOptions struct { AtcConfig string `json:"atcConfig,omitempty"` CfAPIEndpoint string `json:"cfApiEndpoint,omitempty"` CfOrg string `json:"cfOrg,omitempty"` CfServiceInstance string `json:"cfServiceInstance,omitempty"` CfServiceKeyName string `json:"cfServiceKeyName,omitempty"` CfSpace string `json:"cfSpace,omitempty"` Username string `json:"username,omitempty"` Password string `json:"password,omitempty"` Host string `json:"host,omitempty"` AtcResultsFileName string `json:"atcResultsFileName,omitempty"` GenerateHTML bool `json:"generateHTML,omitempty"` } // AbapEnvironmentRunATCCheckCommand Runs an ATC Check func AbapEnvironmentRunATCCheckCommand() *cobra.Command { const STEP_NAME = "abapEnvironmentRunATCCheck" metadata := abapEnvironmentRunATCCheckMetadata() var stepConfig abapEnvironmentRunATCCheckOptions var startTime time.Time var logCollector *log.CollectorHook var createAbapEnvironmentRunATCCheckCmd = &cobra.Command{ Use: STEP_NAME, Short: "Runs an ATC Check", Long: `This step is for triggering an [ATC](https://help.sap.com/viewer/65de2977205c403bbc107264b8eccf4b/Cloud/en-US/d8cec788fc104ff9ad9c3757b4dd13d4.html) test run on an SAP Cloud Platform ABAP Environment system. Please provide either of the following options: * The host and credentials the Cloud Platform ABAP Environment system itself. The credentials must be configured for the Communication Scenario [SAP_COM_0510](https://help.sap.com/viewer/65de2977205c403bbc107264b8eccf4b/Cloud/en-US/b04a9ae412894725a2fc539bfb1ca055.html). * The Cloud Foundry parameters (API endpoint, organization, space), credentials, the service instance for the ABAP service and the service key for the Communication Scenario SAP_COM_0510. * Only provide one of those options with the respective credentials. If all values are provided, the direct communication (via host) has priority. 
Regardless of the option you chose, please make sure to provide the configuration for Software Components and Packages that you want to be checked analog to the examples listed on this page.`, PreRunE: func(cmd *cobra.Command, _ []string) error { startTime = time.Now() log.SetStepName(STEP_NAME) log.SetVerbose(GeneralConfig.Verbose) GeneralConfig.GitHubAccessTokens = ResolveAccessTokens(GeneralConfig.GitHubTokens) path, _ := os.Getwd() fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path} log.RegisterHook(fatalHook) err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile) if err != nil { log.SetErrorCategory(log.ErrorConfiguration) return err } log.RegisterSecret(stepConfig.Username) log.RegisterSecret(stepConfig.Password) if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 { sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID) log.RegisterHook(&sentryHook) } if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 { logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID} log.RegisterHook(logCollector) } validation, err := validation.New(validation.WithJSONNamesForStructFields(), validation.WithPredefinedErrorMessages()) if err != nil { return err } if err = validation.ValidateStruct(stepConfig); err != nil { log.SetErrorCategory(log.ErrorConfiguration) return err } return nil }, Run: func(_ *cobra.Command, _ []string) { telemetryData := telemetry.CustomData{} telemetryData.ErrorCode = "1" handler := func() { config.RemoveVaultSecretFiles() telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds()) telemetryData.ErrorCategory = log.GetErrorCategory().String() telemetry.Send(&telemetryData) if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 { splunk.Send(&telemetryData, logCollector) } } log.DeferExitHandler(handler) defer handler() telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME) if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 { splunk.Initialize(GeneralConfig.CorrelationID, GeneralConfig.HookConfig.SplunkConfig.Dsn, GeneralConfig.HookConfig.SplunkConfig.Token, GeneralConfig.HookConfig.SplunkConfig.Index, GeneralConfig.HookConfig.SplunkConfig.SendLogs) } abapEnvironmentRunATCCheck(stepConfig, &telemetryData) telemetryData.ErrorCode = "0" log.Entry().Info("SUCCESS") }, } addAbapEnvironmentRunATCCheckFlags(createAbapEnvironmentRunATCCheckCmd, &stepConfig) return createAbapEnvironmentRunATCCheckCmd } func addAbapEnvironmentRunATCCheckFlags(cmd *cobra.Command, stepConfig *abapEnvironmentRunATCCheckOptions) { cmd.Flags().StringVar(&stepConfig.AtcConfig, "atcConfig", os.Getenv("PIPER_atcConfig"), "Path to a YAML configuration file for Packages and/or Software Components to be checked during ATC run") cmd.Flags().StringVar(&stepConfig.CfAPIEndpoint, "cfApiEndpoint", os.Getenv("PIPER_cfApiEndpoint"), "Cloud Foundry API endpoint") cmd.Flags().StringVar(&stepConfig.CfOrg, "cfOrg", os.Getenv("PIPER_cfOrg"), "CF org") cmd.Flags().StringVar(&stepConfig.CfServiceInstance, "cfServiceInstance", os.Getenv("PIPER_cfServiceInstance"), "Parameter of ServiceInstance Name to delete CloudFoundry Service") cmd.Flags().StringVar(&stepConfig.CfServiceKeyName, "cfServiceKeyName", os.Getenv("PIPER_cfServiceKeyName"), "Parameter of CloudFoundry Service Key to be created") cmd.Flags().StringVar(&stepConfig.CfSpace, "cfSpace", os.Getenv("PIPER_cfSpace"), "CF Space") cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), 
"User for either the Cloud Foundry API or the Communication Arrangement for SAP_COM_0510") cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password for either the Cloud Foundry API or the Communication Arrangement for SAP_COM_0510") cmd.Flags().StringVar(&stepConfig.Host, "host", os.Getenv("PIPER_host"), "Specifies the host address of the SAP Cloud Platform ABAP Environment system") cmd.Flags().StringVar(&stepConfig.AtcResultsFileName, "atcResultsFileName", `ATCResults.xml`, "Specifies output file name for the results from the ATC run. This file name will also be used for generating the HTML file") cmd.Flags().BoolVar(&stepConfig.GenerateHTML, "generateHTML", false, "Specifies whether the ATC results should also be generated as an HTML document") cmd.MarkFlagRequired("atcConfig") cmd.MarkFlagRequired("username") cmd.MarkFlagRequired("password") } // retrieve step metadata func abapEnvironmentRunATCCheckMetadata() config.StepData { var theMetaData = config.StepData{ Metadata: config.StepMetadata{ Name: "abapEnvironmentRunATCCheck", Aliases: []config.Alias{}, Description: "Runs an ATC Check", }, Spec: config.StepSpec{ Inputs: config.StepInputs{ Secrets: []config.StepSecrets{ {Name: "abapCredentialsId", Description: "Jenkins credentials ID containing user and password to authenticate to the Cloud Platform ABAP Environment system or the Cloud Foundry API", Type: "jenkins", Aliases: []config.Alias{{Name: "cfCredentialsId", Deprecated: false}}}, }, Parameters: []config.StepParameters{ { Name: "atcConfig", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{}, Default: os.Getenv("PIPER_atcConfig"), }, { Name: "cfApiEndpoint", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "string", Mandatory: false, Aliases: []config.Alias{{Name: "cloudFoundry/apiEndpoint"}}, Default: os.Getenv("PIPER_cfApiEndpoint"), }, { Name: "cfOrg", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "string", Mandatory: false, Aliases: []config.Alias{{Name: "cloudFoundry/org"}}, Default: os.Getenv("PIPER_cfOrg"), }, { Name: "cfServiceInstance", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "string", Mandatory: false, Aliases: []config.Alias{{Name: "cloudFoundry/serviceInstance"}}, Default: os.Getenv("PIPER_cfServiceInstance"), }, { Name: "cfServiceKeyName", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "string", Mandatory: false, Aliases: []config.Alias{{Name: "cloudFoundry/serviceKey"}, {Name: "cloudFoundry/serviceKeyName"}, {Name: "cfServiceKey"}}, Default: os.Getenv("PIPER_cfServiceKeyName"), }, { Name: "cfSpace", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "string", Mandatory: false, Aliases: []config.Alias{{Name: "cloudFoundry/space"}}, Default: os.Getenv("PIPER_cfSpace"), }, { Name: "username", ResourceRef: []config.ResourceReference{ { Name: "abapCredentialsId", Param: "username", Type: "secret", }, }, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{}, Default: os.Getenv("PIPER_username"), }, { Name: "password", ResourceRef: []config.ResourceReference{ { Name: "abapCredentialsId", Param: "password", Type: "secret", 
}, }, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{}, Default: os.Getenv("PIPER_password"), }, { Name: "host", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "string", Mandatory: false, Aliases: []config.Alias{}, Default: os.Getenv("PIPER_host"), }, { Name: "atcResultsFileName", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: false, Aliases: []config.Alias{}, Default: `ATCResults.xml`, }, { Name: "generateHTML", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "bool", Mandatory: false, Aliases: []config.Alias{}, Default: false, }, }, }, Containers: []config.Container{ {Name: "cf", Image: "ppiper/cf-cli:7"}, }, }, } return theMetaData }
[ "\"PIPER_atcConfig\"", "\"PIPER_cfApiEndpoint\"", "\"PIPER_cfOrg\"", "\"PIPER_cfServiceInstance\"", "\"PIPER_cfServiceKeyName\"", "\"PIPER_cfSpace\"", "\"PIPER_username\"", "\"PIPER_password\"", "\"PIPER_host\"", "\"PIPER_atcConfig\"", "\"PIPER_cfApiEndpoint\"", "\"PIPER_cfOrg\"", "\"PIPER_cfServiceInstance\"", "\"PIPER_cfServiceKeyName\"", "\"PIPER_cfSpace\"", "\"PIPER_username\"", "\"PIPER_password\"", "\"PIPER_host\"" ]
[]
[ "PIPER_cfSpace", "PIPER_host", "PIPER_cfApiEndpoint", "PIPER_password", "PIPER_username", "PIPER_cfServiceInstance", "PIPER_cfServiceKeyName", "PIPER_cfOrg", "PIPER_atcConfig" ]
[]
["PIPER_cfSpace", "PIPER_host", "PIPER_cfApiEndpoint", "PIPER_password", "PIPER_username", "PIPER_cfServiceInstance", "PIPER_cfServiceKeyName", "PIPER_cfOrg", "PIPER_atcConfig"]
go
9
0
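The generated step above wires every cobra flag to a PIPER_* environment variable as its default (for example --username falls back to PIPER_username), so the step can be driven entirely through the environment. A minimal sketch of that pattern, assuming the compiled CLI binary is named piper and is on PATH; the credential values below are placeholders, not real configuration:

import os
import subprocess

# The generated flags fall back to these PIPER_* variables when the
# corresponding --username/--password/--atcConfig flags are omitted.
env = dict(os.environ)
env.update({
    "PIPER_username": "TECH_USER",
    "PIPER_password": "not-a-real-password",
    "PIPER_atcConfig": "atcConfig.yml",
})

subprocess.run(["piper", "abapEnvironmentRunATCCheck"], env=env, check=True)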
src/features/target_feature/zabiha_list_DEPRECATED.py
''' A script to scrape a list of 744 confirmed Halal restaurants in NYC area from Zabiha.com as well as requesting 338 halal tagged restaurants in NYC from Zomato.com ''' import review_scraper import pandas as pd import os, requests, json from dotenv import load_dotenv def _zabiha_to_csv(url_dict): webdriver = review_scraper._get_webdriver() res_names_xpath = '//div[@class="titleBS"]' res_address_xpath = '//div[@class="titleBS"]/../div[@class="tinyLink"]' df = pd.DataFrame(columns=['name', 'address', 'borough']) for key in url_dict: print('scraping {} results from Zabiha.com'.format(key)) webdriver.get(url_dict[key]) names = webdriver.find_elements_by_xpath(res_names_xpath) addresses = webdriver.find_elements_by_xpath(res_address_xpath) for name, address in zip(names, addresses): row = {'name' : name.text, 'address' : address.text, 'borough' : key, 'source' : 'Zabiha'} df = df.append(row, ignore_index=True) review_scraper._close_webdriver(webdriver) df.to_csv('/Users/wesamazaizeh/Desktop/Projects/halal_o_meter/src/data/data_collection/target_list.csv', mode='a', index=False) print('\n{} rows added from Zabiha\n'.format(df.shape[0])) def _zomato_to_csv(city_id): load_dotenv() API_KEY = os.getenv('ZOMATO_API_KEY') offset = 0 base_url = 'https://developers.zomato.com/api/v2.1/search?entity_id='\ + str(city_id) + '&entity_type=city&q=halal&start=' headers = {'user-key': API_KEY} # use the key loaded from the environment, never a hard-coded credential r = requests.request("GET", base_url + str(offset), headers=headers) response = r.text json_obj = json.loads(response) # get total number of results offset_max = json_obj['results_found'] print('Found {} results in Zomato.com'.format(offset_max)) df = pd.DataFrame(columns=['name', 'address', 'borough']) while offset < offset_max: # request next page using the current offset r = requests.request("GET", base_url + str(offset), headers=headers) response = r.text json_obj = json.loads(response) # get info and append to dataframe for restaurant in json_obj['restaurants']: restaurant = restaurant['restaurant'] row = {'name' : restaurant['name'], 'address' : restaurant['location']['address'], 'borough' : restaurant['location']['city'], 'source' : 'Zomato'} df = df.append(row, ignore_index=True) # advance offset print('Progress: {0}/{1}'.format(offset+20, offset_max), end='\r', flush=True) offset += 20 df.to_csv('/Users/wesamazaizeh/Desktop/Projects/halal_o_meter/src/data/data_collection/target_list.csv', mode='a', index=False) print('\n{} rows added from Zomato\n'.format(df.shape[0])) if __name__ == "__main__": borough_urls = {'Manhattan' : 'https://www.zabihah.com/sub/United-States/New-York/New-York-City/Manhattan/NEwhtS6OzN', 'Brooklyn' : 'https://www.zabihah.com/sub/United-States/New-York/New-York-City/Brooklyn/3avrh3Cth4', 'Queens' : 'https://www.zabihah.com/sub/United-States/New-York/New-York-City/Queens/9Gku594eh7', 'The Bronx' : 'https://www.zabihah.com/sub/United-States/New-York/New-York-City/The-Bronx/eIqsntUUuI', 'Staten Island' : 'https://www.zabihah.com/sub/United-States/New-York/New-York-City/Staten-Island/84zPaAaBZd'} _zabiha_to_csv(borough_urls) _zomato_to_csv(280) # city_id for NYC from Zomato cities API
[]
[]
[ "ZOMATO_API_KEY" ]
[]
["ZOMATO_API_KEY"]
python
1
0
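The deprecated script above loads ZOMATO_API_KEY via python-dotenv and pages through the Zomato search endpoint in steps of 20. A minimal sketch of that pagination pattern with the key taken from the environment; the function name fetch_halal_restaurants is illustrative and not part of the original script, and the response fields (results_found, restaurants) are the ones the script itself reads:

import os
import requests
from dotenv import load_dotenv

def fetch_halal_restaurants(city_id, page_size=20):
    """Yield restaurant records for a city, paging through the Zomato search endpoint."""
    load_dotenv()  # picks up ZOMATO_API_KEY from a local .env file if present
    headers = {'user-key': os.getenv('ZOMATO_API_KEY')}
    base = ('https://developers.zomato.com/api/v2.1/search?entity_id={}'
            '&entity_type=city&q=halal&start={}')
    total = requests.get(base.format(city_id, 0), headers=headers).json()['results_found']
    offset = 0
    while offset < total:
        page = requests.get(base.format(city_id, offset), headers=headers).json()
        for item in page.get('restaurants', []):
            yield item['restaurant']
        offset += page_size

if __name__ == '__main__':
    for restaurant in fetch_halal_restaurants(280):  # 280 is the NYC city_id used above
        print(restaurant['name'])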
one/tests/__init__.py
"""Tests for ONE-api""" import os import json from pathlib import Path """int: Flag for skipping tests that require an http connection""" OFFLINE_ONLY = int(os.getenv('OFFLINE_ONLY', '0')) def _get_test_db(): """Load test database credentials for testing ONE api Allows users to test ONE using their own Alyx database. The tests use two databases: the first for tests requiring POST requests; the second for tests that do not affect the database. """ default_fixture = str(Path(__file__).parent.joinpath('fixtures', 'test_dbs.json')) db_json = os.getenv('TEST_DB_CONFIG', default_fixture) with open(db_json, 'r') as f: dbs = json.load(f) if not isinstance(dbs, list): dbs = [dbs] return [dbs[i] if len(dbs) >= i else None for i in range(2)] # Ensure length == 2 TEST_DB_1, TEST_DB_2 = _get_test_db()
[]
[]
[ "OFFLINE_ONLY", "TEST_DB_CONFIG" ]
[]
["OFFLINE_ONLY", "TEST_DB_CONFIG"]
python
2
0
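The module above reads OFFLINE_ONLY and TEST_DB_CONFIG at import time. A short sketch of pointing the tests at a custom fixture; the JSON keys (base_url, username, password) are assumptions about what an Alyx credential entry contains, not taken from this file:

import json
import os
import tempfile

# Two entries: the first for tests that POST to Alyx, the second for read-only tests,
# matching the docstring of _get_test_db.
fixture = [
    {"base_url": "https://test.alyx.example.org", "username": "test_user", "password": "secret"},
    {"base_url": "https://public.alyx.example.org", "username": "readonly", "password": "secret"},
]

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(fixture, f)

os.environ["TEST_DB_CONFIG"] = f.name  # used instead of fixtures/test_dbs.json
os.environ["OFFLINE_ONLY"] = "1"       # skip tests that require an HTTP connection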
python/pyspark/sql/session.py
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function import sys import warnings from functools import reduce from threading import RLock if sys.version >= '3': basestring = unicode = str else: from itertools import imap as map from pyspark import since from pyspark.rdd import RDD, ignore_unicode_prefix from pyspark.sql.catalog import Catalog from pyspark.sql.conf import RuntimeConfig from pyspark.sql.dataframe import DataFrame from pyspark.sql.readwriter import DataFrameReader from pyspark.sql.streaming import DataStreamReader from pyspark.sql.types import Row, DataType, StringType, StructType, _verify_type, \ _infer_schema, _has_nulltype, _merge_type, _create_converter, _parse_datatype_string from pyspark.sql.utils import install_exception_handler __all__ = ["SparkSession"] def _monkey_patch_RDD(sparkSession): def toDF(self, schema=None, sampleRatio=None): """ Converts current :class:`RDD` into a :class:`DataFrame` This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)`` :param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns :param samplingRatio: the sample ratio of rows used for inferring :return: a DataFrame >>> rdd.toDF().collect() [Row(name=u'Alice', age=1)] """ return sparkSession.createDataFrame(self, schema, sampleRatio) RDD.toDF = toDF class SparkSession(object): """The entry point to programming Spark with the Dataset and DataFrame API. A SparkSession can be used create :class:`DataFrame`, register :class:`DataFrame` as tables, execute SQL over tables, cache tables, and read parquet files. To create a SparkSession, use the following builder pattern: >>> spark = SparkSession.builder \\ ... .master("local") \\ ... .appName("Word Count") \\ ... .config("spark.some.config.option", "some-value") \\ ... .getOrCreate() """ class Builder(object): """Builder for :class:`SparkSession`. """ _lock = RLock() _options = {} @since(2.0) def config(self, key=None, value=None, conf=None): """Sets a config option. Options set using this method are automatically propagated to both :class:`SparkConf` and :class:`SparkSession`'s own configuration. For an existing SparkConf, use `conf` parameter. >>> from pyspark.conf import SparkConf >>> SparkSession.builder.config(conf=SparkConf()) <pyspark.sql.session... For a (key, value) pair, you can omit parameter names. >>> SparkSession.builder.config("spark.some.config.option", "some-value") <pyspark.sql.session... 
:param key: a key name string for configuration property :param value: a value for configuration property :param conf: an instance of :class:`SparkConf` """ with self._lock: if conf is None: self._options[key] = str(value) else: for (k, v) in conf.getAll(): self._options[k] = v return self @since(2.0) def master(self, master): """Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]" to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone cluster. :param master: a url for spark master """ return self.config("spark.master", master) @since(2.0) def appName(self, name): """Sets a name for the application, which will be shown in the Spark web UI. If no application name is set, a randomly generated name will be used. :param name: an application name """ return self.config("spark.app.name", name) @since(2.0) def enableHiveSupport(self): """Enables Hive support, including connectivity to a persistent Hive metastore, support for Hive serdes, and Hive user-defined functions. """ return self.config("spark.sql.catalogImplementation", "hive") @since(2.0) def getOrCreate(self): """Gets an existing :class:`SparkSession` or, if there is no existing one, creates a new one based on the options set in this builder. This method first checks whether there is a valid global default SparkSession, and if yes, return that one. If no valid global default SparkSession exists, the method creates a new SparkSession and assigns the newly created SparkSession as the global default. >>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate() >>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1" True In case an existing SparkSession is returned, the config options specified in this builder will be applied to the existing SparkSession. >>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate() >>> s1.conf.get("k1") == s2.conf.get("k1") True >>> s1.conf.get("k2") == s2.conf.get("k2") True """ with self._lock: from pyspark.context import SparkContext from pyspark.conf import SparkConf session = SparkSession._instantiatedSession if session is None or session._sc._jsc is None: sparkConf = SparkConf() for key, value in self._options.items(): sparkConf.set(key, value) sc = SparkContext.getOrCreate(sparkConf) # This SparkContext may be an existing one. for key, value in self._options.items(): # we need to propagate the confs # before we create the SparkSession. Otherwise, confs like # warehouse path and metastore url will not be set correctly ( # these confs cannot be changed once the SparkSession is created). sc._conf.set(key, value) session = SparkSession(sc) for key, value in self._options.items(): session.conf.set(key, value) for key, value in self._options.items(): session.sparkContext._conf.set(key, value) return session builder = Builder() _instantiatedSession = None @ignore_unicode_prefix def __init__(self, sparkContext, jsparkSession=None): """Creates a new SparkSession. >>> from datetime import datetime >>> spark = SparkSession(sc) >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1, ... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1), ... time=datetime(2014, 8, 1, 14, 1, 5))]) >>> df = allTypes.toDF() >>> df.createOrReplaceTempView("allTypes") >>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a ' ... 
'from allTypes where b and i > 0').collect() [Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \ dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)] >>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect() [(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])] """ from pyspark.sql.context import SQLContext self._sc = sparkContext self._jsc = self._sc._jsc self._jvm = self._sc._jvm if jsparkSession is None: jsparkSession = self._jvm.SparkSession(self._jsc.sc()) self._jsparkSession = jsparkSession self._jwrapped = self._jsparkSession.sqlContext() self._wrapped = SQLContext(self._sc, self, self._jwrapped) _monkey_patch_RDD(self) install_exception_handler() # If we had an instantiated SparkSession attached with a SparkContext # which is stopped now, we need to renew the instantiated SparkSession. # Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate. if SparkSession._instantiatedSession is None \ or SparkSession._instantiatedSession._sc._jsc is None: SparkSession._instantiatedSession = self @since(2.0) def newSession(self): """ Returns a new SparkSession as new session, that has separate SQLConf, registered temporary views and UDFs, but shared SparkContext and table cache. """ return self.__class__(self._sc, self._jsparkSession.newSession()) @property @since(2.0) def sparkContext(self): """Returns the underlying :class:`SparkContext`.""" return self._sc @property @since(2.0) def version(self): """The version of Spark on which this application is running.""" return self._jsparkSession.version() @property @since(2.0) def conf(self): """Runtime configuration interface for Spark. This is the interface through which the user can get and set all Spark and Hadoop configurations that are relevant to Spark SQL. When getting the value of a config, this defaults to the value set in the underlying :class:`SparkContext`, if any. """ if not hasattr(self, "_conf"): self._conf = RuntimeConfig(self._jsparkSession.conf()) return self._conf @property @since(2.0) def catalog(self): """Interface through which the user may create, drop, alter or query underlying databases, tables, functions etc. """ if not hasattr(self, "_catalog"): self._catalog = Catalog(self) return self._catalog @property @since(2.0) def udf(self): """Returns a :class:`UDFRegistration` for UDF registration. :return: :class:`UDFRegistration` """ from pyspark.sql.context import UDFRegistration return UDFRegistration(self._wrapped) @since(2.0) def range(self, start, end=None, step=1, numPartitions=None): """ Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named ``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with step value ``step``. :param start: the start value :param end: the end value (exclusive) :param step: the incremental step (default: 1) :param numPartitions: the number of partitions of the DataFrame :return: :class:`DataFrame` >>> spark.range(1, 7, 2).collect() [Row(id=1), Row(id=3), Row(id=5)] If only one argument is specified, it will be used as the end value. 
>>> spark.range(3).collect() [Row(id=0), Row(id=1), Row(id=2)] """ if numPartitions is None: numPartitions = self._sc.defaultParallelism if end is None: jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions)) else: jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions)) return DataFrame(jdf, self._wrapped) def _inferSchemaFromList(self, data): """ Infer schema from list of Row or tuple. :param data: list of Row or tuple :return: :class:`pyspark.sql.types.StructType` """ if not data: raise ValueError("can not infer schema from empty dataset") first = data[0] if type(first) is dict: warnings.warn("inferring schema from dict is deprecated," "please use pyspark.sql.Row instead") schema = reduce(_merge_type, map(_infer_schema, data)) if _has_nulltype(schema): raise ValueError("Some of types cannot be determined after inferring") return schema def _inferSchema(self, rdd, samplingRatio=None): """ Infer schema from an RDD of Row or tuple. :param rdd: an RDD of Row or tuple :param samplingRatio: sampling ratio, or no sampling (default) :return: :class:`pyspark.sql.types.StructType` """ first = rdd.first() if not first: raise ValueError("The first row in RDD is empty, " "can not infer schema") if type(first) is dict: warnings.warn("Using RDD of dict to inferSchema is deprecated. " "Use pyspark.sql.Row instead") if samplingRatio is None: schema = _infer_schema(first) if _has_nulltype(schema): for row in rdd.take(100)[1:]: schema = _merge_type(schema, _infer_schema(row)) if not _has_nulltype(schema): break else: raise ValueError("Some of types cannot be determined by the " "first 100 rows, please try again with sampling") else: if samplingRatio < 0.99: rdd = rdd.sample(False, float(samplingRatio)) schema = rdd.map(_infer_schema).reduce(_merge_type) return schema def _createFromRDD(self, rdd, schema, samplingRatio): """ Create an RDD for DataFrame from an existing RDD, returns the RDD and schema. """ if schema is None or isinstance(schema, (list, tuple)): struct = self._inferSchema(rdd, samplingRatio) converter = _create_converter(struct) rdd = rdd.map(converter) if isinstance(schema, (list, tuple)): for i, name in enumerate(schema): struct.fields[i].name = name struct.names[i] = name schema = struct elif not isinstance(schema, StructType): raise TypeError("schema should be StructType or list or None, but got: %s" % schema) # convert python objects to sql data rdd = rdd.map(schema.toInternal) return rdd, schema def _createFromLocal(self, data, schema): """ Create an RDD for DataFrame from a list or pandas.DataFrame, returns the RDD and schema. """ # make sure data could consumed multiple times if not isinstance(data, list): data = list(data) if schema is None or isinstance(schema, (list, tuple)): struct = self._inferSchemaFromList(data) converter = _create_converter(struct) data = map(converter, data) if isinstance(schema, (list, tuple)): for i, name in enumerate(schema): struct.fields[i].name = name struct.names[i] = name schema = struct elif not isinstance(schema, StructType): raise TypeError("schema should be StructType or list or None, but got: %s" % schema) # convert python objects to sql data data = [schema.toInternal(row) for row in data] return self._sc.parallelize(data), schema @since(2.0) @ignore_unicode_prefix def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True): """ Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`. 
When ``schema`` is a list of column names, the type of each column will be inferred from ``data``. When ``schema`` is ``None``, it will try to infer the schema (column names and types) from ``data``, which should be an RDD of :class:`Row`, or :class:`namedtuple`, or :class:`dict`. When ``schema`` is :class:`pyspark.sql.types.DataType` or :class:`pyspark.sql.types.StringType`, it must match the real data, or an exception will be thrown at runtime. If the given schema is not :class:`pyspark.sql.types.StructType`, it will be wrapped into a :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value", each record will also be wrapped into a tuple, which can be converted to row later. If schema inference is needed, ``samplingRatio`` is used to determined the ratio of rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``. :param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean, etc.), or :class:`list`, or :class:`pandas.DataFrame`. :param schema: a :class:`pyspark.sql.types.DataType` or a :class:`pyspark.sql.types.StringType` or a list of column names, default is ``None``. The data type string format equals to :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use ``int`` as a short name for ``IntegerType``. :param samplingRatio: the sample ratio of rows used for inferring :param verifySchema: verify data types of every row against schema. :return: :class:`DataFrame` .. versionchanged:: 2.0.1 Added verifySchema. >>> l = [('Alice', 1)] >>> spark.createDataFrame(l).collect() [Row(_1=u'Alice', _2=1)] >>> spark.createDataFrame(l, ['name', 'age']).collect() [Row(name=u'Alice', age=1)] >>> d = [{'name': 'Alice', 'age': 1}] >>> spark.createDataFrame(d).collect() [Row(age=1, name=u'Alice')] >>> rdd = sc.parallelize(l) >>> spark.createDataFrame(rdd).collect() [Row(_1=u'Alice', _2=1)] >>> df = spark.createDataFrame(rdd, ['name', 'age']) >>> df.collect() [Row(name=u'Alice', age=1)] >>> from pyspark.sql import Row >>> Person = Row('name', 'age') >>> person = rdd.map(lambda r: Person(*r)) >>> df2 = spark.createDataFrame(person) >>> df2.collect() [Row(name=u'Alice', age=1)] >>> from pyspark.sql.types import * >>> schema = StructType([ ... StructField("name", StringType(), True), ... StructField("age", IntegerType(), True)]) >>> df3 = spark.createDataFrame(rdd, schema) >>> df3.collect() [Row(name=u'Alice', age=1)] >>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP [Row(name=u'Alice', age=1)] >>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP [Row(0=1, 1=2)] >>> spark.createDataFrame(rdd, "a: string, b: int").collect() [Row(a=u'Alice', b=1)] >>> rdd = rdd.map(lambda row: row[1]) >>> spark.createDataFrame(rdd, "int").collect() [Row(value=1)] >>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... Py4JJavaError: ... 
""" if isinstance(data, DataFrame): raise TypeError("data is already a DataFrame") if isinstance(schema, basestring): schema = _parse_datatype_string(schema) try: import pandas has_pandas = True except Exception: has_pandas = False if has_pandas and isinstance(data, pandas.DataFrame): if schema is None: schema = [str(x) for x in data.columns] data = [r.tolist() for r in data.to_records(index=False)] verify_func = _verify_type if verifySchema else lambda _, t: True if isinstance(schema, StructType): def prepare(obj): verify_func(obj, schema) return obj elif isinstance(schema, DataType): dataType = schema schema = StructType().add("value", schema) def prepare(obj): verify_func(obj, dataType) return obj, else: if isinstance(schema, list): schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema] prepare = lambda obj: obj if isinstance(data, RDD): rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio) else: rdd, schema = self._createFromLocal(map(prepare, data), schema) jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd()) jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json()) df = DataFrame(jdf, self._wrapped) df._schema = schema return df @ignore_unicode_prefix @since(2.0) def sql(self, sqlQuery): """Returns a :class:`DataFrame` representing the result of the given query. :return: :class:`DataFrame` >>> df.createOrReplaceTempView("table1") >>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1") >>> df2.collect() [Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')] """ return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped) @since(2.0) def table(self, tableName): """Returns the specified table as a :class:`DataFrame`. :return: :class:`DataFrame` >>> df.createOrReplaceTempView("table1") >>> df2 = spark.table("table1") >>> sorted(df.collect()) == sorted(df2.collect()) True """ return DataFrame(self._jsparkSession.table(tableName), self._wrapped) @property @since(2.0) def read(self): """ Returns a :class:`DataFrameReader` that can be used to read data in as a :class:`DataFrame`. :return: :class:`DataFrameReader` """ return DataFrameReader(self._wrapped) @property @since(2.0) def readStream(self): """ Returns a :class:`DataStreamReader` that can be used to read data streams as a streaming :class:`DataFrame`. .. note:: Experimental. :return: :class:`DataStreamReader` """ return DataStreamReader(self._wrapped) @property @since(2.0) def streams(self): """Returns a :class:`StreamingQueryManager` that allows managing all the :class:`StreamingQuery` StreamingQueries active on `this` context. .. note:: Experimental. :return: :class:`StreamingQueryManager` """ from pyspark.sql.streaming import StreamingQueryManager return StreamingQueryManager(self._jsparkSession.streams()) @since(2.0) def stop(self): """Stop the underlying :class:`SparkContext`. """ self._sc.stop() SparkSession._instantiatedSession = None @since(2.0) def __enter__(self): """ Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax. """ return self @since(2.0) def __exit__(self, exc_type, exc_val, exc_tb): """ Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax. Specifically stop the SparkSession on exit of the with block. 
""" self.stop() def _test(): import os import doctest from pyspark.context import SparkContext from pyspark.sql import Row import pyspark.sql.session os.chdir(os.environ["SPARK_HOME"]) globs = pyspark.sql.session.__dict__.copy() sc = SparkContext('local[4]', 'PythonTest') globs['sc'] = sc globs['spark'] = SparkSession(sc) globs['rdd'] = rdd = sc.parallelize( [Row(field1=1, field2="row1"), Row(field1=2, field2="row2"), Row(field1=3, field2="row3")]) globs['df'] = rdd.toDF() (failure_count, test_count) = doctest.testmod( pyspark.sql.session, globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE) globs['sc'].stop() if failure_count: exit(-1) if __name__ == "__main__": _test()
[]
[]
[ "SPARK_HOME" ]
[]
["SPARK_HOME"]
python
1
0
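A condensed usage sketch pulling together patterns already documented in the docstrings above: building a session with the builder, creating a DataFrame from a DDL-style schema string, and querying it with SQL. It requires a local pyspark installation; the data values are illustrative:

from pyspark.sql import SparkSession

spark = (SparkSession.builder
         .master("local[2]")
         .appName("session-demo")
         .getOrCreate())

# Schema given as "name: string, age: int", the short form accepted by createDataFrame.
df = spark.createDataFrame([("Alice", 1), ("Bob", 2)], "name: string, age: int")
df.createOrReplaceTempView("people")
spark.sql("SELECT name FROM people WHERE age > 1").show()

spark.stop()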
docs/conf.py
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys import django sys.path.insert(0, os.path.abspath("..")) os.environ.setdefault("DJANGO_SETTINGS_MODULE", "security_headers.settings") django.setup() # -- Project information ----------------------------------------------------- project = "Django Security Headers" copyright = "2019, J. Sumner and A. Lefebvre-Brossard" author = "J. Sumner and A. Lefebvre-Brossard" # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ["sphinx.ext.autodoc"] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_rtd_theme" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"]
[]
[]
[]
[]
[]
python
0
0
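One behavior worth noting in the Sphinx configuration above: os.environ.setdefault only fills DJANGO_SETTINGS_MODULE when it is not already set, so a value exported in the shell takes precedence over "security_headers.settings". A tiny sketch of that, using a hypothetical module name:

import os

os.environ["DJANGO_SETTINGS_MODULE"] = "myproject.settings"  # e.g. already exported in the shell
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "security_headers.settings")
print(os.environ["DJANGO_SETTINGS_MODULE"])  # -> myproject.settings; the default does not override it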
pkg/settings/setting.go
package settings import ( "encoding/json" "fmt" "os" "regexp" "strconv" "strings" v32 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" authsettings "github.com/rancher/rancher/pkg/auth/settings" "github.com/sirupsen/logrus" ) var ( releasePattern = regexp.MustCompile("^v[0-9]") settings = map[string]Setting{} provider Provider InjectDefaults string AgentImage = NewSetting("agent-image", "rancher/rancher-agent:master-head") AuthImage = NewSetting("auth-image", v32.ToolsSystemImages.AuthSystemImages.KubeAPIAuth) AuthTokenMaxTTLMinutes = NewSetting("auth-token-max-ttl-minutes", "0") // never expire AuthorizationCacheTTLSeconds = NewSetting("authorization-cache-ttl-seconds", "10") AuthorizationDenyCacheTTLSeconds = NewSetting("authorization-deny-cache-ttl-seconds", "10") AzureGroupCacheSize = NewSetting("azure-group-cache-size", "10000") CACerts = NewSetting("cacerts", "") CLIURLDarwin = NewSetting("cli-url-darwin", "https://releases.rancher.com/cli/v1.0.0-alpha8/rancher-darwin-amd64-v1.0.0-alpha8.tar.gz") CLIURLLinux = NewSetting("cli-url-linux", "https://releases.rancher.com/cli/v1.0.0-alpha8/rancher-linux-amd64-v1.0.0-alpha8.tar.gz") CLIURLWindows = NewSetting("cli-url-windows", "https://releases.rancher.com/cli/v1.0.0-alpha8/rancher-windows-386-v1.0.0-alpha8.zip") ClusterControllerStartCount = NewSetting("cluster-controller-start-count", "50") EngineInstallURL = NewSetting("engine-install-url", "https://releases.rancher.com/install-docker/20.10.sh") EngineISOURL = NewSetting("engine-iso-url", "https://releases.rancher.com/os/latest/rancheros-vmware.iso") EngineNewestVersion = NewSetting("engine-newest-version", "v17.12.0") EngineSupportedRange = NewSetting("engine-supported-range", "~v1.11.2 || ~v1.12.0 || ~v1.13.0 || ~v17.03.0 || ~v17.06.0 || ~v17.09.0 || ~v18.06.0 || ~v18.09.0 || ~v19.03.0 || ~v20.10.0 ") FirstLogin = NewSetting("first-login", "true") GlobalRegistryEnabled = NewSetting("global-registry-enabled", "false") GithubProxyAPIURL = NewSetting("github-proxy-api-url", "https://api.github.com") HelmVersion = NewSetting("helm-version", "dev") HelmMaxHistory = NewSetting("helm-max-history", "10") IngressIPDomain = NewSetting("ingress-ip-domain", "xip.io") InstallUUID = NewSetting("install-uuid", "") InternalServerURL = NewSetting("internal-server-url", "") InternalCACerts = NewSetting("internal-cacerts", "") JailerTimeout = NewSetting("jailer-timeout", "60") KubeconfigGenerateToken = NewSetting("kubeconfig-generate-token", "true") KubeconfigTokenTTLMinutes = NewSetting("kubeconfig-token-ttl-minutes", "960") // 16 hours KubernetesVersion = NewSetting("k8s-version", "") KubernetesVersionToServiceOptions = NewSetting("k8s-version-to-service-options", "") KubernetesVersionToSystemImages = NewSetting("k8s-version-to-images", "") KubernetesVersionsCurrent = NewSetting("k8s-versions-current", "") KubernetesVersionsDeprecated = NewSetting("k8s-versions-deprecated", "") MachineVersion = NewSetting("machine-version", "dev") Namespace = NewSetting("namespace", os.Getenv("CATTLE_NAMESPACE")) PeerServices = NewSetting("peer-service", os.Getenv("CATTLE_PEER_SERVICE")) RDNSServerBaseURL = NewSetting("rdns-base-url", "https://api.lb.rancher.cloud/v1") RkeVersion = NewSetting("rke-version", "") RkeMetadataConfig = NewSetting("rke-metadata-config", getMetadataConfig()) ServerImage = NewSetting("server-image", "rancher/rancher") ServerURL = NewSetting("server-url", "") ServerVersion = NewSetting("server-version", "dev") SystemAgentInstallScript = NewSetting("system-agent-install-script", 
"") SystemAgentInstallerImage = NewSetting("system-agent-installer-image", "docker.io/rancher/system-agent-installer-") SystemDefaultRegistry = NewSetting("system-default-registry", "") SystemNamespaces = NewSetting("system-namespaces", "kube-system,kube-public,cattle-system,cattle-alerting,cattle-logging,cattle-pipeline,cattle-prometheus,ingress-nginx,cattle-global-data,cattle-istio,kube-node-lease,cert-manager,cattle-global-nt,security-scan,fleet-system") TelemetryOpt = NewSetting("telemetry-opt", "") TokenHashing = NewSetting("token-hashing", "true") TLSMinVersion = NewSetting("tls-min-version", "1.2") TLSCiphers = NewSetting("tls-ciphers", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305") UIBanners = NewSetting("ui-banners", "{}") UIDefaultLanding = NewSetting("ui-default-landing", "") UIFeedBackForm = NewSetting("ui-feedback-form", "") UIIndex = NewSetting("ui-index", "https://releases.rancher.com/ui/latest2/index.html") UIPath = NewSetting("ui-path", "/usr/share/rancher/ui") UIDashboardIndex = NewSetting("ui-dashboard-index", "https://releases.rancher.com/dashboard/latest/index.html") UIDashboardPath = NewSetting("ui-dashboard-path", "/usr/share/rancher/ui-dashboard") UIPreferred = NewSetting("ui-preferred", "ember") UIOfflinePreferred = NewSetting("ui-offline-preferred", "dynamic") UIIssues = NewSetting("ui-issues", "") UIPL = NewSetting("ui-pl", "rancher") UIKubernetesSupportedVersions = NewSetting("ui-k8s-supported-versions-range", ">= 1.11.0 <=1.14.x") UIKubernetesDefaultVersion = NewSetting("ui-k8s-default-version-range", "<=1.14.x") WhitelistDomain = NewSetting("whitelist-domain", "forums.rancher.com") WhitelistEnvironmentVars = NewSetting("whitelist-envvars", "HTTP_PROXY,HTTPS_PROXY,NO_PROXY") AuthUserInfoResyncCron = NewSetting("auth-user-info-resync-cron", "0 0 * * *") AuthUserSessionTTLMinutes = NewSetting("auth-user-session-ttl-minutes", "960") // 16 hours AuthUserInfoMaxAgeSeconds = NewSetting("auth-user-info-max-age-seconds", "3600") // 1 hour APIUIVersion = NewSetting("api-ui-version", "1.1.6") // Please update the CATTLE_API_UI_VERSION in package/Dockerfile when updating the version here. 
RotateCertsIfExpiringInDays = NewSetting("rotate-certs-if-expiring-in-days", "7") // 7 days ClusterTemplateEnforcement = NewSetting("cluster-template-enforcement", "false") InitialDockerRootDir = NewSetting("initial-docker-root-dir", "/var/lib/docker") SystemCatalog = NewSetting("system-catalog", "external") // Options are 'external' or 'bundled' ChartDefaultBranch = NewSetting("chart-default-branch", "dev-v2.6") PartnerChartDefaultBranch = NewSetting("partner-chart-default-branch", "main") FleetDefaultWorkspaceName = NewSetting("fleet-default-workspace-name", "fleet-default") // fleetWorkspaceName to assign to clusters with none ShellImage = NewSetting("shell-image", "rancher/shell:v0.1.6") IgnoreNodeName = NewSetting("ignore-node-name", "") // nodes to ignore when syncing v1.node to v3.node NoDefaultAdmin = NewSetting("no-default-admin", "") RestrictedDefaultAdmin = NewSetting("restricted-default-admin", "false") // When bootstrapping the admin for the first time, give them the global role restricted-admin EKSUpstreamRefreshCron = NewSetting("eks-refresh-cron", "*/5 * * * *") // EKSUpstreamRefreshCron is deprecated and will be replaced by EKSUpstreamRefresh EKSUpstreamRefresh = NewSetting("eks-refresh", "300") HideLocalCluster = NewSetting("hide-local-cluster", "false") FleetMinVersion = NewSetting("fleet-min-version", "") RancherOperatorMinVersion = NewSetting("rancher-operator-min-version", "") RancherWebhookMinVersion = NewSetting("rancher-webhook-min-version", "") ) func FullShellImage() string { return PrefixPrivateRegistry(ShellImage.Get()) } func PrefixPrivateRegistry(image string) string { private := SystemDefaultRegistry.Get() if private == "" { return image } return private + "/" + image } func IsRelease() bool { return !strings.Contains(ServerVersion.Get(), "head") && releasePattern.MatchString(ServerVersion.Get()) } func init() { // setup auth setting authsettings.AuthUserInfoResyncCron = AuthUserInfoResyncCron authsettings.AuthUserSessionTTLMinutes = AuthUserSessionTTLMinutes authsettings.AuthUserInfoMaxAgeSeconds = AuthUserInfoMaxAgeSeconds authsettings.FirstLogin = FirstLogin if InjectDefaults == "" { return } defaults := map[string]string{} if err := json.Unmarshal([]byte(InjectDefaults), &defaults); err != nil { return } for name, defaultValue := range defaults { value, ok := settings[name] if !ok { continue } value.Default = defaultValue settings[name] = value } } type Provider interface { Get(name string) string Set(name, value string) error SetIfUnset(name, value string) error SetAll(settings map[string]Setting) error } type Setting struct { Name string Default string ReadOnly bool } func (s Setting) SetIfUnset(value string) error { if provider == nil { return s.Set(value) } return provider.SetIfUnset(s.Name, value) } func (s Setting) Set(value string) error { if provider == nil { s, ok := settings[s.Name] if ok { s.Default = value settings[s.Name] = s } } else { return provider.Set(s.Name, value) } return nil } func (s Setting) Get() string { if provider == nil { s := settings[s.Name] return s.Default } return provider.Get(s.Name) } func (s Setting) GetInt() int { v := s.Get() i, err := strconv.Atoi(v) if err == nil { return i } logrus.Errorf("failed to parse setting %s=%s as int: %v", s.Name, v, err) i, err = strconv.Atoi(s.Default) if err != nil { return 0 } return i } func SetProvider(p Provider) error { if err := p.SetAll(settings); err != nil { return err } provider = p return nil } func NewSetting(name, def string) Setting { s := Setting{ Name: name, Default: 
def, } settings[s.Name] = s return s } func GetEnvKey(key string) string { return "CATTLE_" + strings.ToUpper(strings.Replace(key, "-", "_", -1)) } func getMetadataConfig() string { branch := os.Getenv("RANCHER_METADATA_BRANCH") if branch == "" { branch = "dev-v2.5" } data := map[string]interface{}{ "url": fmt.Sprintf("https://releases.rancher.com/kontainer-driver-metadata/%s/data.json", branch), "refresh-interval-minutes": "1440", } ans, err := json.Marshal(data) if err != nil { logrus.Errorf("error getting metadata config %v", err) return "" } return string(ans) }
[ "\"CATTLE_NAMESPACE\"", "\"CATTLE_PEER_SERVICE\"", "\"RANCHER_METADATA_BRANCH\"" ]
[]
[ "CATTLE_NAMESPACE", "CATTLE_PEER_SERVICE", "RANCHER_METADATA_BRANCH" ]
[]
["CATTLE_NAMESPACE", "CATTLE_PEER_SERVICE", "RANCHER_METADATA_BRANCH"]
go
3
0
test/daemon.py
#!/usr/bin/env python # # Routine to daemonize a process on unix # # DAEMON_HOME = '/' class NullDevice: def write(self, s): pass def daemonize(homeDir = DAEMON_HOME): import os import sys if os.fork() != 0: # Parent os._exit(0) # Kill parent os.chdir(homeDir) # Detach from parent tty os.setsid() # and start new session os.umask(0) sys.stdin.close() # Close stdin, stdout sys.stdout.close() sys.stdin = NullDevice() sys.stdout = NullDevice() for n in range(3, 256): # Close any remaining file try: # descriptors os.close(n) except: pass if os.fork() != 0: # finally fork again os._exit(0) # to fully daemonize def spawn(cmd, args): import os import sys import signal # Prevent zombie orphans by ignoring SIGCHLD signal signal.signal(signal.SIGCHLD, signal.SIG_IGN) args = args.split() if os.fork() != 0: # Calling Parent return # allow this parent to continue running os.chdir(DAEMON_HOME) # Temp Parent os.setsid() # Detach from calling parent os.umask(0) if os.fork() != 0: # Kill temp parent os._exit(0) # Run cmd in new child os.execvpe(cmd, [cmd] + args, os.environ) def createPid(pidPath='/var/run'): '''Creates PID file for process''' import os import sys currentPid = os.getpid() #Gets PID number if not currentPid: print('Could not find PID') sys.exit() scriptFilename, ext = os.path.splitext(os.path.basename(sys.argv[0])) pidFile = '%s.pid' % (scriptFilename) #Creates PIDfile filename pidFilePath = os.path.join(pidPath, pidFile) f = open(pidFilePath, 'w') #Writes PIDfile name print(currentPid, file=f) f.close() if __name__ == "__main__": while True: daemonize() print("hello world")
[]
[]
[]
[]
[]
python
0
0
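A brief usage sketch for the helpers above, assuming the file is importable as a module named daemon; the command and paths are placeholders:

import daemon  # the module defined above (test/daemon.py)

daemon.DAEMON_HOME = "/tmp"            # spawn() chdirs here before exec'ing the child
daemon.spawn("sleep", "60")            # fire-and-forget child, detached from the caller
daemon.daemonize(homeDir="/tmp")       # turn the current process into a daemon
daemon.createPid(pidPath="/tmp")       # write <scriptname>.pid in /tmp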
src/kibana/supply/supply_test.go
package supply_test import ( "golang" "io/ioutil" "os" "path/filepath" "bytes" "kibana/supply" "github.com/cloudfoundry/libbuildpack" "github.com/cloudfoundry/libbuildpack/ansicleaner" "github.com/golang/mock/gomock" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) //go:generate mockgen -source=supply.go --destination=mocks_test.go --package=supply_test var _ = Describe("Supply", func() { var ( buildDir string depsDir string depsIdx string gs *supply.Supplier logger *libbuildpack.Logger buffer *bytes.Buffer err error mockCtrl *gomock.Controller mockManifest *MockManifest goVersion string vendorTool string godep golang.Godep ) BeforeEach(func() { buildDir, err = ioutil.TempDir("", "go-buildpack.build.") Expect(err).To(BeNil()) depsDir, err = ioutil.TempDir("", "go-buildpack.deps.") Expect(err).To(BeNil()) depsIdx = "04" err = os.MkdirAll(filepath.Join(depsDir, depsIdx), 0755) Expect(err).To(BeNil()) buffer = new(bytes.Buffer) logger = libbuildpack.NewLogger(ansicleaner.New(buffer)) mockCtrl = gomock.NewController(GinkgoT()) mockManifest = NewMockManifest(mockCtrl) }) JustBeforeEach(func() { args := []string{buildDir, "", depsDir, depsIdx} stager := libbuildpack.NewStager(args, logger, &libbuildpack.Manifest{}) gs = &supply.Supplier{ Stager: stager, Manifest: mockManifest, Log: logger, GoVersion: goVersion, VendorTool: vendorTool, Godep: godep, } }) AfterEach(func() { mockCtrl.Finish() err = os.RemoveAll(buildDir) Expect(err).To(BeNil()) err = os.RemoveAll(depsDir) Expect(err).To(BeNil()) }) Describe("SelectVendorTool", func() { Context("There is a Godeps.json", func() { var ( godepsJson string godepsJsonContents string ) JustBeforeEach(func() { err = os.MkdirAll(filepath.Join(buildDir, "Godeps"), 0755) Expect(err).To(BeNil()) godepsJson = filepath.Join(buildDir, "Godeps", "Godeps.json") err = ioutil.WriteFile(godepsJson, []byte(godepsJsonContents), 0644) Expect(err).To(BeNil()) }) Context("the json is valid", func() { BeforeEach(func() { godepsJsonContents = ` { "ImportPath": "go-online", "GoVersion": "go1.6", "Deps": [] } ` }) It("sets the tool to godep", func() { err = gs.SelectVendorTool() Expect(err).To(BeNil()) Expect(gs.VendorTool).To(Equal("godep")) }) It("logs that it is checking the Godeps.json file", func() { err = gs.SelectVendorTool() Expect(err).To(BeNil()) Expect(buffer.String()).To(ContainSubstring("-----> Checking Godeps/Godeps.json file")) }) It("stores the Godep info in the supplier struct", func() { err = gs.SelectVendorTool() Expect(err).To(BeNil()) Expect(gs.Godep.ImportPath).To(Equal("go-online")) Expect(gs.Godep.GoVersion).To(Equal("go1.6")) var empty []string Expect(gs.Godep.Packages).To(Equal(empty)) }) Context("godeps workspace exists", func() { BeforeEach(func() { err = os.MkdirAll(filepath.Join(buildDir, "Godeps", "_workspace", "src"), 0755) Expect(err).To(BeNil()) }) It("sets Godep.WorkspaceExists to true", func() { err = gs.SelectVendorTool() Expect(err).To(BeNil()) Expect(gs.Godep.WorkspaceExists).To(BeTrue()) }) }) Context("godeps workspace does not exist", func() { It("sets Godep.WorkspaceExists to false", func() { err = gs.SelectVendorTool() Expect(err).To(BeNil()) Expect(godep.WorkspaceExists).To(BeFalse()) }) }) }) Context("bad Godeps.json file", func() { BeforeEach(func() { godepsJsonContents = "not actually JSON" }) It("logs that the Godeps.json file is invalid and returns an error", func() { err = gs.SelectVendorTool() Expect(err).NotTo(BeNil()) Expect(buffer.String()).To(ContainSubstring("**ERROR** Bad Godeps/Godeps.json file")) }) }) }) 
Context("there is a .godir file", func() { BeforeEach(func() { err = ioutil.WriteFile(filepath.Join(buildDir, ".godir"), []byte("xxx"), 0644) }) It("logs that .godir is deprecated and returns an error", func() { err = gs.SelectVendorTool() Expect(err).NotTo(BeNil()) Expect(buffer.String()).To(ContainSubstring("**ERROR** Deprecated, .godir file found! Please update to supported Godep or Glide dependency managers.")) Expect(buffer.String()).To(ContainSubstring("See https://github.com/tools/godep or https://github.com/Masterminds/glide for usage information.")) }) }) Context("there is a glide.yaml file", func() { BeforeEach(func() { err = ioutil.WriteFile(filepath.Join(buildDir, "glide.yaml"), []byte("xxx"), 0644) Expect(err).To(BeNil()) }) It("sets the tool to glide", func() { err = gs.SelectVendorTool() Expect(err).To(BeNil()) Expect(gs.VendorTool).To(Equal("glide")) }) }) Context("the app contains src/**/**/*.go", func() { BeforeEach(func() { err = os.MkdirAll(filepath.Join(buildDir, "src", "package"), 0755) Expect(err).To(BeNil()) err = ioutil.WriteFile(filepath.Join(buildDir, "src", "package", "thing.go"), []byte("xxx"), 0644) Expect(err).To(BeNil()) }) It("logs that gb is deprecated and returns an error", func() { err = gs.SelectVendorTool() Expect(err).NotTo(BeNil()) Expect(buffer.String()).To(ContainSubstring("**ERROR** Cloud Foundry does not support the GB package manager.")) Expect(buffer.String()).To(ContainSubstring("We currently only support the Godep and Glide package managers for go apps")) Expect(buffer.String()).To(ContainSubstring("For support please file an issue: https://github.com/cloudfoundry/go-buildpack/issues")) }) }) Context("none of the above", func() { It("sets the tool to go_nativevendoring", func() { err = gs.SelectVendorTool() Expect(err).To(BeNil()) Expect(gs.VendorTool).To(Equal("go_nativevendoring")) }) }) }) Describe("InstallVendorTools", func() { It("installs godep + glide to the depDir, creating a symlink in <depDir>/bin", func() { godepInstallDir := filepath.Join(depsDir, depsIdx, "godep") glideInstallDir := filepath.Join(depsDir, depsIdx, "glide") mockManifest.EXPECT().InstallOnlyVersion("godep", godepInstallDir).Return(nil) mockManifest.EXPECT().InstallOnlyVersion("glide", glideInstallDir).Return(nil) err = gs.InstallVendorTools() Expect(err).To(BeNil()) link, err := os.Readlink(filepath.Join(depsDir, depsIdx, "bin", "godep")) Expect(err).To(BeNil()) Expect(link).To(Equal("../godep/bin/godep")) link, err = os.Readlink(filepath.Join(depsDir, depsIdx, "bin", "glide")) Expect(err).To(BeNil()) Expect(link).To(Equal("../glide/bin/glide")) }) }) Describe("SelectGoVersion", func() { BeforeEach(func() { versions := []string{"1.8.0", "1.7.5", "1.7.4", "1.6.3", "1.6.4", "34.34.0", "1.14.3"} mockManifest.EXPECT().AllDependencyVersions("go").Return(versions) }) Context("godep", func() { BeforeEach(func() { vendorTool = "godep" godep = golang.Godep{ImportPath: "go-online", GoVersion: "go1.6"} }) Context("GOVERSION not set", func() { It("sets the go version from Godeps.json", func() { err = gs.SelectGoVersion() Expect(err).To(BeNil()) Expect(gs.GoVersion).To(Equal("1.6.4")) }) }) Context("GOVERSION is set", func() { var oldGOVERSION string BeforeEach(func() { oldGOVERSION = os.Getenv("GOVERSION") err = os.Setenv("GOVERSION", "go34.34") Expect(err).To(BeNil()) }) AfterEach(func() { err = os.Setenv("GOVERSION", oldGOVERSION) Expect(err).To(BeNil()) }) It("sets the go version from GOVERSION and logs a warning", func() { err = gs.SelectGoVersion() Expect(err).To(BeNil()) 
Expect(gs.GoVersion).To(Equal("34.34.0")) Expect(buffer.String()).To(ContainSubstring("**WARNING** Using $GOVERSION override.\n")) Expect(buffer.String()).To(ContainSubstring(" $GOVERSION = go34.34\n")) Expect(buffer.String()).To(ContainSubstring("If this isn't what you want please run:\n")) Expect(buffer.String()).To(ContainSubstring(" cf unset-env <app> GOVERSION")) }) }) }) Context("glide or go_nativevendoring", func() { Context("GOVERSION is notset", func() { BeforeEach(func() { vendorTool = "glide" dep := libbuildpack.Dependency{Name: "go", Version: "1.14.3"} mockManifest.EXPECT().DefaultVersion("go").Return(dep, nil) }) It("sets the go version to the default from the manifest.yml", func() { err = gs.SelectGoVersion() Expect(err).To(BeNil()) Expect(gs.GoVersion).To(Equal("1.14.3")) }) }) Context("GOVERSION is set", func() { var oldGOVERSION string BeforeEach(func() { oldGOVERSION = os.Getenv("GOVERSION") err = os.Setenv("GOVERSION", "go34.34") Expect(err).To(BeNil()) vendorTool = "go_nativevendoring" }) AfterEach(func() { err = os.Setenv("GOVERSION", oldGOVERSION) Expect(err).To(BeNil()) }) It("sets the go version from GOVERSION", func() { err = gs.SelectGoVersion() Expect(err).To(BeNil()) Expect(gs.GoVersion).To(Equal("34.34.0")) }) }) }) }) Describe("InstallGo", func() { var ( goInstallDir string dep libbuildpack.Dependency ) BeforeEach(func() { goVersion = "1.3.4" goInstallDir = filepath.Join(depsDir, depsIdx, "go1.3.4") dep = libbuildpack.Dependency{Name: "go", Version: "1.3.4"} err = os.MkdirAll(filepath.Join(goInstallDir, "go"), 0755) Expect(err).To(BeNil()) mockManifest.EXPECT().InstallDependency(dep, goInstallDir).Return(nil) }) It("Write GOROOT to envfile", func() { err = gs.InstallGo() Expect(err).To(BeNil()) contents, err := ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "env", "GOROOT")) Expect(err).To(BeNil()) Expect(string(contents)).To(Equal(filepath.Join(goInstallDir, "go"))) }) It("installs go to the depDir, creating a symlink in <depDir>/bin", func() { err = gs.InstallGo() Expect(err).To(BeNil()) link, err := os.Readlink(filepath.Join(depsDir, depsIdx, "bin", "go")) Expect(err).To(BeNil()) Expect(link).To(Equal("../go1.3.4/go/bin/go")) }) }) Describe("WritesGoRootToProfileD", func() { BeforeEach(func() { goVersion = "3.4.5" }) It("writes the goroot.sh script to <depDir>/profile.d", func() { err = gs.WriteGoRootToProfileD() Expect(err).To(BeNil()) contents, err := ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "profile.d", "goroot.sh")) Expect(err).To(BeNil()) Expect(string(contents)).To(ContainSubstring("export GOROOT=$DEPS_DIR/04/go3.4.5/go")) Expect(string(contents)).To(ContainSubstring("PATH=$PATH:$GOROOT/bin")) }) }) Describe("WriteConfigYml", func() { BeforeEach(func() { goVersion = "1.3.4" }) type config struct { Name string `yaml:"name"` Config struct { GoVersion string `yaml:"GoVersion"` VendorTool string `yaml:"VendorTool"` Godep string `yaml:"Godep"` } `yaml:"config"` } getConfig := func() config { cfg := config{} err = libbuildpack.NewYAML().Load(filepath.Join(depsDir, depsIdx, "config.yml"), &cfg) Expect(err).To(BeNil()) return cfg } Context("The vendor tool is Godep", func() { BeforeEach(func() { vendorTool = "godep" godep = golang.Godep{ ImportPath: "an-import-path", GoVersion: "go1.3", Packages: []string{"package1", "package2"}, WorkspaceExists: true, } }) It("Writes the go version to config.yml", func() { err = gs.WriteConfigYml() Expect(err).To(BeNil()) cfg := getConfig() Expect(cfg.Config.GoVersion).To(Equal("1.3.4")) }) It("Writes the vendor tool 
to config.yml", func() { err = gs.WriteConfigYml() Expect(err).To(BeNil()) cfg := getConfig() Expect(cfg.Config.VendorTool).To(Equal("godep")) }) It("Writes the godep info to config.yml", func() { godepsJsonContents := `{"ImportPath":"an-import-path","GoVersion":"go1.3","Packages":["package1","package2"],"WorkspaceExists":true}` err = gs.WriteConfigYml() Expect(err).To(BeNil()) cfg := getConfig() Expect(cfg.Config.Godep).To(Equal(godepsJsonContents)) }) }) Context("The vendor tool is not Godep", func() { BeforeEach(func() { vendorTool = "glide" }) It("Writes the go version to config.yml", func() { err = gs.WriteConfigYml() Expect(err).To(BeNil()) cfg := getConfig() Expect(cfg.Config.GoVersion).To(Equal("1.3.4")) }) It("Writes the vendor tool to config.yml", func() { err = gs.WriteConfigYml() Expect(err).To(BeNil()) cfg := getConfig() Expect(cfg.Config.VendorTool).To(Equal("glide")) }) It("Does not write the godep info to config.yml", func() { err = gs.WriteConfigYml() Expect(err).To(BeNil()) cfg := getConfig() Expect(cfg.Config.Godep).To(Equal("")) }) }) }) })
[ "\"GOVERSION\"", "\"GOVERSION\"" ]
[]
[ "GOVERSION" ]
[]
["GOVERSION"]
go
1
0
internal/resources/template/renderer/texttemplate.go
package renderer import ( "io/ioutil" "os" "regexp" tt "text/template" "github.com/crosseyed/prjstart/internal/resources/template/variables" "github.com/crosseyed/prjstart/internal/utils/errutils" ) // // RenderText // // RenderText render using text/template type RenderText struct { Renderer } // File2File takes a src file populates a dst file with the results of the // template populated with variables func (r *RenderText) File2File(src, dst string, vars *variables.Variables, nounset, noempty bool) error { b, err := ioutil.ReadFile(src) errutils.Elogf("Can not open template file %s for reading: %v", src, err) r.Text2File(string(b), dst, vars, nounset, noempty) return err } // Text2File takes template text text and outputs to dst file func (r *RenderText) Text2File(text, dst string, vars *variables.Variables, nounset, noempty bool) error { td := os.Getenv("TEMP") f, err := ioutil.TempFile(td, "prjstart-*") errutils.Epanicf("Error creating tempfile %v", err) t := tt.Must(tt.New("texttemplate").Parse(text)) errutils.Epanicf("Error parsing variables: %v", err) err = t.Execute(f, vars) errutils.Epanicf("Error executing template: %v", err) err = f.Close() errutils.Epanicf("Error closing tempfile: %v", err) err = os.Rename(f.Name(), dst) errutils.Epanicf("Error writing file %s: %v", dst, err) return err } // Text2String renders input text and returns result as a string. func (r *RenderText) Text2String(text string, vars *variables.Variables, nounset, noempty bool) (string, error) { td := os.Getenv("TEMP") f, err := ioutil.TempFile(td, "prjstart-*") errutils.Epanicf("Error creating tempfile %v", err) f.Close() // nolint r.Text2File(text, f.Name(), vars, nounset, noempty) b, err := ioutil.ReadFile(f.Name()) errutils.Elogf("Can not open template file %s for reading: %v", f.Name(), err) os.Remove(f.Name()) // nolint return string(b), err } // RenderDirRegexp returns the regex to match directory names that should be rendered. func (r *RenderText) RenderDirRegexp() *regexp.Regexp { regex := regexp.MustCompile(`{{[^}}]+}}`) return regex }
[ "\"TEMP\"", "\"TEMP\"" ]
[]
[ "TEMP" ]
[]
["TEMP"]
go
1
0
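For comparison, a rough Python analogue of the Text2File flow above: render into a temporary file created in the directory named by TEMP (falling back to the platform default when unset), then move the result into place. The function and file names are illustrative, and string.Template stands in for Go's text/template:

import os
import tempfile
from string import Template

def text2file(text, dst, variables):
    """Render a template into dst via a temp file, mirroring RenderText.Text2File."""
    tmpdir = os.getenv("TEMP") or None  # None lets tempfile pick the platform default
    fd, tmp_path = tempfile.mkstemp(prefix="prjstart-", dir=tmpdir)
    with os.fdopen(fd, "w") as f:
        f.write(Template(text).substitute(variables))
    os.replace(tmp_path, dst)  # like os.Rename in the Go code; requires the same filesystem

text2file("Hello $Project!\n", "greeting.txt", {"Project": "prjstart"})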