Dataset columns (name, dtype, observed length/value range):

    filename            stringlengths    4 – 198
    content             stringlengths    25 – 939k
    environment         list
    variablearg         list
    constarg            list
    variableargjson     stringclasses    1 value
    constargjson        stringlengths    2 – 3.9k
    lang                stringclasses    3 values
    constargcount       float64          0 – 129
    variableargcount    float64          0 – 0
    sentence            stringclasses    1 value
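The rows below follow this schema: each row appears to pair a source file (filename, content, lang) with the environment-variable names it references (environment, constarg, variablearg, plus their JSON and count variants). As a minimal sketch for walking such an export: the local path rows.jsonl is a hypothetical stand-in, since the actual export location is not part of this dump.

    import json

    # Minimal sketch, assuming the dump is available locally as JSON Lines with
    # the columns listed above ("rows.jsonl" is a hypothetical path, not from the source).
    with open("rows.jsonl", encoding="utf-8") as fh:
        for raw in fh:
            row = json.loads(raw)
            # "constarg" lists environment-variable names passed as string literals;
            # "constargcount" is their count and "lang" is the source language.
            print(row["filename"], row["lang"], int(row["constargcount"]), row["constarg"])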
java/testingapps/seleniumtestpages/src/main/java/com/seleniumsimplified/seleniumtestpages/spark/app/SeleniumTestPagesForSpark.java
package com.seleniumsimplified.seleniumtestpages.spark.app; import com.seleniumsimplified.seleniumtestpages.ResourceReader; import com.seleniumsimplified.seleniumtestpages.php.*; import static spark.Spark.get; import static spark.Spark.post; import static spark.Spark.put; public class SeleniumTestPagesForSpark { public SeleniumTestPagesForSpark(){ // create backwards compatibility with selenium page on compendiumdev.co.uk // avoid redirects get("/selenium", (req, res) -> {res.redirect("/selenium.html"); return "";}); get("/selenium/", (req, res) -> {res.redirect("/selenium.html"); return "";}); get("/selenium/testpages/", (req, res) -> {res.redirect("/selenium.html"); return "";}); get("/selenium/ajaxselect.php", (req, res) -> {return new PhpAjaxSelect(req,res).get();}); get("/selenium/calculate.php", (req, res) -> {return new PhpCalculate(req,res).get();}); post("/selenium/calculate.php", (req, res) -> {return new PhpCalculate(req,res).post();}); get("/selenium/refresh.php", (req, res) -> {return new PhpRefresh(req,res).get();}); post("/selenium/form_processor.php", (req, res) -> {return new PhpFormProcessor(req,res).post();}); get("/selenium/find_by_playground.php", (req, res) -> {return new ResourceReader().asString("/web/find_by_playground.html");}); // some tests check the url of the asked for page so don't redirect this one get("/selenium/basic_web_page.html", (req, res) -> {return new ResourceReader().asString("/web/basic_web_page.html");}); get("/selenium/gui_user_interactions.html", (req, res) -> {return new ResourceReader().asString("/web/gui_user_interactions.html");}); get("/ajaxselect.php", (req, res) -> {return new PhpAjaxSelect(req,res).get();}); get("/calculate.php", (req, res) -> {return new PhpCalculate(req,res).get();}); post("/calculate.php", (req, res) -> {return new PhpCalculate(req,res).post();}); get("/refresh.php", (req, res) -> {return new PhpRefresh(req,res).get();}); post("/form_processor.php", (req, res) -> {return new PhpFormProcessor(req,res).post();}); get("/find_by_playground.php", (req, res) -> {return new ResourceReader().asString("/web/find_by_playground.html");}); // pretty versions get("/styled/calculator", (req, res) -> {return new PhpPrettyCalculate(req,res).get();}); post("/styled/calculator", (req, res) -> {return new PhpPrettyCalculate(req,res).post();}); post("/styled/the_form_processor.php", (req, res) -> {return new PhpPrettyFormProcessor(req,res).post();}); // pretty template version get("/styled/refresh", (req, res) -> {return new PhpPrettyRefresh(req,res).get();}); get("/styled/frames/get-list", (req, res) -> {return new PhpGetList(req,res).get();}); post("/styled/search", (req, res) -> {return new PhpPrettySearch(req,res).post();}); get("/styled/search", (req, res) -> {return new PhpPrettySearch(req,res).get();}); get("/styled/redirect/user-agent-redirect-test", (req, res) -> { if(req.userAgent()!=null && req.userAgent().length()>0){ // this is actually better than I thought "(?i:.*(mobile|blackberry|mini).*)" // so using "(.*(Mobile).*)" misses more if(req.userAgent().matches(("(.*(Mobile).*)"))){ res.redirect("/styled/redirect/mobile/user-agent-mobile-test"); return ""; } } return new ResourceReader().asString("/web/styled/redirect/user-agent-redirect-test.html"); }); get("/styled/redirect/mobile/user-agent-mobile-test", (req, res) -> { return new ResourceReader().asString("/web/styled/redirect/mobile/user-agent-mobile-test.html").replace("<!-- USERAGENT -->", req.userAgent()); }); //search.php - do not use a search engine, just have a 
set of random urls that we put up so it looks like a search // testing, java, related post("/selenium/search.php", (req, res) -> {return new PhpSearch(req,res).post();}); get("/selenium/search.php", (req, res) -> {return new PhpSearch(req,res).get();}); post("/search.php", (req, res) -> {return new PhpSearch(req,res).post();}); get("/search.php", (req, res) -> {return new PhpSearch(req,res).get();}); // I Was going to rewrite the find_by_playground.php but then realised that the html is actually static now // as I just added the output of the old php to the resources and serve it up as a static html page //get("/find_by_playground.php", (req, res) -> {return new PhpFindByPlayground(req,res).get();}); /* When migrating compendiumdev.co.uk to static site, we have some 'loose' pages which were used in the book API Testing a REST API redirect these to the testing app. compendiumdev.co.uk/apps/mocktracks/projectsjson.php */ get("/apps/mocktracks/projectsjson.php", (req, res) -> { res.header("Content-Type","application/json"); return new ResourceReader(). asString( "/web/mocktracks/projects.json"); }); get("/apps/mocktracks/projectsxml.php", (req, res) -> { res.header("Content-Type","application/xml"); return new ResourceReader(). asString( "/web/mocktracks/projects.xml"); }); get("/apps/mocktracks/reflect.php", (req, res) -> { return "GET\n\n"+req.body(); }); post("/apps/mocktracks/reflect.php", (req, res) -> { return "POST\n\n"+req.body(); }); put("/apps/mocktracks/reflect.php", (req, res) -> { return "PUT\n\n"+req.body(); }); // the upload functionality makes this insecure for external sites - do not release with this active post("/uploads/fileprocessor", (req, res) -> { Boolean allowUploads = true; // assume working locally Boolean allowSaving = false; // assume care about security String envVar = System.getenv("SELENIUM_TEST_PAGES_DISALLOW_UPLOAD"); if(envVar != null && envVar.length()>0){ allowUploads = false; } String envSaveVar = System.getenv("SELENIUM_TEST_PAGES_ALLOW_UPLOAD_FILE_SAVING"); if(envSaveVar != null && envSaveVar.length()>0){ if(envSaveVar.equalsIgnoreCase("TRUE")) { allowSaving = true; } } // add configuration to allow saving which is currently not enabled return new FileUploadProcessor(req,res, allowUploads, allowSaving).prettyOutput().post(); }); post("/validate/input-validation", (req, res) -> {return new InputValidationProcessor(req,res).post();}); get("/uploads/fileprocessor", (req, res) -> {res.redirect("/styled/file-upload-test.html"); return "";}); get("/upload/NoFileUploadsAllowed.txt", (req, res) -> {return new ResourceReader().asString("/web/NoFileUploadsAllowed.txt");}); get("/upload/*", (req, res) -> {return new UploadedFile(req,res).get("upload/"+req.splat()[0]);}); get("/download/*", (req, res) -> {return new FileDownloadProcessor(req, res).get(req.splat()[0]);}); post("/download/*", (req, res) -> {res.redirect("/download/"+req.splat()[0]); return "";}); // everything else just redirect get("/selenium/testpages/*", (req, res) -> {res.redirect("/" + req.splat()[0]); return "";}); get("/selenium/*", (req, res) -> {res.redirect("/" + req.splat()[0]); return "";}); } }
[ "\"SELENIUM_TEST_PAGES_DISALLOW_UPLOAD\"", "\"SELENIUM_TEST_PAGES_ALLOW_UPLOAD_FILE_SAVING\"" ]
[]
[ "SELENIUM_TEST_PAGES_ALLOW_UPLOAD_FILE_SAVING", "SELENIUM_TEST_PAGES_DISALLOW_UPLOAD" ]
[]
["SELENIUM_TEST_PAGES_ALLOW_UPLOAD_FILE_SAVING", "SELENIUM_TEST_PAGES_DISALLOW_UPLOAD"]
java
2
0
manage.py
#!/usr/bin/env python
import os
import sys

if __name__ == '__main__':
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_site.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
_bugs/bug_simple.go
package main import ( "database/sql" "fmt" "gopkg.in/go-on/pq.v2" //"github.com/metakeule/pq" "net/http" "os" "strings" ) var DB *sql.DB var nSql = 15 var maxDBCons = 30 var maxHttpConnects = 20 var maxIdleDBCons = maxDBCons - maxHttpConnects var httpLock = make(chan int, maxHttpConnects) //var nSql = 1 var reqNum = 0 func Connect(url string) (db *sql.DB) { p, err := pq.ParseURL(url) if err != nil { panic(err.Error()) } db, err = sql.Open("postgres", p) //db.SetMaxIdleConns(-1) db.SetMaxIdleConns(maxIdleDBCons) if err != nil { panic(err.Error()) } return } func handler(w http.ResponseWriter, r *http.Request) { <-httpLock reqNum++ numErrs := 0 failed := []string{} for i := 0; i < nSql; i++ { res, err := DB.Query(fmt.Sprintf("SELECT '%v'::int", reqNum*100+i)) if err != nil { numErrs++ s := strings.ToUpper(fmt.Sprintf("Error (Query): %s", err.Error())) failed = append(failed, s) fmt.Println(s) continue } var ii int res.Next() //for res.Next() { err = res.Scan(&ii) if err != nil { fmt.Printf("Error (Scan): %s\n", err.Error()) continue } res.Close() //} } // fmt.Printf("Num Errors: %v\nFailed: \n%s\n\n", numErrs, strings.Join(failed, "\n")) // fmt.Fprintf(w, "Num Errors: %v\nFailed: \n%s\n\n", numErrs, strings.Join(failed, "\n")) httpLock <- 1 } func Open(driverName, dataSourceName string) (ø *sql.DB, ſ error) { ø, ſ = sql.Open(driverName, dataSourceName) return } /* prepare the postgres server with ALTER Role username CONNECTION LIMIT 10; run webserver with export DB_URL=postgres://user:password@localhost:5432/database ; go run main.go run ab with ab -n 2000 -c 200 http://localhost:8080/ */ func main() { for i := 0; i < maxHttpConnects; i++ { httpLock <- 1 } DB = Connect(os.Getenv("PG_URL")) http.HandleFunc("/", handler) fmt.Println("serving on localhost:8080") err := http.ListenAndServe(":8080", nil) if err != nil { fmt.Printf("Error (Serving): %s\n", err) } }
[ "\"PG_URL\"" ]
[]
[ "PG_URL" ]
[]
["PG_URL"]
go
1
0
caddy/setup_test.go
package caddy import ( "fmt" "io/ioutil" "os" "path/filepath" "testing" "time" "github.com/mholt/caddy" "github.com/mholt/caddy/caddyhttp/httpserver" . "github.com/stretchr/testify/assert" "github.com/tarent/loginsrv/login" ) func TestSetup(t *testing.T) { os.Setenv("JWT_SECRET", "jwtsecret") for j, test := range []struct { input string shouldErr bool configCheck func(*testing.T, *login.Config) }{ { input: `login { simple bob=secret }`, shouldErr: false, configCheck: func(t *testing.T, cfg *login.Config) { expectedBackendCfg := login.Options{"simple": map[string]string{"bob": "secret"}} Equal(t, expectedBackendCfg, cfg.Backends, "config simple auth backend") }, }, { input: `login { success_url successurl jwt_expiry 42h jwt_algo algo login_path /foo/bar redirect true redirect_query_parameter comingFrom redirect_check_referer true redirect_host_file domainWhitelist.txt cookie_name cookiename cookie_http_only false cookie_domain example.com cookie_expiry 23h23m simple bob=secret osiam endpoint=http://localhost:8080,client_id=example-client,client_secret=secret }`, shouldErr: false, configCheck: func(t *testing.T, cfg *login.Config) { Equal(t, cfg.SuccessURL, "successurl") Equal(t, cfg.JwtExpiry, 42*time.Hour) Equal(t, cfg.JwtAlgo, "algo") Equal(t, cfg.LoginPath, "/foo/bar") Equal(t, cfg.Redirect, true) Equal(t, cfg.RedirectQueryParameter, "comingFrom") Equal(t, cfg.RedirectCheckReferer, true) Equal(t, cfg.RedirectHostFile, "domainWhitelist.txt") Equal(t, cfg.CookieName, "cookiename") Equal(t, cfg.CookieHTTPOnly, false) Equal(t, cfg.CookieDomain, "example.com") Equal(t, cfg.CookieExpiry, 23*time.Hour+23*time.Minute) expectedBackendCfg := login.Options{ "simple": map[string]string{ "bob": "secret", }, "osiam": map[string]string{ "endpoint": "http://localhost:8080", "client_id": "example-client", "client_secret": "secret", }, } Equal(t, expectedBackendCfg, cfg.Backends, "config simple auth backend") }, }, { input: `loginsrv /context { backend provider=simple,bob=secret cookie-name cookiename }`, shouldErr: false, configCheck: func(t *testing.T, cfg *login.Config) { Equal(t, "/context/login", cfg.LoginPath, "Login path should be set by argument for backwards compatibility") Equal(t, "cookiename", cfg.CookieName, "The cookie name should be set by a config name with - instead of _ for backwards compatibility") expectedBackendCfg := login.Options{ "simple": map[string]string{ "bob": "secret", }, } Equal(t, expectedBackendCfg, cfg.Backends, "The backend config should be set by \"backend provider=\" for backwards compatibility") }, }, // error cases {input: "login {\n}", shouldErr: true}, {input: "login xx yy {\n}", shouldErr: true}, {input: "login {\n cookie_http_only 42d \n simple bob=secret \n}", shouldErr: true}, {input: "login {\n unknown property \n simple bob=secret \n}", shouldErr: true}, {input: "login {\n backend \n}", shouldErr: true}, {input: "login {\n backend provider=foo\n}", shouldErr: true}, {input: "login {\n backend kk\n}", shouldErr: true}, } { t.Run(fmt.Sprintf("test %v", j), func(t *testing.T) { c := caddy.NewTestController("http", test.input) err := setup(c) if test.shouldErr { Error(t, err, "test ") return } NoError(t, err) mids := httpserver.GetConfig(c).Middleware() if len(mids) == 0 { t.Errorf("no middlewares created in test #%v", j) return } middleware := mids[len(mids)-1](nil).(*CaddyHandler) test.configCheck(t, middleware.config) }) } } func TestSetup_CornerCasesJWTSecret(t *testing.T) { os.Setenv("JWT_SECRET", "jwtsecret") for j, test := range []struct { description 
string envInput string config1 string config2 string expectedEnv string expectedSecretConfig1 string expectedSecretConfig2 string }{ { description: "just use the environment", envInput: "foo", config1: `login { simple bob=secret }`, config2: `login { simple bob=secret }`, expectedEnv: "foo", expectedSecretConfig1: "foo", expectedSecretConfig2: "foo", }, { description: "set variable using configs", envInput: "", config1: `login { simple bob=secret jwt_secret xxx }`, config2: `login { simple bob=secret jwt_secret yyy }`, expectedEnv: "xxx", expectedSecretConfig1: "xxx", expectedSecretConfig2: "yyy", }, { description: "secret in env and configs was set", envInput: "bli", config1: `login { simple bob=secret jwt_secret bla }`, config2: `login { simple bob=secret jwt_secret blub }`, expectedEnv: "bli", // should not be touched expectedSecretConfig1: "bla", expectedSecretConfig2: "blub", }, { description: "random default value", envInput: "", config1: `login { simple bob=secret }`, config2: `login { simple bob=secret }`, expectedEnv: login.DefaultConfig().JwtSecret, expectedSecretConfig1: login.DefaultConfig().JwtSecret, expectedSecretConfig2: login.DefaultConfig().JwtSecret, }, } { t.Run(fmt.Sprintf("test %v %v", j, test.description), func(t *testing.T) { if test.envInput == "" { os.Unsetenv("JWT_SECRET") } else { os.Setenv("JWT_SECRET", test.envInput) } c1 := caddy.NewTestController("http", test.config1) NoError(t, setup(c1)) c2 := caddy.NewTestController("http", test.config2) NoError(t, setup(c2)) mids1 := httpserver.GetConfig(c1).Middleware() if len(mids1) == 0 { t.Errorf("no middlewares created in test #%v", j) return } middleware1 := mids1[len(mids1)-1](nil).(*CaddyHandler) mids2 := httpserver.GetConfig(c2).Middleware() if len(mids2) == 0 { t.Errorf("no middlewares created in test #%v", j) return } middleware2 := mids2[len(mids2)-1](nil).(*CaddyHandler) Equal(t, test.expectedSecretConfig1, middleware1.config.JwtSecret) Equal(t, test.expectedSecretConfig2, middleware2.config.JwtSecret) Equal(t, test.expectedEnv, os.Getenv("JWT_SECRET")) }) } } func TestSetup_RelativeFiles(t *testing.T) { caddyfile := `loginsrv { template myTemplate.tpl redirect_host_file redirectDomains.txt simple bob=secret }` root, _ := ioutil.TempDir("", "") c := caddy.NewTestController("http", caddyfile) c.Key = "RelativeTemplateFileTest" config := httpserver.GetConfig(c) config.Root = root err := setup(c) NoError(t, err) mids := httpserver.GetConfig(c).Middleware() if len(mids) == 0 { t.Errorf("no middlewares created") } middleware := mids[len(mids)-1](nil).(*CaddyHandler) Equal(t, filepath.FromSlash(root+"/myTemplate.tpl"), middleware.config.Template) Equal(t, "redirectDomains.txt", middleware.config.RedirectHostFile) }
[ "\"JWT_SECRET\"" ]
[]
[ "JWT_SECRET" ]
[]
["JWT_SECRET"]
go
1
0
src/test/java/gov/di_ipv_core/utilities/ConfigurationReader.java
package gov.di_ipv_core.utilities;

import java.io.FileInputStream;
import java.util.Properties;

/**
 * reads the properties file configuration.properties
 */
public class ConfigurationReader {

    private static Properties properties;

    static {
        try {
            String path = "configuration.properties";
            FileInputStream input = new FileInputStream(path);
            properties = new Properties();
            properties.load(input);
            input.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static String get(String keyName) {
        return properties.getProperty(keyName);
    }

    public static String getBrowser() {
        return System.getenv("BROWSER") != null ? System.getenv("BROWSER") : "chrome";
    }

    public static String getOrchestratorUrl() {
        String orchestratorStubUrl = System.getenv("ORCHESTRATOR_STUB_URL");
        if (orchestratorStubUrl == null) {
            throw new IllegalArgumentException("Environment variable ORCHESTRATOR_STUB_URL is not set");
        }
        return orchestratorStubUrl;
    }

    public static String getCoreStubUrl() {
        String coreStubUrl = System.getenv("CORE_STUB_URL");
        if (coreStubUrl == null) {
            throw new IllegalArgumentException("Environment variable CORE_STUB_URL is not set");
        }
        return coreStubUrl;
    }

    public static boolean noChromeSandbox() {
        return "true".equalsIgnoreCase(System.getenv("NO_CHROME_SANDBOX"));
    }
}
[ "\"BROWSER\"", "\"BROWSER\"", "\"ORCHESTRATOR_STUB_URL\"", "\"CORE_STUB_URL\"", "\"NO_CHROME_SANDBOX\"" ]
[]
[ "CORE_STUB_URL", "NO_CHROME_SANDBOX", "BROWSER", "ORCHESTRATOR_STUB_URL" ]
[]
["CORE_STUB_URL", "NO_CHROME_SANDBOX", "BROWSER", "ORCHESTRATOR_STUB_URL"]
java
4
0
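In the row above, constarg matches the distinct string literals passed to System.getenv in ConfigurationReader.java (BROWSER, ORCHESTRATOR_STUB_URL, CORE_STUB_URL, NO_CHROME_SANDBOX), and constargcount is their count. As a rough, assumed illustration of how such a column could be derived (the dataset's actual extraction pipeline is not shown in this dump), a literal-argument scan over the source might look like the sketch below; ordering and deduplication here are illustrative choices.

    import re

    # Hypothetical reproduction of the "constarg" idea: collect string literals
    # passed to common environment lookups in Java, Python and Go sources.
    # This is an assumption about the dataset, not its documented extractor.
    GETENV_CALL = re.compile(
        r'(?:System\.getenv|os\.environ\.get|os\.getenv|os\.Getenv)'
        r'\s*\(\s*["\']([A-Za-z_][A-Za-z0-9_]*)["\']'
    )

    def const_env_args(source: str) -> list:
        # Distinct literal names only; arguments that are variables or
        # expressions would presumably fall under "variablearg" instead.
        return sorted(set(GETENV_CALL.findall(source)))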
d3m_ta2_nyu/alphad3m/d3mpipeline_builder.py
import logging import os import json import pickle import itertools from d3m_ta2_nyu.workflow import database from d3m import index from d3m.container import Dataset, DataFrame, ndarray, List from d3m_ta2_nyu.utils import is_collection, get_collection_type # Use a headless matplotlib backend os.environ['MPLBACKEND'] = 'Agg' logger = logging.getLogger(__name__) CONTAINER_CAST = { Dataset: { DataFrame: 'd3m.primitives.data_transformation.dataset_to_dataframe.Common', ndarray: ('d3m.primitives.data_transformation.dataset_to_dataframe.Common' '|d3m.primitives.data_transformation.dataframe_to_ndarray.Common'), List: ('d3m.primitives.data_transformation.dataset_to_dataframe.Common' '|d3m.primitives.data_transformation.dataframe_to_list.Common') }, DataFrame: { Dataset: "", ndarray: 'd3m.primitives.data_transformation.dataframe_to_ndarray.Common', List: 'd3m.primitives.data_transformation.dataframe_to_list.Common' }, ndarray: { Dataset: "", DataFrame: 'd3m.primitives.data_transformation.ndarray_to_dataframe.Common', List: 'd3m.primitives.data_transformation.ndarray_to_list.Common' }, List: { Dataset: "", DataFrame: 'd3m.primitives.data_transformation.list_to_dataframe.Common', ndarray: 'd3m.primitives.data_transformation.list_to_ndarray.Common', } } def make_pipeline_module(db, pipeline, name, package='d3m', version='2019.10.10'): pipeline_module = database.PipelineModule(pipeline=pipeline, package=package, version=version, name=name) db.add(pipeline_module) return pipeline_module def make_data_module(db, pipeline, targets, features): input_data = make_pipeline_module(db, pipeline, 'dataset', 'data', '0.0') db.add(database.PipelineParameter( pipeline=pipeline, module=input_data, name='targets', value=pickle.dumps(targets), )) db.add(database.PipelineParameter( pipeline=pipeline, module=input_data, name='features', value=pickle.dumps(features), )) return input_data def connect(db, pipeline, from_module, to_module, from_output='produce', to_input='inputs'): if 'index' not in from_output: if not from_module.name.startswith('dataset'): from_module_primitive = index.get_primitive(from_module.name) from_module_output = from_module_primitive.metadata.query()['primitive_code']['class_type_arguments'][ 'Outputs'] else: from_module_output = Dataset to_module_primitive = index.get_primitive(to_module.name) to_module_input = to_module_primitive.metadata.query()['primitive_code']['class_type_arguments'][ 'Inputs'] arguments = to_module_primitive.metadata.query()['primitive_code']['arguments'] if to_input not in arguments: raise NameError('Argument %s not found in %s' % (to_input, to_module.name)) if from_module_output != to_module_input and \ from_module.name != 'd3m.primitives.data_transformation.audio_reader.DistilAudioDatasetLoader': # TODO Find a better way cast_module_steps = CONTAINER_CAST[from_module_output][to_module_input] if cast_module_steps: for cast_step in cast_module_steps.split('|'): cast_module = make_pipeline_module(db, pipeline,cast_step) db.add(database.PipelineConnection(pipeline=pipeline, from_module=from_module, to_module=cast_module, from_output_name=from_output, to_input_name='inputs')) from_module = cast_module else: raise TypeError('Incompatible connection types: %s and %s' % (str(from_module_output), str(to_module_input))) db.add(database.PipelineConnection(pipeline=pipeline, from_module=from_module, to_module=to_module, from_output_name=from_output, to_input_name=to_input)) def set_hyperparams(db, pipeline, module, **hyperparams): db.add(database.PipelineParameter( 
pipeline=pipeline, module=module, name='hyperparams', value=pickle.dumps(hyperparams), )) def change_default_hyperparams(db, pipeline, primitive_name, primitive): if primitive_name == 'd3m.primitives.feature_extraction.tfidf_vectorizer.SKlearn': set_hyperparams(db, pipeline, primitive, use_semantic_types=True, return_result='replace') elif primitive_name == 'd3m.primitives.feature_extraction.count_vectorizer.SKlearn': set_hyperparams(db, pipeline, primitive, use_semantic_types=True, return_result='replace') elif primitive_name == 'd3m.primitives.feature_extraction.feature_agglomeration.SKlearn': set_hyperparams(db, pipeline, primitive, use_semantic_types=True, return_result='replace') elif primitive_name == 'd3m.primitives.data_cleaning.string_imputer.SKlearn': set_hyperparams(db, pipeline, primitive, use_semantic_types=True, return_result='replace') elif primitive_name == 'd3m.primitives.data_transformation.one_hot_encoder.SKlearn': set_hyperparams(db, pipeline, primitive, use_semantic_types=True, return_result='replace', handle_unknown='ignore') elif primitive_name == 'd3m.primitives.data_cleaning.imputer.SKlearn': set_hyperparams(db, pipeline, primitive, strategy='most_frequent') elif primitive_name == 'd3m.primitives.clustering.k_means.DistilKMeans': set_hyperparams(db, pipeline, primitive, cluster_col_name='Class') elif primitive_name == 'd3m.primitives.time_series_forecasting.lstm.DeepAR': set_hyperparams(db, pipeline, primitive, epochs=1) elif primitive_name == 'd3m.primitives.data_transformation.encoder.DSBOX': set_hyperparams(db, pipeline, primitive, n_limit=50) elif primitive_name == 'd3m.primitives.data_cleaning.cleaning_featurizer.DSBOX': set_hyperparams(db, pipeline, primitive, features='split_date_column') elif primitive_name == 'd3m.primitives.data_transformation.encoder.DistilTextEncoder': set_hyperparams(db, pipeline, primitive, encoder_type='tfidf') elif primitive_name == 'd3m.primitives.classification.text_classifier.DistilTextClassifier': set_hyperparams(db, pipeline, primitive, metric='accuracy') elif primitive_name == 'd3m.primitives.feature_selection.joint_mutual_information.AutoRPI': set_hyperparams(db, pipeline, primitive, method='fullBayesian') def need_entire_dataframe(primitives): for primitive in primitives: if primitive in {'d3m.primitives.data_transformation.time_series_to_list.DSBOX', 'd3m.primitives.feature_extraction.random_projection_timeseries_featurization.DSBOX', 'd3m.primitives.data_transformation.dataframe_to_tensor.DSBOX', 'd3m.primitives.feature_extraction.resnet50_image_feature.DSBOX'}: return True return False def encode_features(pipeline, attribute_step, target_step, features_metadata, db): last_step = attribute_step feature_types = features_metadata['only_attribute_types'] count_steps = 0 if 'http://schema.org/Text' in feature_types: text_step = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.encoder.DistilTextEncoder') set_hyperparams(db, pipeline, text_step, encoder_type='tfidf') connect(db, pipeline, last_step, text_step) connect(db, pipeline, target_step, text_step, to_input='outputs') last_step = text_step count_steps += 1 if 'http://schema.org/DateTime' in feature_types: time_step = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.enrich_dates.DistilEnrichDates') connect(db, pipeline, last_step, time_step) last_step = time_step count_steps += 1 if 'https://metadata.datadrivendiscovery.org/types/CategoricalData' in feature_types: onehot_step = make_pipeline_module(db, pipeline, 
'd3m.primitives.data_transformation.encoder.DSBOX') set_hyperparams(db, pipeline, onehot_step, n_limit=50) connect(db, pipeline, last_step, onehot_step) last_step = onehot_step count_steps += 1 return last_step, count_steps def process_template(db, input_data, pipeline, pipeline_template, count_template_steps=0, prev_step=None): prev_steps = {} for pipeline_step in pipeline_template['steps']: if pipeline_step['type'] == 'PRIMITIVE': step = make_pipeline_module(db, pipeline, pipeline_step['primitive']['python_path']) if 'outputs' in pipeline_step: for output in pipeline_step['outputs']: prev_steps['steps.%d.%s' % (count_template_steps, output['id'])] = step count_template_steps += 1 if 'hyperparams' in pipeline_step: hyperparams = {} for hyper, desc in pipeline_step['hyperparams'].items(): hyperparams[hyper] = {'type': desc['type'], 'data': desc['data']} set_hyperparams(db, pipeline, step, **hyperparams) else: # TODO In the future we should be able to handle subpipelines break if prev_step: if 'arguments' in pipeline_step: for argument, desc in pipeline_step['arguments'].items(): connect(db, pipeline, prev_steps[desc['data']], step, from_output=desc['data'].split('.')[-1], to_input=argument) # index is a special connection to keep the order of steps in fixed pipeline templates connect(db, pipeline, prev_step, step, from_output='index', to_input='index') else: connect(db, pipeline, input_data, step, from_output='dataset') prev_step = step return prev_step, count_template_steps def add_semantic_types(db, features_metadata, pipeline, pipeline_template, prev_step): count_steps = 0 if pipeline_template is None: for semantic_type, columns in features_metadata['semantictypes_indices'].items(): step_add_type = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'add_semantic_types.Common') count_steps += 1 set_hyperparams(db, pipeline, step_add_type, columns=columns, semantic_types=[semantic_type]) connect(db, pipeline, prev_step, step_add_type) prev_step = step_add_type else: step_add_type = make_pipeline_module(db, pipeline, 'd3m.primitives.schema_discovery.' 
'profiler.Common') count_steps += 1 connect(db, pipeline, prev_step, step_add_type) prev_step = step_add_type return prev_step, count_steps def add_file_readers(db, pipeline, prev_step, dataset_path): last_step = prev_step count_steps = 0 if get_collection_type(dataset_path) == 'text': text_reader = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.text_reader.Common') count_steps += 1 set_hyperparams(db, pipeline, text_reader, return_result='replace') connect(db, pipeline, prev_step, text_reader) last_step = text_reader elif get_collection_type(dataset_path) == 'image': image_reader = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.image_reader.Common') count_steps += 1 set_hyperparams(db, pipeline, image_reader, return_result='replace') connect(db, pipeline, prev_step, image_reader) last_step = image_reader elif get_collection_type(dataset_path) == 'audio': audio_reader = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.audio_reader.Common') count_steps += 1 set_hyperparams(db, pipeline, audio_reader, return_result='replace') connect(db, pipeline, prev_step, audio_reader) last_step = audio_reader return last_step, count_steps def add_previous_primitive(db, pipeline, primitives, prev_step): remaining_primitives = [] count_steps = 0 for primitive in primitives: if need_entire_dataframe([primitive]): step_add_type = make_pipeline_module(db, pipeline, primitive) count_steps += 1 connect(db, pipeline, prev_step, step_add_type) prev_step = step_add_type else: remaining_primitives.append(primitive) return prev_step, remaining_primitives, count_steps class BaseBuilder: def make_d3mpipeline(self, primitives, origin, dataset, pipeline_template, targets, features, features_metadata, privileged_data=[], metrics=[], DBSession=None): # TODO parameters 'features and 'targets' are not needed db = DBSession() dataset_path = dataset[7:] origin_name = '%s (%s)' % (origin, ', '.join([p.replace('d3m.primitives.', '') for p in primitives])) pipeline = database.Pipeline(origin=origin_name, dataset=dataset) try: input_data = make_data_module(db, pipeline, targets, features) step0 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.denormalize.Common') count_steps = 0 if pipeline_template: template_step, template_count = process_template(db, input_data, pipeline, pipeline_template) count_steps += template_count connect(db, pipeline, template_step, step0) else: connect(db, pipeline, input_data, step0, from_output='dataset') step1 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.dataset_to_dataframe.Common') count_steps += 1 connect(db, pipeline, step0, step1) prev_step = step1 if is_collection(dataset_path) and not need_entire_dataframe(primitives): prev_step, reader_steps = add_file_readers(db, pipeline, prev_step, dataset_path) count_steps += reader_steps if len(features_metadata['semantictypes_indices']) > 0: prev_step, semantic_steps = add_semantic_types(db, features_metadata, pipeline, pipeline_template, prev_step) count_steps += semantic_steps dataframe_step = prev_step if 'ROC_AUC' in metrics[0]['metric'].name: step_unique = make_pipeline_module(db, pipeline, 'd3m.primitives.operator.compute_unique_values.Common') connect(db, pipeline, dataframe_step, step_unique) count_steps += 1 prev_step = step_unique else: step_unique = dataframe_step if need_entire_dataframe(primitives): prev_step, primitives, primitive_steps = add_previous_primitive(db, pipeline, primitives, prev_step) count_steps += 
primitive_steps step2 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.column_parser.Common') count_steps += 1 connect(db, pipeline, prev_step, step2) step3 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'extract_columns_by_semantic_types.Common') count_steps += 1 set_hyperparams(db, pipeline, step3, semantic_types=['https://metadata.datadrivendiscovery.org/types/Attribute'], exclude_columns=privileged_data) connect(db, pipeline, step2, step3) step4 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'extract_columns_by_semantic_types.Common') count_steps += 1 set_hyperparams(db, pipeline, step4, semantic_types=['https://metadata.datadrivendiscovery.org/types/TrueTarget']) connect(db, pipeline, step_unique, step4) current_step = prev_step = step3 for primitive in primitives: current_step = make_pipeline_module(db, pipeline, primitive) count_steps += 1 change_default_hyperparams(db, pipeline, primitive, current_step) connect(db, pipeline, prev_step, current_step) prev_step = current_step to_module_primitive = index.get_primitive(primitive) if 'outputs' in to_module_primitive.metadata.query()['primitive_code']['arguments']: connect(db, pipeline, step4, current_step, to_input='outputs') if 'ROC_AUC' in metrics[0]['metric'].name: step5 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'construct_confidence.Common') set_hyperparams(db, pipeline, step5, primitive_learner={"type": "PRIMITIVE", "data": count_steps} ) connect(db, pipeline, current_step, step5, from_output='index', to_input='index') connect(db, pipeline, step_unique, step5) connect(db, pipeline, dataframe_step, step5, to_input='reference') else: step5 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 
'construct_predictions.Common') connect(db, pipeline, current_step, step5) connect(db, pipeline, dataframe_step, step5, to_input='reference') db.add(pipeline) db.commit() logger.info('%s PIPELINE ID: %s', origin, pipeline.id) return pipeline.id except: logger.exception('Error creating pipeline id=%s, primitives=%s', pipeline.id, str(primitives)) return None finally: db.close() @staticmethod def make_template(imputer, estimator, dataset, pipeline_template, targets, features, features_metadata, privileged_data, metrics, DBSession=None): db = DBSession() origin_name = 'Template (%s, %s)' % (imputer, estimator) origin_name = origin_name.replace('d3m.primitives.', '') pipeline = database.Pipeline(origin=origin_name, dataset=dataset) try: # TODO: Use pipeline input for this #count_steps = 0 input_data = make_data_module(db, pipeline, targets, features) step0 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.denormalize.Common') count_steps = 0 if pipeline_template: template_step, template_count = process_template(db, input_data, pipeline, pipeline_template) count_steps += template_count connect(db, pipeline, template_step, step0) else: connect(db, pipeline, input_data, step0, from_output='dataset') step1 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.dataset_to_dataframe.Common') connect(db, pipeline, step0, step1) count_steps += 1 prev_step = step1 if len(features_metadata['semantictypes_indices']) > 0: prev_step, semantic_steps = add_semantic_types(db, features_metadata, pipeline, pipeline_template, prev_step) count_steps += semantic_steps if 'ROC_AUC' in metrics[0]['metric'].name: step_unique = make_pipeline_module(db, pipeline, 'd3m.primitives.operator.compute_unique_values.Common') connect(db, pipeline, prev_step, step_unique) count_steps += 1 else: step_unique = prev_step step2 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'column_parser.Common') connect(db, pipeline, step_unique, step2) count_steps += 1 step3 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'extract_columns_by_semantic_types.Common') set_hyperparams(db, pipeline, step3, semantic_types=['https://metadata.datadrivendiscovery.org/types/Attribute'], exclude_columns=privileged_data ) connect(db, pipeline, step2, step3) count_steps += 1 step4 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 
'extract_columns_by_semantic_types.Common') set_hyperparams(db, pipeline, step4, semantic_types=['https://metadata.datadrivendiscovery.org/types/TrueTarget'] ) connect(db, pipeline, step_unique, step4) count_steps += 1 step5 = make_pipeline_module(db, pipeline, imputer) set_hyperparams(db, pipeline, step5, strategy='most_frequent') connect(db, pipeline, step3, step5) count_steps += 1 encoder_step, encode_steps = encode_features(pipeline, step5, step4, features_metadata, db) other_prev_step = encoder_step count_steps += encode_steps if encoder_step == step5: # Encoders were not applied, so use one_hot_encoder for all features step_fallback = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.encoder.DSBOX') set_hyperparams(db, pipeline, step_fallback, n_limit=50) connect(db, pipeline, step5, step_fallback) other_prev_step = step_fallback count_steps += 1 step6 = make_pipeline_module(db, pipeline, estimator) connect(db, pipeline, other_prev_step, step6) connect(db, pipeline, step4, step6, to_input='outputs') count_steps += 1 if 'ROC_AUC' in metrics[0]['metric'].name: step7 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'construct_confidence.Common') set_hyperparams(db, pipeline, step7, primitive_learner={"type": "PRIMITIVE", "data": count_steps} ) connect(db, pipeline, step6, step7, from_output='index', to_input='index') connect(db, pipeline, step_unique, step7) connect(db, pipeline, prev_step, step7, to_input='reference') else: step7 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'construct_predictions.Common') connect(db, pipeline, step6, step7) connect(db, pipeline, prev_step, step7, to_input='reference') db.add(pipeline) db.commit() return pipeline.id except: logger.exception('Error creating pipeline id=%s', pipeline.id) return None finally: db.close() @staticmethod def make_denormalize_pipeline(dataset, targets, features, DBSession=None): db = DBSession() pipeline = database.Pipeline(origin="denormalize", dataset=dataset) try: input_data = make_data_module(db, pipeline, targets, features) step0 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.denormalize.Common') connect(db, pipeline, input_data, step0, from_output='dataset') step1 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.dataset_to_dataframe.Common') connect(db, pipeline, step0, step1) db.add(pipeline) db.commit() return pipeline.id except: logger.exception('Error creating pipeline id=%s', pipeline.id) return None finally: db.close() TEMPLATES = { 'CLASSIFICATION': list(itertools.product( # Imputer ['d3m.primitives.data_cleaning.imputer.SKlearn'], # Classifier [ 'd3m.primitives.classification.random_forest.SKlearn', 'd3m.primitives.classification.extra_trees.SKlearn', 'd3m.primitives.classification.gradient_boosting.SKlearn', 'd3m.primitives.classification.linear_svc.SKlearn', 'd3m.primitives.classification.sgd.SKlearn' ], )), 'DEBUG_CLASSIFICATION': list(itertools.product( # Imputer ['d3m.primitives.data_cleaning.imputer.SKlearn'], # Classifier [ 'd3m.primitives.classification.random_forest.SKlearn', 'd3m.primitives.classification.extra_trees.SKlearn' ], )), 'REGRESSION': list(itertools.product( # Imputer ['d3m.primitives.data_cleaning.imputer.SKlearn'], # Classifier [ 'd3m.primitives.regression.random_forest.SKlearn', 'd3m.primitives.regression.extra_trees.SKlearn', 'd3m.primitives.regression.sgd.SKlearn', 'd3m.primitives.regression.gradient_boosting.SKlearn', 'd3m.primitives.regression.lasso.SKlearn' ], 
)), 'DEBUG_REGRESSION': list(itertools.product( # Imputer ['d3m.primitives.data_cleaning.imputer.SKlearn'], # Classifier [ 'd3m.primitives.regression.random_forest.SKlearn', 'd3m.primitives.regression.gradient_boosting.SKlearn' ], )), } class TimeseriesClassificationBuilder(BaseBuilder): def make_d3mpipeline(self, primitives, origin, dataset, pipeline_template, targets, features, features_metadata, privileged_data=[], metrics=[], DBSession=None): db = DBSession() origin_name = '%s (%s)' % (origin, ', '.join([p.replace('d3m.primitives.', '') for p in primitives])) pipeline = database.Pipeline(origin=origin_name, dataset=dataset) try: if len(primitives) == 1: input_data = make_data_module(db, pipeline, targets, features) step0 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'time_series_formatter.DistilTimeSeriesFormatter') connect(db, pipeline, input_data, step0, from_output='dataset') step1 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.dataset_to_dataframe.Common') connect(db, pipeline, step0, step1) step2 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'dataset_to_dataframe.Common') connect(db, pipeline, input_data, step2, from_output='dataset') if 'ROC_AUC' in metrics[0]['metric'].name: step_unique = make_pipeline_module(db, pipeline, 'd3m.primitives.operator.compute_unique_values.Common') connect(db, pipeline, step2, step_unique) else: step_unique = step2 step3 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.column_parser.Common') set_hyperparams(db, pipeline, step3, parse_semantic_types=[ 'http://schema.org/Boolean', 'http://schema.org/Integer', 'http://schema.org/Float', 'https://metadata.datadrivendiscovery.org/types/FloatVector']) connect(db, pipeline, step_unique, step3) step4 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'extract_columns_by_semantic_types.Common') set_hyperparams(db, pipeline, step4, semantic_types=['https://metadata.datadrivendiscovery.org/types/Target', 'https://metadata.datadrivendiscovery.org/types/TrueTarget', 'https://metadata.datadrivendiscovery.org/types/SuggestedTarget' ] ) connect(db, pipeline, step1, step4) step5 = make_pipeline_module(db, pipeline, primitives[0]) if primitives[0] == 'd3m.primitives.time_series_classification.convolutional_neural_net.LSTM_FCN': set_hyperparams(db, pipeline, step5, epochs=1) connect(db, pipeline, step1, step5) connect(db, pipeline, step4, step5, to_input='outputs') if 'ROC_AUC' in metrics[0]['metric'].name: step6 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'construct_confidence.Common') set_hyperparams(db, pipeline, step6, primitive_learner={"type": "PRIMITIVE", "data": 6} ) connect(db, pipeline, step5, step6, from_output='index', to_input='index') connect(db, pipeline, step_unique, step6) connect(db, pipeline, step2, step6, to_input='reference') else: step6 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 
'construct_predictions.Common') connect(db, pipeline, step5, step6) connect(db, pipeline, step2, step6, to_input='reference') db.add(pipeline) db.commit() logger.info('%s PIPELINE ID: %s', origin, pipeline.id) return pipeline.id else: pipeline_id = super().make_d3mpipeline(primitives, origin, dataset, pipeline_template, targets, features, features_metadata, privileged_data=privileged_data, metrics=metrics, DBSession=DBSession) return pipeline_id except: logger.exception('Error creating pipeline id=%s, primitives=%s', pipeline.id, str(primitives)) return None finally: db.close() class CommunityDetectionBuilder(BaseBuilder): def make_d3mpipeline(self, primitives, origin, dataset, pipeline_template, targets, features, features_metadata, privileged_data=[], metrics=[], DBSession=None): db = DBSession() origin_name = '%s (%s)' % (origin, ', '.join([p.replace('d3m.primitives.', '') for p in primitives])) pipeline = database.Pipeline(origin=origin_name, dataset=dataset) try: if len(primitives) == 1: input_data = make_data_module(db, pipeline, targets, features) step0 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'load_single_graph.DistilSingleGraphLoader') connect(db, pipeline, input_data, step0, from_output='dataset') step1 = make_pipeline_module(db, pipeline, primitives[0]) connect(db, pipeline, step0, step1) connect(db, pipeline, step0, step1, to_input='outputs', from_output='produce_target') db.add(pipeline) db.commit() logger.info('%s PIPELINE ID: %s', origin, pipeline.id) return pipeline.id else: pipeline_id = super().make_d3mpipeline(primitives, origin, dataset, pipeline_template, targets, features, features_metadata, privileged_data=privileged_data, metrics=metrics, DBSession=DBSession) return pipeline_id except: logger.exception('Error creating pipeline id=%s, primitives=%s', pipeline.id, str(primitives)) return None finally: db.close() class LinkPredictionBuilder(BaseBuilder): def make_d3mpipeline(self, primitives, origin, dataset, pipeline_template, targets, features, features_metadata, privileged_data=[], metrics=[], DBSession=None): db = DBSession() origin_name = '%s (%s)' % (origin, ', '.join([p.replace('d3m.primitives.', '') for p in primitives])) pipeline = database.Pipeline(origin=origin_name, dataset=dataset) try: if len(primitives) == 1: input_data = make_data_module(db, pipeline, targets, features) step0 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 
'load_single_graph.DistilSingleGraphLoader') connect(db, pipeline, input_data, step0, from_output='dataset') step1 = make_pipeline_module(db, pipeline, primitives[0]) set_hyperparams(db, pipeline, step1, metric='accuracy') connect(db, pipeline, step0, step1) connect(db, pipeline, step0, step1, to_input='outputs', from_output='produce_target') db.add(pipeline) db.commit() logger.info('%s PIPELINE ID: %s', origin, pipeline.id) return pipeline.id else: pipeline_id = super().make_d3mpipeline(primitives, origin, dataset, pipeline_template, targets, features, features_metadata, privileged_data=privileged_data, metrics=metrics, DBSession=DBSession) return pipeline_id except: logger.exception('Error creating pipeline id=%s, primitives=%s', pipeline.id, str(primitives)) return None finally: db.close() class GraphMatchingBuilder(BaseBuilder): def make_d3mpipeline(self, primitives, origin, dataset, pipeline_template, targets, features, features_metadata, privileged_data=[], metrics=[], DBSession=None): db = DBSession() origin_name = '%s (%s)' % (origin, ', '.join([p.replace('d3m.primitives.', '') for p in primitives])) try: if len(primitives) == 1: origin_name = 'MtLDB ' + origin_name pipeline = database.Pipeline(origin=origin_name, dataset=dataset) input_data = make_data_module(db, pipeline, targets, features) step0 = make_pipeline_module(db, pipeline, primitives[0]) connect(db, pipeline, input_data, step0) db.add(pipeline) db.commit() logger.info('%s PIPELINE ID: %s', origin, pipeline.id) return pipeline.id else: pipeline = database.Pipeline(origin=origin_name, dataset=dataset) pipeline_id = super().make_d3mpipeline(primitives, origin, dataset, pipeline_template, targets, features, features_metadata, privileged_data=privileged_data, metrics=metrics, DBSession=DBSession) return pipeline_id except: logger.exception('Error creating pipeline id=%s, primitives=%s', pipeline.id, str(primitives)) return None finally: db.close() class VertexClassificationBuilder(BaseBuilder): def make_d3mpipeline(self, primitives, origin, dataset, pipeline_template, targets, features, features_metadata, privileged_data=[], metrics=[], DBSession=None): db = DBSession() origin_name = '%s (%s)' % (origin, ', '.join([p.replace('d3m.primitives.', '') for p in primitives])) pipeline = database.Pipeline(origin=origin_name, dataset=dataset) try: if len(primitives) == 1 and primitives[0] == 'd3m.primitives.classification.gaussian_classification.JHU': input_data = make_data_module(db, pipeline, targets, features) step0 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.load_graphs.JHU') connect(db, pipeline, input_data, step0, from_output='dataset') step1 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_preprocessing.largest_connected_component.JHU') connect(db, pipeline, step0, step1) step2 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.adjacency_spectral_embedding.JHU') set_hyperparams(db, pipeline, step2, max_dimension=5, use_attributes=True) connect(db, pipeline, step1, step2) step3 = make_pipeline_module(db, pipeline, 'd3m.primitives.classification.gaussian_classification.JHU') connect(db, pipeline, step2, step3) db.add(pipeline) db.commit() logger.info('%s PIPELINE ID: %s', origin, pipeline.id) return pipeline.id elif len(primitives) == 1 and primitives[0] == 'd3m.primitives.vertex_nomination.seeded_graph_matching.DistilVertexNomination': input_data = make_data_module(db, pipeline, targets, features) step0 = make_pipeline_module(db, pipeline, 
'd3m.primitives.data_transformation.load_edgelist.DistilEdgeListLoader') connect(db, pipeline, input_data, step0, from_output='dataset') step1 = make_pipeline_module(db, pipeline, primitives[0]) set_hyperparams(db, pipeline, step1, metric='accuracy') connect(db, pipeline, step0, step1) connect(db, pipeline, step0, step1, to_input='outputs', from_output='produce_target') db.add(pipeline) db.commit() logger.info('%s PIPELINE ID: %s', origin, pipeline.id) return pipeline.id else: pipeline_id = super().make_d3mpipeline(primitives, origin, dataset, pipeline_template, targets, features, features_metadata, privileged_data=privileged_data, metrics=metrics, DBSession=DBSession) return pipeline_id except: logger.exception('Error creating pipeline id=%s, primitives=%s', pipeline.id, str(primitives)) return None finally: db.close() class ObjectDetectionBuilder(BaseBuilder): def make_d3mpipeline(self, primitives, origin, dataset, pipeline_template, targets, features, features_metadata, privileged_data=[], metrics=[], DBSession=None): db = DBSession() origin_name = '%s (%s)' % (origin, ', '.join([p.replace('d3m.primitives.', '') for p in primitives])) pipeline = database.Pipeline(origin=origin_name, dataset=dataset) try: input_data = make_data_module(db, pipeline, targets, features) step0 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.denormalize.Common') connect(db, pipeline, input_data, step0, from_output='dataset') if primitives[0] == 'd3m.primitives.feature_extraction.yolo.DSBOX': step1 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'dataset_to_dataframe.Common') connect(db, pipeline, step0, step1) if 'ROC_AUC' in metrics[0]['metric'].name: step_unique = make_pipeline_module(db, pipeline, 'd3m.primitives.operator.compute_unique_values.Common') connect(db, pipeline, step1, step_unique) else: step_unique = step1 step2 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'extract_columns_by_semantic_types.Common') set_hyperparams(db, pipeline, step2, semantic_types=['https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey', 'https://metadata.datadrivendiscovery.org/types/FileName'] ) connect(db, pipeline, step_unique, step2) step3 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'extract_columns_by_semantic_types.Common') set_hyperparams(db, pipeline, step3, semantic_types=['https://metadata.datadrivendiscovery.org/types/TrueTarget'], ) connect(db, pipeline, step1, step3) step4 = make_pipeline_module(db, pipeline, primitives[0]) connect(db, pipeline, step2, step4) connect(db, pipeline, step3, step4, to_input='outputs') if 'ROC_AUC' in metrics[0]['metric'].name: step5 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'construct_confidence.Common') set_hyperparams(db, pipeline, step5, primitive_learner={"type": "PRIMITIVE", "data": 4} ) connect(db, pipeline, step4, step5, from_output='index', to_input='index') connect(db, pipeline, step_unique, step5) connect(db, pipeline, step2, step5, to_input='reference') else: step5 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'construct_predictions.Common') connect(db, pipeline, step4, step5) connect(db, pipeline, step2, step5, to_input='reference') else: step1 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 
'dataset_to_dataframe.Common') set_hyperparams(db, pipeline, step1) connect(db, pipeline, step0, step1) step2 = make_pipeline_module(db, pipeline, primitives[0]) connect(db, pipeline, step1, step2) connect(db, pipeline, step1, step2, to_input='outputs') db.add(pipeline) db.commit() logger.info('%s PIPELINE ID: %s', origin, pipeline.id) return pipeline.id except: logger.exception('Error creating pipeline id=%s, primitives=%s', pipeline.id, str(primitives)) return None finally: db.close() class AudioBuilder(BaseBuilder): def make_d3mpipeline(self, primitives, origin, dataset, pipeline_template, targets, features, features_metadata, privileged_data=[], metrics=[], DBSession=None): db = DBSession() origin_name = '%s (%s)' % (origin, ', '.join([p.replace('d3m.primitives.', '') for p in primitives])) pipeline = database.Pipeline(origin=origin_name, dataset=dataset) try: input_data = make_data_module(db, pipeline, targets, features) step0 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.audio_reader.DistilAudioDatasetLoader') connect(db, pipeline, input_data, step0, from_output='dataset') if 'ROC_AUC' in metrics[0]['metric'].name: step_unique = make_pipeline_module(db, pipeline, 'd3m.primitives.operator.compute_unique_values.Common') connect(db, pipeline, step0, step_unique) else: step_unique = step0 step1 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.column_parser.Common') set_hyperparams(db, pipeline, step1, parse_semantic_types=[ "http://schema.org/Boolean", "http://schema.org/Integer", "http://schema.org/Float", "https://metadata.datadrivendiscovery.org/types/FloatVector" ] ) db.add(database.PipelineConnection(pipeline=pipeline, from_module=step_unique, to_module=step1, from_output_name='produce', to_input_name='inputs')) step2 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'extract_columns_by_semantic_types.Common') set_hyperparams(db, pipeline, step2, semantic_types=['https://metadata.datadrivendiscovery.org/types/TrueTarget'] ) connect(db, pipeline, step_unique, step2) step3 = make_pipeline_module(db, pipeline, primitives[0]) db.add(database.PipelineConnection(pipeline=pipeline, from_module=step_unique, to_module=step3, from_output_name='produce_collection', to_input_name='inputs')) step = prev_step = step3 preprocessors = primitives[1:-1] estimator = primitives[-1] for preprocessor in preprocessors: step = make_pipeline_module(db, pipeline, preprocessor) change_default_hyperparams(db, pipeline, preprocessor, step) connect(db, pipeline, prev_step, step) prev_step = step to_module_primitive = index.get_primitive(preprocessor) if 'outputs' in to_module_primitive.metadata.query()['primitive_code']['arguments']: connect(db, pipeline, step2, step, to_input='outputs') step5 = make_pipeline_module(db, pipeline, estimator) change_default_hyperparams(db, pipeline, estimator, step5) connect(db, pipeline, step, step5) connect(db, pipeline, step2, step5, to_input='outputs') if 'ROC_AUC' in metrics[0]['metric'].name: count_steps = 4 + len(primitives) step6 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'construct_confidence.Common') set_hyperparams(db, pipeline, step6, primitive_learner={"type": "PRIMITIVE", "data": count_steps} ) connect(db, pipeline, step5, step6, from_output='index', to_input='index') connect(db, pipeline, step_unique, step6) connect(db, pipeline, step1, step6, to_input='reference') else: step6 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 
'construct_predictions.Common') connect(db, pipeline, step5, step6) connect(db, pipeline, step1, step6, to_input='reference') db.add(pipeline) db.commit() db.add(pipeline) db.commit() logger.info('%s PIPELINE ID: %s', origin, pipeline.id) return pipeline.id except: logger.exception('Error creating pipeline id=%s, primitives=%s', pipeline.id, str(primitives)) return None finally: db.close() class SemisupervisedClassificationBuilder(BaseBuilder): def make_d3mpipeline(self, primitives, origin, dataset, pipeline_template, targets, features, features_metadata, privileged_data=[], metrics=[], DBSession=None): db = DBSession() origin_name = '%s (%s)' % (origin, ', '.join([p.replace('d3m.primitives.', '') for p in primitives])) pipeline = database.Pipeline(origin=origin_name, dataset=dataset) try: if 'semisupervised_classification.iterative_labeling.AutonBox' in primitives[-1]: input_data = make_data_module(db, pipeline, targets, features) step0 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.denormalize.Common') count_steps = 0 if pipeline_template: template_step, template_steps = process_template(db, input_data, pipeline, pipeline_template) connect(db, pipeline, template_step, step0) count_steps += template_steps else: connect(db, pipeline, input_data, step0, from_output='dataset') step1 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.dataset_to_dataframe.Common') count_steps += 1 connect(db, pipeline, step0, step1) prev_step = step1 if len(features_metadata['semantictypes_indices']) > 0: prev_step, semantic_steps_count = add_semantic_types(db, features_metadata, pipeline, pipeline_template, prev_step) count_steps += semantic_steps_count if 'ROC_AUC' in metrics[0]['metric'].name: step_unique = make_pipeline_module(db, pipeline, 'd3m.primitives.operator.compute_unique_values.Common') connect(db, pipeline, prev_step, step_unique) count_steps += 1 else: step_unique = prev_step step2 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'column_parser.Common') count_steps += 1 connect(db, pipeline, prev_step, step2) step3 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'extract_columns_by_semantic_types.Common') count_steps += 1 set_hyperparams(db, pipeline, step3, semantic_types=['https://metadata.datadrivendiscovery.org/types/Attribute'], exclude_columns=privileged_data) connect(db, pipeline, step2, step3) step4 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 
'extract_columns_by_semantic_types.Common') count_steps += 1 set_hyperparams(db, pipeline, step4, semantic_types=['https://metadata.datadrivendiscovery.org/types/TrueTarget'] ) connect(db, pipeline, prev_step, step4) step = otherprev_step = step3 preprocessors = primitives[:-2] blackbox = primitives[-2] estimator = primitives[-1] for preprocessor in preprocessors: step = make_pipeline_module(db, pipeline, preprocessor) count_steps += 1 change_default_hyperparams(db, pipeline, preprocessor, step) connect(db, pipeline, otherprev_step, step) otherprev_step = step to_module_primitive = index.get_primitive(preprocessor) if 'outputs' in to_module_primitive.metadata.query()['primitive_code']['arguments']: connect(db, pipeline, step4, step, to_input='outputs') step_blackbox = make_pipeline_module(db, pipeline, blackbox) count_steps += 1 change_default_hyperparams(db, pipeline, blackbox, step_blackbox) connect(db, pipeline, step, step_blackbox) connect(db, pipeline, step4, step_blackbox, to_input='outputs') step5 = make_pipeline_module(db, pipeline, estimator) change_default_hyperparams(db, pipeline, estimator, step5) connect(db, pipeline, step_blackbox, step5,from_output='index', to_input='index') connect(db, pipeline, step, step5) set_hyperparams(db, pipeline, step5, blackbox={ "type": "PRIMITIVE", "data": count_steps } ) count_steps += 1 to_module_primitive = index.get_primitive(estimator) if 'outputs' in to_module_primitive.metadata.query()['primitive_code']['arguments']: connect(db, pipeline, step4, step5, to_input='outputs') if 'ROC_AUC' in metrics[0]['metric'].name: step6 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'construct_confidence.Common') set_hyperparams(db, pipeline, step6, primitive_learner={"type": "PRIMITIVE", "data": count_steps} ) connect(db, pipeline, step5, step6, from_output='index', to_input='index') connect(db, pipeline, step_unique, step6) connect(db, pipeline, prev_step, step6, to_input='reference') else: step6 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 
'construct_predictions.Common') connect(db, pipeline, step5, step6) connect(db, pipeline, prev_step, step6, to_input='reference') db.add(pipeline) db.commit() logger.info('%s PIPELINE ID: %s', origin, pipeline.id) return pipeline.id else: pipeline_id = super().make_d3mpipeline(primitives, origin, dataset, pipeline_template, targets, features, features_metadata, privileged_data=privileged_data, metrics=metrics, DBSession=DBSession) return pipeline_id except: logger.exception('Error creating pipeline id=%s, primitives=%s', pipeline.id, str(primitives)) return None finally: db.close() class CollaborativeFilteringBuilder: def make_d3mpipeline(self, primitives, origin, dataset, pipeline_template, targets, features, features_metadata, privileged_data=[], metrics=[], DBSession=None): # TODO parameters 'features and 'targets' are not needed db = DBSession() dataset_path = dataset[7:] origin_name = '%s (%s)' % (origin, ', '.join([p.replace('d3m.primitives.', '') for p in primitives])) pipeline = database.Pipeline(origin=origin_name, dataset=dataset) try: input_data = make_data_module(db, pipeline, targets, features) step0 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.denormalize.Common') if pipeline_template: template_step, _ = process_template(db, input_data, pipeline, pipeline_template) connect(db, pipeline, template_step, step0) else: connect(db, pipeline, input_data, step0, from_output='dataset') step1 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.dataset_to_dataframe.Common') connect(db, pipeline, step0, step1) prev_step = step1 if is_collection(dataset_path): prev_step = add_file_readers(db, pipeline, prev_step, dataset_path) if len(features_metadata['semantictypes_indices']) > 0: prev_step, _ = add_semantic_types(db, features_metadata, pipeline, pipeline_template, prev_step) dataframe_step = prev_step if need_entire_dataframe(primitives): prev_step, primitives = add_previous_primitive(db, pipeline, primitives, prev_step) step2 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.column_parser.Common') connect(db, pipeline, prev_step, step2) step3 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'extract_columns_by_semantic_types.Common') set_hyperparams(db, pipeline, step3, semantic_types=['https://metadata.datadrivendiscovery.org/types/Attribute'], exclude_columns=privileged_data) connect(db, pipeline, step2, step3) step4 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 'extract_columns_by_semantic_types.Common') set_hyperparams(db, pipeline, step4, semantic_types=['https://metadata.datadrivendiscovery.org/types/TrueTarget']) connect(db, pipeline, step2, step4) # TODO: Remove this class, it's needed just to perform column_parser in targets, see above current_step = prev_step = step3 for primitive in primitives: current_step = make_pipeline_module(db, pipeline, primitive) change_default_hyperparams(db, pipeline, primitive, current_step) connect(db, pipeline, prev_step, current_step) prev_step = current_step to_module_primitive = index.get_primitive(primitive) if 'outputs' in to_module_primitive.metadata.query()['primitive_code']['arguments']: connect(db, pipeline, step4, current_step, to_input='outputs') step5 = make_pipeline_module(db, pipeline, 'd3m.primitives.data_transformation.' 
'construct_predictions.Common') connect(db, pipeline, current_step, step5) connect(db, pipeline, dataframe_step, step5, to_input='reference') db.add(pipeline) db.commit() logger.info('%s PIPELINE ID: %s', origin, pipeline.id) return pipeline.id except: logger.exception('Error creating pipeline id=%s, primitives=%s', pipeline.id, str(primitives)) return None finally: db.close()
[]
[]
[ "MPLBACKEND" ]
[]
["MPLBACKEND"]
python
1
0
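Note on this row: the excerpt shown above ends with the pipeline-builder classes, so the MPLBACKEND read recorded in the metadata columns is not visible here. Below is a minimal, hypothetical sketch of the pattern such a lookup usually corresponds to; the module touched and the default value are assumptions, not taken from the source file.

import os

# Hypothetical sketch: pick a headless matplotlib backend unless the caller
# already set one. The real file may read or set MPLBACKEND differently.
os.environ.setdefault("MPLBACKEND", "Agg")

import matplotlib.pyplot as plt  # the backend is resolved at import time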
main.go
package main

import (
	"bufio"
	"encoding/json"
	"flag"
	"fmt"
	"net/http"
	"os"
	"strings"
	"sync"

	"github.com/acarl005/stripansi"
)

var wg sync.WaitGroup

func main() {
	var oneLine, verboseMode bool
	var ApiToken, lines string
	flag.StringVar(&ApiToken, "u", "", "Telegram ApiToken")
	flag.BoolVar(&oneLine, "1", false, "Send message line-by-line")
	flag.BoolVar(&verboseMode, "v", false, "Verbose mode")
	flag.Parse()

	apitokenEnv := os.Getenv("TELEGRAM_API_TOKEN")
	TelegramEnv := "https://api.telegram.org/bot" + apitokenEnv + "/sendMessage"
	chatid := os.Getenv("TELEGRAM_CHAT_ID")

	// Prefer the token from the environment; otherwise fall back to the -u flag.
	if apitokenEnv != "" {
		ApiToken = TelegramEnv
	} else if ApiToken != "" {
		// Build the sendMessage URL from the token passed via -u.
		ApiToken = "https://api.telegram.org/bot" + ApiToken + "/sendMessage"
	} else if verboseMode {
		fmt.Println("Telegram ApiToken not set!")
	}

	if !isStdin() {
		os.Exit(1)
	}

	sc := bufio.NewScanner(os.Stdin)
	for sc.Scan() {
		line := sc.Text()
		fmt.Println(line)
		if oneLine {
			if ApiToken != "" {
				wg.Add(1)
				go teleman(ApiToken, line, chatid)
			}
		} else {
			lines += line
			lines += "\n"
		}
	}
	if !oneLine {
		wg.Add(1)
		go teleman(ApiToken, lines, chatid)
	}
	wg.Wait()
}

// isStdin reports whether data is being piped in on stdin.
func isStdin() bool {
	f, e := os.Stdin.Stat()
	if e != nil {
		return false
	}
	if f.Mode()&os.ModeNamedPipe == 0 {
		return false
	}
	return true
}

type data struct {
	Chat_id string `json:"chat_id"`
	Text    string `json:"text"`
}

// teleman posts one message to the Telegram sendMessage endpoint.
func teleman(url string, line string, chatid string) {
	payload, _ := json.Marshal(data{Chat_id: chatid, Text: stripansi.Strip(line)})
	http.Post(url, "application/json", strings.NewReader(string(payload)))
	wg.Done()
}
[ "\"TELEGRAM_API_TOKEN\"", "\"TELEGRAM_CHAT_ID\"" ]
[]
[ "TELEGRAM_CHAT_ID", "TELEGRAM_API_TOKEN" ]
[]
["TELEGRAM_CHAT_ID", "TELEGRAM_API_TOKEN"]
go
2
0
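The tool above is Go; for consistency with the other examples, here is a minimal Python sketch of the same environment-variable pattern this row records: both TELEGRAM_API_TOKEN and TELEGRAM_CHAT_ID are required, and the message is posted to the Bot API sendMessage endpoint. The helper name and error handling are assumptions, not part of the original tool.

import json
import os
import urllib.request

token = os.getenv("TELEGRAM_API_TOKEN", "")
chat_id = os.getenv("TELEGRAM_CHAT_ID", "")

def send(text: str) -> None:
    # Both variables must be present; the Go tool exits silently without them.
    if not token or not chat_id:
        raise RuntimeError("TELEGRAM_API_TOKEN and TELEGRAM_CHAT_ID must be set")
    url = f"https://api.telegram.org/bot{token}/sendMessage"
    body = json.dumps({"chat_id": chat_id, "text": text}).encode()
    req = urllib.request.Request(
        url, data=body, headers={"Content-Type": "application/json"}
    )
    urllib.request.urlopen(req)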
pctiler/pctiler/main.py
#!/usr/bin/env python3 import logging import os from typing import Awaitable, Callable, Dict, List from cogeo_mosaic.errors import NoAssetFoundError from fastapi import FastAPI, Request, Response from fastapi.openapi.utils import get_openapi from morecantile.defaults import tms as defaultTileMatrices from morecantile.models import TileMatrixSet from starlette import status from starlette.middleware.cors import CORSMiddleware from titiler.application.middleware import ( CacheControlMiddleware, LoggerMiddleware, TotalTimeMiddleware, ) from titiler.core.errors import DEFAULT_STATUS_CODES, add_exception_handlers from pccommon.logging import init_logging from pccommon.middleware import handle_exceptions from pccommon.openapi import fixup_schema from pctiler.config import get_settings from pctiler.db import close_db_connection, connect_to_db from pctiler.endpoints import item, legend, pg_mosaic from pctiler.middleware import trace_request # Initialize logging init_logging("tiler") logger = logging.getLogger(__name__) # Get the root path if set in the environment APP_ROOT_PATH = os.environ.get("APP_ROOT_PATH", "") settings = get_settings() app = FastAPI( title=settings.title, openapi_url=settings.openapi_url, root_path=APP_ROOT_PATH, ) app.include_router( item.pc_tile_factory.router, prefix=get_settings().item_endpoint_prefix, tags=["Item tile endpoints"], ) app.include_router( pg_mosaic.pgstac_mosaic_factory.router, prefix=get_settings().mosaic_endpoint_prefix, tags=["PgSTAC Mosaic endpoints"], ) app.include_router( legend.legend_router, prefix=get_settings().legend_endpoint_prefix, tags=["Legend endpoints"], ) @app.middleware("http") async def _trace_requests( request: Request, call_next: Callable[[Request], Awaitable[Response]] ) -> Response: return await trace_request(request, call_next) @app.middleware("http") async def _handle_exceptions( request: Request, call_next: Callable[[Request], Awaitable[Response]] ) -> Response: return await handle_exceptions(request, call_next) add_exception_handlers( app, {**DEFAULT_STATUS_CODES, NoAssetFoundError: status.HTTP_404_NOT_FOUND} ) # type: ignore app.add_middleware(CacheControlMiddleware, cachecontrol="public, max-age=3600") app.add_middleware(TotalTimeMiddleware) if get_settings().debug: app.add_middleware(LoggerMiddleware) app.add_middleware( CORSMiddleware, allow_origins="*", allow_credentials=True, allow_methods=["GET", "POST"], allow_headers=["*"], ) @app.on_event("startup") async def startup_event() -> None: """Connect to database on startup.""" await connect_to_db(app) @app.on_event("shutdown") async def shutdown_event() -> None: """Close database connection.""" await close_db_connection(app) @app.get("/") async def read_root() -> Dict[str, str]: return {"Hello": "Planetary Developer!"} @app.get("/tileMatrixSets") async def matrix_list() -> List[str]: return defaultTileMatrices.list() @app.get("/tileMatrixSets/{tileMatrixSetId}") async def matrix_definition(tileMatrixSetId: str) -> TileMatrixSet: logger.info( "Matrix definition requested", extra={ "custom_dimensions": { "tileMatrixSetId": tileMatrixSetId, } }, ) return defaultTileMatrices.get(tileMatrixSetId) def custom_openapi() -> Dict: if not app.openapi_schema: schema = get_openapi( title="Preview of Tile Access Features", version=get_settings().api_version, routes=app.routes, ) app.openapi_schema = fixup_schema(app.root_path, schema) return app.openapi_schema app.openapi = custom_openapi # type: ignore
[]
[]
[ "APP_ROOT_PATH" ]
[]
["APP_ROOT_PATH"]
python
1
0
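A minimal sketch isolating the pattern this row captures: pctiler/main.py reads APP_ROOT_PATH once at import time, with an empty-string default, and hands it to FastAPI as root_path. The title below is a placeholder.

import os

from fastapi import FastAPI

# Same pattern as in main.py: an optional root path, defaulting to "",
# read from the environment and passed straight to the application.
APP_ROOT_PATH = os.environ.get("APP_ROOT_PATH", "")

app = FastAPI(title="example", root_path=APP_ROOT_PATH)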
src/backend/vt/extractor.py
import os

import requests

API_KEY = os.environ["VT_KEY"]


class RequestIP:
    URL = "https://www.virustotal.com/api/v3/ip_addresses/{ip}"

    def get_analysis(self, ip: str) -> dict:
        response = requests.get(
            self.URL.format(ip=ip),
            headers={"x-apikey": API_KEY},
        )
        return response.json()
[]
[]
[ "VT_KEY" ]
[]
["VT_KEY"]
python
1
0
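A short usage sketch for the extractor above. Because the module reads os.environ["VT_KEY"] at import time, the variable must be set first; the placeholder key, the import path (derived from the row's filename), and the printed field are assumptions.

import os

# VT_KEY must exist before the module is imported, otherwise KeyError is raised.
os.environ.setdefault("VT_KEY", "placeholder-key")

from src.backend.vt.extractor import RequestIP  # import path assumed from the filename

report = RequestIP().get_analysis("8.8.8.8")
# The v3 API wraps results in a top-level "data" object.
print(report.get("data", {}))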
tfx/extensions/google_cloud_ai_platform/pusher/executor_test.py
# Lint as: python2, python3 # Copyright 2019 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tfx.extensions.google_cloud_ai_platform.pusher.executor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os # Standard Imports import mock import tensorflow as tf from tfx.extensions.google_cloud_ai_platform.pusher import executor from tfx.extensions.google_cloud_ai_platform.pusher.executor import Executor from tfx.types import standard_artifacts class ExecutorTest(tf.test.TestCase): def setUp(self): super(ExecutorTest, self).setUp() self._source_data_dir = os.path.join( os.path.dirname( os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), 'components', 'testdata') self._output_data_dir = os.path.join( os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()), self._testMethodName) tf.io.gfile.makedirs(self._output_data_dir) self._model_export = standard_artifacts.Model() self._model_export.uri = os.path.join(self._source_data_dir, 'trainer/current/') self._model_blessing = standard_artifacts.ModelBlessing() self._input_dict = { 'model_export': [self._model_export], 'model_blessing': [self._model_blessing], } self._model_push = standard_artifacts.PushedModel() self._model_push.uri = os.path.join(self._output_data_dir, 'model_push') tf.io.gfile.makedirs(self._model_push.uri) self._output_dict = { 'model_push': [self._model_push], } self._exec_properties = { 'custom_config': { 'ai_platform_serving_args': { 'model_name': 'model_name', 'project_id': 'project_id' }, }, 'push_destination': None, } self._executor = Executor() @mock.patch.object(executor, 'runner', autospec=True) def testDoBlessed(self, mock_runner): self._model_blessing.uri = os.path.join(self._source_data_dir, 'model_validator/blessed/') self._model_blessing.set_int_custom_property('blessed', 1) self._executor.Do(self._input_dict, self._output_dict, self._exec_properties) mock_runner.deploy_model_for_cmle_serving.assert_called_once_with( self._model_push.artifact.custom_properties['pushed_model'] .string_value, mock.ANY, mock.ANY) self.assertEqual( 1, self._model_push.artifact.custom_properties['pushed'].int_value) @mock.patch.object(executor, 'runner', autospec=True) def testDoNotBlessed(self, mock_runner): self._model_blessing.uri = os.path.join(self._source_data_dir, 'model_validator/not_blessed/') self._model_blessing.set_int_custom_property('blessed', 0) self._executor.Do(self._input_dict, self._output_dict, self._exec_properties) self.assertEqual( 0, self._model_push.artifact.custom_properties['pushed'].int_value) mock_runner.deploy_model_for_cmle_serving.assert_not_called() if __name__ == '__main__': tf.test.main()
[]
[]
[ "TEST_UNDECLARED_OUTPUTS_DIR" ]
[]
["TEST_UNDECLARED_OUTPUTS_DIR"]
python
1
0
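A standalone sketch of the output-directory lookup the test's setUp() performs: prefer TEST_UNDECLARED_OUTPUTS_DIR (commonly provided by Bazel-style test runners), otherwise fall back to a scratch directory. The subdirectory name is a placeholder.

import os
import tempfile

# Same lookup as in setUp(): use the runner-provided directory if present,
# otherwise a temporary directory, then create a per-test subdirectory.
output_root = os.environ.get("TEST_UNDECLARED_OUTPUTS_DIR", tempfile.mkdtemp())
output_dir = os.path.join(output_root, "my_test_case")
os.makedirs(output_dir, exist_ok=True)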
tests/ovirt/multiple_disks_test.go
package ovirt_test import ( "context" "github.com/kubevirt/vm-import-operator/tests" fwk "github.com/kubevirt/vm-import-operator/tests/framework" . "github.com/kubevirt/vm-import-operator/tests/matchers" "github.com/kubevirt/vm-import-operator/tests/ovirt/vms" "github.com/kubevirt/vm-import-operator/tests/utils" sapi "github.com/machacekondra/fakeovirt/pkg/api/stubbing" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" v1 "kubevirt.io/client-go/api/v1" ) type multipleDisksTest struct { framework *fwk.Framework } var _ = Describe("VM import ", func() { var ( f = fwk.NewFrameworkOrDie("multiple-disks", fwk.ProviderOvirt) secret corev1.Secret namespace string test = multipleDisksTest{f} ) BeforeEach(func() { namespace = f.Namespace.Name s, err := f.CreateOvirtSecretFromCACert() if err != nil { Fail("Cannot create secret: " + err.Error()) } secret = s }) Context("for VM with two disks", func() { It("should create started VM", func() { vmID := vms.TwoDisksVmID vmi := utils.VirtualMachineImportCr(fwk.ProviderOvirt, vmID, namespace, secret.Name, f.NsPrefix, tests.TrueVar) vmi.Spec.StartVM = &tests.TrueVar test.stub(vmID) created, err := f.VMImportClient.V2vV1beta1().VirtualMachineImports(namespace).Create(context.TODO(), &vmi, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(created).To(BeSuccessful(f)) retrieved, _ := f.VMImportClient.V2vV1beta1().VirtualMachineImports(namespace).Get(context.TODO(), created.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) vmBlueprint := v1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{Name: retrieved.Status.TargetVMName, Namespace: namespace}} Expect(vmBlueprint).To(BeRunning(f)) vmNamespacedName := types.NamespacedName{ Namespace: vmBlueprint.Namespace, Name: vmBlueprint.Name, } vm := &v1.VirtualMachine{} err = f.Client.Get(context.TODO(), vmNamespacedName, vm) if err != nil { Fail(err.Error()) } spec := vm.Spec.Template.Spec By("having correct disk setup") disks := spec.Domain.Devices.Disks Expect(disks).To(HaveLen(2)) disk1 := disks[0] disk2 := disks[1] if disk1.BootOrder == nil { disk2, disk1 = disk1, disk2 } Expect(disk1.Disk.Bus).To(BeEquivalentTo("virtio")) Expect(*disk1.BootOrder).To(BeEquivalentTo(1)) Expect(disk2.Disk.Bus).To(BeEquivalentTo("sata")) Expect(disk2.BootOrder).To(BeNil()) By("having correct volumes") Expect(spec.Volumes).To(HaveLen(2)) Expect(vm.Spec.Template.Spec.Volumes[0].DataVolume.Name).To(HaveDefaultStorageClass(f)) Expect(vm.Spec.Template.Spec.Volumes[1].DataVolume.Name).To(HaveDefaultStorageClass(f)) }) }) }) func (t *multipleDisksTest) stub(vmID string) { diskAttachmentsXml := t.framework.LoadFile("disk-attachments/two.xml") disk1Xml := t.framework.LoadTemplate("disks/disk-1.xml", map[string]string{"@DISKSIZE": "50331648"}) disk2Xml := t.framework.LoadTemplate("disks/disk-2.xml", map[string]string{"@DISKSIZE": "50331648"}) domainXml := t.framework.LoadFile("storage-domains/domain-1.xml") consolesXml := t.framework.LoadFile("graphic-consoles/empty.xml") vmXml := t.framework.LoadTemplate("vms/basic-vm.xml", map[string]string{"@VMID": vmID}) nicsXml := t.framework.LoadFile("nics/empty.xml") builder := sapi.NewStubbingBuilder(). StubGet("/ovirt-engine/api/vms/"+vmID+"/diskattachments", &diskAttachmentsXml). StubGet("/ovirt-engine/api/vms/"+vmID+"/graphicsconsoles", &consolesXml). StubGet("/ovirt-engine/api/vms/"+vmID+"/nics", &nicsXml). StubGet("/ovirt-engine/api/disks/disk-1", &disk1Xml). 
StubGet("/ovirt-engine/api/disks/disk-2", &disk2Xml). StubGet("/ovirt-engine/api/storagedomains/domain-1", &domainXml). StubGet("/ovirt-engine/api/vms/"+vmID, &vmXml) err := t.framework.OvirtStubbingClient.Stub(builder.Build()) if err != nil { Fail(err.Error()) } }
[]
[]
[]
[]
[]
go
null
null
null
azkaban-webserver/src/main/java/azkaban/webapp/AzkabanWebServer.java
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.webapp; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.lang.management.ManagementFactory; import java.lang.reflect.Constructor; import java.net.MalformedURLException; import java.net.URL; import java.net.URLClassLoader; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.TimeZone; import javax.management.MBeanInfo; import javax.management.MBeanServer; import javax.management.ObjectName; import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; import org.apache.log4j.jmx.HierarchyDynamicMBean; import org.apache.velocity.app.VelocityEngine; import org.apache.velocity.runtime.log.Log4JLogChute; import org.apache.velocity.runtime.resource.loader.ClasspathResourceLoader; import org.apache.velocity.runtime.resource.loader.JarResourceLoader; import org.joda.time.DateTimeZone; import org.mortbay.jetty.Connector; import org.mortbay.jetty.Server; import org.mortbay.jetty.bio.SocketConnector; import org.mortbay.jetty.security.SslSocketConnector; import org.mortbay.jetty.servlet.Context; import org.mortbay.jetty.servlet.DefaultServlet; import org.mortbay.jetty.servlet.ServletHolder; import org.mortbay.thread.QueuedThreadPool; import azkaban.alert.Alerter; import azkaban.database.AzkabanDatabaseSetup; import azkaban.executor.ExecutorManager; import azkaban.executor.JdbcExecutorLoader; import azkaban.jmx.JmxExecutorManager; import azkaban.jmx.JmxJettyServer; import azkaban.jmx.JmxTriggerManager; import azkaban.project.JdbcProjectLoader; import azkaban.project.ProjectManager; import azkaban.scheduler.ScheduleLoader; import azkaban.scheduler.ScheduleManager; import azkaban.scheduler.TriggerBasedScheduleLoader; import azkaban.server.AzkabanServer; import azkaban.server.ServerConstants; import azkaban.server.session.SessionCache; import azkaban.trigger.JdbcTriggerLoader; import azkaban.trigger.TriggerLoader; import azkaban.trigger.TriggerManager; import azkaban.trigger.TriggerManagerException; import azkaban.trigger.builtin.BasicTimeChecker; import azkaban.trigger.builtin.CreateTriggerAction; import azkaban.trigger.builtin.ExecuteFlowAction; import azkaban.trigger.builtin.ExecutionChecker; import azkaban.trigger.builtin.KillExecutionAction; import azkaban.trigger.builtin.SlaAlertAction; import azkaban.trigger.builtin.SlaChecker; import azkaban.user.UserManager; import azkaban.user.XmlUserManager; import azkaban.utils.Emailer; import azkaban.utils.FileIOUtils; import azkaban.utils.Props; import azkaban.utils.PropsUtils; import azkaban.utils.Utils; import azkaban.webapp.plugin.PluginRegistry; import azkaban.webapp.plugin.TriggerPlugin; import azkaban.webapp.plugin.ViewerPlugin; import azkaban.webapp.servlet.AbstractAzkabanServlet; import azkaban.webapp.servlet.ExecutorServlet; import 
azkaban.webapp.servlet.HistoryServlet; import azkaban.webapp.servlet.IndexRedirectServlet; import azkaban.webapp.servlet.JMXHttpServlet; import azkaban.webapp.servlet.ProjectManagerServlet; import azkaban.webapp.servlet.ProjectServlet; import azkaban.webapp.servlet.ScheduleServlet; import azkaban.webapp.servlet.StatsServlet; import azkaban.webapp.servlet.TriggerManagerServlet; import com.linkedin.restli.server.RestliServlet; /** * The Azkaban Jetty server class * * Global azkaban properties for setup. All of them are optional unless * otherwise marked: azkaban.name - The displayed name of this instance. * azkaban.label - Short descriptor of this Azkaban instance. azkaban.color - * Theme color azkaban.temp.dir - Temp dir used by Azkaban for various file * uses. web.resource.dir - The directory that contains the static web files. * default.timezone.id - The timezone code. I.E. America/Los Angeles * * user.manager.class - The UserManager class used for the user manager. Default * is XmlUserManager. project.manager.class - The ProjectManager to load * projects project.global.properties - The base properties inherited by all * projects and jobs * * jetty.maxThreads - # of threads for jetty jetty.ssl.port - The ssl port used * for sessionizing. jetty.keystore - Jetty keystore . jetty.keypassword - Jetty * keystore password jetty.truststore - Jetty truststore jetty.trustpassword - * Jetty truststore password */ public class AzkabanWebServer extends AzkabanServer { private static final String AZKABAN_ACCESS_LOGGER_NAME = "azkaban.webapp.servlet.LoginAbstractAzkabanServlet"; private static final Logger logger = Logger.getLogger(AzkabanWebServer.class); public static final String AZKABAN_HOME = "AZKABAN_HOME"; public static final String DEFAULT_CONF_PATH = "conf"; public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties"; public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties"; private static final int MAX_FORM_CONTENT_SIZE = 10 * 1024 * 1024; private static final int MAX_HEADER_BUFFER_SIZE = 10 * 1024 * 1024; private static AzkabanWebServer app; private static final String DEFAULT_TIMEZONE_ID = "default.timezone.id"; private static final int DEFAULT_PORT_NUMBER = 8081; private static final int DEFAULT_SSL_PORT_NUMBER = 8443; private static final int DEFAULT_THREAD_NUMBER = 20; private static final String VELOCITY_DEV_MODE_PARAM = "velocity.dev.mode"; private static final String USER_MANAGER_CLASS_PARAM = "user.manager.class"; private static final String DEFAULT_STATIC_DIR = ""; private final VelocityEngine velocityEngine; private final Server server; private UserManager userManager; private ProjectManager projectManager; // private ExecutorManagerAdapter executorManager; private ExecutorManager executorManager; private ScheduleManager scheduleManager; private TriggerManager triggerManager; private Map<String, Alerter> alerters; private final ClassLoader baseClassLoader; private Props props; private SessionCache sessionCache; private File tempDir; private Map<String, TriggerPlugin> triggerPlugins; private MBeanServer mbeanServer; private ArrayList<ObjectName> registeredMBeans = new ArrayList<ObjectName>(); public static AzkabanWebServer getInstance() { return app; } /** * Constructor usually called by tomcat AzkabanServletContext to create the * initial server */ public AzkabanWebServer() throws Exception { this(null, loadConfigurationFromAzkabanHome()); } /** * Constructor * Azkaban web server 初始化 */ public AzkabanWebServer(Server server, Props 
props) throws Exception { this.props = props; this.server = server; //初始化velocity实例 velocityEngine = configureVelocityEngine(props .getBoolean(VELOCITY_DEV_MODE_PARAM, false)); //初始化sessionCache sessionCache = new SessionCache(props); //初始化用户信息,涉及到权限之类的信息 userManager = loadUserManager(props); //初始化ALERT 以及插件 alerters = loadAlerters(props); //executorManager初始化,主要是SQL执行相关信息以及任务执行的信息 executorManager = loadExecutorManager(props); projectManager = loadProjectManager(props); triggerManager = loadTriggerManager(props); loadBuiltinCheckersAndActions(); //加载所有的触发器在这里 scheduleManager = loadScheduleManager(triggerManager, props); String triggerPluginDir = props.getString("trigger.plugin.dir", "plugins/triggers"); loadPluginCheckersAndActions(triggerPluginDir); baseClassLoader = this.getClassLoader(); tempDir = new File(props.getString("azkaban.temp.dir", "temp")); // 时区设置 if (props.containsKey(DEFAULT_TIMEZONE_ID)) { String timezone = props.getString(DEFAULT_TIMEZONE_ID); System.setProperty("user.timezone", timezone); TimeZone.setDefault(TimeZone.getTimeZone(timezone)); DateTimeZone.setDefault(DateTimeZone.forID(timezone)); logger.info("Setting timezone to " + timezone); } configureMBeanServer(); } private void setTriggerPlugins(Map<String, TriggerPlugin> triggerPlugins) { this.triggerPlugins = triggerPlugins; } private UserManager loadUserManager(Props props) { Class<?> userManagerClass = props.getClass(USER_MANAGER_CLASS_PARAM, null); logger.info("Loading user manager class " + userManagerClass.getName()); UserManager manager = null; if (userManagerClass != null && userManagerClass.getConstructors().length > 0) { try { Constructor<?> userManagerConstructor = userManagerClass.getConstructor(Props.class); manager = (UserManager) userManagerConstructor.newInstance(props); } catch (Exception e) { logger.error("Could not instantiate UserManager " + userManagerClass.getName()); throw new RuntimeException(e); } } else { manager = new XmlUserManager(props); } return manager; } private ProjectManager loadProjectManager(Props props) { logger.info("Loading JDBC for project management"); JdbcProjectLoader loader = new JdbcProjectLoader(props); //加载数据源 ProjectManager manager = new ProjectManager(loader, props); return manager; } private ExecutorManager loadExecutorManager(Props props) throws Exception { JdbcExecutorLoader loader = new JdbcExecutorLoader(props); ExecutorManager execManager = new ExecutorManager(props, loader, alerters); return execManager; } private ScheduleManager loadScheduleManager(TriggerManager tm, Props props) throws Exception { logger.info("Loading trigger based scheduler"); ScheduleLoader loader = new TriggerBasedScheduleLoader(tm, ScheduleManager.triggerSource); return new ScheduleManager(loader); } private TriggerManager loadTriggerManager(Props props) throws TriggerManagerException { TriggerLoader loader = new JdbcTriggerLoader(props); return new TriggerManager(props, loader, executorManager); } private void loadBuiltinCheckersAndActions() { logger.info("Loading built-in checker and action types"); if (triggerManager instanceof TriggerManager) { SlaChecker.setExecutorManager(executorManager); ExecuteFlowAction.setExecutorManager(executorManager); ExecuteFlowAction.setProjectManager(projectManager); ExecuteFlowAction.setTriggerManager(triggerManager); KillExecutionAction.setExecutorManager(executorManager); SlaAlertAction.setExecutorManager(executorManager); SlaAlertAction.setAlerters(alerters); SlaAlertAction.setExecutorManager(executorManager); 
CreateTriggerAction.setTriggerManager(triggerManager); ExecutionChecker.setExecutorManager(executorManager); } triggerManager.registerCheckerType(BasicTimeChecker.type, BasicTimeChecker.class); triggerManager.registerCheckerType(SlaChecker.type, SlaChecker.class); triggerManager.registerCheckerType(ExecutionChecker.type, ExecutionChecker.class); triggerManager.registerActionType(ExecuteFlowAction.type, ExecuteFlowAction.class); triggerManager.registerActionType(KillExecutionAction.type, KillExecutionAction.class); triggerManager .registerActionType(SlaAlertAction.type, SlaAlertAction.class); triggerManager.registerActionType(CreateTriggerAction.type, CreateTriggerAction.class); } /** * 报警 * @param props * @return */ private Map<String, Alerter> loadAlerters(Props props) { Map<String, Alerter> allAlerters = new HashMap<String, Alerter>(); // load built-in alerters Emailer mailAlerter = new Emailer(props); allAlerters.put("email", mailAlerter); // load all plugin alerters String pluginDir = props.getString("alerter.plugin.dir", "plugins/alerter"); allAlerters.putAll(loadPluginAlerters(pluginDir)); return allAlerters; } private Map<String, Alerter> loadPluginAlerters(String pluginPath) { File alerterPluginPath = new File(pluginPath); if (!alerterPluginPath.exists()) { return Collections.<String, Alerter> emptyMap(); } Map<String, Alerter> installedAlerterPlugins = new HashMap<String, Alerter>(); ClassLoader parentLoader = getClass().getClassLoader(); File[] pluginDirs = alerterPluginPath.listFiles(); ArrayList<String> jarPaths = new ArrayList<String>(); for (File pluginDir : pluginDirs) { if (!pluginDir.isDirectory()) { logger.error("The plugin path " + pluginDir + " is not a directory."); continue; } // Load the conf directory File propertiesDir = new File(pluginDir, "conf"); Props pluginProps = null; if (propertiesDir.exists() && propertiesDir.isDirectory()) { File propertiesFile = new File(propertiesDir, "plugin.properties"); File propertiesOverrideFile = new File(propertiesDir, "override.properties"); if (propertiesFile.exists()) { if (propertiesOverrideFile.exists()) { pluginProps = PropsUtils.loadProps(null, propertiesFile, propertiesOverrideFile); } else { pluginProps = PropsUtils.loadProps(null, propertiesFile); } } else { logger.error("Plugin conf file " + propertiesFile + " not found."); continue; } } else { logger.error("Plugin conf path " + propertiesDir + " not found."); continue; } String pluginName = pluginProps.getString("alerter.name"); List<String> extLibClasspath = pluginProps.getStringList("alerter.external.classpaths", (List<String>) null); String pluginClass = pluginProps.getString("alerter.class"); if (pluginClass == null) { logger.error("Alerter class is not set."); } else { logger.info("Plugin class " + pluginClass); } URLClassLoader urlClassLoader = null; File libDir = new File(pluginDir, "lib"); if (libDir.exists() && libDir.isDirectory()) { File[] files = libDir.listFiles(); ArrayList<URL> urls = new ArrayList<URL>(); for (int i = 0; i < files.length; ++i) { try { URL url = files[i].toURI().toURL(); urls.add(url); } catch (MalformedURLException e) { logger.error(e); } } if (extLibClasspath != null) { for (String extLib : extLibClasspath) { try { File file = new File(pluginDir, extLib); URL url = file.toURI().toURL(); urls.add(url); } catch (MalformedURLException e) { logger.error(e); } } } urlClassLoader = new URLClassLoader(urls.toArray(new URL[urls.size()]), parentLoader); } else { logger.error("Library path " + propertiesDir + " not found."); continue; } 
Class<?> alerterClass = null; try { alerterClass = urlClassLoader.loadClass(pluginClass); } catch (ClassNotFoundException e) { logger.error("Class " + pluginClass + " not found."); continue; } String source = FileIOUtils.getSourcePathFromClass(alerterClass); logger.info("Source jar " + source); jarPaths.add("jar:file:" + source); Constructor<?> constructor = null; try { constructor = alerterClass.getConstructor(Props.class); } catch (NoSuchMethodException e) { logger.error("Constructor not found in " + pluginClass); continue; } Object obj = null; try { obj = constructor.newInstance(pluginProps); } catch (Exception e) { logger.error(e); } if (!(obj instanceof Alerter)) { logger.error("The object is not an Alerter"); continue; } Alerter plugin = (Alerter) obj; installedAlerterPlugins.put(pluginName, plugin); } return installedAlerterPlugins; } private void loadPluginCheckersAndActions(String pluginPath) { logger.info("Loading plug-in checker and action types"); File triggerPluginPath = new File(pluginPath); if (!triggerPluginPath.exists()) { logger.error("plugin path " + pluginPath + " doesn't exist!"); return; } ClassLoader parentLoader = this.getClassLoader(); File[] pluginDirs = triggerPluginPath.listFiles(); ArrayList<String> jarPaths = new ArrayList<String>(); for (File pluginDir : pluginDirs) { if (!pluginDir.exists()) { logger.error("Error! Trigger plugin path " + pluginDir.getPath() + " doesn't exist."); continue; } if (!pluginDir.isDirectory()) { logger.error("The plugin path " + pluginDir + " is not a directory."); continue; } // Load the conf directory File propertiesDir = new File(pluginDir, "conf"); Props pluginProps = null; if (propertiesDir.exists() && propertiesDir.isDirectory()) { File propertiesFile = new File(propertiesDir, "plugin.properties"); File propertiesOverrideFile = new File(propertiesDir, "override.properties"); if (propertiesFile.exists()) { if (propertiesOverrideFile.exists()) { pluginProps = PropsUtils.loadProps(null, propertiesFile, propertiesOverrideFile); } else { pluginProps = PropsUtils.loadProps(null, propertiesFile); } } else { logger.error("Plugin conf file " + propertiesFile + " not found."); continue; } } else { logger.error("Plugin conf path " + propertiesDir + " not found."); continue; } List<String> extLibClasspath = pluginProps.getStringList("trigger.external.classpaths", (List<String>) null); String pluginClass = pluginProps.getString("trigger.class"); if (pluginClass == null) { logger.error("Trigger class is not set."); } else { logger.error("Plugin class " + pluginClass); } URLClassLoader urlClassLoader = null; File libDir = new File(pluginDir, "lib"); if (libDir.exists() && libDir.isDirectory()) { File[] files = libDir.listFiles(); ArrayList<URL> urls = new ArrayList<URL>(); for (int i = 0; i < files.length; ++i) { try { URL url = files[i].toURI().toURL(); urls.add(url); } catch (MalformedURLException e) { logger.error(e); } } if (extLibClasspath != null) { for (String extLib : extLibClasspath) { try { File file = new File(pluginDir, extLib); URL url = file.toURI().toURL(); urls.add(url); } catch (MalformedURLException e) { logger.error(e); } } } urlClassLoader = new URLClassLoader(urls.toArray(new URL[urls.size()]), parentLoader); } else { logger.error("Library path " + propertiesDir + " not found."); continue; } Class<?> triggerClass = null; try { triggerClass = urlClassLoader.loadClass(pluginClass); } catch (ClassNotFoundException e) { logger.error("Class " + pluginClass + " not found."); continue; } String source = 
FileIOUtils.getSourcePathFromClass(triggerClass); logger.info("Source jar " + source); jarPaths.add("jar:file:" + source); try { Utils.invokeStaticMethod(urlClassLoader, pluginClass, "initiateCheckerTypes", pluginProps, app); } catch (Exception e) { logger.error("Unable to initiate checker types for " + pluginClass); continue; } try { Utils.invokeStaticMethod(urlClassLoader, pluginClass, "initiateActionTypes", pluginProps, app); } catch (Exception e) { logger.error("Unable to initiate action types for " + pluginClass); continue; } } } /** * Returns the web session cache. * * @return */ public SessionCache getSessionCache() { return sessionCache; } /** * Returns the velocity engine for pages to use. * * @return */ public VelocityEngine getVelocityEngine() { return velocityEngine; } /** * * @return */ public UserManager getUserManager() { return userManager; } /** * * @return */ public ProjectManager getProjectManager() { return projectManager; } /** * */ public ExecutorManager getExecutorManager() { return executorManager; } public ScheduleManager getScheduleManager() { return scheduleManager; } public TriggerManager getTriggerManager() { return triggerManager; } /** * 创建Velocity运行实例 * * @param devMode * @return */ private VelocityEngine configureVelocityEngine(final boolean devMode) { VelocityEngine engine = new VelocityEngine(); engine.setProperty("resource.loader", "classpath, jar"); engine.setProperty("classpath.resource.loader.class", ClasspathResourceLoader.class.getName()); engine.setProperty("classpath.resource.loader.cache", !devMode); engine.setProperty("classpath.resource.loader.modificationCheckInterval", 5L); engine.setProperty("jar.resource.loader.class", JarResourceLoader.class.getName()); engine.setProperty("jar.resource.loader.cache", !devMode); engine.setProperty("resource.manager.logwhenfound", false); engine.setProperty("input.encoding", "UTF-8"); engine.setProperty("output.encoding", "UTF-8"); engine.setProperty("directive.set.null.allowed", true); engine.setProperty("resource.manager.logwhenfound", false); engine.setProperty("velocimacro.permissions.allow.inline", true); engine.setProperty("velocimacro.library.autoreload", devMode); engine.setProperty("velocimacro.library", "/azkaban/webapp/servlet/velocity/macros.vm"); engine.setProperty( "velocimacro.permissions.allow.inline.to.replace.global", true); engine.setProperty("velocimacro.arguments.strict", true); engine.setProperty("runtime.log.invalid.references", devMode); engine.setProperty("runtime.log.logsystem.class", Log4JLogChute.class); engine.setProperty("runtime.log.logsystem.log4j.logger", Logger.getLogger("org.apache.velocity.Logger")); engine.setProperty("parser.pool.size", 3); return engine; } public ClassLoader getClassLoader() { return baseClassLoader; } /** * Returns the global azkaban properties * * @return */ public Props getServerProps() { return props; } /** * jetty启动项 * * @param args */ public static void main(String[] args) throws Exception { logger.info("Starting Jetty Azkaban Web Server..."); // String [] argss = {"-conf","/home/liyangzhou/java_code/azkaban_code/azkaban-webserver/src/main/resources/conf"}; Props azkabanSettings = AzkabanServer.loadProps(args); if (azkabanSettings == null) { logger.error("Azkaban Properties not loaded."); logger.error("Exiting Azkaban..."); return; } int maxThreads = azkabanSettings.getInt("jetty.maxThreads", DEFAULT_THREAD_NUMBER); boolean isStatsOn = azkabanSettings.getBoolean("jetty.connector.stats", true); logger.info("Setting up connector with stats on: " + 
isStatsOn); //==========jetty server 初始化 ============ boolean ssl; int port; final Server server = new Server(); //这个不是我们以往的socket的server 调用jar包为org.mortbay.jetty.Server.Server() if (azkabanSettings.getBoolean("jetty.use.ssl", true)) { //调试不采用ssl加密方式 int sslPortNumber = azkabanSettings.getInt("jetty.ssl.port", DEFAULT_SSL_PORT_NUMBER); port = sslPortNumber; ssl = true; logger.info("Setting up Jetty Https Server with port:" + sslPortNumber + " and numThreads:" + maxThreads); SslSocketConnector secureConnector = new SslSocketConnector(); //SSL通信 secureConnector.setPort(sslPortNumber); secureConnector.setKeystore(azkabanSettings.getString("jetty.keystore")); secureConnector.setPassword(azkabanSettings.getString("jetty.password")); secureConnector.setKeyPassword(azkabanSettings .getString("jetty.keypassword")); secureConnector.setTruststore(azkabanSettings .getString("jetty.truststore")); secureConnector.setTrustPassword(azkabanSettings .getString("jetty.trustpassword")); secureConnector.setHeaderBufferSize(MAX_HEADER_BUFFER_SIZE); // set up vulnerable cipher suites to exclude List<String> cipherSuitesToExclude = azkabanSettings.getStringList("jetty.excludeCipherSuites"); logger.info("Excluded Cipher Suites: " + String.valueOf(cipherSuitesToExclude)); if (cipherSuitesToExclude != null && !cipherSuitesToExclude.isEmpty()) { secureConnector.setExcludeCipherSuites(cipherSuitesToExclude.toArray(new String[0])); } server.addConnector(secureConnector); } else { ssl = false; port = azkabanSettings.getInt("jetty.port", DEFAULT_PORT_NUMBER);//默认端口8081 SocketConnector connector = new SocketConnector(); //通信 connector.setPort(port); connector.setHeaderBufferSize(MAX_HEADER_BUFFER_SIZE); //默认设置为10M server.addConnector(connector);//纳入jetty server管理 } // setting stats configuration for connectors for (Connector connector : server.getConnectors()) { connector.setStatsOn(isStatsOn); //开启统计开关 } String hostname = azkabanSettings.getString("jetty.hostname", "localhost"); azkabanSettings.put("server.hostname", hostname); azkabanSettings.put("server.port", port); azkabanSettings.put("server.useSSL", String.valueOf(ssl)); //=============jetty server 构造完成============== app = new AzkabanWebServer(server, azkabanSettings); boolean checkDB = azkabanSettings.getBoolean(AzkabanDatabaseSetup.DATABASE_CHECK_VERSION, false); if (checkDB) { AzkabanDatabaseSetup setup = new AzkabanDatabaseSetup(azkabanSettings); setup.loadTableInfo(); if (setup.needsUpdating()) { logger.error("Database is out of date."); setup.printUpgradePlan(); logger.error("Exiting with error."); System.exit(-1); } } QueuedThreadPool httpThreadPool = new QueuedThreadPool(maxThreads); server.setThreadPool(httpThreadPool); String staticDir = azkabanSettings.getString("web.resource.dir", DEFAULT_STATIC_DIR); logger.info("Setting up web resource dir " + staticDir); //===============servlet 初始化============= Context root = new Context(server, "/", Context.SESSIONS); root.setMaxFormContentSize(MAX_FORM_CONTENT_SIZE); //默认访问servlet String defaultServletPath = azkabanSettings.getString("azkaban.default.servlet.path", "/index"); root.setResourceBase(staticDir); //=======servlet 解析 ========= ServletHolder indexRedirect = new ServletHolder(new IndexRedirectServlet(defaultServletPath)); root.addServlet(indexRedirect, "/"); ServletHolder index = new ServletHolder(new ProjectServlet()); root.addServlet(index, "/index"); //===配置静态资源路径=== ServletHolder staticServlet = new ServletHolder(new DefaultServlet()); root.addServlet(staticServlet, "/css/*"); 
root.addServlet(staticServlet, "/js/*"); root.addServlet(staticServlet, "/images/*"); root.addServlet(staticServlet, "/fonts/*"); root.addServlet(staticServlet, "/favicon.ico"); //===配置动态请求路径对应的 Servlet,每种请求类型都由对应的servlet处理=== root.addServlet(new ServletHolder(new ProjectManagerServlet()), "/manager"); root.addServlet(new ServletHolder(new ExecutorServlet()), "/executor"); root.addServlet(new ServletHolder(new HistoryServlet()), "/history"); root.addServlet(new ServletHolder(new ScheduleServlet()), "/schedule"); root.addServlet(new ServletHolder(new JMXHttpServlet()), "/jmx"); root.addServlet(new ServletHolder(new TriggerManagerServlet()), "/triggers"); root.addServlet(new ServletHolder(new StatsServlet()), "/stats"); ServletHolder restliHolder = new ServletHolder(new RestliServlet()); restliHolder.setInitParameter("resourcePackages", "azkaban.restli"); root.addServlet(restliHolder, "/restli/*"); String viewerPluginDir = azkabanSettings.getString("viewer.plugin.dir", "plugins/viewer"); loadViewerPlugins(root, viewerPluginDir, app.getVelocityEngine()); // triggerplugin String triggerPluginDir = azkabanSettings.getString("trigger.plugin.dir", "plugins/triggers"); Map<String, TriggerPlugin> triggerPlugins = loadTriggerPlugins(root, triggerPluginDir, app); app.setTriggerPlugins(triggerPlugins); // always have basic time trigger // TODO: find something else to do the job app.getTriggerManager().start(); root.setAttribute(ServerConstants.AZKABAN_SERVLET_CONTEXT_KEY, app); try { server.start();//服务启动 } catch (Exception e) { logger.warn(e); Utils.croak(e.getMessage(), 1); } //addShutdownHook进程挂掉的时候打印当时的进程资源利用情况 Runtime.getRuntime().addShutdownHook(new Thread() { public void run() { try { logTopMemoryConsumers(); } catch (Exception e) { logger.info(("Exception when logging top memory consumers"), e); } logger.info("Shutting down http server..."); try { app.close(); server.stop(); server.destroy(); } catch (Exception e) { logger.error("Error while shutting down http server.", e); } logger.info("kk thx bye."); } public void logTopMemoryConsumers() throws Exception, IOException { if (new File("/bin/bash").exists() && new File("/bin/ps").exists() && new File("/usr/bin/head").exists()) { logger.info("logging top memeory consumer"); java.lang.ProcessBuilder processBuilder = new java.lang.ProcessBuilder("/bin/bash", "-c", "/bin/ps aux --sort -rss | /usr/bin/head"); Process p = processBuilder.start(); p.waitFor(); InputStream is = p.getInputStream(); java.io.BufferedReader reader = new java.io.BufferedReader(new InputStreamReader(is)); String line = null; while ((line = reader.readLine()) != null) { logger.info(line); } is.close(); } } }); logger.info("Server running on " + (ssl ? "ssl" : "") + " port " + port + "."); } private static Map<String, TriggerPlugin> loadTriggerPlugins(Context root, String pluginPath, AzkabanWebServer azkabanWebApp) { File triggerPluginPath = new File(pluginPath); if (!triggerPluginPath.exists()) { return new HashMap<String, TriggerPlugin>(); } Map<String, TriggerPlugin> installedTriggerPlugins = new HashMap<String, TriggerPlugin>(); ClassLoader parentLoader = AzkabanWebServer.class.getClassLoader(); File[] pluginDirs = triggerPluginPath.listFiles(); ArrayList<String> jarPaths = new ArrayList<String>(); for (File pluginDir : pluginDirs) { if (!pluginDir.exists()) { logger.error("Error! 
Trigger plugin path " + pluginDir.getPath() + " doesn't exist."); continue; } if (!pluginDir.isDirectory()) { logger.error("The plugin path " + pluginDir + " is not a directory."); continue; } // Load the conf directory File propertiesDir = new File(pluginDir, "conf"); Props pluginProps = null; if (propertiesDir.exists() && propertiesDir.isDirectory()) { File propertiesFile = new File(propertiesDir, "plugin.properties"); File propertiesOverrideFile = new File(propertiesDir, "override.properties"); if (propertiesFile.exists()) { if (propertiesOverrideFile.exists()) { pluginProps = PropsUtils.loadProps(null, propertiesFile, propertiesOverrideFile); } else { pluginProps = PropsUtils.loadProps(null, propertiesFile); } } else { logger.error("Plugin conf file " + propertiesFile + " not found."); continue; } } else { logger.error("Plugin conf path " + propertiesDir + " not found."); continue; } String pluginName = pluginProps.getString("trigger.name"); List<String> extLibClasspath = pluginProps.getStringList("trigger.external.classpaths", (List<String>) null); String pluginClass = pluginProps.getString("trigger.class"); if (pluginClass == null) { logger.error("Trigger class is not set."); } else { logger.error("Plugin class " + pluginClass); } URLClassLoader urlClassLoader = null; File libDir = new File(pluginDir, "lib"); if (libDir.exists() && libDir.isDirectory()) { File[] files = libDir.listFiles(); ArrayList<URL> urls = new ArrayList<URL>(); for (int i = 0; i < files.length; ++i) { try { URL url = files[i].toURI().toURL(); urls.add(url); } catch (MalformedURLException e) { logger.error(e); } } if (extLibClasspath != null) { for (String extLib : extLibClasspath) { try { File file = new File(pluginDir, extLib); URL url = file.toURI().toURL(); urls.add(url); } catch (MalformedURLException e) { logger.error(e); } } } urlClassLoader = new URLClassLoader(urls.toArray(new URL[urls.size()]), parentLoader); } else { logger.error("Library path " + propertiesDir + " not found."); continue; } Class<?> triggerClass = null; try { triggerClass = urlClassLoader.loadClass(pluginClass); } catch (ClassNotFoundException e) { logger.error("Class " + pluginClass + " not found."); continue; } String source = FileIOUtils.getSourcePathFromClass(triggerClass); logger.info("Source jar " + source); jarPaths.add("jar:file:" + source); Constructor<?> constructor = null; try { constructor = triggerClass.getConstructor(String.class, Props.class, Context.class, AzkabanWebServer.class); } catch (NoSuchMethodException e) { logger.error("Constructor not found in " + pluginClass); continue; } Object obj = null; try { obj = constructor.newInstance(pluginName, pluginProps, root, azkabanWebApp); } catch (Exception e) { logger.error(e); } if (!(obj instanceof TriggerPlugin)) { logger.error("The object is not an TriggerPlugin"); continue; } TriggerPlugin plugin = (TriggerPlugin) obj; installedTriggerPlugins.put(pluginName, plugin); } // Velocity needs the jar resource paths to be set. 
String jarResourcePath = StringUtils.join(jarPaths, ", "); logger.info("Setting jar resource path " + jarResourcePath); VelocityEngine ve = azkabanWebApp.getVelocityEngine(); ve.addProperty("jar.resource.loader.path", jarResourcePath); return installedTriggerPlugins; } public Map<String, TriggerPlugin> getTriggerPlugins() { return triggerPlugins; } private static void loadViewerPlugins(Context root, String pluginPath, VelocityEngine ve) { File viewerPluginPath = new File(pluginPath); if (!viewerPluginPath.exists()) { return; } ClassLoader parentLoader = AzkabanWebServer.class.getClassLoader(); File[] pluginDirs = viewerPluginPath.listFiles(); ArrayList<String> jarPaths = new ArrayList<String>(); for (File pluginDir : pluginDirs) { if (!pluginDir.exists()) { logger.error("Error viewer plugin path " + pluginDir.getPath() + " doesn't exist."); continue; } if (!pluginDir.isDirectory()) { logger.error("The plugin path " + pluginDir + " is not a directory."); continue; } // Load the conf directory File propertiesDir = new File(pluginDir, "conf"); Props pluginProps = null; if (propertiesDir.exists() && propertiesDir.isDirectory()) { File propertiesFile = new File(propertiesDir, "plugin.properties"); File propertiesOverrideFile = new File(propertiesDir, "override.properties"); if (propertiesFile.exists()) { if (propertiesOverrideFile.exists()) { pluginProps = PropsUtils.loadProps(null, propertiesFile, propertiesOverrideFile); } else { pluginProps = PropsUtils.loadProps(null, propertiesFile); } } else { logger.error("Plugin conf file " + propertiesFile + " not found."); continue; } } else { logger.error("Plugin conf path " + propertiesDir + " not found."); continue; } String pluginName = pluginProps.getString("viewer.name"); String pluginWebPath = pluginProps.getString("viewer.path"); String pluginJobTypes = pluginProps.getString("viewer.jobtypes", null); int pluginOrder = pluginProps.getInt("viewer.order", 0); boolean pluginHidden = pluginProps.getBoolean("viewer.hidden", false); List<String> extLibClasspath = pluginProps.getStringList("viewer.external.classpaths", (List<String>) null); String pluginClass = pluginProps.getString("viewer.servlet.class"); if (pluginClass == null) { logger.error("Viewer class is not set."); } else { logger.error("Plugin class " + pluginClass); } URLClassLoader urlClassLoader = null; File libDir = new File(pluginDir, "lib"); if (libDir.exists() && libDir.isDirectory()) { File[] files = libDir.listFiles(); ArrayList<URL> urls = new ArrayList<URL>(); for (int i = 0; i < files.length; ++i) { try { URL url = files[i].toURI().toURL(); urls.add(url); } catch (MalformedURLException e) { logger.error(e); } } // Load any external libraries. if (extLibClasspath != null) { for (String extLib : extLibClasspath) { File extLibFile = new File(pluginDir, extLib); if (extLibFile.exists()) { if (extLibFile.isDirectory()) { // extLibFile is a directory; load all the files in the // directory. 
File[] extLibFiles = extLibFile.listFiles(); for (int i = 0; i < extLibFiles.length; ++i) { try { URL url = extLibFiles[i].toURI().toURL(); urls.add(url); } catch (MalformedURLException e) { logger.error(e); } } } else { // extLibFile is a file try { URL url = extLibFile.toURI().toURL(); urls.add(url); } catch (MalformedURLException e) { logger.error(e); } } } else { logger.error("External library path " + extLibFile.getAbsolutePath() + " not found."); continue; } } } urlClassLoader = new URLClassLoader(urls.toArray(new URL[urls.size()]), parentLoader); } else { logger .error("Library path " + libDir.getAbsolutePath() + " not found."); continue; } Class<?> viewerClass = null; try { viewerClass = urlClassLoader.loadClass(pluginClass); } catch (ClassNotFoundException e) { logger.error("Class " + pluginClass + " not found."); continue; } String source = FileIOUtils.getSourcePathFromClass(viewerClass); logger.info("Source jar " + source); jarPaths.add("jar:file:" + source); Constructor<?> constructor = null; try { constructor = viewerClass.getConstructor(Props.class); } catch (NoSuchMethodException e) { logger.error("Constructor not found in " + pluginClass); continue; } Object obj = null; try { obj = constructor.newInstance(pluginProps); } catch (Exception e) { logger.error(e); logger.error(e.getCause()); } if (!(obj instanceof AbstractAzkabanServlet)) { logger.error("The object is not an AbstractAzkabanServlet"); continue; } AbstractAzkabanServlet avServlet = (AbstractAzkabanServlet) obj; root.addServlet(new ServletHolder(avServlet), "/" + pluginWebPath + "/*"); PluginRegistry.getRegistry().register( new ViewerPlugin(pluginName, pluginWebPath, pluginOrder, pluginHidden, pluginJobTypes)); } // Velocity needs the jar resource paths to be set. String jarResourcePath = StringUtils.join(jarPaths, ", "); logger.info("Setting jar resource path " + jarResourcePath); ve.addProperty("jar.resource.loader.path", jarResourcePath); } /** * Loads the Azkaban property file from the AZKABAN_HOME conf directory * * @return */ private static Props loadConfigurationFromAzkabanHome() { String azkabanHome = System.getenv("AZKABAN_HOME"); if (azkabanHome == null) { logger.error("AZKABAN_HOME not set. Will try default."); return null; } if (!new File(azkabanHome).isDirectory() || !new File(azkabanHome).canRead()) { logger.error(azkabanHome + " is not a readable directory."); return null; } File confPath = new File(azkabanHome, DEFAULT_CONF_PATH); if (!confPath.exists() || !confPath.isDirectory() || !confPath.canRead()) { logger .error(azkabanHome + " does not contain a readable conf directory."); return null; } return loadAzkabanConfigurationFromDirectory(confPath); } /** * Returns the set temp dir * * @return */ public File getTempDirectory() { return tempDir; } private static Props loadAzkabanConfigurationFromDirectory(File dir) { File azkabanPrivatePropsFile = new File(dir, AZKABAN_PRIVATE_PROPERTIES_FILE); File azkabanPropsFile = new File(dir, AZKABAN_PROPERTIES_FILE); Props props = null; try { // This is purely optional if (azkabanPrivatePropsFile.exists() && azkabanPrivatePropsFile.isFile()) { logger.info("Loading azkaban private properties file"); props = new Props(null, azkabanPrivatePropsFile); } if (azkabanPropsFile.exists() && azkabanPropsFile.isFile()) { logger.info("Loading azkaban properties file"); props = new Props(props, azkabanPropsFile); } } catch (FileNotFoundException e) { logger.error("File not found. 
Could not load azkaban config file", e); } catch (IOException e) { logger.error( "File found, but error reading. Could not load azkaban config file", e); } return props; } private void configureMBeanServer() { logger.info("Registering MBeans..."); mbeanServer = ManagementFactory.getPlatformMBeanServer(); registerMbean("jetty", new JmxJettyServer(server)); registerMbean("triggerManager", new JmxTriggerManager(triggerManager)); if (executorManager instanceof ExecutorManager) { registerMbean("executorManager", new JmxExecutorManager( (ExecutorManager) executorManager)); } // Register Log4J loggers as JMX beans so the log level can be // updated via JConsole or Java VisualVM HierarchyDynamicMBean log4jMBean = new HierarchyDynamicMBean(); registerMbean("log4jmxbean", log4jMBean); ObjectName accessLogLoggerObjName = log4jMBean.addLoggerMBean(AZKABAN_ACCESS_LOGGER_NAME); if (accessLogLoggerObjName == null) { System.out .println("************* loginLoggerObjName is null, make sure there is a logger with name " + AZKABAN_ACCESS_LOGGER_NAME); } else { System.out.println("******** loginLoggerObjName: " + accessLogLoggerObjName.getCanonicalName()); } } public void close() { try { for (ObjectName name : registeredMBeans) { mbeanServer.unregisterMBean(name); logger.info("Jmx MBean " + name.getCanonicalName() + " unregistered."); } } catch (Exception e) { logger.error("Failed to cleanup MBeanServer", e); } scheduleManager.shutdown(); executorManager.shutdown(); } private void registerMbean(String name, Object mbean) { Class<?> mbeanClass = mbean.getClass(); ObjectName mbeanName; try { mbeanName = new ObjectName(mbeanClass.getName() + ":name=" + name); mbeanServer.registerMBean(mbean, mbeanName); logger.info("Bean " + mbeanClass.getCanonicalName() + " registered."); registeredMBeans.add(mbeanName); } catch (Exception e) { logger.error("Error registering mbean " + mbeanClass.getCanonicalName(), e); } } public List<ObjectName> getMbeanNames() { return registeredMBeans; } public MBeanInfo getMBeanInfo(ObjectName name) { try { return mbeanServer.getMBeanInfo(name); } catch (Exception e) { logger.error(e); return null; } } public Object getMBeanAttribute(ObjectName name, String attribute) { try { return mbeanServer.getAttribute(name, attribute); } catch (Exception e) { logger.error(e); return null; } } }
[ "\"AZKABAN_HOME\"" ]
[]
[ "AZKABAN_HOME" ]
[]
["AZKABAN_HOME"]
java
1
0
cvat/apps/dashboard/views.py
# Copyright (C) 2018 Intel Corporation # # SPDX-License-Identifier: MIT from django.http import HttpResponse, JsonResponse, HttpResponseBadRequest from django.shortcuts import redirect from django.shortcuts import render from django.conf import settings from cvat.apps.authentication.decorators import login_required from cvat.apps.engine.models import Task as TaskModel, Job as JobModel from cvat.settings.base import JS_3RDPARTY import os def ScanNode(directory): if '..' in directory.split(os.path.sep): return HttpResponseBadRequest('Permission Denied') act_dir = os.path.normpath(settings.SHARE_ROOT + directory) result = [] nodes = os.listdir(act_dir) files = filter(os.path.isfile, map(lambda f: os.path.join(act_dir, f), nodes)) dirs = filter(os.path.isdir, map(lambda d: os.path.join(act_dir, d), nodes)) for d in dirs: name = os.path.basename(d) children = len(os.listdir(d)) > 0 node = {'id': directory + name + '/', 'text': name, 'children': children} result.append(node) for f in files: name = os.path.basename(f) node = {'id': directory + name, 'text': name, "icon" : "jstree-file"} result.append(node) return result @login_required def JsTreeView(request): node_id = None if 'id' in request.GET: node_id = request.GET['id'] if node_id is None or node_id == '#': node_id = '/' response = [{"id": node_id, "text": node_id, "children": ScanNode(node_id)}] else: response = ScanNode(node_id) return JsonResponse(response, safe=False, json_dumps_params=dict(ensure_ascii=False)) @login_required def DashboardView(request): query_name = request.GET['search'] if 'search' in request.GET else None query_job = int(request.GET['jid']) if 'jid' in request.GET and request.GET['jid'].isdigit() else None task_list = None if query_job is not None and JobModel.objects.filter(pk = query_job).exists(): task_list = [JobModel.objects.select_related('segment__task').get(pk = query_job).segment.task] else: task_list = list(TaskModel.objects.prefetch_related('segment_set__job_set').order_by('-created_date').all()) if query_name is not None: task_list = list(filter(lambda x: query_name.lower() in x.name.lower(), task_list)) task_list = list(filter(lambda task: request.user.has_perm( 'engine.task.access', task), task_list)) return render(request, 'dashboard/dashboard.html', { 'data': task_list, 'max_upload_size': settings.LOCAL_LOAD_MAX_FILES_SIZE, 'max_upload_count': settings.LOCAL_LOAD_MAX_FILES_COUNT, 'base_url': "{0}://{1}/".format(request.scheme, request.get_host()), 'share_path': os.getenv('CVAT_SHARE_URL', default=r'${cvat_root}/share'), 'js_3rdparty': JS_3RDPARTY.get('dashboard', []), })
[]
[]
[ "CVAT_SHARE_URL" ]
[]
["CVAT_SHARE_URL"]
python
1
0
dev/Tools/build/waf-1.7.13/lmbrwaflib/qt5.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2015 (ita) # Modifications copyright Amazon.com, Inc. or its affiliates. """ Tool Description ================ This tool helps with finding Qt5 tools and libraries, and also provides syntactic sugar for using Qt5 tools. The following snippet illustrates the tool usage:: def options(opt): opt.load('compiler_cxx qt5') def configure(conf): conf.load('compiler_cxx qt5') def build(bld): bld( features = ['qt5','cxx','cxxprogram'], uselib = ['QTCORE','QTGUI','QTOPENGL','QTSVG'], source = 'main.cpp textures.qrc aboutDialog.ui', target = 'window', ) Here, the UI description and resource files will be processed to generate code. Usage ===== Load the "qt5" tool. You also need to edit your sources accordingly: - the normal way of doing things is to have your C++ files include the .moc file. This is regarded as the best practice (and provides much faster compilations). It also implies that the include paths have been set properly. - to have the include paths added automatically, use the following:: from waflib.TaskGen import feature, before_method, after_method @feature('cxx') @after_method('process_source') @before_method('apply_incpaths') def add_includes_paths(self): incs = set(self.to_list(getattr(self, 'includes', ''))) for x in self.compiled_tasks: incs.add(x.inputs[0].parent.path_from(self.path)) self.includes = list(incs) Note: another tool provides Qt processing that does not require .moc includes, see 'playground/slow_qt/'. A few options (--qt{dir,bin,...}) and environment variables (QT5_{ROOT,DIR,MOC,UIC,XCOMPILE}) allow finer tuning of the tool, tool path selection, etc; please read the source for more info. """ # System Imports import os import re import shutil import stat import subprocess import sys try: from xml.sax import make_parser from xml.sax.handler import ContentHandler except ImportError: has_xml = False ContentHandler = object else: has_xml = True # waflib imports from waflib import Context, Errors, Logs, Options, Task, Utils from waflib.TaskGen import feature, after_method, extension, before_method from waflib.Configure import conf from waflib.Tools import c_preproc # lmbrwaflib imports from lmbrwaflib import copy_tasks from lmbrwaflib import lmbr_setup_tools from lmbrwaflib import lumberyard from lmbrwaflib.generate_uber_files import UBER_HEADER_COMMENT MOC_H = ['.h', '.hpp', '.hxx', '.hh'] """ File extensions associated to the .moc files """ EXT_RCC = ['.qrc'] """ File extension for the resource (.qrc) files """ EXT_UI = ['.ui'] """ File extension for the user interface (.ui) files """ EXT_QT5 = ['.cpp', '.cc', '.cxx', '.C', '.mm'] """ File extensions of C++ files that may require a .moc processing """ QT5_LIBS = ''' qtmain Qt5Bluetooth Qt5Concurrent Qt5Core Qt5DBus Qt5Declarative Qt5DesignerComponents Qt5Designer Qt5Gui Qt5Help Qt5MacExtras Qt5MultimediaQuick Qt5Multimedia Qt5MultimediaWidgets Qt5Network Qt5Nfc Qt5OpenGL Qt5Positioning Qt5PrintSupport Qt5Qml Qt5QmlModels Qt5QmlWorkerScript Qt5QuickParticles Qt5Quick Qt5QuickTest Qt5Script Qt5ScriptTools Qt5Sensors Qt5SerialPort Qt5Sql Qt5Svg Qt5Test Qt5WebEngine Qt5WebEngineCore Qt5WebEngineWidgets Qt5WebChannel Qt5WebSockets Qt5Widgets Qt5WinExtras Qt5X11Extras Qt5XmlPatterns Qt5Xml''' QT5_WEBENGINE_DLLS = [ 'Qt5Core', 'Qt5Gui', 'Qt5Network', 'Qt5Positioning', 'Qt5Qml', 'Qt5QmlModels', 'Qt5Quick', 'Qt5WebChannel', 'Qt5WebEngineCore' ] QT_CONF = '''[Paths] Prefix = qtlibs ''' QT_WEBENGINE_CONF = '''[Paths] Prefix = .. 
''' # Search pattern to find the required #include <*.moc> lines in the source code to identify the header files that need # moc'ing. The path of the moc file must be relative to the current project root INCLUDE_MOC_RE = re.compile(r'\s*\#include\s+[\"<](.*.moc)[\">]',flags=re.MULTILINE) INCLUDE_SRC_RE = re.compile(r'\s*\#include\s+[\"<](.*.(cpp|cxx|cc))[\">]',flags=re.MULTILINE) QOBJECT_RE = re.compile(r'\s*Q_OBJECT\s*', flags=re.MULTILINE) # Derive a specific moc_files.<idx> folder name based on the base bldnode and idx def get_target_qt5_root(ctx, target_name, idx): base_qt_node = ctx.bldnode.make_node('qt5/{}.{}'.format(target_name, idx)) return base_qt_node # Change a target node from a changed extension to one marked as QT code generated # The qt5 generated files are restricted to the build folder. That means # each project cannot use any QT generated artifacts that do no exist within its project boundaries. def change_target_qt5_node(ctx, project_path, target_name, relpath_target, target_uid): relpath_project = project_path.relpath() if relpath_target.startswith(relpath_project): # Strip out the project relative path and use that as the target_qt5 relative path restricted_path = relpath_target.replace(relpath_project,'') elif relpath_target.startswith('..'): # Special case. If the target and project rel paths dont align, then the target node is outside of the # project folder. (ie there is a qt-related file in the waf_files that is outside the project's context path) # If the file is an include or moc file, it must reside inside the project context, because it will be # included based on an expected project relative path target_node_name_lower = relpath_target.lower() if target_node_name_lower.endswith(".moc") or target_node_name_lower.endswith(".h"): ctx.fatal("QT target {} for project {} cannot exist outside of its source folder context.".format(relpath_target, target_name)) restricted_path = "__/{}.{}/{}".format(target_name, target_uid, target_name) else: restricted_path = relpath_target target_node_subdir = os.path.dirname(restricted_path) # Change the output target to the specific moc file folder output_qt_dir = get_target_qt5_root(ctx, target_name, target_uid).make_node(target_node_subdir) output_qt_dir.mkdir() output_qt_node = output_qt_dir.make_node(os.path.split(relpath_target)[1]) return output_qt_node class qxx(Task.classes['cxx']): """ Each C++ file can have zero or several .moc files to create. They are known only when the files are scanned (preprocessor) To avoid scanning the c++ files each time (parsing C/C++), the results are retrieved from the task cache (bld.node_deps/bld.raw_deps). The moc tasks are also created *dynamically* during the build. """ def __init__(self, *k, **kw): Task.Task.__init__(self, *k, **kw) if 'qt5' in self.generator.features and self.env.QMAKE: # If QT5 is enabled, then signal that moc scanning is needed self.moc_done = 0 else: # Otherwise, signal that moc scanning can be skipped self.moc_done = 1 self.dep_moc_files = {} def __str__(self): "string to display to the user" env = self.env src_str = ' '.join([a.nice_path() for a in self.inputs]) tgt_str = ' '.join([a.nice_path() for a in self.outputs]) if self.outputs and self.inputs: sep = ' -> ' else: sep = '' name = self.__class__.__name__.replace('_task', '') + ' (' + env['PLATFORM'] + '|' + env['CONFIGURATION'] + ')' return '%s: %s%s%s\n' % (name, src_str, sep, tgt_str) def runnable_status(self): """ Compute the task signature to make sure the scanner was executed. 
Create the moc tasks by using :py:meth:`waflib.Tools.qt5.qxx.add_moc_tasks` (if necessary), then postpone the task execution (there is no need to recompute the task signature). """ status = Task.Task.runnable_status(self) if not self.moc_done: # ask the task if it needs to rebuild. This will include checking all dependencies (.moc files included) # that may have changed. If the task doesn't need to rebuild, no point in scanning for all the individual # moc tasks that need to be added if status != Task.RUN_ME: return status self.add_moc_tasks() # At this point, the moc task should be done, recycle and try the status check again self.moc_done = 1 try: del self.cache_sig except: pass return Task.Task.runnable_status(self) return status def create_moc_task(self, h_node, moc_filename): """ If several libraries use the same classes, it is possible that moc will run several times (Issue 1318) It is not possible to change the file names, but we can assume that the moc transformation will be identical, and the moc tasks can be shared in a global cache. The defines passed to moc will then depend on task generator order. If this is not acceptable, then use the tool slow_qt5 instead (and enjoy the slow builds... :-( ) """ cache_key = '{}.{}'.format(h_node.abspath(),self.generator.target_uid) try: moc_cache = self.generator.bld.moc_cache except AttributeError: moc_cache = self.generator.bld.moc_cache = {} try: return moc_cache[cache_key] except KeyError: relpath_target = os.path.join(h_node.parent.relpath(), moc_filename) target_node = change_target_qt5_node(self.generator.bld, self.generator.path, self.generator.name, relpath_target, self.generator.target_uid) tsk = moc_cache[cache_key] = Task.classes['moc'](env=self.env, generator=self.generator) tsk.set_inputs(h_node) tsk.set_outputs(target_node) self.dep_moc_files[target_node] = False if self.generator: self.generator.tasks.append(tsk) # direct injection in the build phase (safe because called from the main thread) gen = self.generator.bld.producer gen.outstanding.insert(0, tsk) gen.total += 1 return tsk def moc_h_ext(self): try: ext = Options.options.qt_header_ext.split() except AttributeError: pass if not ext: ext = MOC_H return ext def add_moc_tasks(self): node = self.inputs[0] src_scan = node.read() # Determine if this is an uber file to see if we need to go one level deeper if src_scan.startswith(UBER_HEADER_COMMENT): # This is an uber file, handle uber files differently self.add_moc_task_uber(node,src_scan) else: # Process the source file (for mocs) self.add_moc_tasks_for_node(node,src_scan) del src_scan #free up the text as soon as possible def scan_node_contents_for_moc_tasks(self,node_contents): base_node = self.generator.path include_moc_node_rel_paths = INCLUDE_MOC_RE.findall(node_contents) moctasks = [] for include_moc_node_rel_path in include_moc_node_rel_paths: base_name = os.path.splitext(include_moc_node_rel_path)[0] # We are only allowing to include mocing header files that are relative to the project folder header_node = None for moc_ext in self.moc_h_ext(): # use search_node(), it will not create a node if the node is not found, and won't create bogus nodes while searching header_node = base_node.search_node('{}{}'.format(base_name, moc_ext)) if header_node: break if not header_node: raise Errors.WafError('No source found for {} which is a moc file. 
Is the file included in .waf_files?'.format(base_name)) moc_filename = '{}.moc'.format(os.path.splitext(header_node.name)[0]) # create the moc task task = self.create_moc_task(header_node, moc_filename) moctasks.append(task) return moctasks def add_moc_task_uber(self, node, node_contents): ''' Handle uber files by grepping for all the includes of source files and performing the moc scanning there ''' moctasks = [] include_source_rel_paths = INCLUDE_SRC_RE.findall(node_contents) for include_source_rel_path, include_source_extension in include_source_rel_paths: source_node = node.parent.find_node(include_source_rel_path) if source_node is None: source_node = self.generator.path.find_node(include_source_rel_path) if source_node is not None: source_node_contents = source_node.read() moctasks += self.scan_node_contents_for_moc_tasks(source_node_contents) del source_node_contents #free up the text as soon as possible # simple scheduler dependency: run the moc task before others self.run_after.update(set(moctasks)) def add_moc_tasks_for_node(self, node, node_contents): ''' Create the moc tasks greping the source file for all the #includes ''' moctasks = self.scan_node_contents_for_moc_tasks(node_contents) # simple scheduler dependency: run the moc task before others self.run_after.update(set(moctasks)) class trans_update(Task.Task): """Update a .ts files from a list of C++ files""" run_str = '${QT_LUPDATE} ${SRC} -ts ${TGT}' color = 'BLUE' Task.update_outputs(trans_update) class XMLHandler(ContentHandler): """ Parser for *.qrc* files """ def __init__(self): self.buf = [] self.files = [] def startElement(self, name, attrs): if name == 'file': self.buf = [] def endElement(self, name): if name == 'file': self.files.append(str(''.join(self.buf))) def characters(self, cars): self.buf.append(cars) @extension(*EXT_RCC) def create_rcc_task(self, node): "Create rcc and cxx tasks for *.qrc* files" # Do not create tasks for project_generation builds if self.env['PLATFORM'] == 'project_generator': return None # Do not perform any task if QMAKE is not installed if not self.env.QMAKE: return None # For QRC Processing, we cannot make the generated rcc file from the qrc source as a separate compile unit # when creating static libs. It appears that MSVC will optimize the required static methods required to # initialize the resources for the static lib. 
In order to work around this, the generated file from the # qrc will need to be created as a header and included into a cpp that is consumed by the app/shared library # that is linking against it is_static_lib = 'stlib' == getattr(self,'_type','') if not getattr(self, 'rcc_tasks', False): self.rcc_tasks = [] if is_static_lib: rcc_filename = 'rcc_%s.h' % os.path.splitext(node.name)[0] relpath_target = os.path.join(node.parent.relpath(), rcc_filename) rcnode = change_target_qt5_node(self.bld, self.path, self.name, relpath_target, self.target_uid) qrc_task = self.create_task('rcc', node, rcnode) self.rcc_tasks.append(qrc_task) return qrc_task else: rcc_filename = '%s_rc.cpp' % os.path.splitext(node.name)[0] relpath_target = os.path.join(node.parent.relpath(), rcc_filename) rcnode = change_target_qt5_node(self.bld, self.path, self.name, relpath_target, self.target_uid) qrc_task = self.create_task('rcc', node, rcnode) self.rcc_tasks.append(qrc_task) cpptask = self.create_task('cxx', rcnode, rcnode.change_ext('.o')) cpptask.dep_nodes.append(node) cpptask.set_run_after(qrc_task) try: self.compiled_tasks.append(cpptask) except AttributeError: self.compiled_tasks = [cpptask] return cpptask @feature('qt5') @after_method('process_source') def add_rcc_dependencies(self): # are there rcc tasks? if not getattr(self, 'rcc_tasks', False): return rcc_tasks = set(self.rcc_tasks) for task in self.tasks: if any(isinstance(task, Task.classes[c]) for c in ['qxx', 'cxx', 'c']): task.run_after |= rcc_tasks @feature('qt5') @after_method('apply_link') def create_automoc_task(self): if hasattr(self, 'header_files') and len(self.header_files) > 0: header_nodes = self.to_nodes(self.header_files) task = self.create_task('automoc', header_nodes) # this may mutate the link task, must run the link task after this task if hasattr(self, "link_task"): self.link_task.set_run_after(task) @extension(*EXT_UI) def create_uic_task(self, node): "hook for uic tasks" # Do not create tasks for project_generation builds if self.env['PLATFORM'] == 'project_generator': return None if not self.env.QMAKE: return None if not getattr(self, 'uic_tasks', False): self.uic_tasks = [] uictask = self.create_task('ui5', node) ui_filename = self.env['ui_PATTERN'] % node.name[:-3] relpath_target = os.path.join(node.parent.relpath(), ui_filename) target_node = change_target_qt5_node(self.bld, self.path, self.name, relpath_target, self.target_uid) uictask.outputs = [target_node] self.uic_tasks.append(uictask) @feature('qt5') @after_method('process_source') def add_uic_dependencies(self): # are there uic tasks? 
if not getattr(self, 'uic_tasks', False): return uic_tasks = set(self.uic_tasks) for task in self.tasks: if task.__class__.__name__ in ['qxx', 'cxx', 'c']: task.run_after |= uic_tasks @extension('.ts') def add_lang(self, node): """add all the .ts file into self.lang""" self.lang = self.to_list(getattr(self, 'lang', [])) + [node] @feature('qt5') @before_method('apply_incpaths') def apply_qt5_includes(self): # Make sure the QT is enabled, otherwise whatever module is using this feature will fail if not self.env.QMAKE: return base_moc_node = get_target_qt5_root(self.bld, self.name, self.target_uid) if not hasattr(self, 'includes'): self.includes = [] self.includes.append(base_moc_node) if self.env.PLATFORM == 'win_x64_clang': self.env.append_unique('CXXFLAGS', '-Wno-ignored-pragmas') @feature('qt5') @after_method('set_link_outputs') def apply_qt5(self): """ Add MOC_FLAGS which may be necessary for moc:: def build(bld): bld.program(features='qt5', source='main.cpp', target='app', use='QTCORE') The additional parameters are: :param lang: list of translation files (\*.ts) to process :type lang: list of :py:class:`waflib.Node.Node` or string without the .ts extension :param update: whether to process the C++ files to update the \*.ts files (use **waf --translate**) :type update: bool :param langname: if given, transform the \*.ts files into a .qrc files to include in the binary file :type langname: :py:class:`waflib.Node.Node` or string without the .qrc extension """ # Make sure the QT is enabled, otherwise whatever module is using this feature will fail if not self.env.QMAKE: return # If no type is defined, this is just a stub task that shouldn't handle any additional build/link tasks if not hasattr(self,'_type'): return if getattr(self, 'lang', None): qmtasks = [] for x in self.to_list(self.lang): if isinstance(x, str): x = self.path.find_resource(x + '.ts') qm_filename = '%s.qm' % os.path.splitext(x.name)[0] relpath_target = os.path.join(x.parent.relpath(), qm_filename) new_qm_node = change_target_qt5_node(self.bld, self.path, self.name, relpath_target, self.target_uid) qmtask = self.create_task('ts2qm', x, new_qm_node) qmtasks.append(qmtask) if getattr(self, 'update', None) and Options.options.trans_qt5: cxxnodes = [a.inputs[0] for a in self.compiled_tasks] + [ a.inputs[0] for a in self.tasks if getattr(a, 'inputs', None) and a.inputs[0].name.endswith('.ui')] for x in qmtasks: self.create_task('trans_update', cxxnodes, x.inputs) if getattr(self, 'langname', None): qmnodes = [x.outputs[0] for x in qmtasks] assert(isinstance(self.langname, str)) qrc_filename = '%s.qrc' % self.langname relpath_target = os.path.join(self.path.relpath(), qrc_filename) new_rc_node = change_target_qt5_node(self.bld, self.path, self.name, relpath_target, self.target_uid) t = self.create_task('qm2rcc', qmnodes, new_rc_node) for x in qmtasks: t.set_run_after(x) k = create_rcc_task(self, t.outputs[0]) if k: self.link_task.inputs.append(k.outputs[0]) k.set_run_after(t) lst = [] for flag in self.to_list(self.env['CXXFLAGS']): if len(flag) < 2: continue f = flag[0:2] if f in ('-D', '-I', '/D', '/I'): if f[0] == '/': lst.append('-' + flag[1:]) else: lst.append(flag) if len(self.env['DEFINES']) > 0: for defined_value in self.env['DEFINES']: lst.append( '-D'+defined_value ) # Apply additional QT defines for all MOCing additional_flags = ['-DQT_LARGEFILE_SUPPORT', '-DQT_DLL', '-DQT_CORE_LIB', '-DQT_GUI_LIB'] for additional_flag in additional_flags: if not lst.__contains__(additional_flag): lst.append(additional_flag) 
self.env.append_value('MOC_FLAGS', lst) @extension(*EXT_QT5) def cxx_hook(self, node): """ Re-map C++ file extensions to the :py:class:`waflib.Tools.qt5.qxx` task. """ if 'qt5' in self.features: return self.create_compiled_task('qxx', node) else: return self.create_compiled_task('cxx', node) # QT tasks involve code generation, so we need to also check if the generated code is still there class QtTask(Task.Task): def runnable_status(self): missing_output = False for output in self.outputs: if not os.path.exists(output.abspath()): missing_output = True break if missing_output: for t in self.run_after: if not t.hasrun: return Task.ASK_LATER return Task.RUN_ME status = Task.Task.runnable_status(self) return status class automoc(Task.Task): def create_moc_tasks(self, moc_headers): moc_names = set() for moc_header in moc_headers: moc_node_name = os.path.splitext(moc_header.name)[0] # Make sure we don't have two moc files with the same name suffix = None while (moc_node_name + ("_%i" % suffix if suffix else "")) in moc_names: suffix = suffix + 1 if suffix else 2 if suffix: moc_node_name += "_%i" % suffix moc_names.add(moc_node_name) cpp_filename = '%s_moc.cpp' % moc_node_name relpath_target = os.path.join(moc_header.parent.relpath(), cpp_filename) moc_node = change_target_qt5_node(self.generator.bld, self.generator.path, self.generator.name, relpath_target, self.generator.target_uid) moc_task = self.generator.create_task('moc', moc_header, moc_node) # Include the precompiled header, if applicable if getattr(self.generator, 'pch_header', None) is not None: moc_task.env['MOC_FLAGS'] = moc_task.env['MOC_FLAGS'] + ['-b', self.generator.pch_header] cpp_task = self.generator.create_compiled_task('cxx', moc_node) # Ignore warnings in generated code is_msvc = cpp_task.env['CXX_NAME'] == 'msvc' moc_cxx_flags = [flag for flag in cpp_task.env['CXXFLAGS'] if not flag.startswith('/W' if is_msvc else '-W')] if is_msvc and '/EHsc' not in moc_cxx_flags: moc_cxx_flags.append('/EHsc') elif not is_msvc and '-w' not in moc_cxx_flags: moc_cxx_flags.append('-w') cpp_task.env['CXXFLAGS'] = moc_cxx_flags # Define Q_MOC_BUILD for the (rare) case where a header might need to check to see if it's been included by # a _moc file. cpp_task.env.append_unique('DEFINES', 'Q_MOC_BUILD') cpp_task.set_run_after(moc_task) # add cpp output to link task. 
# Modifying the task should be ok because the link task is already registered as a run_after of # the automoc task (this task), and runnable_status in run on the main thread self.generator.link_task.inputs.append(cpp_task.outputs[0]) self.generator.link_task.set_run_after(cpp_task) # direct injection in the build phase (safe because runnable_status is only called from the main thread) producer = self.generator.bld.producer producer.outstanding.insert(0, moc_task) # insert the moc_task, its ready to run producer.outstanding.append(cpp_task) # append the cpp_task, it must wait for the moc task completion anyways producer.total += 2 def runnable_status(self): # check if any of the inputs have changed, or the input list has changed, or the dependencies have changed status = Task.Task.runnable_status(self) moc_headers = [] if Task.RUN_ME == status: # run the automoc scan to generate the up-to-date contents for header_node in self.inputs: header_contents = header_node.read() # For now, only work on headers that opt in with an AUTOMOC comment if "AUTOMOC" not in header_contents: continue header_contents = c_preproc.re_cpp.sub(c_preproc.repl, header_contents) if QOBJECT_RE.search(header_contents): moc_headers.append(header_node) # store on task, will be added to the node_deps in post_run self.moc_headers = moc_headers else: # signatures didn't change, grab the saved nodes moc_headers = self.generator.bld.node_deps[self.uid()] # build the qt tasks, and add them to the link task self.create_moc_tasks(moc_headers) return status def scan(self): moc_headers = self.generator.bld.node_deps.get(self.uid(), []) return (moc_headers, []) def post_run(self): self.generator.bld.node_deps[self.uid()] = getattr(self, 'moc_headers', []) try: del self.cache_sig except: pass Task.Task.post_run(self) class rcc(QtTask): """ Process *.qrc* files """ color = 'BLUE' run_str = '${QT_RCC} -name ${tsk.rcname()} ${SRC[0].abspath()} ${RCC_ST} -o ${TGT}' ext_in = ['.qrc'] def __init__(self, *k, **kw): QtTask.__init__(self, *k, **kw) def rcname(self): return os.path.splitext(self.inputs[0].name)[0] def parse_deps(self): """Parse the *.qrc* files""" if not has_xml: Logs.error('no xml support was found, the rcc dependencies will be incomplete!') return parser = make_parser() curHandler = XMLHandler() parser.setContentHandler(curHandler) fi = open(self.inputs[0].abspath(), 'r') try: parser.parse(fi) finally: fi.close() self.rcc_deps_paths = curHandler.files def lookup_deps(self, root, deps_paths): nodes = [] names = [] for x in deps_paths: nd = root.find_resource(x) if nd: nodes.append(nd) else: names.append(x) return (nodes, names) def scan(self): resolved_nodes = self.generator.bld.node_deps.get(self.uid(), []) unresolved_names = self.generator.bld.raw_deps.get(self.uid(), []) return (resolved_nodes, unresolved_names) def post_run(self): self.parse_deps() # convert input dependency files to nodes. Care must be taken in this block wrt thread safety because it creates nodes if 'msvcdeps' in sys.modules: # msvcdeps is run on the worker threads, it may conflict with generate_deps, which is also creating node at # compile time. 
Defer to msvcdeps module to handle thread locking (nodes, names) = sys.modules['msvcdeps'].sync_lookup_deps(self.inputs[0].parent, self.rcc_deps_paths) else: (nodes, names) = self.lookup_deps(self.inputs[0].parent, self.rcc_deps_paths) del self.rcc_deps_paths # store dependencies in build self.generator.bld.node_deps[self.uid()] = nodes self.generator.bld.raw_deps[self.uid()] = names # delete signature to force a rebuild of signature. Scan() will be called to store the deps try: del self.cache_sig except: pass # call base class to regenerate signature super(rcc, self).post_run() class moc(QtTask): """ Create *.moc* files """ color = 'BLUE' run_str = '${QT_MOC} ${MOC_FLAGS} ${SRC} -o ${TGT}' class fake_moc(QtTask): """ Create dummy *.moc files - this is a temporary workaround while we migrate to autmoc """ color = 'BLUE' def post_run(self): self.outputs[0].write("/* Dummy moc file, this will eventually be removed */\n") super(fake_moc, self).post_run(self) class ui5(QtTask): """ Process *.ui* files """ color = 'BLUE' run_str = '${QT_UIC} ${SRC} -o ${TGT}' ext_in = ['.ui'] class ts2qm(QtTask): """ Create *.qm* files from *.ts* files """ color = 'BLUE' run_str = '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}' class qm2rcc(QtTask): """ Transform *.qm* files into *.rc* files """ color = 'BLUE' after = 'ts2qm' def run(self): """Create a qrc file including the inputs""" txt = '\n'.join(['<file>%s</file>' % k.path_from(self.outputs[0].parent) for k in self.inputs]) code = '<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n%s\n</qresource>\n</RCC>' % txt self.outputs[0].write(code) bin_cache = {} # maintain a cache set of platforms that don't have Qt # so that we don't needlessly search multiple times, and # so that the user doesn't get numerous warnings of the same thing QT_SDK_MISSING = set() @conf def get_qt_version(self): # at the end, try to find qmake in the paths given # keep the one with the highest version version = None paths = [] prev_ver = ['5', '0', '0'] for qmk in ('qmake-qt5', 'qmake5', 'qmake'): try: qmake = self.find_program(qmk, path_list=paths, silent_output=True) except self.errors.ConfigurationError: pass else: try: version = self.cmd_and_log([qmake] + ['-query', 'QT_VERSION'], quiet=Context.BOTH).strip() except self.errors.WafError: version = None pass # qmake could not be found easily, rely on qtchooser if version is None: try: self.find_program('qtchooser', silent_output=True) except self.errors.ConfigurationError: pass else: cmd = [self.env.QTCHOOSER] + ['-qt=5', '-run-tool=qmake'] try: version = self.cmd_and_log(cmd + ['-query', 'QT_VERSION'], quiet=Context.BOTH).strip() except self.errors.WafError: pass return version def _prepare_lib_folder_for_linux(qt_lib_path): # this functions sets up the qt linux shared library, for example # libQt5Xml.so -> libQt5Xml.so.5.6.2 # libQt5Xml.so.5 -> libQt5Xml.so.5.6.2 # libQt5Xml.so.5.6 -> libQt5Xml.so.5.6.2 import glob library_files = glob.glob(os.path.join(qt_lib_path, 'lib*.so*')) for lib_path in library_files: if os.path.islink(lib_path): continue lib_path_basename = os.path.basename(lib_path) new_lib_path, ext = os.path.splitext(lib_path) while ext != '.so': if os.path.lexists(new_lib_path) is False: os.symlink(lib_path_basename, new_lib_path) Logs.debug('Made link: {} -> {}'.format(lib_path, new_lib_path)) new_lib_path, ext = os.path.splitext(new_lib_path) @conf def find_qt5_binaries(self, platform): # platform has to be passed in, as it hasn't been set in the env # when this function is called global QT_SDK_MISSING if 
platform in QT_SDK_MISSING: return False env = self.env opt = Options.options qtbin = getattr(opt, 'qtbin', '') platform_details = self.get_target_platform_detail(platform) if not platform_details.attributes.get('qt_supported', False): raise Errors.WafError('Platform {} is not supported by our Qt waf scripts.'.format(platform)) qt_platform_dir = platform_details.attributes.get('qt_platform_dir', None) if not qt_platform_dir: raise Errors.WafError("Platform settings for platform {} is missing required '' attribute.".format(platform)) # Get the QT dir from the third party settings qtdir, enabled, roles, _ = self.tp.get_third_party_path(platform, 'qt') # If the path was not resolved, it could be an invalid alias (missing from the SetupAssistantConfig.json) if not qtdir: raise Errors.WafError("Invalid required QT alias for platform {}".format(platform)) # If the path was resolved, we still need to make sure the 3rd party is enabled based on the roles if not enabled: error_message = "Unable to resolve Qt because it is not enabled in Setup Assistant. \nMake sure that at least " \ "one of the following roles is enabled: [{}]".format(', '.join(roles)) raise Errors.WafError(error_message) qtdir = os.path.join(qtdir, qt_platform_dir) paths = [] if qtdir: qtbin = os.path.join(qtdir, 'bin') # the qt directory has been given from QT5_ROOT - deduce the qt binary path if not qtdir: qtdir = os.environ.get('QT5_ROOT', '') qtbin = os.environ.get('QT5_BIN', None) or os.path.join(qtdir, 'bin') if qtbin: paths = [qtbin] qmake_cache_key = qtdir + '_QMAKE' if qmake_cache_key in bin_cache: self.env.QMAKE = bin_cache[qmake_cache_key] else: # at the end, try to find qmake in the paths given # keep the one with the highest version cand = None prev_ver = ['5', '0', '0'] for qmk in ('qmake-qt5', 'qmake5', 'qmake'): try: qmake = self.find_program(qmk, path_list=paths, silent_output=True) except self.errors.ConfigurationError: pass else: try: version = self.cmd_and_log([qmake] + ['-query', 'QT_VERSION']).strip() except self.errors.WafError: print("{} was found, but QT_VERSION could not be retrieved by executing qmake. Retrying with logs enabled for debugging:".format(qmk)) subprocess.run(qmake + ' -query QT_VERSION', shell=True) pass else: if version: new_ver = version.split('.') if new_ver > prev_ver: cand = qmake prev_ver = new_ver # qmake could not be found easily, rely on qtchooser if not cand: try: self.find_program('qtchooser') except self.errors.ConfigurationError: pass else: cmd = [self.env.QTCHOOSER] + ['-qt=5', '-run-tool=qmake'] try: version = self.cmd_and_log(cmd + ['-query', 'QT_VERSION']) except self.errors.WafError: pass else: cand = os.path.normpath(cmd) if cand: self.env.QMAKE = cand bin_cache[qmake_cache_key] = cand else: # If we cannot find qmake, we will assume that QT is not available or a selected option # Therefore, we cannot build the lumberyard editor and tools Logs.warn('[WARN] Unable to find the appropriate QT library. 
Make sure you have QT installed if you wish to compile the Lumberyard Editor and tools.') QT_SDK_MISSING.add(platform) return False qmake_cache_key = qtdir + '_QT_INSTALL_BINS' if qmake_cache_key in bin_cache: self.env.QT_INSTALL_BINS = qtbin = bin_cache[qmake_cache_key] else: query_qt_bin_result = self.cmd_and_log([self.env.QMAKE] + ['-query', 'QT_INSTALL_BINS']).strip() + os.sep self.env.QT_INSTALL_BINS = qtbin = os.path.normpath(query_qt_bin_result) + os.sep bin_cache[qmake_cache_key] = qtbin paths.insert(0, qtbin) def _get_qtlib_subfolder(name): qt_subdir = os.path.join(qtdir, name) if not os.path.exists(qt_subdir): self.fatal('Unable to find QT lib folder {}: "{}"'.format(name, qt_subdir)) return qt_subdir # generate symlinks for the library files within the lib folder if platform == "linux_x64": _prepare_lib_folder_for_linux(_get_qtlib_subfolder("lib")) def find_bin(lst, var): if var in env: return cache_key = qtdir + '_' + var if cache_key in bin_cache: env[var] = bin_cache[cache_key] return for f in lst: try: ret = self.find_program(f, path_list=paths, silent_output=True, var=var) except self.errors.ConfigurationError: pass else: env[var] = os.path.normpath(ret) bin_cache[cache_key] = os.path.normpath(ret) break find_bin(['uic-qt5', 'uic'], 'QT_UIC') if not env.QT_UIC: # If we find qmake but not the uic compiler, then the QT installation is corrupt/invalid self.fatal('Detected an invalid/corrupt version of QT, please check your installation') uic_version_cache_key = qtdir + '_UICVERSION' if uic_version_cache_key not in bin_cache: uicver = self.cmd_and_log([env.QT_UIC] + ['-version'], output=Context.BOTH, quiet=True) uicver = ''.join(uicver).strip() uicver = uicver.replace('Qt User Interface Compiler ','').replace('User Interface Compiler for Qt', '') if uicver.find(' 3.') != -1 or uicver.find(' 4.') != -1: self.fatal('this uic compiler is for qt3 or qt5, add uic for qt5 to your path') bin_cache[uic_version_cache_key] = uicver find_bin(['moc-qt5', 'moc'], 'QT_MOC') find_bin(['rcc-qt5', 'rcc'], 'QT_RCC') find_bin(['lrelease-qt5', 'lrelease'], 'QT_LRELEASE') find_bin(['lupdate-qt5', 'lupdate'], 'QT_LUPDATE') env['UIC_ST'] = '%s -o %s' env['MOC_ST'] = '-o' env['ui_PATTERN'] = 'ui_%s.h' env['QT_LRELEASE_FLAGS'] = ['-silent'] env.MOCCPPPATH_ST = '-I%s' env.MOCDEFINES_ST = '-D%s' env.QT_BIN_DIR = _get_qtlib_subfolder('bin') env.QT_LIB_DIR = _get_qtlib_subfolder('lib') env.QT_QML_DIR = _get_qtlib_subfolder('qml') env.QT_PLUGINS_DIR = _get_qtlib_subfolder('plugins') if platform is not 'darwin_x64': env.QT_RESOURCES_DIR = _get_qtlib_subfolder('resources') env.QT_TRANSLATIONS_DIR = _get_qtlib_subfolder('translations') return True @conf def find_qt5_libraries(self): qtlibs = getattr(Options.options, 'qtlibs', None) or os.environ.get("QT5_LIBDIR", None) if not qtlibs: try: qtlibs = self.cmd_and_log([self.env.QMAKE] + ['-query', 'QT_INSTALL_LIBS']).strip() except Errors.WafError: qtdir = self.cmd_and_log([self.env.QMAKE] + ['-query', 'QT_INSTALL_PREFIX']).strip() + os.sep qtlibs = os.path.join(qtdir, 'lib') self.msg('Found the Qt5 libraries in', qtlibs) qtincludes = os.environ.get("QT5_INCLUDES", None) or self.cmd_and_log([self.env.QMAKE] + ['-query', 'QT_INSTALL_HEADERS']).strip() env = self.env if not 'PKG_CONFIG_PATH' in os.environ: os.environ['PKG_CONFIG_PATH'] = '%s:%s/pkgconfig:/usr/lib/qt5/lib/pkgconfig:/opt/qt5/lib/pkgconfig:/usr/lib/qt5/lib:/opt/qt5/lib' % (qtlibs, qtlibs) if Utils.unversioned_sys_platform() == "darwin": if qtlibs: env.append_unique('FRAMEWORKPATH',qtlibs) # Keep track 
of platforms that were checked (there is no need to do a multiple report) checked_darwin = False checked_linux = False checked_win_x64 = False validated_platforms = self.get_enabled_target_platform_names() for validated_platform in validated_platforms: is_platform_darwin = self.is_mac_platform(validated_platform) is_platform_linux = self.is_linux_platform(validated_platform) is_platform_win_x64 = self.is_windows_platform(validated_platform) for i in self.qt5_vars: uselib = i.upper() # Platform is darwin_x64 / mac if is_platform_darwin: # QT for darwin does not have '5' in the name, so we need to remove it darwin_adjusted_name = i.replace('Qt5','Qt') # Since at least qt 4.7.3 each library locates in separate directory frameworkName = darwin_adjusted_name + ".framework" qtDynamicLib = os.path.join(qtlibs, frameworkName, darwin_adjusted_name) if os.path.exists(qtDynamicLib): env.append_unique('FRAMEWORK_{}_{}'.format(validated_platform,uselib), darwin_adjusted_name) if not checked_darwin: self.msg('Checking for %s' % i, qtDynamicLib, 'GREEN') else: if not checked_darwin: self.msg('Checking for %s' % i, False, 'YELLOW') env.append_unique('INCLUDES_{}_{}'.format(validated_platform,uselib), os.path.join(qtlibs, frameworkName, 'Headers')) # Detect the debug versions of the library uselib_debug = i.upper() + "D" darwin_adjusted_name_debug = '{}_debug'.format(darwin_adjusted_name) qtDynamicLib_debug = os.path.join(qtlibs, frameworkName, darwin_adjusted_name_debug) if os.path.exists(qtDynamicLib_debug): env.append_unique('FRAMEWORK_{}_{}'.format(validated_platform, uselib_debug), darwin_adjusted_name) if not checked_darwin: self.msg('Checking for %s_debug' % i, qtDynamicLib_debug, 'GREEN') else: if not checked_darwin: self.msg('Checking for %s_debug' % i, False, 'YELLOW') env.append_unique('INCLUDES_{}_{}'.format(validated_platform,uselib_debug), os.path.join(qtlibs, frameworkName, 'Headers')) # Platform is linux+gcc elif is_platform_linux: qtDynamicLib = os.path.join(qtlibs, "lib" + i + ".so") qtStaticLib = os.path.join(qtlibs, "lib" + i + ".a") if os.path.exists(qtDynamicLib): env.append_unique('LIB_{}_{}'.format(validated_platform,uselib), i) if not checked_linux: self.msg('Checking for %s' % i, qtDynamicLib, 'GREEN') elif os.path.exists(qtStaticLib): env.append_unique('LIB_{}_{}'.format(validated_platform,uselib), i) if not checked_linux: self.msg('Checking for %s' % i, qtStaticLib, 'GREEN') else: if not checked_linux: self.msg('Checking for %s' % i, False, 'YELLOW') env.append_unique('LIBPATH_{}_{}'.format(validated_platform,uselib), qtlibs) env.append_unique('INCLUDES_{}_{}'.format(validated_platform,uselib), qtincludes) env.append_unique('INCLUDES_{}_{}'.format(validated_platform,uselib), os.path.join(qtincludes, i)) # Platform is win_x64 elif is_platform_win_x64: # Release library names are like QtCore5 for k in ("lib%s.a", "lib%s5.a", "%s.lib", "%s5.lib"): lib = os.path.join(qtlibs, k % i) if os.path.exists(lib): env.append_unique('LIB_{}_{}'.format(validated_platform,uselib), i + k[k.find("%s") + 2 : k.find('.')]) if not checked_win_x64: self.msg('Checking for %s' % i, lib, 'GREEN') break else: if not checked_win_x64: self.msg('Checking for %s' % i, False, 'YELLOW') env.append_unique('LIBPATH_{}_{}'.format(validated_platform,uselib), qtlibs) env.append_unique('INCLUDES_{}_{}'.format(validated_platform,uselib), qtincludes) env.append_unique('INCLUDES_{}_{}'.format(validated_platform,uselib), os.path.join(qtincludes, i.replace('Qt5', 'Qt'))) # Debug library names are like QtCore5d uselib = 
i.upper() + "D" for k in ("lib%sd.a", "lib%sd5.a", "%sd.lib", "%sd5.lib"): lib = os.path.join(qtlibs, k % i) if os.path.exists(lib): env.append_unique('LIB_{}_{}'.format(validated_platform,uselib), i + k[k.find("%s") + 2 : k.find('.')]) if not checked_win_x64: self.msg('Checking for %s' % i, lib, 'GREEN') break else: if not checked_win_x64: self.msg('Checking for %s' % i, False, 'YELLOW') env.append_unique('LIBPATH_{}_{}'.format(validated_platform,uselib), qtlibs) env.append_unique('INCLUDES_{}_{}'.format(validated_platform,uselib), qtincludes) env.append_unique('INCLUDES_{}_{}'.format(validated_platform,uselib), os.path.join(qtincludes, i.replace('Qt5', 'Qt'))) else: # The current target platform is not supported for QT5 Logs.debug('lumberyard: QT5 detection not supported for platform {}'.format(validated_platform)) pass if is_platform_darwin: checked_darwin = True elif is_platform_linux: checked_linux = True elif is_platform_win_x64: checked_win_x64 = True @conf def simplify_qt5_libs(self): # the libpaths make really long command-lines # remove the qtcore ones from qtgui, etc env = self.env def process_lib(vars_, coreval): validated_platforms = self.get_enabled_target_platform_names() for validated_platform in validated_platforms: for d in vars_: var = d.upper() if var == 'QTCORE': continue value = env['LIBPATH_{}_{}'.format(validated_platform, var)] if value: core = env[coreval] accu = [] for lib in value: if lib in core: continue accu.append(lib) env['LIBPATH_{}_{}'.format(validated_platform, var)] = accu process_lib(self.qt5_vars, 'LIBPATH_QTCORE') process_lib(self.qt5_vars_debug, 'LIBPATH_QTCORE_DEBUG') @conf def add_qt5_rpath(self): # rpath if wanted env = self.env if getattr(Options.options, 'want_rpath', False): def process_rpath(vars_, coreval): validated_platforms = self.get_enabled_target_platform_names() for validated_platform in validated_platforms: for d in vars_: var = d.upper() value = env['LIBPATH_{}_{}'.format(validated_platform, var)] if value: core = env[coreval] accu = [] for lib in value: if var != 'QTCORE': if lib in core: continue accu.append('-Wl,--rpath='+lib) env['RPATH_{}_{}'.format(validated_platform, var)] = accu process_rpath(self.qt5_vars, 'LIBPATH_QTCORE') process_rpath(self.qt5_vars_debug, 'LIBPATH_QTCORE_DEBUG') @conf def set_qt5_libs_to_check(self): if not hasattr(self, 'qt5_vars'): self.qt5_vars = QT5_LIBS self.qt5_vars = Utils.to_list(self.qt5_vars) if not hasattr(self, 'qt5_vars_debug'): self.qt5_vars_debug = [a + '_debug' for a in self.qt5_vars] self.qt5_vars_debug = Utils.to_list(self.qt5_vars_debug) @conf def set_qt5_defines(self): if sys.platform != 'win32': return validated_platforms = self.get_enabled_target_platform_names() for validated_platform in validated_platforms: for x in self.qt5_vars: y=x.replace('Qt5', 'Qt')[2:].upper() self.env.append_unique('DEFINES_{}_{}'.format(validated_platform,x.upper()), 'QT_%s_LIB' % y) self.env.append_unique('DEFINES_{}_{}_DEBUG'.format(validated_platform,x.upper()), 'QT_%s_LIB' % y) def options(opt): """ Command-line options """ opt.add_option('--want-rpath', action='store_true', default=False, dest='want_rpath', help='enable the rpath for qt libraries') opt.add_option('--header-ext', type='string', default='', help='header extension for moc files', dest='qt_header_ext') for i in 'qtdir qtbin qtlibs'.split(): opt.add_option('--'+i, type='string', default='', dest=i) opt.add_option('--translate', action="store_true", help="collect translation strings", dest="trans_qt5", default=False) IGNORE_QTLIB_PATTERNS 
= [ # cmake Not needed os.path.normcase('lib/cmake'), # Special LY built plugins that will be copied from a different source 'qtga.dll', 'qtga.pdb', 'qtgad.dll', 'qtgad.pdb', 'libqttga.dylib', 'libqttga_debug.dylib' ] ICU_DLLS = [ "icudt54", "icuin54", "icuuc54" ] WINDOWS_RC_QT_DLLS = [ "Qt5Core", "Qt5Gui", "Qt5Network", "Qt5Qml", "Qt5Quick", "Qt5Svg", "Qt5Widgets", ] WINDOWS_MAIN_QT_DLLS = [ "Qt5Core", "Qt5Gui", "Qt5Network", "Qt5Qml", "Qt5Quick", "Qt5Svg", "Qt5Widgets", "Qt5Bluetooth", "Qt5CLucene", "Qt5Concurrent", "Qt5DBus", "Qt5DesignerComponents", "Qt5Designer", "Qt5Help", "Qt5MultimediaQuick_p", "Qt5Multimedia", "Qt5MultimediaWidgets", "Qt5Nfc", "Qt5OpenGL", "Qt5Positioning", "Qt5PrintSupport", "Qt5QmlModels", "Qt5QmlWorkerScript", "Qt5QuickParticles", "Qt5QuickTest", "Qt5Script", "Qt5ScriptTools", "Qt5Sensors", "Qt5SerialPort", "Qt5Sql", "Qt5Test", "Qt5WebChannel", "Qt5WebKit", "Qt5WebKitWidgets", "Qt5WebSockets", "Qt5WinExtras", "Qt5XmlPatterns", "Qt5Xml", "libEGL", "libGLESv2" ] @conf def qtlib_bootstrap(self, platform, configuration): global QT_SDK_MISSING if platform in QT_SDK_MISSING: return def _copy_folder(src, dst, qt_type, pattern, is_ignore): dst_type = os.path.normcase(os.path.join(dst, qt_type)) return copy_tasks.copy_tree2(src, dst_type, False, pattern, is_ignore, False) def _copy_file(src_path, dest_path): src = os.path.normcase(src_path) dst = os.path.normcase(dest_path) copy_file = copy_tasks.should_overwrite_file(src, dst) if copy_file: try: # In case the file is readonly, we'll remove the existing file first if os.path.exists(dst): os.chmod(dst, stat.S_IWRITE) except Exception as err: Logs.warn('[WARN] Unable to make target file {} writable {}'.format(dst, err)) try: shutil.copy2(src, dst) except Exception as err: Logs.warn('[WARN] Unable to copy {} to {}: {}'.format(src, dst, err)) return 1 else: return 0 def _copy_dlls(qt_dlls_source, target_folder): if not os.path.exists(target_folder): os.makedirs(target_folder) copied = 0 for qtdll in qt_dlls_source: src_dll = os.path.join(self.env.QT_BIN_DIR, qtdll) dst_dll = os.path.join(target_folder, qtdll) copied += _copy_file(src_dll, dst_dll) return copied def _copy_qtlib_folder(ctx, dst, platform_details, patterns, is_required_pattern): # Used to track number of files copied by this function num_files_copied = 0 # If qt fails to configure, the folder copies below will give meaningless errors. 
# Test for this condition and error out here if not ctx.env.QT_LIB_DIR: Logs.warn('unable to find QT') return num_files_copied # Create the qtlibs subfolder dst_qtlib = os.path.normcase(os.path.join(dst, 'qtlibs')) if not os.path.exists(dst_qtlib): os.makedirs(dst_qtlib) # Copy the libs for qtlibs lib_pattern = patterns if 'lib' in patterns: lib_pattern = patterns['lib'] num_files_copied += _copy_folder(ctx.env.QT_LIB_DIR, dst_qtlib, 'lib', lib_pattern, is_required_pattern) # special setup for linux_x64 platform if platform == 'linux_x64': _prepare_lib_folder_for_linux(os.path.join(dst_qtlib, 'lib')) # Copy the qml for qtlibs qml_pattern = patterns if 'qml' in patterns: qml_pattern = patterns['qml'] num_files_copied += _copy_folder(ctx.env.QT_QML_DIR, dst_qtlib, 'qml', qml_pattern, is_required_pattern) # Copy the plugins for qtlibs plugins_pattern = patterns if 'plugins' in patterns: plugins_pattern = patterns['plugins'] num_files_copied += _copy_folder(ctx.env.QT_PLUGINS_DIR, dst_qtlib, 'plugins', plugins_pattern, is_required_pattern) # Copy the license files qt_base = os.path.normpath(ctx.ThirdPartyPath('qt', '')) num_files_copied += _copy_file(os.path.join(qt_base, 'LICENSE'), os.path.join(dst_qtlib, 'LICENSE')) num_files_copied += _copy_file(os.path.join(qt_base, 'LICENSE.GPLV3'), os.path.join(dst_qtlib, 'LICENSE.GPLV3')) num_files_copied += _copy_file(os.path.join(qt_base, 'LICENSE.LGPLV3'), os.path.join(dst_qtlib, 'LICENSE.LGPLV3')) num_files_copied += _copy_file(os.path.join(qt_base, 'LGPL_EXCEPTION.TXT'), os.path.join(dst_qtlib, 'LGPL_EXCEPTION.TXT')) num_files_copied += _copy_file(os.path.join(qt_base, 'QT-NOTICE.TXT'), os.path.join(dst_qtlib, 'QT-NOTICE.TXT')) qt_tga_files = platform_details.attributes.get('qtga_subfolders', []) qt_tga_src_root = os.path.normcase(ctx.Path('Tools/Redistributables/QtTgaImageFormatPlugin')) for qt_tga_file in qt_tga_files: if not is_copy_pdbs and qt_tga_file.endswith('.pdb'): continue source_qt_tga = os.path.normcase(os.path.join(qt_tga_src_root, qt_tga_file)) dest_qt_tga = os.path.normcase( os.path.join(dst_qtlib, 'plugins/imageformats', os.path.basename(qt_tga_file))) num_files_copied += _copy_file(source_qt_tga, dest_qt_tga) return num_files_copied def _copy_qt_dlls(ctx, dst, copy_dll_list): debug_dll_fn = lambda qt: qt + ('d.dll' if is_debug else '.dll') ext_dll_fn = lambda dll: dll + '.dll' ext_pdb_fn = lambda pdb: pdb + '.pdb' qt_main_dlls = [debug_dll_fn(qt) for qt in copy_dll_list] qt_main_dlls += [ext_dll_fn(icu) for icu in ICU_DLLS] if is_debug and is_copy_pdbs: qt_main_dlls += [ext_pdb_fn(qt) for qt in copy_dll_list] num_files_copied = 0 try: if not os.path.exists(ctx.env.QT_BIN_DIR): Logs.debug('Unable to locate QT Bin folder: {}.'.format(ctx.env.QT_BIN_DIR)) QT_SDK_MISSING.add(platform) return num_files_copied except TypeError: Logs.debug('Unable to locate QT Bin folder.') QT_SDK_MISSING.add(platform) return num_files_copied # Copy the QT.dlls to the main configuration output folder num_files_copied += _copy_dlls(qt_main_dlls, dst) return num_files_copied def _is_module_in_current_project_spec(module): # No spec means build everything if len(self.options.project_spec.strip()) == 0: return True # Get the list of the current project spec(s) that is being built specs = [spec.strip() for spec in self.options.project_spec.split(',')] has_module = False for spec in specs: # search each valid spec and see if the input module is defined spec_def = self.loaded_specs_dict.get(spec, None) if not spec_def: continue spec_modules = 
spec_def.get('modules', None) if not spec_modules: continue if module in spec_modules: has_module = True break return has_module def _bootstrap_qtwebengine(ctx, dst, platform_details, patterns, is_required_pattern): # Used to track number of files copied by this function num_files_copied = 0 is_windows = platform_detail.attributes.get('is_windows', False) if not is_windows: return num_files_copied # Write the qt.conf file. This is required so that Qt can find QtWebEngineProcess.exe with open(os.path.join(dst, 'qt.conf'), 'w') as f: f.write(QT_CONF) # Create the qtlibs subfolder if required dst_qtlib = os.path.normcase(os.path.join(dst, 'qtlibs')) if not os.path.exists(dst_qtlib): os.makedirs(dst_qtlib) # If qt fails to configure, the folder copies below will give meaningless errors. # Test for this condition and error out here if not ctx.env.QT_BIN_DIR or not ctx.env.QT_RESOURCES_DIR or not ctx.env.QT_TRANSLATIONS_DIR: Logs.warn('unable to find QT resources or translations') return num_files_copied # QtWebEngineProcess.exe dst_qtbin = os.path.normcase(os.path.join(dst_qtlib, 'bin')) if not os.path.exists(dst_qtbin): os.makedirs(dst_qtbin) qtwebengineprocess = 'QtWebEngineProcess' + ('d.exe' if is_debug else '.exe') num_files_copied += _copy_file(os.path.join(ctx.env.QT_BIN_DIR, qtwebengineprocess), os.path.join(dst_qtbin, qtwebengineprocess)) # QtWebEngineProcess dlls num_files_copied += _copy_qt_dlls(self, dst_qtbin, QT5_WEBENGINE_DLLS) # Write the QtWebEngineProcess qt.conf file. This is required so that QtWebEngineProcess.exe # can find its dependencies and resources with open(os.path.join(dst_qtbin, 'qt.conf'), 'w') as f: f.write(QT_WEBENGINE_CONF) # Resources and ICU data resources_pattern = patterns if 'resources' in patterns: resources_pattern = patterns['resources'] num_files_copied += _copy_folder(ctx.env.QT_RESOURCES_DIR, dst_qtlib, 'resources', resources_pattern, is_required_pattern) # Translations dst_qttranslations = os.path.normcase(os.path.join(dst_qtlib, 'translations')) if not os.path.exists(dst_qttranslations): os.makedirs(dst_qttranslations) translations_pattern = patterns if 'translations' in patterns: translations_pattern = patterns['translations'] num_files_copied += _copy_folder(ctx.env.QT_TRANSLATIONS_DIR, dst_qttranslations, 'qtwebengine_locales', translations_pattern, is_required_pattern) return num_files_copied is_copy_pdbs = self.is_option_true('copy_3rd_party_pdbs') output_paths = self.get_output_folders(platform, configuration) if len(output_paths) != 1: self.fatal('Assertion error: Multiple output paths returned') output_path = output_paths[0].abspath() if not os.path.exists(output_path): os.makedirs(output_path) # Check if current configuration is a debug build is_debug = configuration.startswith('debug') # For windows, we will bootstrap copy the Qt Dlls to the main and rc subfolder # (for non-test and non-dedicated configurations) platform_detail = self.get_target_platform_detail(platform) configuration_detail = platform_detail.get_configuration(configuration) is_monolithic = platform_detail.is_monolithic or configuration_detail.settings.is_monolithic is_windows = platform_detail.attributes.get('is_windows', False) if is_windows and not is_monolithic: copy_timer = Utils.Timer() # Check if current configuration is a debug build is_debug = configuration.startswith('debug') # Copy all the dlls required by Qt # Copy to the current configuration's BinXXX folder files_copied = _copy_qt_dlls(self, output_path, WINDOWS_MAIN_QT_DLLS) # Copy specific files if we are 
building from the engine folder if self.is_engine_local(): # Copy to the current configuration's BinXXX/rc folder if 'rc' is an included module in the current spec if _is_module_in_current_project_spec('ResourceCompiler'): files_copied += _copy_qt_dlls(self, os.path.join(output_path, 'rc'), WINDOWS_RC_QT_DLLS) # Copy to the LmbrSetup folder if SetupAssistant is defined if _is_module_in_current_project_spec('SetupAssistant') or _is_module_in_current_project_spec('SetupAssistantBatch'): files_copied += _copy_qt_dlls(self, self.Path(self.get_lmbr_setup_tools_output_folder()), lmbr_setup_tools.LMBR_SETUP_QT_FILTERS['win']['Modules']) # Report the sync job, but only report the number of files if any were actually copied if files_copied > 0: Logs.info('[INFO] Copied Qt DLLs to target folder: {} files copied. ({})' .format(files_copied, str(copy_timer))) else: if Logs.verbose > 1: Logs.info('[INFO] Skipped qt dll copy to target folder. ({})'.format(str(copy_timer))) # Check if this is a platform that supports the qtlib folder synchronization platform_details = self.get_target_platform_detail(platform) if platform_details.attributes.get('qt_supported', False): copy_timer = Utils.Timer() # Used as a pattern-set to ignore certain qt library files ignore_lib_patterns = IGNORE_QTLIB_PATTERNS if is_copy_pdbs else IGNORE_QTLIB_PATTERNS + ['.pdb'] # Copy the entire qtlib folder to current output path # Contains lib, plugins and qml folders, and license information files_copied = _copy_qtlib_folder(self, output_path, platform_details, ignore_lib_patterns, False) files_copied = _bootstrap_qtwebengine(self, output_path, platform_details, ignore_lib_patterns, False) lmbr_configuration_key = 'debug' if is_debug else 'profile' lmbr_platform_key = '' for key in lmbr_setup_tools.LMBR_SETUP_QT_FILTERS: if platform.startswith(key): lmbr_platform_key = key break if not lmbr_platform_key: Logs.error('Cannot find the current configuration ({}) to setup LmbrSetup folder.'.format(platform)) files_copied += _copy_qtlib_folder(self, self.Path(self.get_lmbr_setup_tools_output_folder()), platform_details, lmbr_setup_tools.LMBR_SETUP_QT_FILTERS[lmbr_platform_key]['qtlibs'][lmbr_configuration_key], True) # Report the sync job, but only report the number of files if any were actually copied if files_copied > 0: Logs.info('[INFO] Copied qtlibs folder to target folder: {} files copied. ({})' .format(files_copied, str(copy_timer))) else: if Logs.verbose > 1: Logs.info('[INFO] Copied qtlibs folder to target folder: No files copied. ({})' .format(str(copy_timer))) @lumberyard.multi_conf def generate_ib_profile_tool_elements(ctx): qt_tool_elements = [ '<Tool Filename="moc" AllowIntercept="false" AllowRemote="true" AllowPredictedBatch="true" DeriveCaptionFrom="lastparam"/>', '<Tool Filename="uic" AllowIntercept="false" AllowRemote="true" AllowPredictedBatch="true" DeriveCaptionFrom="lastparam"/>', '<Tool Filename="rcc" AllowIntercept="false" AllowRemote="true" AllowPredictedBatch="true" DeriveCaptionFrom="lastparam"/>', '<Tool Filename="link" AllowRemote="false" AllowIntercept="false" DeriveCaptionFrom="firstparam" IdentifyTaskOutput="true" AllowRestartOnLocal="false" VCCompiler="false"/>', '<Tool Filename="lld-link" AllowRemote="false" AllowIntercept="false" DeriveCaptionFrom="firstparam" IdentifyTaskOutput="true" AllowRestartOnLocal="false" VCCompiler="false"/>' ] return qt_tool_elements
[]
[]
[ "QT5_ROOT", "QT5_LIBDIR", "QT5_BIN", "PKG_CONFIG_PATH", "QT5_INCLUDES" ]
[]
["QT5_ROOT", "QT5_LIBDIR", "QT5_BIN", "PKG_CONFIG_PATH", "QT5_INCLUDES"]
python
5
0
tools/functional-tester/etcd-agent/agent_test.go
// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "io/ioutil" "os" "path/filepath" "testing" ) var etcdPath = filepath.Join(os.Getenv("GOPATH"), "bin/etcd") func TestAgentStart(t *testing.T) { defer os.Remove("etcd.log") a, dir := newTestAgent(t) defer a.terminate() err := a.start("--data-dir", dir) if err != nil { t.Fatal(err) } } func TestAgentRestart(t *testing.T) { defer os.Remove("etcd.log") a, dir := newTestAgent(t) defer a.terminate() err := a.start("--data-dir", dir) if err != nil { t.Fatal(err) } err = a.stop() if err != nil { t.Fatal(err) } err = a.restart() if err != nil { t.Fatal(err) } } func TestAgentTerminate(t *testing.T) { defer os.Remove("etcd.log") a, dir := newTestAgent(t) err := a.start("--data-dir", dir) if err != nil { t.Fatal(err) } err = a.terminate() if err != nil { t.Fatal(err) } if _, err := os.Stat(dir); !os.IsNotExist(err) { t.Fatal(err) } } // newTestAgent creates a test agent with a temp data directory. func newTestAgent(t *testing.T) (*Agent, string) { a, err := newAgent(etcdPath, "etcd.log") if err != nil { t.Fatal(err) } dir, err := ioutil.TempDir(os.TempDir(), "etcd-agent") if err != nil { t.Fatal(err) } return a, dir }
[ "\"GOPATH\"" ]
[]
[ "GOPATH" ]
[]
["GOPATH"]
go
1
0
gke-windows-builder/builder/builder/bucket_test.go
// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package builder import ( "archive/zip" "context" "fmt" "io/ioutil" "net/url" "os" "path/filepath" "strings" "testing" "time" "cloud.google.com/go/storage" ) func TestCreateZip(t *testing.T) { t.Parallel() abs, err := filepath.Abs("testdata") if err != nil { t.Fatal(err) } for name, path := range map[string]string{ "relative": "testdata", "absolute": abs, } { t.Run(name, func(t *testing.T) { zf, err := createZip(context.Background(), path) if err != nil { t.Fatal(err) } zr, err := zip.OpenReader(zf) if err != nil { t.Fatal(err) } defer zr.Close() expected := map[string]string{ "file-a.txt": "hello world", "file-b.txt": "foo bar", filepath.Join("subdir", "file-d.txt"): "bar baz", } for _, f := range zr.File { expectedData, ok := expected[f.Name] if !ok { t.Fatalf("unexpected file %q found in archive", f.Name) } r, err := f.Open() if err != nil { t.Fatal(err) } ad, err := ioutil.ReadAll(r) if err != nil { t.Fatal(err) } actualData := string(ad) // We'll trim space to make testing simpler actualData = strings.TrimSpace(actualData) if actualData != expectedData { t.Fatalf("expected data from %s to be %q, got %q", f.Name, expectedData, actualData) } } if len(expected) != len(zr.File) { t.Fatalf("expected archive to have %d files, had %d", len(expected), len(zr.File)) } }) } } func TestCreateZip_cancelled_context(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) cancel() if _, err := createZip(ctx, "testdata"); err == nil { t.Fatal("expected an error") } } func bucketTestsInfo(t *testing.T) ( bucket string, object string, ) { // This test assumes information has been passed in. If ANY of it is // missing, skip it. bucket = os.Getenv("GCP_BUCKET") if bucket == "" { t.Skipf("Missing environment variable GCP_BUCKET, skipping...") } return bucket, fmt.Sprintf("test-write-to-bucket-%d", time.Now().UnixNano()) } func TestWriteToBucket(t *testing.T) { t.Parallel() bucket, object := bucketTestsInfo(t) gsURL, err := writeToBucket( context.Background(), bucket, object, "testdata/file-a.txt", ) if err != nil { t.Fatal(err) } expected := "hello world" actual := readBucket(t, gsURL) if actual != expected { t.Fatalf("expected %q to equal %q", actual, expected) } } func readBucket(t *testing.T, gsURL string) string { t.Helper() u, err := url.Parse(gsURL) if err != nil { t.Fatal(err) } if u.Scheme != "gs" { t.Fatalf(`expected scheme to be "gs", got %q`, u.Scheme) } bucket := u.Host object := u.Path if strings.HasPrefix(object, "/") { object = object[1:] } ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { t.Fatal(err) } reader, err := client.Bucket(bucket).Object(object).NewReader(ctx) if err != nil { t.Fatal(err) } data, err := ioutil.ReadAll(reader) if err != nil { t.Fatal(err) } // We'll trim space to make testing simpler return strings.TrimSpace(string(data)) }
[ "\"GCP_BUCKET\"" ]
[]
[ "GCP_BUCKET" ]
[]
["GCP_BUCKET"]
go
1
0
sdk/management/samples/src/main/java/com/azure/management/sql/samples/ManageSqlServerSecurityAlertPolicy.java
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. package com.azure.management.sql.samples; import com.azure.core.http.policy.HttpLogDetailLevel; import com.azure.core.management.AzureEnvironment; import com.azure.core.management.serializer.AzureJacksonAdapter; import com.azure.management.ApplicationTokenCredential; import com.azure.management.Azure; import com.azure.management.RestClient; import com.azure.management.RestClientBuilder; import com.azure.management.resources.fluentcore.arm.Region; import com.azure.management.samples.Utils; import com.azure.management.sql.SampleName; import com.azure.management.sql.SecurityAlertPolicyState; import com.azure.management.sql.SqlDatabaseStandardServiceObjective; import com.azure.management.sql.SqlServer; import com.azure.management.sql.SqlServerSecurityAlertPolicy; import com.azure.management.storage.StorageAccount; import java.io.File; /** * Azure SQL sample for managing SQL Server Security Alert Policy * - Create a SQL Server. * - Create an Azure Storage Account and get the storage account blob entry point * - Create a Server Security Alert Policy * - Get the Server Security Alert Policy. * - Update the Server Security Alert Policy. * - Delete the Sql Server */ public class ManageSqlServerSecurityAlertPolicy { /** * Main function which runs the actual sample. * @param azure instance of the azure client * @return true if sample runs successfully */ public static boolean runSample(Azure azure) { final String sqlServerName = azure.sdkContext().randomResourceName("sql", 20); final String storageAccountName = azure.sdkContext().randomResourceName("sqlsa", 20); final String rgName = azure.sdkContext().randomResourceName("rgsql", 20); final Region region = Region.US_EAST; final String dbName = "dbSample"; final String administratorLogin = "sqladmin3423"; // [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="Serves as an example, not for deployment. Please change when using this in your code.")] final String administratorPassword = "myS3cureP@ssword"; try { // ============================================================ // Create a primary SQL Server with a sample database. System.out.println("Creating a primary SQL Server with a sample database"); SqlServer sqlServer = azure.sqlServers().define(sqlServerName) .withRegion(region) .withNewResourceGroup(rgName) .withAdministratorLogin(administratorLogin) .withAdministratorPassword(administratorPassword) .defineDatabase(dbName) .fromSample(SampleName.ADVENTURE_WORKS_LT) .withStandardEdition(SqlDatabaseStandardServiceObjective.S0) .attach() .create(); Utils.print(sqlServer); // ============================================================ // Create an Azure Storage Account and get the storage account blob entry point. System.out.println("Creating an Azure Storage Account and a storage account blob"); StorageAccount storageAccount = azure.storageAccounts().define(storageAccountName) .withRegion(region) .withExistingResourceGroup(rgName) .create(); String accountKey = storageAccount.getKeys().get(0).value(); String blobEntrypoint = storageAccount.endPoints().primary().blob(); // ============================================================ // Create a Server Security Alert Policy. 
System.out.println("Creating a Server Security Alert Policy"); sqlServer.serverSecurityAlertPolicies().define() .withState(SecurityAlertPolicyState.ENABLED) .withEmailAccountAdmins() .withStorageEndpoint(blobEntrypoint, accountKey) .withDisabledAlerts("Access_Anomaly", "Sql_Injection") .withRetentionDays(5) .create(); // ============================================================ // Get the Server Security Alert Policy. System.out.println("Getting the Server Security Alert Policy"); SqlServerSecurityAlertPolicy sqlSecurityAlertPolicy = sqlServer.serverSecurityAlertPolicies().get(); // ============================================================ // Update the Server Security Alert Policy. System.out.println("Updating the Server Security Alert Policy"); sqlSecurityAlertPolicy = sqlSecurityAlertPolicy.update() .withoutEmailAccountAdmins() .withEmailAddresses("[email protected]") .withRetentionDays(1) .apply(); // Delete the SQL Servers. System.out.println("Deleting the Sql Servers"); azure.sqlServers().deleteById(sqlServer.id()); return true; } catch (Exception f) { System.out.println(f.getMessage()); f.printStackTrace(); } finally { try { System.out.println("Deleting Resource Group: " + rgName); azure.resourceGroups().deleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); } catch (Exception e) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } } return false; } /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final File credFile = new File(System.getenv("AZURE_AUTH_LOCATION")); ApplicationTokenCredential credentials = ApplicationTokenCredential.fromFile(credFile); RestClient restClient = new RestClientBuilder() .withBaseUrl(AzureEnvironment.AZURE, AzureEnvironment.Endpoint.RESOURCE_MANAGER) .withSerializerAdapter(new AzureJacksonAdapter()) // .withReadTimeout(150, TimeUnit.SECONDS) .withLogLevel(HttpLogDetailLevel.BASIC) .withCredential(credentials).buildClient(); Azure azure = Azure.authenticate(restClient, credentials.getDomain(), credentials.getDefaultSubscriptionId()).withDefaultSubscription(); // Print selected subscription System.out.println("Selected subscription: " + azure.subscriptionId()); runSample(azure); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } }
[ "\"AZURE_AUTH_LOCATION\"" ]
[]
[ "AZURE_AUTH_LOCATION" ]
[]
["AZURE_AUTH_LOCATION"]
java
1
0
cmd/main.go
// Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddycmd import ( "bytes" "flag" "fmt" "io" "io/ioutil" "net" "os" "path/filepath" "runtime" "strconv" "strings" "time" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig" "go.uber.org/zap" ) // Main implements the main function of the caddy command. // Call this if Caddy is to be the main() if your program. func Main() { caddy.TrapSignals() switch len(os.Args) { case 0: fmt.Printf("[FATAL] no arguments provided by OS; args[0] must be command\n") os.Exit(caddy.ExitCodeFailedStartup) case 1: os.Args = append(os.Args, "help") } subcommandName := os.Args[1] subcommand, ok := commands[subcommandName] if !ok { if strings.HasPrefix(os.Args[1], "-") { // user probably forgot to type the subcommand fmt.Println("[ERROR] first argument must be a subcommand; see 'caddy help'") } else { fmt.Printf("[ERROR] '%s' is not a recognized subcommand; see 'caddy help'\n", os.Args[1]) } os.Exit(caddy.ExitCodeFailedStartup) } fs := subcommand.Flags if fs == nil { fs = flag.NewFlagSet(subcommand.Name, flag.ExitOnError) } err := fs.Parse(os.Args[2:]) if err != nil { fmt.Println(err) os.Exit(caddy.ExitCodeFailedStartup) } exitCode, err := subcommand.Func(Flags{fs}) if err != nil { fmt.Fprintf(os.Stderr, "%s: %v\n", subcommand.Name, err) } os.Exit(exitCode) } // handlePingbackConn reads from conn and ensures it matches // the bytes in expect, or returns an error if it doesn't. func handlePingbackConn(conn net.Conn, expect []byte) error { defer conn.Close() confirmationBytes, err := ioutil.ReadAll(io.LimitReader(conn, 32)) if err != nil { return err } if !bytes.Equal(confirmationBytes, expect) { return fmt.Errorf("wrong confirmation: %x", confirmationBytes) } return nil } // loadConfig loads the config from configFile and adapts it // using adapterName. If adapterName is specified, configFile // must be also. If no configFile is specified, it tries // loading a default config file. The lack of a config file is // not treated as an error, but false will be returned if // there is no config available. It prints any warnings to stderr, // and returns the resulting JSON config bytes along with // whether a config file was loaded or not. 
func loadConfig(configFile, adapterName string) ([]byte, bool, error) { // specifying an adapter without a config file is ambiguous if adapterName != "" && configFile == "" { return nil, false, fmt.Errorf("cannot adapt config without config file (use --config)") } // load initial config and adapter var config []byte var cfgAdapter caddyconfig.Adapter var err error if configFile != "" { config, err = ioutil.ReadFile(configFile) if err != nil { return nil, false, fmt.Errorf("reading config file: %v", err) } caddy.Log().Info("using provided configuration", zap.String("config_file", configFile), zap.String("config_adapter", adapterName)) } else if adapterName == "" { // as a special case when no config file or adapter // is specified, see if the Caddyfile adapter is // plugged in, and if so, try using a default Caddyfile cfgAdapter = caddyconfig.GetAdapter("caddyfile") if cfgAdapter != nil { config, err = ioutil.ReadFile("Caddyfile") if os.IsNotExist(err) { // okay, no default Caddyfile; pretend like this never happened cfgAdapter = nil } else if err != nil { // default Caddyfile exists, but error reading it return nil, false, fmt.Errorf("reading default Caddyfile: %v", err) } else { // success reading default Caddyfile configFile = "Caddyfile" caddy.Log().Info("using adjacent Caddyfile") } } } // as a special case, if a config file called "Caddyfile" was // specified, and no adapter is specified, assume caddyfile adapter // for convenience if strings.HasPrefix(filepath.Base(configFile), "Caddyfile") && filepath.Ext(configFile) != ".json" && adapterName == "" { adapterName = "caddyfile" } // load config adapter if adapterName != "" { cfgAdapter = caddyconfig.GetAdapter(adapterName) if cfgAdapter == nil { return nil, false, fmt.Errorf("unrecognized config adapter: %s", adapterName) } } // adapt config if cfgAdapter != nil { adaptedConfig, warnings, err := cfgAdapter.Adapt(config, map[string]interface{}{ "filename": configFile, }) if err != nil { return nil, false, fmt.Errorf("adapting config using %s: %v", adapterName, err) } for _, warn := range warnings { msg := warn.Message if warn.Directive != "" { msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message) } fmt.Printf("[WARNING][%s] %s:%d: %s\n", adapterName, warn.File, warn.Line, msg) } config = adaptedConfig } return config, configFile != "", nil } // Flags wraps a FlagSet so that typed values // from flags can be easily retrieved. type Flags struct { *flag.FlagSet } // String returns the string representation of the // flag given by name. It panics if the flag is not // in the flag set. func (f Flags) String(name string) string { return f.FlagSet.Lookup(name).Value.String() } // Bool returns the boolean representation of the // flag given by name. It returns false if the flag // is not a boolean type. It panics if the flag is // not in the flag set. func (f Flags) Bool(name string) bool { val, _ := strconv.ParseBool(f.String(name)) return val } // Int returns the integer representation of the // flag given by name. It returns 0 if the flag // is not an integer type. It panics if the flag is // not in the flag set. func (f Flags) Int(name string) int { val, _ := strconv.ParseInt(f.String(name), 0, strconv.IntSize) return int(val) } // Float64 returns the float64 representation of the // flag given by name. It returns false if the flag // is not a float63 type. It panics if the flag is // not in the flag set. 
func (f Flags) Float64(name string) float64 { val, _ := strconv.ParseFloat(f.String(name), 64) return val } // Duration returns the duration representation of the // flag given by name. It returns false if the flag // is not a duration type. It panics if the flag is // not in the flag set. func (f Flags) Duration(name string) time.Duration { val, _ := time.ParseDuration(f.String(name)) return val } // flagHelp returns the help text for fs. func flagHelp(fs *flag.FlagSet) string { if fs == nil { return "" } // temporarily redirect output out := fs.Output() defer fs.SetOutput(out) buf := new(bytes.Buffer) fs.SetOutput(buf) fs.PrintDefaults() return buf.String() } func printEnvironment() { fmt.Printf("caddy.HomeDir=%s\n", caddy.HomeDir()) fmt.Printf("caddy.AppDataDir=%s\n", caddy.AppDataDir()) fmt.Printf("caddy.AppConfigDir=%s\n", caddy.AppConfigDir()) fmt.Printf("caddy.ConfigAutosavePath=%s\n", caddy.ConfigAutosavePath) fmt.Printf("runtime.GOOS=%s\n", runtime.GOOS) fmt.Printf("runtime.GOARCH=%s\n", runtime.GOARCH) fmt.Printf("runtime.Compiler=%s\n", runtime.Compiler) fmt.Printf("runtime.NumCPU=%d\n", runtime.NumCPU()) fmt.Printf("runtime.GOMAXPROCS=%d\n", runtime.GOMAXPROCS(0)) fmt.Printf("runtime.Version=%s\n", runtime.Version()) cwd, err := os.Getwd() if err != nil { cwd = fmt.Sprintf("<error: %v>", err) } fmt.Printf("os.Getwd=%s\n\n", cwd) for _, v := range os.Environ() { fmt.Println(v) } } // moveStorage moves the old default dataDir to the new default dataDir. // TODO: This is TEMPORARY until the release candidates. func moveStorage() { // get the home directory (the old way) oldHome := os.Getenv("HOME") if oldHome == "" && runtime.GOOS == "windows" { drive := os.Getenv("HOMEDRIVE") path := os.Getenv("HOMEPATH") oldHome = drive + path if drive == "" || path == "" { oldHome = os.Getenv("USERPROFILE") } } if oldHome == "" { oldHome = "." 
} oldDataDir := filepath.Join(oldHome, ".local", "share", "caddy") // nothing to do if old data dir doesn't exist _, err := os.Stat(oldDataDir) if os.IsNotExist(err) { return } // nothing to do if the new data dir is the same as the old one newDataDir := caddy.AppDataDir() if oldDataDir == newDataDir { return } logger := caddy.Log().Named("automigrate").With( zap.String("old_dir", oldDataDir), zap.String("new_dir", newDataDir)) logger.Info("beginning one-time data directory migration", zap.String("details", "https://github.com/caddyserver/caddy/issues/2955")) // if new data directory exists, avoid auto-migration as a conservative safety measure _, err = os.Stat(newDataDir) if !os.IsNotExist(err) { logger.Error("new data directory already exists; skipping auto-migration as conservative safety measure", zap.Error(err), zap.String("instructions", "https://github.com/caddyserver/caddy/issues/2955#issuecomment-570000333")) return } // construct the new data directory's parent folder err = os.MkdirAll(filepath.Dir(newDataDir), 0700) if err != nil { logger.Error("unable to make new datadirectory - follow link for instructions", zap.String("instructions", "https://github.com/caddyserver/caddy/issues/2955#issuecomment-570000333"), zap.Error(err)) return } // folder structure is same, so just try to rename (move) it; // this fails if the new path is on a separate device err = os.Rename(oldDataDir, newDataDir) if err != nil { logger.Error("new data directory already exists; skipping auto-migration as conservative safety measure - follow link for instructions", zap.String("instructions", "https://github.com/caddyserver/caddy/issues/2955#issuecomment-570000333"), zap.Error(err)) } logger.Info("successfully completed one-time migration of data directory", zap.String("details", "https://github.com/caddyserver/caddy/issues/2955")) }
[ "\"HOME\"", "\"HOMEDRIVE\"", "\"HOMEPATH\"", "\"USERPROFILE\"" ]
[]
[ "USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE" ]
[]
["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"]
go
4
0
pkg/nodeidentity/openstack/identify.go
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package openstack import ( "context" "fmt" "os" "strings" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" corev1 "k8s.io/api/core/v1" "k8s.io/kops/pkg/nodeidentity" ) // nodeIdentifier identifies a node type nodeIdentifier struct { novaClient *gophercloud.ServiceClient } // New creates and returns a nodeidentity.Identifier for Nodes running on OpenStack func New() (nodeidentity.Identifier, error) { env, err := openstack.AuthOptionsFromEnv() if err != nil { return nil, err } region := os.Getenv("OS_REGION_NAME") if region == "" { return nil, fmt.Errorf("unable to find region") } provider, err := openstack.NewClient(env.IdentityEndpoint) if err != nil { return nil, err } err = openstack.Authenticate(provider, env) if err != nil { return nil, err } novaClient, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{ Type: "compute", Region: region, }) if err != nil { return nil, fmt.Errorf("error building nova client: %v", err) } return &nodeIdentifier{ novaClient: novaClient, }, nil } // IdentifyNode queries OpenStack for the node identity information func (i *nodeIdentifier) IdentifyNode(ctx context.Context, node *corev1.Node) (*nodeidentity.Info, error) { providerID := node.Spec.ProviderID if providerID == "" { return nil, fmt.Errorf("providerID was not set for node %s", node.Name) } if !strings.HasPrefix(providerID, "openstack://") { return nil, fmt.Errorf("providerID %q not recognized for node %s", providerID, node.Name) } instanceID := strings.TrimPrefix(providerID, "openstack://") // instanceid looks like its openstack:/// but no idea is that really correct like that? // this supports now both openstack:// and openstack:/// format if strings.HasPrefix(instanceID, "/") { instanceID = strings.TrimPrefix(instanceID, "/") } kopsGroup, err := i.getInstanceGroup(instanceID) if err != nil { return nil, err } info := &nodeidentity.Info{} info.InstanceGroup = kopsGroup return info, nil } func (i *nodeIdentifier) getInstanceGroup(instanceID string) (string, error) { instance, err := servers.Get(i.novaClient, instanceID).Extract() if err != nil { return "", err } if val, ok := instance.Metadata["KopsInstanceGroup"]; ok { return val, nil } return "", fmt.Errorf("could not find tag 'KopsInstanceGroup' from instance metadata") }
[ "\"OS_REGION_NAME\"" ]
[]
[ "OS_REGION_NAME" ]
[]
["OS_REGION_NAME"]
go
1
0
cmd/roomserver-integration-tests/main.go
// Copyright 2017 Vector Creations Ltd // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "strings" "time" "encoding/json" "net/http" "github.com/matrix-org/dendrite/internal/caching" "github.com/matrix-org/dendrite/internal/test" "github.com/matrix-org/dendrite/roomserver/api" "github.com/matrix-org/dendrite/roomserver/inthttp" "github.com/matrix-org/gomatrixserverlib" ) var ( // Path to where kafka is installed. kafkaDir = defaulting(os.Getenv("KAFKA_DIR"), "kafka") // The URI the kafka zookeeper is listening on. zookeeperURI = defaulting(os.Getenv("ZOOKEEPER_URI"), "localhost:2181") // The URI the kafka server is listening on. kafkaURI = defaulting(os.Getenv("KAFKA_URIS"), "localhost:9092") // How long to wait for the roomserver to write the expected output messages. // This needs to be high enough to account for the time it takes to create // the postgres database tables which can take a while on travis. timeoutString = defaulting(os.Getenv("TIMEOUT"), "60s") // Timeout for http client timeoutHTTPClient = defaulting(os.Getenv("TIMEOUT_HTTP"), "30s") // The name of maintenance database to connect to in order to create the test database. postgresDatabase = defaulting(os.Getenv("POSTGRES_DATABASE"), "postgres") // The name of the test database to create. testDatabaseName = defaulting(os.Getenv("DATABASE_NAME"), "roomserver_test") // The postgres connection config for connecting to the test database. testDatabase = defaulting(os.Getenv("DATABASE"), fmt.Sprintf("dbname=%s binary_parameters=yes", testDatabaseName)) ) var exe = test.KafkaExecutor{ ZookeeperURI: zookeeperURI, KafkaDirectory: kafkaDir, KafkaURI: kafkaURI, // Send stdout and stderr to our stderr so that we see error messages from // the kafka process. OutputWriter: os.Stderr, } func defaulting(value, defaultValue string) string { if value == "" { value = defaultValue } return value } var ( timeout time.Duration timeoutHTTP time.Duration ) func init() { var err error timeout, err = time.ParseDuration(timeoutString) if err != nil { panic(err) } timeoutHTTP, err = time.ParseDuration(timeoutHTTPClient) if err != nil { panic(err) } } func createDatabase(database string) error { cmd := exec.Command("psql", postgresDatabase) cmd.Stdin = strings.NewReader( fmt.Sprintf("DROP DATABASE IF EXISTS %s; CREATE DATABASE %s;", database, database), ) // Send stdout and stderr to our stderr so that we see error messages from // the psql process cmd.Stdout = os.Stderr cmd.Stderr = os.Stderr return cmd.Run() } // runAndReadFromTopic runs a command and waits for a number of messages to be // written to a kafka topic. It returns if the command exits, the number of // messages is reached or after a timeout. It kills the command before it returns. // It returns a list of the messages read from the command on success or an error // on failure. 
func runAndReadFromTopic(runCmd *exec.Cmd, readyURL string, doInput func(), topic string, count int, checkQueryAPI func()) ([]string, error) { type result struct { // data holds all of stdout on success. data []byte // err is set on failure. err error } done := make(chan result) readCmd := exec.Command( filepath.Join(kafkaDir, "bin", "kafka-console-consumer.sh"), "--bootstrap-server", kafkaURI, "--topic", topic, "--from-beginning", "--max-messages", fmt.Sprintf("%d", count), ) // Send stderr to our stderr so the user can see any error messages. readCmd.Stderr = os.Stderr // Kill both processes before we exit. defer func() { runCmd.Process.Kill() }() // nolint: errcheck defer func() { readCmd.Process.Kill() }() // nolint: errcheck // Run the command, read the messages and wait for a timeout in parallel. go func() { // Read all of stdout. defer func() { if err := recover(); err != nil { if errv, ok := err.(error); ok { done <- result{nil, errv} } else { panic(err) } } }() data, err := readCmd.Output() checkQueryAPI() done <- result{data, err} }() go func() { err := runCmd.Run() done <- result{nil, err} }() go func() { time.Sleep(timeout) done <- result{nil, fmt.Errorf("Timeout reading %d messages from topic %q", count, topic)} }() // Poll the HTTP listener of the process waiting for it to be ready to receive requests. ready := make(chan struct{}) go func() { delay := 10 * time.Millisecond for { time.Sleep(delay) if delay < 100*time.Millisecond { delay *= 2 } resp, err := http.Get(readyURL) if err != nil { continue } if resp.StatusCode == 200 { break } } ready <- struct{}{} }() // Wait for the roomserver to be ready to receive input or for it to crash. select { case <-ready: case r := <-done: return nil, r.err } // Write the input now that the server is running. doInput() // Wait for one of the tasks to finsh. r := <-done if r.err != nil { return nil, r.err } // The kafka console consumer writes a newline character after each message. // So we split on newline characters lines := strings.Split(string(r.data), "\n") if len(lines) > 0 { // Remove the blank line at the end of the data. lines = lines[:len(lines)-1] } return lines, nil } func writeToRoomServer(input []string, roomserverURL string) error { var request api.InputRoomEventsRequest var response api.InputRoomEventsResponse var err error request.InputRoomEvents = make([]api.InputRoomEvent, len(input)) for i := range input { if err = json.Unmarshal([]byte(input[i]), &request.InputRoomEvents[i]); err != nil { return err } } x, err := inthttp.NewRoomserverClient(roomserverURL, &http.Client{Timeout: timeoutHTTP}, nil) if err != nil { return err } return x.InputRoomEvents(context.Background(), &request, &response) } // testRoomserver is used to run integration tests against a single roomserver. // It creates new kafka topics for the input and output of the roomserver. // It writes the input messages to the input kafka topic, formatting each message // as canonical JSON so that it fits on a single line. // It then runs the roomserver and waits for a number of messages to be written // to the output topic. // Once those messages have been written it runs the checkQueries function passing // a api.RoomserverQueryAPI client. The caller can use this function to check the // behaviour of the query API. 
func testRoomserver(input []string, wantOutput []string, checkQueries func(api.RoomserverInternalAPI)) { dir, err := ioutil.TempDir("", "room-server-test") if err != nil { panic(err) } cfg, _, err := test.MakeConfig(dir, kafkaURI, testDatabase, "localhost", 10000) if err != nil { panic(err) } if err = test.WriteConfig(cfg, dir); err != nil { panic(err) } outputTopic := string(cfg.Kafka.Topics.OutputRoomEvent) err = exe.DeleteTopic(outputTopic) if err != nil { panic(err) } if err = exe.CreateTopic(outputTopic); err != nil { panic(err) } if err = createDatabase(testDatabaseName); err != nil { panic(err) } cache, err := caching.NewInMemoryLRUCache(false) if err != nil { panic(err) } doInput := func() { fmt.Printf("Roomserver is ready to receive input, sending %d events\n", len(input)) if err = writeToRoomServer(input, cfg.RoomServerURL()); err != nil { panic(err) } } cmd := exec.Command(filepath.Join(filepath.Dir(os.Args[0]), "dendrite-room-server")) // Append the roomserver config to the existing environment. // We append to the environment rather than replacing so that any additional // postgres and golang environment variables such as PGHOST are passed to // the roomserver process. cmd.Stderr = os.Stderr cmd.Args = []string{"dendrite-room-server", "--config", filepath.Join(dir, test.ConfigFile)} gotOutput, err := runAndReadFromTopic(cmd, cfg.RoomServerURL()+"/metrics", doInput, outputTopic, len(wantOutput), func() { queryAPI, _ := inthttp.NewRoomserverClient("http://"+string(cfg.Listen.RoomServer), &http.Client{Timeout: timeoutHTTP}, cache) checkQueries(queryAPI) }) if err != nil { panic(err) } if len(wantOutput) != len(gotOutput) { panic(fmt.Errorf("Wanted %d lines of output got %d lines", len(wantOutput), len(gotOutput))) } for i := range wantOutput { if !equalJSON(wantOutput[i], gotOutput[i]) { panic(fmt.Errorf("Wanted %q at index %d got %q", wantOutput[i], i, gotOutput[i])) } } } func equalJSON(a, b string) bool { canonicalA, err := gomatrixserverlib.CanonicalJSON([]byte(a)) if err != nil { panic(err) } canonicalB, err := gomatrixserverlib.CanonicalJSON([]byte(b)) if err != nil { panic(err) } return string(canonicalA) == string(canonicalB) } func main() { fmt.Println("==TESTING==", os.Args[0]) input := []string{ `{ "auth_event_ids": [], "kind": 1, "event": { "origin": "matrix.org", "signatures": { "matrix.org": { "ed25519:auto": "3kXGwNtdj+zqEXlI8PWLiB76xtrQ7SxcvPuXAEVCTo+QPoBoUvLi1RkHs6O5mDz7UzIowK5bi1seAN4vOh0OBA" } }, "origin_server_ts": 1463671337837, "sender": "@richvdh:matrix.org", "event_id": "$1463671337126266wrSBX:matrix.org", "prev_events": [], "state_key": "", "content": {"creator": "@richvdh:matrix.org"}, "depth": 1, "prev_state": [], "room_id": "!HCXfdvrfksxuYnIFiJ:matrix.org", "auth_events": [], "hashes": {"sha256": "Q05VLC8nztN2tguy+KnHxxhitI95wK9NelnsDaXRqeo"}, "type": "m.room.create"} }`, `{ "auth_event_ids": ["$1463671337126266wrSBX:matrix.org"], "kind": 2, "state_event_ids": ["$1463671337126266wrSBX:matrix.org"], "event": { "origin": "matrix.org", "signatures": { "matrix.org": { "ed25519:auto": "a2b3xXYVPPFeG1sHCU3hmZnAaKqZFgzGZozijRGblG5Y//ewRPAn1A2mCrI2UM5I+0zqr70cNpHgF8bmNFu4BA" } }, "origin_server_ts": 1463671339844, "sender": "@richvdh:matrix.org", "event_id": "$1463671339126270PnVwC:matrix.org", "prev_events": [[ "$1463671337126266wrSBX:matrix.org", {"sha256": "h/VS07u8KlMwT3Ee8JhpkC7sa1WUs0Srgs+l3iBv6c0"} ]], "membership": "join", "state_key": "@richvdh:matrix.org", "content": { "membership": "join", "avatar_url": "mxc://matrix.org/ZafPzsxMJtLaSaJXloBEKiws", 
"displayname": "richvdh" }, "depth": 2, "prev_state": [], "room_id": "!HCXfdvrfksxuYnIFiJ:matrix.org", "auth_events": [[ "$1463671337126266wrSBX:matrix.org", {"sha256": "h/VS07u8KlMwT3Ee8JhpkC7sa1WUs0Srgs+l3iBv6c0"} ]], "hashes": {"sha256": "t9t3sZV1Eu0P9Jyrs7pge6UTa1zuTbRdVxeUHnrQVH0"}, "type": "m.room.member"}, "has_state": true }`, } want := []string{ `{"type":"new_room_event","new_room_event":{ "event":{ "auth_events":[[ "$1463671337126266wrSBX:matrix.org",{"sha256":"h/VS07u8KlMwT3Ee8JhpkC7sa1WUs0Srgs+l3iBv6c0"} ]], "content":{ "avatar_url":"mxc://matrix.org/ZafPzsxMJtLaSaJXloBEKiws", "displayname":"richvdh", "membership":"join" }, "depth": 2, "event_id": "$1463671339126270PnVwC:matrix.org", "hashes": {"sha256":"t9t3sZV1Eu0P9Jyrs7pge6UTa1zuTbRdVxeUHnrQVH0"}, "membership": "join", "origin": "matrix.org", "origin_server_ts": 1463671339844, "prev_events": [[ "$1463671337126266wrSBX:matrix.org",{"sha256":"h/VS07u8KlMwT3Ee8JhpkC7sa1WUs0Srgs+l3iBv6c0"} ]], "prev_state":[], "room_id":"!HCXfdvrfksxuYnIFiJ:matrix.org", "sender":"@richvdh:matrix.org", "signatures":{ "matrix.org":{ "ed25519:auto":"a2b3xXYVPPFeG1sHCU3hmZnAaKqZFgzGZozijRGblG5Y//ewRPAn1A2mCrI2UM5I+0zqr70cNpHgF8bmNFu4BA" } }, "state_key":"@richvdh:matrix.org", "type":"m.room.member" }, "state_before_removes_event_ids":["$1463671339126270PnVwC:matrix.org"], "state_before_adds_event_ids":null, "latest_event_ids":["$1463671339126270PnVwC:matrix.org"], "adds_state_event_ids":["$1463671337126266wrSBX:matrix.org", "$1463671339126270PnVwC:matrix.org"], "removes_state_event_ids":null, "last_sent_event_id":"", "send_as_server":"", "transaction_id": null }}`, } testRoomserver(input, want, func(q api.RoomserverInternalAPI) { var response api.QueryLatestEventsAndStateResponse if err := q.QueryLatestEventsAndState( context.Background(), &api.QueryLatestEventsAndStateRequest{ RoomID: "!HCXfdvrfksxuYnIFiJ:matrix.org", StateToFetch: []gomatrixserverlib.StateKeyTuple{ {EventType: "m.room.member", StateKey: "@richvdh:matrix.org"}, }, }, &response, ); err != nil { panic(err) } if !response.RoomExists { panic(fmt.Errorf(`Wanted room "!HCXfdvrfksxuYnIFiJ:matrix.org" to exist`)) } if len(response.LatestEvents) != 1 || response.LatestEvents[0].EventID != "$1463671339126270PnVwC:matrix.org" { panic(fmt.Errorf(`Wanted "$1463671339126270PnVwC:matrix.org" to be the latest event got %#v`, response.LatestEvents)) } if len(response.StateEvents) != 1 || response.StateEvents[0].EventID() != "$1463671339126270PnVwC:matrix.org" { panic(fmt.Errorf(`Wanted "$1463671339126270PnVwC:matrix.org" to be the state event got %#v`, response.StateEvents)) } }) fmt.Println("==PASSED==", os.Args[0]) }
[ "\"KAFKA_DIR\"", "\"ZOOKEEPER_URI\"", "\"KAFKA_URIS\"", "\"TIMEOUT\"", "\"TIMEOUT_HTTP\"", "\"POSTGRES_DATABASE\"", "\"DATABASE_NAME\"", "\"DATABASE\"" ]
[]
[ "KAFKA_URIS", "DATABASE_NAME", "DATABASE", "KAFKA_DIR", "POSTGRES_DATABASE", "TIMEOUT_HTTP", "TIMEOUT", "ZOOKEEPER_URI" ]
[]
["KAFKA_URIS", "DATABASE_NAME", "DATABASE", "KAFKA_DIR", "POSTGRES_DATABASE", "TIMEOUT_HTTP", "TIMEOUT", "ZOOKEEPER_URI"]
go
8
0
lib/tests/streamlit/config_test.py
# -*- coding: utf-8 -*- # Copyright 2018-2019 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Config System Unittest.""" import copy import os import textwrap import unittest import pytest from mock import mock_open from mock import patch from streamlit import config from streamlit.ConfigOption import ConfigOption SECTION_DESCRIPTIONS = copy.deepcopy(config._section_descriptions) CONFIG_OPTIONS = copy.deepcopy(config._config_options) class ConfigTest(unittest.TestCase): """Test the config system.""" def setUp(self): self.patches = [ patch.object( config, "_section_descriptions", new=copy.deepcopy(SECTION_DESCRIPTIONS) ), patch.object(config, "_config_options", new=copy.deepcopy(CONFIG_OPTIONS)), ] for p in self.patches: p.start() def tearDown(self): for p in self.patches: p.stop() try: del os.environ["TEST_ENV_VAR"] except Exception: pass config._delete_option("_test.tomlTest") def test_simple_config_option(self): """Test creating a simple (constant) config option.""" # Create the config option. config_option = ConfigOption( "_test.simpleParam", description="Simple config option.", default_val=12345 ) # Test that it works. self.assertEqual(config_option.key, "_test.simpleParam") self.assertEqual(config_option.section, "_test") self.assertEqual(config_option.name, "simpleParam") self.assertEqual(config_option.description, "Simple config option.") self.assertEqual(config_option.where_defined, ConfigOption.DEFAULT_DEFINITION) self.assertEqual(config_option.value, 12345) def test_complex_config_option(self): """Test setting a complex (functional) config option.""" # Create the config option. @ConfigOption("_test.complexParam") def config_option(): """Complex config option.""" return 12345 # Test that it works. self.assertEqual(config_option.key, "_test.complexParam") self.assertEqual(config_option.section, "_test") self.assertEqual(config_option.name, "complexParam") self.assertEqual(config_option.description, "Complex config option.") self.assertEqual(config_option.where_defined, ConfigOption.DEFAULT_DEFINITION) self.assertEqual(config_option.value, 12345) def test_complex_config_option_must_have_doc_strings(self): """Test that complex config options use funcs with doc stringsself. This is becuase the doc string forms the option's description. 
""" with self.assertRaises(AssertionError): @ConfigOption("_test.noDocString") def no_doc_string(): pass def test_invalid_config_name(self): """Test setting an invalid config section.""" with self.assertRaises(AssertionError): ConfigOption("_test.myParam.") def test_invalid_config_section(self): """Test setting an invalid config section.""" with self.assertRaises(AssertionError): config._create_option("mySection.myParam") def test_cannot_overwrite_config_section(self): """Test overwriting a config section using _create_section.""" with self.assertRaises(AssertionError): config._create_section("_test2", "A test section.") config._create_section("_test2", "A test section.") def test_cannot_overwrite_config_key(self): """Test overwriting a config option using _create_option.""" with self.assertRaises(AssertionError): config._create_option("_test.overwriteKey") config._create_option("_test.overwriteKey") def test_param_names_are_camel_case(self): """Test that param names must be camelCase. Note the exception is the "_test" section which is used for unit testing. """ with self.assertRaises(AssertionError): config._create_option("_test.snake_case") def test_get_set_and_complex_config_options(self): """Verify that changing one option changes another, dependent one. This also implicitly tests simple and complex ConfigOptions as well as get_option() and set_option(). """ # Some useful variables. DUMMY_VAL_1, DUMMY_VAL_2, DUMMY_VAL_3 = "Steven", "Vincent", "Buscemi" # Set up both options. config._create_option( "_test.independentOption", description="This option can change at will", default_val=DUMMY_VAL_1, ) @config._create_option("_test.dependentOption") def _test_dependent_option(): """Depend on the value of _test.independentOption.""" return config.get_option("_test.independentOption") # Check that the default values are good. self.assertEqual(config.get_option("_test.independentOption"), DUMMY_VAL_1) self.assertEqual(config.get_option("_test.dependentOption"), DUMMY_VAL_1) self.assertEqual( config.get_where_defined("_test.independentOption"), ConfigOption.DEFAULT_DEFINITION, ) self.assertEqual( config.get_where_defined("_test.dependentOption"), ConfigOption.DEFAULT_DEFINITION, ) # Override the independent option. Both update! config.set_option("_test.independentOption", DUMMY_VAL_2) self.assertEqual(config.get_option("_test.independentOption"), DUMMY_VAL_2) self.assertEqual(config.get_option("_test.dependentOption"), DUMMY_VAL_2) self.assertEqual( config.get_where_defined("_test.independentOption"), config._USER_DEFINED ) self.assertEqual( config.get_where_defined("_test.dependentOption"), ConfigOption.DEFAULT_DEFINITION, ) # Override the dependent option. Only that updates! config.set_option("_test.dependentOption", DUMMY_VAL_3) self.assertEqual(config.get_option("_test.independentOption"), DUMMY_VAL_2) self.assertEqual(config.get_option("_test.dependentOption"), DUMMY_VAL_3) self.assertEqual( config.get_where_defined("_test.independentOption"), config._USER_DEFINED ) self.assertEqual( config.get_where_defined("_test.dependentOption"), config._USER_DEFINED ) def test_parsing_toml(self): """Test config._update_config_with_toml().""" # Some useful variables. DUMMY_VAL_1, DUMMY_VAL_2 = "Christopher", "Walken" DUMMY_DEFINITION = "<test definition>" # Create a dummy default option. 
config._create_option( "_test.tomlTest", description="This option tests the TOML parser.", default_val=DUMMY_VAL_1, ) self.assertEqual(config.get_option("_test.tomlTest"), DUMMY_VAL_1) self.assertEqual( config.get_where_defined("_test.tomlTest"), ConfigOption.DEFAULT_DEFINITION ) # Override it with some TOML NEW_TOML = ( """ [_test] tomlTest="%s" """ % DUMMY_VAL_2 ) config._update_config_with_toml(NEW_TOML, DUMMY_DEFINITION) self.assertEqual(config.get_option("_test.tomlTest"), DUMMY_VAL_2) self.assertEqual(config.get_where_defined("_test.tomlTest"), DUMMY_DEFINITION) def test_parsing_env_vars_in_toml(self): """Test that environment variables get parsed in the TOML file.""" # Some useful variables. DEFAULT_VAL, DESIRED_VAL = "Christopher", "Walken" DUMMY_DEFINITION = "<test definition>" # Create a dummy default option. config._create_option( "_test.tomlTest", description="This option tests the TOML parser.", default_val=DEFAULT_VAL, ) self.assertEqual(config.get_option("_test.tomlTest"), DEFAULT_VAL) self.assertEqual( config.get_where_defined("_test.tomlTest"), ConfigOption.DEFAULT_DEFINITION ) os.environ["TEST_ENV_VAR"] = DESIRED_VAL # Override it with some TOML NEW_TOML = """ [_test] tomlTest="env:TEST_ENV_VAR" """ config._update_config_with_toml(NEW_TOML, DUMMY_DEFINITION) self.assertEqual(config.get_option("_test.tomlTest"), DESIRED_VAL) self.assertEqual(config.get_where_defined("_test.tomlTest"), DUMMY_DEFINITION) def test_delete_option(self): config.set_option("s3.bucket", "some.bucket") config._delete_option("s3.bucket") with pytest.raises(RuntimeError) as e: config.get_option("s3.bucket") self.assertEqual(str(e.value), 'Config key "s3.bucket" not defined.') config._delete_option("s3.bucket") def test_sections_order(self): sections = sorted( ["_test", u"browser", u"client", u"global", u"runner", u"s3", u"server"] ) keys = sorted(list(config._section_descriptions.keys())) self.assertEqual(sections, keys) def test_config_option_keys(self): config_options = sorted( [ u"browser.gatherUsageStats", u"browser.serverAddress", u"browser.serverPort", u"client.caching", u"client.displayEnabled", u"global.developmentMode", u"global.disableWatchdogWarning", u"global.logLevel", u"global.maxCachedMessageAge", u"global.minCachedMessageSize", u"global.metrics", u"global.sharingMode", u"global.showWarningOnDirectExecution", u"global.unitTest", u"global.useNode", u"runner.magicEnabled", u"runner.installTracer", u"runner.fixMatplotlib", u"s3.accessKeyId", u"s3.bucket", u"s3.keyPrefix", u"s3.profile", u"s3.region", u"s3.requireLoginToView", u"s3.secretAccessKey", u"s3.url", u"server.enableCORS", u"server.folderWatchBlacklist", u"server.headless", u"server.liveSave", u"server.port", u"server.runOnSave", ] ) keys = sorted(config._config_options.keys()) self.assertEqual(config_options, keys) def test_clean_paragraphs(self): # from https://www.lipsum.com/ input = textwrap.dedent( """ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur ac fermentum eros. 
Maecenas libero est, ultricies eget ligula eget, """ ) truth = [ u"Lorem ipsum dolor sit amet, consectetur adipiscing elit.", u"Curabitur ac fermentum eros.", u"Maecenas libero est, ultricies eget ligula eget,", ] result = config._clean_paragraphs(input) self.assertEqual(truth, result) def test_clean(self): result = config._clean(" clean this text ") self.assertEqual("clean this text", result) def test_check_conflicts_2(self): config._set_option("global.developmentMode", True, "test") config._set_option("server.port", 1234, "test") with pytest.raises(AssertionError) as e: config._check_conflicts() self.assertEqual( str(e.value), "server.port does not work when global.developmentMode is true.", ) def test_check_conflicts_2a(self): config._set_option("global.developmentMode", True, "test") config._set_option("browser.serverPort", 1234, "test") with pytest.raises(AssertionError) as e: config._check_conflicts() self.assertEqual( str(e.value), "browser.serverPort does not work when global.developmentMode is " "true.", ) def test_check_conflicts_3(self): with pytest.raises(AssertionError) as e: config._set_option("global.sharingMode", "s3", "test") config._set_option("s3.bucket", None, "<default>") config._check_conflicts() self.assertEqual( str(e.value), 'When global.sharingMode is set to "s3", s3.bucket must also be set', ) def test_check_conflicts_4(self): with pytest.raises(AssertionError) as e: config._set_option("global.sharingMode", "s3", "test") config._set_option("s3.bucket", "some.bucket", "test") config._set_option("s3.accessKeyId", "some.key", "test") config._set_option("s3.secretAccessKey", None, "<default>") config._check_conflicts() self.assertEqual( str(e.value), "In config.toml, s3.accessKeyId and s3.secretAccessKey must either both be set or both be unset.", ) def test_maybe_convert_to_number(self): self.assertEqual(1234, config._maybe_convert_to_number("1234")) self.assertEqual(1234.5678, config._maybe_convert_to_number("1234.5678")) self.assertEqual("1234.5678ex", config._maybe_convert_to_number("1234.5678ex")) def test_maybe_read_env_variable(self): self.assertEqual( "env:RANDOM_TEST", config._maybe_read_env_variable("env:RANDOM_TEST") ) os.environ["RANDOM_TEST"] = "1234" self.assertEqual(1234, config._maybe_read_env_variable("env:RANDOM_TEST")) def test_update_config_with_toml(self): self.assertEqual(True, config.get_option("client.caching")) toml = textwrap.dedent( """ [client] caching = false """ ) config._update_config_with_toml(toml, "test") self.assertEqual(False, config.get_option("client.caching")) def test_set_option(self): with pytest.raises(AssertionError) as e: config._set_option("not.defined", "no.value", "test") self.assertEqual(str(e.value), 'Key "not.defined" is not defined.') config._set_option("client.caching", "test", "test") self.assertEqual("test", config.get_option("client.caching")) def test_is_manually_set(self): config._set_option("s3.bucket", "some.bucket", "test") self.assertEqual(True, config.is_manually_set("s3.bucket")) config._set_option("s3.bucket", "some.bucket", "<default>") self.assertEqual(False, config.is_manually_set("s3.bucket")) def test_is_unset(self): config._set_option("s3.bucket", "some.bucket", "test") self.assertEqual(False, config._is_unset("s3.bucket")) config._set_option("s3.bucket", "some.bucket", "<default>") self.assertEqual(True, config._is_unset("s3.bucket")) def test_get_where_defined(self): config._set_option("s3.bucket", "some.bucket", "test") self.assertEqual("test", config.get_where_defined("s3.bucket")) with 
pytest.raises(RuntimeError) as e: config.get_where_defined("doesnt.exist") self.assertEqual(str(e.value), 'Config key "doesnt.exist" not defined.') def test_get_options(self): config._set_option("s3.bucket", "some.bucket", "test") self.assertEqual("some.bucket", config.get_option("s3.bucket")) with pytest.raises(RuntimeError) as e: config.get_option("doesnt.exist") self.assertEqual(str(e.value), 'Config key "doesnt.exist" not defined.') def test_s3(self): self.assertEqual(None, config.get_option("s3.secretAccessKey")) self.assertEqual(None, config.get_option("s3.accessKeyId")) self.assertEqual(None, config.get_option("s3.url")) self.assertEqual(None, config.get_option("s3.bucket")) def test_browser_server_port(self): config.set_option("server.port", 1234) self.assertEqual(1234, config.get_option("browser.serverPort")) def test_server_headless_via_liveSave(self): config.set_option("server.liveSave", True) self.assertEqual(True, config.get_option("server.headless")) def test_server_headless_via_atom_plugin(self): os.environ["IS_RUNNING_IN_STREAMLIT_EDITOR_PLUGIN"] = "True" config.set_option("server.liveSave", False) self.assertEqual(True, config.get_option("server.headless")) del os.environ["IS_RUNNING_IN_STREAMLIT_EDITOR_PLUGIN"] def test_server_headless(self): orig_display = None if "DISPLAY" in os.environ.keys(): orig_display = os.environ["DISPLAY"] del os.environ["DISPLAY"] with patch("streamlit.config.platform.system") as p: p.return_value = "Linux" self.assertEqual(True, config.get_option("server.headless")) if orig_display: os.environ["DISPLAY"] = orig_display def test_global_dev_mode(self): config.set_option("global.developmentMode", True) self.assertEqual(True, config.get_option("global.developmentMode")) def test_global_log_level_debug(self): config.set_option("global.developmentMode", True) self.assertEqual(u"debug", config.get_option("global.logLevel")) def test_global_log_level(self): config.set_option("global.developmentMode", False) self.assertEqual(u"info", config.get_option("global.logLevel")) class ConfigLoadingTest(unittest.TestCase): """Tests that involve loading the config.toml file.""" def setUp(self): self.patches = [ patch.object( config, "_section_descriptions", new=copy.deepcopy(SECTION_DESCRIPTIONS) ), patch.object(config, "_config_options", new=copy.deepcopy(CONFIG_OPTIONS)), patch.object(config, "_config_file_has_been_parsed", new=False), ] for p in self.patches: p.start() def tearDown(self): for p in self.patches: p.stop() def test_missing_config(self): """Test that we can initialize our config even if the file is missing.""" with patch("streamlit.config.os.path.exists") as path_exists: path_exists.return_value = False config.parse_config_file() self.assertEqual(True, config.get_option("client.caching")) self.assertIsNone(config.get_option("s3.bucket")) def test_load_global_config(self): """Test that ~/.streamlit/config.toml is read.""" global_config = """ [s3] bucket = "global_bucket" url = "global_url" """ global_config_path = ( global_config_path ) = "/mock/home/folder/.streamlit/config.toml" open_patch = patch("streamlit.config.open", mock_open(read_data=global_config)) makedirs_patch = patch("streamlit.config.os.makedirs") makedirs_patch.return_value = True pathexists_patch = patch("streamlit.config.os.path.exists") pathexists_patch.side_effect = lambda path: path == global_config_path with open_patch, makedirs_patch, pathexists_patch: config.parse_config_file() self.assertEqual(u"global_bucket", config.get_option("s3.bucket")) 
self.assertEqual(u"global_url", config.get_option("s3.url")) self.assertIsNone(config.get_option("s3.accessKeyId")) def test_load_local_config(self): """Test that $CWD/.streamlit/config.toml is read, even if ~/.streamlit/config.toml is missing. """ local_config = """ [s3] bucket = "local_bucket" accessKeyId = "local_accessKeyId" """ local_config_path = os.path.join(os.getcwd(), ".streamlit/config.toml") open_patch = patch("streamlit.config.open", mock_open(read_data=local_config)) makedirs_patch = patch("streamlit.config.os.makedirs") makedirs_patch.return_value = True pathexists_patch = patch("streamlit.config.os.path.exists") pathexists_patch.side_effect = lambda path: path == local_config_path with open_patch, makedirs_patch, pathexists_patch: config.parse_config_file() self.assertEqual(u"local_bucket", config.get_option("s3.bucket")) self.assertEqual(u"local_accessKeyId", config.get_option("s3.accessKeyId")) self.assertIsNone(config.get_option("s3.url")) def test_load_global_local_config(self): """Test that $CWD/.streamlit/config.toml gets overlaid on ~/.streamlit/config.toml at parse time. """ global_config = """ [s3] bucket = "global_bucket" url = "global_url" """ local_config = """ [s3] bucket = "local_bucket" accessKeyId = "local_accessKeyId" """ global_config_path = "/mock/home/folder/.streamlit/config.toml" local_config_path = os.path.join(os.getcwd(), ".streamlit/config.toml") global_open = mock_open(read_data=global_config) local_open = mock_open(read_data=local_config) open = mock_open() open.side_effect = [global_open.return_value, local_open.return_value] open_patch = patch("streamlit.config.open", open) makedirs_patch = patch("streamlit.config.os.makedirs") makedirs_patch.return_value = True pathexists_patch = patch("streamlit.config.os.path.exists") pathexists_patch.side_effect = lambda path: path in [ global_config_path, local_config_path, ] with open_patch, makedirs_patch, pathexists_patch: config.parse_config_file() # s3.bucket set in both local and global self.assertEqual(u"local_bucket", config.get_option("s3.bucket")) # s3.url is set in global, and not in local self.assertEqual(u"global_url", config.get_option("s3.url")) # s3.accessKeyId is set in local and not in global self.assertEqual(u"local_accessKeyId", config.get_option("s3.accessKeyId"))
[]
[]
[ "TEST_ENV_VAR", "IS_RUNNING_IN_STREAMLIT_EDITOR_PLUGIN", "RANDOM_TEST", "DISPLAY" ]
[]
["TEST_ENV_VAR", "IS_RUNNING_IN_STREAMLIT_EDITOR_PLUGIN", "RANDOM_TEST", "DISPLAY"]
python
4
0
pools/zippool/zippool.go
package zippool import ( "github.com/itchio/arkive/zip" "bytes" "fmt" "io" "io/ioutil" "os" "path/filepath" "github.com/go-errors/errors" "github.com/itchio/wharf/tlc" "github.com/itchio/wharf/wsync" ) // ZipPool implements the wsync.ZipPool interface based on a Container type ZipPool struct { container *tlc.Container fmap map[string]*zip.File fileIndex int64 reader io.ReadCloser seekFileIndex int64 readSeeker ReadCloseSeeker } var _ wsync.Pool = (*ZipPool)(nil) // ReadCloseSeeker unifies io.Reader, io.Seeker, and io.Closer type ReadCloseSeeker interface { io.Reader io.Seeker io.Closer } // NewZipPool creates a new ZipPool from the given Container // metadata and a base path on-disk to allow reading from files. func New(c *tlc.Container, zipReader *zip.Reader) *ZipPool { fmap := make(map[string]*zip.File) for _, f := range zipReader.File { info := f.FileInfo() if info.IsDir() { // muffin } else if (info.Mode() & os.ModeSymlink) > 0 { // muffin ether } else { key := filepath.ToSlash(filepath.Clean(f.Name)) fmap[key] = f } } return &ZipPool{ container: c, fmap: fmap, fileIndex: int64(-1), reader: nil, seekFileIndex: int64(-1), readSeeker: nil, } } // GetSize returns the size of the file at index fileIndex func (cfp *ZipPool) GetSize(fileIndex int64) int64 { return cfp.container.Files[fileIndex].Size } // GetRelativePath returns the slashed path of a file, relative to // the container's root. func (cfp *ZipPool) GetRelativePath(fileIndex int64) string { return cfp.container.Files[fileIndex].Path } // GetPath returns the native path of a file (with slashes or backslashes) // on-disk, based on the ZipPool's base path func (cfp *ZipPool) GetPath(fileIndex int64) string { panic("ZipPool does not support GetPath") } // GetReader returns an io.Reader for the file at index fileIndex // Successive calls to `GetReader` will attempt to re-use the last // returned reader if the file index is similar. The cache size is 1, so // reading in parallel from different files is not supported. 
func (cfp *ZipPool) GetReader(fileIndex int64) (io.Reader, error) { if cfp.fileIndex != fileIndex { if cfp.reader != nil { err := cfp.reader.Close() if err != nil { return nil, errors.Wrap(err, 1) } cfp.reader = nil cfp.fileIndex = -1 } relPath := cfp.GetRelativePath(fileIndex) f := cfp.fmap[relPath] if f == nil { if os.Getenv("VERBOSE_ZIP_POOL") != "" { fmt.Printf("\nzip contents:\n") for k := range cfp.fmap { fmt.Printf("\n- %s", k) } fmt.Println() } return nil, errors.WrapPrefix(os.ErrNotExist, relPath, 1) } reader, err := f.Open() if err != nil { return nil, errors.Wrap(err, 1) } cfp.reader = reader cfp.fileIndex = fileIndex } return cfp.reader, nil } // GetReadSeeker is like GetReader but the returned object allows seeking func (cfp *ZipPool) GetReadSeeker(fileIndex int64) (io.ReadSeeker, error) { if cfp.seekFileIndex != fileIndex { if cfp.readSeeker != nil { err := cfp.readSeeker.Close() if err != nil { return nil, errors.Wrap(err, 1) } cfp.readSeeker = nil cfp.seekFileIndex = -1 } key := cfp.GetRelativePath(fileIndex) f := cfp.fmap[key] if f == nil { return nil, errors.Wrap(os.ErrNotExist, 1) } reader, err := f.Open() if err != nil { return nil, errors.Wrap(err, 1) } defer reader.Close() buf, err := ioutil.ReadAll(reader) if err != nil { return nil, errors.Wrap(err, 1) } cfp.readSeeker = &closableBuf{bytes.NewReader(buf)} cfp.seekFileIndex = fileIndex } return cfp.readSeeker, nil } // Close closes all reader belonging to this ZipPool func (cfp *ZipPool) Close() error { if cfp.reader != nil { err := cfp.reader.Close() if err != nil { return errors.Wrap(err, 1) } cfp.reader = nil cfp.fileIndex = -1 } if cfp.readSeeker != nil { err := cfp.readSeeker.Close() if err != nil { return errors.Wrap(err, 1) } cfp.readSeeker = nil cfp.seekFileIndex = -1 } return nil } type closableBuf struct { rs io.ReadSeeker } var _ ReadCloseSeeker = (*closableBuf)(nil) func (cb *closableBuf) Read(buf []byte) (int, error) { return cb.rs.Read(buf) } func (cb *closableBuf) Seek(offset int64, whence int) (int64, error) { return cb.rs.Seek(offset, whence) } func (cb *closableBuf) Close() error { return nil }
[ "\"VERBOSE_ZIP_POOL\"" ]
[]
[ "VERBOSE_ZIP_POOL" ]
[]
["VERBOSE_ZIP_POOL"]
go
1
0
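The constarg and constargjson fields of this record list VERBOSE_ZIP_POOL, the string literal passed to os.Getenv in the Go source above. The dataset does not say how those fields were produced; the sketch below is one plausible way to recover constant os.Getenv arguments from a content string, stated as an assumption rather than a description of the actual pipeline.

# Illustrative only: recover constant os.Getenv("...") arguments from Go source.
# This is an assumption about how fields like constarg could be derived.
import json
import re

GETENV_CONST = re.compile(r'os\.Getenv\("([^"]+)"\)')

def const_getenv_args(go_source):
    names = GETENV_CONST.findall(go_source)
    return names, json.dumps(sorted(set(names)))

names, as_json = const_getenv_args('if os.Getenv("VERBOSE_ZIP_POOL") != "" { ... }')
print(names)    # ['VERBOSE_ZIP_POOL']
print(as_json)  # ["VERBOSE_ZIP_POOL"]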
stanza/id_test.go
// Copyright 2020 The Mellium Contributors. // Use of this source code is governed by the BSD 2-clause // license that can be found in the LICENSE file. package stanza_test import ( "encoding/xml" "regexp" "strconv" "strings" "testing" "mellium.im/xmlstream" "mellium.im/xmpp/jid" "mellium.im/xmpp/stanza" ) const ( testOrigin = `<origin-id xmlns="urn:xmpp:sid:0" id="abc"></origin-id>` testStanza = `<stanza-id xmlns="urn:xmpp:sid:0" id="abc" by="[email protected]"></stanza-id>` ) var idTestCases = [...]struct { in string origin string id string ns string }{ 0: { in: `<message xmlns="jabber:client"></message>`, origin: `<message xmlns="jabber:client">` + testOrigin + `</message>`, id: `<message xmlns="jabber:client">` + testStanza + `</message>`, ns: stanza.NSClient, }, 1: { in: `<iq xmlns="jabber:client"></iq>`, origin: `<iq xmlns="jabber:client">` + testOrigin + `</iq>`, id: `<iq xmlns="jabber:client">` + testStanza + `</iq>`, ns: stanza.NSClient, }, 2: { in: `<presence xmlns="jabber:client"></presence>`, origin: `<presence xmlns="jabber:client">` + testOrigin + `</presence>`, id: `<presence xmlns="jabber:client">` + testStanza + `</presence>`, ns: stanza.NSClient, }, 3: { in: `<message xmlns="jabber:server"></message>`, origin: `<message xmlns="jabber:server">` + testOrigin + `</message>`, id: `<message xmlns="jabber:server">` + testStanza + `</message>`, ns: stanza.NSServer, }, 4: { in: `<iq xmlns="jabber:server"></iq>`, origin: `<iq xmlns="jabber:server">` + testOrigin + `</iq>`, id: `<iq xmlns="jabber:server">` + testStanza + `</iq>`, ns: stanza.NSServer, }, 5: { in: `<presence xmlns="jabber:server"></presence>`, origin: `<presence xmlns="jabber:server">` + testOrigin + `</presence>`, id: `<presence xmlns="jabber:server">` + testStanza + `</presence>`, ns: stanza.NSServer, }, 6: { in: `<not-stanza><message xmlns="jabber:client"></message></not-stanza>`, origin: `<not-stanza><message xmlns="jabber:client"></message></not-stanza>`, id: `<not-stanza><message xmlns="jabber:client"></message></not-stanza>`, ns: stanza.NSClient, }, 7: { in: `<not-stanza><iq xmlns="jabber:client"></iq></not-stanza>`, origin: `<not-stanza><iq xmlns="jabber:client"></iq></not-stanza>`, id: `<not-stanza><iq xmlns="jabber:client"></iq></not-stanza>`, ns: stanza.NSClient, }, 8: { in: `<not-stanza><presence xmlns="jabber:client"></presence></not-stanza>`, origin: `<not-stanza><presence xmlns="jabber:client"></presence></not-stanza>`, id: `<not-stanza><presence xmlns="jabber:client"></presence></not-stanza>`, ns: stanza.NSClient, }, 9: { in: `<presence xmlns="jabber:badns"></presence>`, origin: `<presence xmlns="jabber:badns"></presence>`, id: `<presence xmlns="jabber:badns"></presence>`, ns: stanza.NSClient, }, 10: { in: `<presence xmlns="jabber:badns"></presence>`, origin: `<presence xmlns="jabber:badns">` + testOrigin + `</presence>`, id: `<presence xmlns="jabber:badns">` + testStanza + `</presence>`, ns: "jabber:badns", }, } func TestAddID(t *testing.T) { idReplacer := regexp.MustCompile(`id="(.*?)"`) by := jid.MustParse("[email protected]") for i, tc := range idTestCases { addID := stanza.AddID(by, tc.ns) t.Run(strconv.Itoa(i), func(t *testing.T) { t.Run("origin", func(t *testing.T) { r := stanza.AddOriginID(xml.NewDecoder(strings.NewReader(tc.in)), tc.ns) // Prevent duplicate xmlns attributes. 
See https://mellium.im/issue/75 r = xmlstream.RemoveAttr(func(start xml.StartElement, attr xml.Attr) bool { return attr.Name.Local == "xmlns" })(r) var buf strings.Builder e := xml.NewEncoder(&buf) _, err := xmlstream.Copy(e, r) if err != nil { t.Fatalf("error copying xml stream: %v", err) } if err = e.Flush(); err != nil { t.Fatalf("error flushing stream: %v", err) } out := buf.String() // We need this to be testable, not random. out = idReplacer.ReplaceAllString(out, `id="abc"`) if out != tc.origin { t.Errorf("wrong output:\nwant=%v,\n got=%v", tc.origin, out) } }) t.Run("stanza", func(t *testing.T) { r := addID(xml.NewDecoder(strings.NewReader(tc.in))) // Prevent duplicate xmlns attributes. See https://mellium.im/issue/75 r = xmlstream.RemoveAttr(func(start xml.StartElement, attr xml.Attr) bool { return attr.Name.Local == "xmlns" })(r) var buf strings.Builder e := xml.NewEncoder(&buf) _, err := xmlstream.Copy(e, r) if err != nil { t.Fatalf("error copying xml stream: %v", err) } if err = e.Flush(); err != nil { t.Fatalf("error flushing stream: %v", err) } out := buf.String() // We need this to be testable, not random. out = idReplacer.ReplaceAllString(out, `id="abc"`) if out != tc.id { t.Errorf("wrong output:\nwant=%v,\n got=%v", tc.id, out) } }) }) } }
[]
[]
[]
[]
[]
go
null
null
null
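TestAddID in the record above makes randomly generated id attributes testable by rewriting them to a fixed value (the idReplacer regexp) before comparing against the expected XML. The same normalisation trick, sketched in Python; the XML snippet is a hypothetical example, not taken from the test table.

# Sketch of the id-normalisation trick from TestAddID above.
import re

ID_ATTR = re.compile(r'id="(.*?)"')

def normalise(xml_text):
    # Replace whatever random id was generated with a fixed, comparable value.
    return ID_ATTR.sub('id="abc"', xml_text)

got = '<origin-id xmlns="urn:xmpp:sid:0" id="3f9c1b"></origin-id>'
want = '<origin-id xmlns="urn:xmpp:sid:0" id="abc"></origin-id>'
assert normalise(got) == want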
ProjectManager/wsgi.py
""" WSGI config for ProjectManager project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ProjectManager.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
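The wsgi.py module above uses os.environ.setdefault, which only writes DJANGO_SETTINGS_MODULE when it is not already set, so a value exported by the deployment environment wins. A short sketch of that behaviour; the override value used here is hypothetical.

# os.environ.setdefault only fills in a missing variable; an existing value is kept.
import os

os.environ.pop("DJANGO_SETTINGS_MODULE", None)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ProjectManager.settings")
print(os.environ["DJANGO_SETTINGS_MODULE"])  # ProjectManager.settings

# Hypothetical pre-set value, e.g. exported by the deployment environment.
os.environ["DJANGO_SETTINGS_MODULE"] = "ProjectManager.other_settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ProjectManager.settings")
print(os.environ["DJANGO_SETTINGS_MODULE"])  # ProjectManager.other_settings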
molecule/resources/tests/test_default.py
import os
import testinfra.utils.ansible_runner
import pytest

testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')


def test_kapacitor_running_and_enabled(host):
    kapacitor = host.service("kapacitor")
    assert kapacitor.is_running
    assert kapacitor.is_enabled


@pytest.mark.parametrize("teststring", [
    ('test_db = \\["rp_test_db"\\]'),
    ('test_db_2 = \\["rp_test_db_one", "rp_test_db_two"\\]'),
    ('https-certificate = "/etc/ssl/kapacitor.pem"'),
    ('log-enabled = true'),
    ('write-tracing = false'),
    ('pprof-enabled = false'),
    ('ttps-enabled = false'),
    ('stats-interval = "10s"'),
    ('database = "_kapacitor"'),
    ('retention-policy= "default"'),
    ('url = "https://usage.influxdata.com"'),
    ('dir = "/var/lib/kapacitor/replay"'),
    ('dir = "/var/lib/kapacitor/tasks"'),
    ('snapshot-interval = "60s"'),
    ('boltdb = "/var/lib/kapacitor/kapacitor.db"'),
    ('file = "/var/log/kapacitor/kapacitor.log"'),
    ('level = "INFO"'),
    ('urls = \\["http://localhost:8086"\\]')
])
def test_kapacitor_config(host, teststring):
    kap_config = host.file("/etc/kapacitor/kapacitor.conf")
    assert kap_config.exists
    assert kap_config.contains(teststring)


def test_tick_file(host):
    for alert in (
        "kapacitor/alert_load_average",
        "cpu_alert",
        "disk_alert",
        "cpu_alert_batch"
    ):
        tick_script = host.file("/tmp/" + alert + ".tick")
        assert tick_script.exists


def test_tick_load(host):
    tick_load = host.command("kapacitor list tasks")
    for alert in (
        "alert_load_average",
        "cpu_alert",
        "disk_alert",
        "cpu_alert_batch"
    ):
        assert alert in tick_load.stdout


def test_kapacitor_listener(host):
    assert host.socket('tcp://:::9092').is_listening
[]
[]
[ "MOLECULE_INVENTORY_FILE" ]
[]
["MOLECULE_INVENTORY_FILE"]
python
1
0
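The test module above reads MOLECULE_INVENTORY_FILE with os.environ[...], which raises KeyError if Molecule has not exported the variable, unlike os.environ.get with a fallback. A small sketch of the difference; the fallback path shown is hypothetical.

# os.environ['NAME'] raises KeyError when unset; os.environ.get allows a fallback.
import os

os.environ.pop("MOLECULE_INVENTORY_FILE", None)
print(os.environ.get("MOLECULE_INVENTORY_FILE", "/tmp/inventory"))  # hypothetical fallback

try:
    os.environ["MOLECULE_INVENTORY_FILE"]
except KeyError:
    print("MOLECULE_INVENTORY_FILE must be exported before the tests run")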
django backend/rajastan_studios_project/asgi.py
""" ASGI config for rajastan_studios_project project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rajastan_studios_project.settings') application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
server/server.go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"
	"strconv"

	"github.com/gorilla/mux"
	"github.com/mrobinsn/go-rtorrent/rtorrent"
)

// Clamp constrains a value to a range.
func Clamp(n int, min int, max int) int {
	if min > max {
		min, max = max, min
	}
	if n < min {
		return min
	}
	if n > max {
		return max
	}
	return n
}

// TODO: write tests

// Subset returns a paged subset of the input Stats slice.
func Subset(stats []Stat, offset int, count int) []Stat {
	left := Clamp(offset, 0, len(stats))
	right := Clamp(offset+count, left, len(stats))
	return stats[left:right]
}

// Structure flattens and restructures the data in Stats for the client.
func Structure(stats []Stat) []map[string]interface{} {
	merged := []map[string]interface{}{}
	for _, stat := range stats {
		one := map[string]interface{}{
			"hash":            stat.Torrent.Hash,
			"name":            stat.Torrent.Name,
			"path":            stat.Torrent.Path,
			"size":            stat.Torrent.Size,
			"label":           stat.Torrent.Label,
			"completed":       stat.Torrent.Completed,
			"ratio":           stat.Torrent.Ratio,
			"created":         stat.Torrent.Created,
			"started":         stat.Torrent.Started,
			"finished":        stat.Torrent.Finished,
			"completed_bytes": stat.Status.CompletedBytes,
			"down_rate":       stat.Status.DownRate,
			"up_rate":         stat.Status.UpRate,
		}
		merged = append(merged, one)
	}
	return merged
}

// Serve starts the server for RTorrent stats data.
func Serve(conn *rtorrent.RTorrent, newStats <-chan []Stat) {
	name := ""
	stats := []Stat{}

	go func() {
		for incoming := range newStats {
			stats = incoming
			n, err := conn.Name()
			if err != nil {
				log.Println(err)
			}
			name = n
		}
	}()

	r := mux.NewRouter()
	r.HandleFunc("/torrents", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Access-Control-Allow-Origin", "*")
		w.Header().Set("Content-Type", "application/json")

		q := r.URL.Query()
		count, err := strconv.Atoi(q.Get("count"))
		if err != nil {
			w.WriteHeader(http.StatusUnprocessableEntity)
			return
		}
		offset, err := strconv.Atoi(q.Get("offset"))
		if err != nil {
			w.WriteHeader(http.StatusUnprocessableEntity)
			return
		}

		var subset []Stat
		query := q.Get("query")
		all := stats
		if query != "" {
			all = Filter(stats, query)
		}
		subset = Subset(all, offset, count)

		json.NewEncoder(w).Encode(map[string]interface{}{
			"name":     name,
			"total":    len(all),
			"torrents": Structure(subset),
		})
	})

	// mux handles static files from /static at /
	r.PathPrefix("/").Handler(http.FileServer(http.Dir("static")))

	host := os.Getenv("HOST")
	port := os.Getenv("PORT")
	if port == "" {
		port = "9081"
	}
	addr := fmt.Sprintf("%s:%s", host, port)
	log.Printf("Listening on %s\n", addr)
	log.Fatal(http.ListenAndServe(addr, r))
}
[ "\"HOST\"", "\"PORT\"" ]
[]
[ "PORT", "HOST" ]
[]
["PORT", "HOST"]
go
2
0
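Clamp and Subset in server.go above implement bounds-safe paging of the stats slice. The Python re-expression below is just a quick way to check the edge cases; it is a sketch, not part of the original repository.

# Re-expression of server.go's Clamp/Subset paging logic for checking edge cases.
def clamp(n, lo, hi):
    if lo > hi:
        lo, hi = hi, lo
    return max(lo, min(n, hi))

def subset(items, offset, count):
    left = clamp(offset, 0, len(items))
    right = clamp(offset + count, left, len(items))
    return items[left:right]

stats = list(range(10))
print(subset(stats, 8, 5))   # [8, 9]  count runs past the end and is clamped
print(subset(stats, 20, 5))  # []      offset beyond the slice
print(subset(stats, -3, 4))  # [0]     negative offset clamps to 0, right edge stays at offset+count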
logstash-core/src/test/java/org/logstash/secret/EnvironmentUtil.java
package org.logstash.secret;

import java.lang.reflect.Field;
import java.util.Collections;
import java.util.Map;

/**
 * Tool to change the in-memory environment settings, does not change actual environment
 */
public class EnvironmentUtil {

    //near exact copy from https://stackoverflow.com/questions/318239/how-do-i-set-environment-variables-from-java
    //thanks @pushy and @Edward Campbell !
    @SuppressWarnings("unchecked")
    private static void setEnv(Map<String, String> newenv, String removeKey) throws Exception {
        try {
            Class<?> processEnvironmentClass = Class.forName("java.lang.ProcessEnvironment");
            Field theEnvironmentField = processEnvironmentClass.getDeclaredField("theEnvironment");
            theEnvironmentField.setAccessible(true);
            Map<String, String> env = (Map<String, String>) theEnvironmentField.get(null);
            if (removeKey == null) {
                env.putAll(newenv);
            } else {
                env.remove(removeKey);
            }
            Field theCaseInsensitiveEnvironmentField = processEnvironmentClass.getDeclaredField("theCaseInsensitiveEnvironment");
            theCaseInsensitiveEnvironmentField.setAccessible(true);
            Map<String, String> cienv = (Map<String, String>) theCaseInsensitiveEnvironmentField.get(null);
            if (removeKey == null) {
                cienv.putAll(newenv);
            } else {
                cienv.remove(removeKey);
            }
        } catch (NoSuchFieldException e) {
            Class[] classes = Collections.class.getDeclaredClasses();
            Map<String, String> env = System.getenv();
            for (Class cl : classes) {
                if ("java.util.Collections$UnmodifiableMap".equals(cl.getName())) {
                    Field field = cl.getDeclaredField("m");
                    field.setAccessible(true);
                    Object obj = field.get(env);
                    Map<String, String> map = (Map<String, String>) obj;
                    map.clear();
                    if (removeKey == null) {
                        map.putAll(newenv);
                    } else {
                        map.remove(removeKey);
                    }
                }
            }
        }
    }

    public static void add(Map<String, String> environment) throws Exception {
        setEnv(environment, null);
    }

    public static void remove(String key) throws Exception {
        setEnv(null, key);
    }
}
[]
[]
[]
[]
[]
java
0
0
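EnvironmentUtil above mutates the JVM's cached environment maps via reflection so tests can fake variables without touching the real process environment. For comparison only, a common Python-side analogue is unittest.mock.patch.dict over os.environ; this analogy is not part of the Logstash code, and the variable name below is hypothetical.

# Analogy only: temporarily faking an environment variable in Python tests.
import os
from unittest.mock import patch

with patch.dict(os.environ, {"EXAMPLE_SECRET": "value-for-test"}):
    print(os.environ["EXAMPLE_SECRET"])  # value-for-test

# Outside the block the original environment mapping is restored.
print("EXAMPLE_SECRET" in os.environ)  # False, assuming it was not set beforehand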
test/e2e/framework/util.go
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package framework import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "io/ioutil" "math/rand" "net" "net/http" "net/url" "os" "os/exec" "path" "path/filepath" "regexp" "sort" "strconv" "strings" "sync" "syscall" "text/tabwriter" "time" "github.com/golang/glog" "golang.org/x/crypto/ssh" "golang.org/x/net/websocket" "google.golang.org/api/googleapi" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" gomegatypes "github.com/onsi/gomega/types" apps "k8s.io/api/apps/v1" batch "k8s.io/api/batch/v1" "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" utilyaml "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" scaleclient "k8s.io/client-go/scale" "k8s.io/kubernetes/pkg/api/legacyscheme" podutil "k8s.io/kubernetes/pkg/api/v1/pod" appsinternal "k8s.io/kubernetes/pkg/apis/apps" batchinternal "k8s.io/kubernetes/pkg/apis/batch" api "k8s.io/kubernetes/pkg/apis/core" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/conditions" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/controller" nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubectl" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/pkg/scheduler/schedulercache" sshutil "k8s.io/kubernetes/pkg/ssh" "k8s.io/kubernetes/pkg/util/system" taintutils "k8s.io/kubernetes/pkg/util/taints" utilversion "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" uexec "k8s.io/utils/exec" ) const ( // How long to wait for the pod to be listable PodListTimeout = time.Minute // Initial pod start can be delayed O(minutes) by slow docker pulls // TODO: Make this 30 seconds once #4566 is resolved. PodStartTimeout = 5 * time.Minute // Same as `PodStartTimeout` to wait for the pod to be started, but shorter. 
// Use it case by case when we are sure pod start will not be delayed // minutes by slow docker pulls or something else. PodStartShortTimeout = 1 * time.Minute // If there are any orphaned namespaces to clean up, this test is running // on a long lived cluster. A long wait here is preferably to spurious test // failures caused by leaked resources from a previous test run. NamespaceCleanupTimeout = 15 * time.Minute // Some pods can take much longer to get ready due to volume attach/detach latency. slowPodStartTimeout = 15 * time.Minute // How long to wait for a service endpoint to be resolvable. ServiceStartTimeout = 3 * time.Minute // How often to Poll pods, nodes and claims. Poll = 2 * time.Second pollShortTimeout = 1 * time.Minute pollLongTimeout = 5 * time.Minute // service accounts are provisioned after namespace creation // a service account is required to support pod creation in a namespace as part of admission control ServiceAccountProvisionTimeout = 2 * time.Minute // How long to try single API calls (like 'get' or 'list'). Used to prevent // transient failures from failing tests. // TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed. SingleCallTimeout = 5 * time.Minute // How long nodes have to be "ready" when a test begins. They should already // be "ready" before the test starts, so this is small. NodeReadyInitialTimeout = 20 * time.Second // How long pods have to be "ready" when a test begins. PodReadyBeforeTimeout = 5 * time.Minute // How long pods have to become scheduled onto nodes podScheduledBeforeTimeout = PodListTimeout + (20 * time.Second) podRespondingTimeout = 15 * time.Minute ServiceRespondingTimeout = 2 * time.Minute EndpointRegisterTimeout = time.Minute // How long claims have to become dynamically provisioned ClaimProvisionTimeout = 5 * time.Minute // Same as `ClaimProvisionTimeout` to wait for claim to be dynamically provisioned, but shorter. // Use it case by case when we are sure this timeout is enough. ClaimProvisionShortTimeout = 1 * time.Minute // How long claims have to become bound ClaimBindingTimeout = 3 * time.Minute // How long claims have to become deleted ClaimDeletingTimeout = 3 * time.Minute // How long PVs have to beome reclaimed PVReclaimingTimeout = 3 * time.Minute // How long PVs have to become bound PVBindingTimeout = 3 * time.Minute // How long PVs have to become deleted PVDeletingTimeout = 3 * time.Minute // How long a node is allowed to become "Ready" after it is restarted before // the test is considered failed. RestartNodeReadyAgainTimeout = 5 * time.Minute // How long a pod is allowed to become "running" and "ready" after a node // restart before test is considered failed. RestartPodReadyAgainTimeout = 5 * time.Minute // Number of objects that gc can delete in a second. // GC issues 2 requestes for single delete. gcThroughput = 10 // Minimal number of nodes for the cluster to be considered large. largeClusterThreshold = 100 // TODO(justinsb): Avoid hardcoding this. awsMasterIP = "172.20.0.9" // ssh port sshPort = "22" // ImagePrePullingTimeout is the time we wait for the e2e-image-puller // static pods to pull the list of seeded images. If they don't pull // images within this time we simply log their output and carry on // with the tests. ImagePrePullingTimeout = 5 * time.Minute ) var ( BusyBoxImage = "busybox" // Label allocated to the image puller static pod that runs on each node // before e2es. 
ImagePullerLabels = map[string]string{"name": "e2e-image-puller"} // For parsing Kubectl version for version-skewed testing. gitVersionRegexp = regexp.MustCompile("GitVersion:\"(v.+?)\"") // Slice of regexps for names of pods that have to be running to consider a Node "healthy" requiredPerNodePods = []*regexp.Regexp{ regexp.MustCompile(".*kube-proxy.*"), regexp.MustCompile(".*fluentd-elasticsearch.*"), regexp.MustCompile(".*node-problem-detector.*"), } // Serve hostname image name ServeHostnameImage = imageutils.GetE2EImage(imageutils.ServeHostname) ) type Address struct { internalIP string externalIP string hostname string } // GetServerArchitecture fetches the architecture of the cluster's apiserver. func GetServerArchitecture(c clientset.Interface) string { arch := "" sVer, err := c.Discovery().ServerVersion() if err != nil || sVer.Platform == "" { // If we failed to get the server version for some reason, default to amd64. arch = "amd64" } else { // Split the platform string into OS and Arch separately. // The platform string may for example be "linux/amd64", "linux/arm" or "windows/amd64". osArchArray := strings.Split(sVer.Platform, "/") arch = osArchArray[1] } return arch } // GetPauseImageName fetches the pause image name for the same architecture as the apiserver. func GetPauseImageName(c clientset.Interface) string { return imageutils.GetE2EImageWithArch(imageutils.Pause, GetServerArchitecture(c)) } func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) { return request.Resource("services").SubResource("proxy"), nil } // unique identifier of the e2e run var RunId = uuid.NewUUID() type CreateTestingNSFn func(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) type ContainerFailures struct { status *v1.ContainerStateTerminated Restarts int } func GetMasterHost() string { masterUrl, err := url.Parse(TestContext.Host) ExpectNoError(err) return masterUrl.Host } func nowStamp() string { return time.Now().Format(time.StampMilli) } func log(level string, format string, args ...interface{}) { fmt.Fprintf(GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...) } func Logf(format string, args ...interface{}) { log("INFO", format, args...) } func Failf(format string, args ...interface{}) { FailfWithOffset(1, format, args...) } // FailfWithOffset calls "Fail" and logs the error at "offset" levels above its caller // (for example, for call chain f -> g -> FailfWithOffset(1, ...) error would be logged for "f"). func FailfWithOffset(offset int, format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) log("INFO", msg) ginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset) } func Skipf(format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) log("INFO", msg) ginkgowrapper.Skip(nowStamp() + ": " + msg) } func SkipUnlessNodeCountIsAtLeast(minNodeCount int) { if TestContext.CloudConfig.NumNodes < minNodeCount { Skipf("Requires at least %d nodes (not %d)", minNodeCount, TestContext.CloudConfig.NumNodes) } } func SkipUnlessNodeCountIsAtMost(maxNodeCount int) { if TestContext.CloudConfig.NumNodes > maxNodeCount { Skipf("Requires at most %d nodes (not %d)", maxNodeCount, TestContext.CloudConfig.NumNodes) } } func SkipUnlessAtLeast(value int, minValue int, message string) { if value < minValue { Skipf(message) } } func SkipIfProviderIs(unsupportedProviders ...string) { if ProviderIs(unsupportedProviders...) 
{ Skipf("Not supported for providers %v (found %s)", unsupportedProviders, TestContext.Provider) } } func SkipUnlessLocalEphemeralStorageEnabled() { if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { Skipf("Only supported when %v feature is enabled", features.LocalStorageCapacityIsolation) } } func SkipUnlessSSHKeyPresent() { if _, err := GetSigner(TestContext.Provider); err != nil { Skipf("No SSH Key for provider %s: '%v'", TestContext.Provider, err) } } func SkipUnlessProviderIs(supportedProviders ...string) { if !ProviderIs(supportedProviders...) { Skipf("Only supported for providers %v (not %s)", supportedProviders, TestContext.Provider) } } func SkipUnlessMultizone(c clientset.Interface) { zones, err := GetClusterZones(c) if err != nil { Skipf("Error listing cluster zones") } if zones.Len() <= 1 { Skipf("Requires more than one zone") } } func SkipIfMultizone(c clientset.Interface) { zones, err := GetClusterZones(c) if err != nil { Skipf("Error listing cluster zones") } if zones.Len() > 1 { Skipf("Requires more than one zone") } } func SkipUnlessClusterMonitoringModeIs(supportedMonitoring ...string) { if !ClusterMonitoringModeIs(supportedMonitoring...) { Skipf("Only next monitoring modes are supported %v (not %s)", supportedMonitoring, TestContext.ClusterMonitoringMode) } } func SkipUnlessPrometheusMonitoringIsEnabled(supportedMonitoring ...string) { if !TestContext.EnablePrometheusMonitoring { Skipf("Skipped because prometheus monitoring is not enabled") } } func SkipUnlessMasterOSDistroIs(supportedMasterOsDistros ...string) { if !MasterOSDistroIs(supportedMasterOsDistros...) { Skipf("Only supported for master OS distro %v (not %s)", supportedMasterOsDistros, TestContext.MasterOSDistro) } } func SkipUnlessNodeOSDistroIs(supportedNodeOsDistros ...string) { if !NodeOSDistroIs(supportedNodeOsDistros...) 
{ Skipf("Only supported for node OS distro %v (not %s)", supportedNodeOsDistros, TestContext.NodeOSDistro) } } func SkipUnlessSecretExistsAfterWait(c clientset.Interface, name, namespace string, timeout time.Duration) { Logf("Waiting for secret %v in namespace %v to exist in duration %v", name, namespace, timeout) start := time.Now() if wait.PollImmediate(15*time.Second, timeout, func() (bool, error) { _, err := c.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) if err != nil { Logf("Secret %v in namespace %v still does not exist after duration %v", name, namespace, time.Since(start)) return false, nil } return true, nil }) != nil { Skipf("Secret %v in namespace %v did not exist after timeout of %v", name, namespace, timeout) } Logf("Secret %v in namespace %v found after duration %v", name, namespace, time.Since(start)) } func SkipIfContainerRuntimeIs(runtimes ...string) { for _, runtime := range runtimes { if runtime == TestContext.ContainerRuntime { Skipf("Not supported under container runtime %s", runtime) } } } func RunIfContainerRuntimeIs(runtimes ...string) { for _, runtime := range runtimes { if runtime == TestContext.ContainerRuntime { return } } Skipf("Skipped because container runtime %q is not in %s", TestContext.ContainerRuntime, runtimes) } func RunIfSystemSpecNameIs(names ...string) { for _, name := range names { if name == TestContext.SystemSpecName { return } } Skipf("Skipped because system spec name %q is not in %v", TestContext.SystemSpecName, names) } func ProviderIs(providers ...string) bool { for _, provider := range providers { if strings.ToLower(provider) == strings.ToLower(TestContext.Provider) { return true } } return false } func ClusterMonitoringModeIs(monitoringModes ...string) bool { for _, mode := range monitoringModes { if strings.ToLower(mode) == strings.ToLower(TestContext.ClusterMonitoringMode) { return true } } return false } func MasterOSDistroIs(supportedMasterOsDistros ...string) bool { for _, distro := range supportedMasterOsDistros { if strings.ToLower(distro) == strings.ToLower(TestContext.MasterOSDistro) { return true } } return false } func NodeOSDistroIs(supportedNodeOsDistros ...string) bool { for _, distro := range supportedNodeOsDistros { if strings.ToLower(distro) == strings.ToLower(TestContext.NodeOSDistro) { return true } } return false } func ProxyMode(f *Framework) (string, error) { pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "kube-proxy-mode-detector", Namespace: f.Namespace.Name, }, Spec: v1.PodSpec{ HostNetwork: true, Containers: []v1.Container{ { Name: "detector", Image: imageutils.GetE2EImage(imageutils.Net), Command: []string{"/bin/sleep", "3600"}, }, }, }, } f.PodClient().CreateSync(pod) defer f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, DefaultPodDeletionTimeout) cmd := "curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode" stdout, err := RunHostCmd(pod.Namespace, pod.Name, cmd) if err != nil { return "", err } Logf("ProxyMode: %s", stdout) return stdout, nil } func SkipUnlessServerVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) { gte, err := ServerVersionGTE(v, c) if err != nil { Failf("Failed to get server version: %v", err) } if !gte { Skipf("Not supported for server versions before %q", v) } } func SkipIfMissingResource(dynamicClient dynamic.DynamicInterface, gvr schema.GroupVersionResource, namespace string) { resourceClient := dynamicClient.Resource(gvr).Namespace(namespace) _, err := resourceClient.List(metav1.ListOptions{}) if err != nil { // not all 
resources support list, so we ignore those if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) { Skipf("Could not find %s resource, skipping test: %#v", gvr, err) } Failf("Unexpected error getting %v: %v", gvr, err) } } // ProvidersWithSSH are those providers where each node is accessible with SSH var ProvidersWithSSH = []string{"gce", "gke", "aws", "local"} type podCondition func(pod *v1.Pod) (bool, error) // logPodStates logs basic info of provided pods for debugging. func logPodStates(pods []v1.Pod) { // Find maximum widths for pod, node, and phase strings for column printing. maxPodW, maxNodeW, maxPhaseW, maxGraceW := len("POD"), len("NODE"), len("PHASE"), len("GRACE") for i := range pods { pod := &pods[i] if len(pod.ObjectMeta.Name) > maxPodW { maxPodW = len(pod.ObjectMeta.Name) } if len(pod.Spec.NodeName) > maxNodeW { maxNodeW = len(pod.Spec.NodeName) } if len(pod.Status.Phase) > maxPhaseW { maxPhaseW = len(pod.Status.Phase) } } // Increase widths by one to separate by a single space. maxPodW++ maxNodeW++ maxPhaseW++ maxGraceW++ // Log pod info. * does space padding, - makes them left-aligned. Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s", maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS") for _, pod := range pods { grace := "" if pod.DeletionGracePeriodSeconds != nil { grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds) } Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s", maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions) } Logf("") // Final empty line helps for readability. } // errorBadPodsStates create error message of basic info of bad pods for debugging. func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string { errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout) // Print bad pods info only if there are fewer than 10 bad pods if len(badPods) > 10 { return errStr + "There are too many bad pods. Please check log for details." } buf := bytes.NewBuffer(nil) w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS") for _, badPod := range badPods { grace := "" if badPod.DeletionGracePeriodSeconds != nil { grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds) } podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%+v", badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions) fmt.Fprintln(w, podInfo) } w.Flush() return errStr + buf.String() } // WaitForPodsSuccess waits till all labels matching the given selector enter // the Success state. The caller is expected to only invoke this method once the // pods have been created. 
func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[string]string, timeout time.Duration) error { successPodSelector := labels.SelectorFromSet(successPodLabels) start, badPods, desiredPods := time.Now(), []v1.Pod{}, 0 if wait.PollImmediate(30*time.Second, timeout, func() (bool, error) { podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: successPodSelector.String()}) if err != nil { Logf("Error getting pods in namespace %q: %v", ns, err) if testutils.IsRetryableAPIError(err) { return false, nil } return false, err } if len(podList.Items) == 0 { Logf("Waiting for pods to enter Success, but no pods in %q match label %v", ns, successPodLabels) return true, nil } badPods = []v1.Pod{} desiredPods = len(podList.Items) for _, pod := range podList.Items { if pod.Status.Phase != v1.PodSucceeded { badPods = append(badPods, pod) } } successPods := len(podList.Items) - len(badPods) Logf("%d / %d pods in namespace %q are in Success state (%d seconds elapsed)", successPods, len(podList.Items), ns, int(time.Since(start).Seconds())) if len(badPods) == 0 { return true, nil } return false, nil }) != nil { logPodStates(badPods) LogPodsWithLabels(c, ns, successPodLabels, Logf) return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "SUCCESS", timeout)) } return nil } // WaitForPodsRunningReady waits up to timeout to ensure that all pods in // namespace ns are either running and ready, or failed but controlled by a // controller. Also, it ensures that at least minPods are running and // ready. It has separate behavior from other 'wait for' pods functions in // that it requests the list of pods on every iteration. This is useful, for // example, in cluster startup, because the number of pods increases while // waiting. All pods that are in SUCCESS state are not counted. // // If ignoreLabels is not empty, pods matching this selector are ignored. func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error { ignoreSelector := labels.SelectorFromSet(ignoreLabels) start := time.Now() Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready", timeout, minPods, ns) wg := sync.WaitGroup{} wg.Add(1) var ignoreNotReady bool badPods := []v1.Pod{} desiredPods := 0 notReady := int32(0) if wait.PollImmediate(Poll, timeout, func() (bool, error) { // We get the new list of pods, replication controllers, and // replica sets in every iteration because more pods come // online during startup and we want to ensure they are also // checked. 
replicas, replicaOk := int32(0), int32(0) rcList, err := c.CoreV1().ReplicationControllers(ns).List(metav1.ListOptions{}) if err != nil { Logf("Error getting replication controllers in namespace '%s': %v", ns, err) if testutils.IsRetryableAPIError(err) { return false, nil } return false, err } for _, rc := range rcList.Items { replicas += *rc.Spec.Replicas replicaOk += rc.Status.ReadyReplicas } rsList, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(metav1.ListOptions{}) if err != nil { Logf("Error getting replication sets in namespace %q: %v", ns, err) if testutils.IsRetryableAPIError(err) { return false, nil } return false, err } for _, rs := range rsList.Items { replicas += *rs.Spec.Replicas replicaOk += rs.Status.ReadyReplicas } podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) if err != nil { Logf("Error getting pods in namespace '%s': %v", ns, err) if testutils.IsRetryableAPIError(err) { return false, nil } return false, err } nOk := int32(0) notReady = int32(0) badPods = []v1.Pod{} desiredPods = len(podList.Items) for _, pod := range podList.Items { if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) { continue } res, err := testutils.PodRunningReady(&pod) switch { case res && err == nil: nOk++ case pod.Status.Phase == v1.PodSucceeded: Logf("The status of Pod %s is Succeeded which is unexpected", pod.ObjectMeta.Name) badPods = append(badPods, pod) // it doesn't make sense to wait for this pod return false, errors.New("unexpected Succeeded pod state") case pod.Status.Phase != v1.PodFailed: Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase) notReady++ badPods = append(badPods, pod) default: if metav1.GetControllerOf(&pod) == nil { Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name) badPods = append(badPods, pod) } //ignore failed pods that are controlled by some controller } } Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)", nOk, len(podList.Items), ns, int(time.Since(start).Seconds())) Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk) if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 { return true, nil } ignoreNotReady = (notReady <= allowedNotReadyPods) logPodStates(badPods) return false, nil }) != nil { if !ignoreNotReady { return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout)) } Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods) } return nil } func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) { for _, container := range pod.Spec.Containers { if strings.Contains(container.Name, containerNameSubstr) { // Contains() matches all strings if substr is empty logs, err := GetPodLogs(c, pod.Namespace, pod.Name, container.Name) if err != nil { logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name) if err != nil { logFunc("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err) } } logFunc("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName) logFunc("%s : STARTLOG\n%s\nENDLOG for container %v:%v:%v", containerNameSubstr, logs, pod.Namespace, pod.Name, container.Name) } } } func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args 
...interface{})) { podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) if err != nil { logFunc("Error getting pods in namespace '%s': %v", ns, err) return } logFunc("Running kubectl logs on non-ready containers in %v", ns) for _, pod := range podList.Items { if res, err := testutils.PodRunningReady(&pod); !res || err != nil { kubectlLogPod(c, pod, "", Logf) } } } func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string, logFunc func(ftm string, args ...interface{})) { podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()}) if err != nil { logFunc("Error getting pods in namespace %q: %v", ns, err) return } logFunc("Running kubectl logs on pods with labels %v in %v", match, ns) for _, pod := range podList.Items { kubectlLogPod(c, pod, "", logFunc) } } func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[string]string, containerSubstr string, logFunc func(ftm string, args ...interface{})) { podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()}) if err != nil { Logf("Error getting pods in namespace %q: %v", ns, err) return } for _, pod := range podList.Items { kubectlLogPod(c, pod, containerSubstr, logFunc) } } // DeleteNamespaces deletes all namespaces that match the given delete and skip filters. // Filter is by simple strings.Contains; first skip filter, then delete filter. // Returns the list of deleted namespaces or an error. func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) { By("Deleting namespaces") nsList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) var deleted []string var wg sync.WaitGroup OUTER: for _, item := range nsList.Items { if skipFilter != nil { for _, pattern := range skipFilter { if strings.Contains(item.Name, pattern) { continue OUTER } } } if deleteFilter != nil { var shouldDelete bool for _, pattern := range deleteFilter { if strings.Contains(item.Name, pattern) { shouldDelete = true break } } if !shouldDelete { continue OUTER } } wg.Add(1) deleted = append(deleted, item.Name) go func(nsName string) { defer wg.Done() defer GinkgoRecover() Expect(c.CoreV1().Namespaces().Delete(nsName, nil)).To(Succeed()) Logf("namespace : %v api call to delete is complete ", nsName) }(item.Name) } wg.Wait() return deleted, nil } func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeout time.Duration) error { By("Waiting for namespaces to vanish") nsMap := map[string]bool{} for _, ns := range namespaces { nsMap[ns] = true } //Now POLL until all namespaces have been eradicated. 
return wait.Poll(2*time.Second, timeout, func() (bool, error) { nsList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{}) if err != nil { return false, err } for _, item := range nsList.Items { if _, ok := nsMap[item.Name]; ok { return false, nil } } return true, nil }) } func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error { w, err := c.CoreV1().ServiceAccounts(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName})) if err != nil { return err } _, err = watch.Until(timeout, w, conditions.ServiceAccountHasSecrets) return err } func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error { Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { if apierrs.IsNotFound(err) { Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err) return err } Logf("Get pod %q in namespace %q failed, ignoring for %v. Error: %v", podName, ns, Poll, err) continue } // log now so that current pod info is reported before calling `condition()` Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v", podName, pod.Status.Phase, pod.Status.Reason, podutil.IsPodReady(pod), time.Since(start)) if done, err := condition(pod); done { if err == nil { Logf("Pod %q satisfied condition %q", podName, desc) } return err } } return fmt.Errorf("Gave up after waiting %v for pod %q to be %q", timeout, podName, desc) } // WaitForMatchPodsCondition finds match pods based on the input ListOptions. // waits and checks if all match pods are in the given podCondition func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, desc string, timeout time.Duration, condition podCondition) error { Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(opts) if err != nil { return err } conditionNotMatch := []string{} for _, pod := range pods.Items { done, err := condition(&pod) if done && err != nil { return fmt.Errorf("Unexpected error: %v", err) } if !done { conditionNotMatch = append(conditionNotMatch, format.Pod(&pod)) } } if len(conditionNotMatch) <= 0 { return err } Logf("%d pods are not %s: %v", len(conditionNotMatch), desc, conditionNotMatch) } return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout) } // WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned // the default service account is what is associated with pods when they do not specify a service account // as a result, pods are not able to be provisioned in a namespace until the service account is provisioned func WaitForDefaultServiceAccountInNamespace(c clientset.Interface, namespace string) error { return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout) } // WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first. 
func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error { Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) if err != nil { Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err) continue } else { if pv.Status.Phase == phase { Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start)) return nil } else { Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase) } } } return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout) } // WaitForStatefulSetReplicasReady waits for all replicas of a StatefulSet to become ready or until timeout occurs, whichever comes first. func WaitForStatefulSetReplicasReady(statefulSetName, ns string, c clientset.Interface, Poll, timeout time.Duration) error { Logf("Waiting up to %v for StatefulSet %s to have all replicas ready", timeout, statefulSetName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { sts, err := c.AppsV1().StatefulSets(ns).Get(statefulSetName, metav1.GetOptions{}) if err != nil { Logf("Get StatefulSet %s failed, ignoring for %v: %v", statefulSetName, Poll, err) continue } else { if sts.Status.ReadyReplicas == *sts.Spec.Replicas { Logf("All %d replicas of StatefulSet %s are ready. (%v)", sts.Status.ReadyReplicas, statefulSetName, time.Since(start)) return nil } else { Logf("StatefulSet %s found but there are %d ready replicas and %d total replicas.", statefulSetName, sts.Status.ReadyReplicas, *sts.Spec.Replicas) } } } return fmt.Errorf("StatefulSet %s still has unready pods within %v", statefulSetName, timeout) } // WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first. func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, timeout time.Duration) error { Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) if err == nil { Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start)) continue } else { if apierrs.IsNotFound(err) { Logf("PersistentVolume %s was removed", pvName) return nil } else { Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err) } } } return fmt.Errorf("PersistentVolume %s still exists within %v", pvName, timeout) } // WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first. func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error { Logf("Waiting up to %v for PersistentVolumeClaim %s to have phase %s", timeout, pvcName, phase) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{}) if err != nil { Logf("Failed to get claim %q, retrying in %v. 
Error: %v", pvcName, Poll, err) continue } else { if pvc.Status.Phase == phase { Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start)) return nil } else { Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase) } } } return fmt.Errorf("PersistentVolumeClaim %s not in phase %s within %v", pvcName, phase, timeout) } // CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name. // Please see NewFramework instead of using this directly. func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) { if labels == nil { labels = map[string]string{} } labels["e2e-run"] = string(RunId) namespaceObj := &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ GenerateName: fmt.Sprintf("e2e-tests-%v-", baseName), Namespace: "", Labels: labels, }, Status: v1.NamespaceStatus{}, } // Be robust about making the namespace creation call. var got *v1.Namespace if err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) { var err error got, err = c.CoreV1().Namespaces().Create(namespaceObj) if err != nil { Logf("Unexpected error while creating namespace: %v", err) return false, nil } return true, nil }); err != nil { return nil, err } if TestContext.VerifyServiceAccount { if err := WaitForDefaultServiceAccountInNamespace(c, got.Name); err != nil { // Even if we fail to create serviceAccount in the namespace, // we have successfully create a namespace. // So, return the created namespace. return got, err } } return got, nil } // CheckTestingNSDeletedExcept checks whether all e2e based existing namespaces are in the Terminating state // and waits until they are finally deleted. It ignores namespace skip. func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error { // TODO: Since we don't have support for bulk resource deletion in the API, // while deleting a namespace we are deleting all objects from that namespace // one by one (one deletion == one API call). This basically exposes us to // throttling - currently controller-manager has a limit of max 20 QPS. // Once #10217 is implemented and used in namespace-controller, deleting all // object from a given namespace should be much faster and we will be able // to lower this timeout. // However, now Density test is producing ~26000 events and Load capacity test // is producing ~35000 events, thus assuming there are no other requests it will // take ~30 minutes to fully delete the namespace. Thus I'm setting it to 60 // minutes to avoid any timeouts here. timeout := 60 * time.Minute Logf("Waiting for terminating namespaces to be deleted...") for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) { namespaces, err := c.CoreV1().Namespaces().List(metav1.ListOptions{}) if err != nil { Logf("Listing namespaces failed: %v", err) continue } terminating := 0 for _, ns := range namespaces.Items { if strings.HasPrefix(ns.ObjectMeta.Name, "e2e-tests-") && ns.ObjectMeta.Name != skip { if ns.Status.Phase == v1.NamespaceActive { return fmt.Errorf("Namespace %s is active", ns.ObjectMeta.Name) } terminating++ } } if terminating == 0 { return nil } } return fmt.Errorf("Waiting for terminating namespaces to be deleted timed out") } // deleteNS deletes the provided namespace, waits for it to be completely deleted, and then checks // whether there are any pods remaining in a non-terminating state. 
func deleteNS(c clientset.Interface, dynamicClient dynamic.DynamicInterface, namespace string, timeout time.Duration) error { startTime := time.Now() if err := c.CoreV1().Namespaces().Delete(namespace, nil); err != nil { return err } // wait for namespace to delete or timeout. err := wait.PollImmediate(2*time.Second, timeout, func() (bool, error) { if _, err := c.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{}); err != nil { if apierrs.IsNotFound(err) { return true, nil } Logf("Error while waiting for namespace to be terminated: %v", err) return false, nil } return false, nil }) // verify there is no more remaining content in the namespace remainingContent, cerr := hasRemainingContent(c, dynamicClient, namespace) if cerr != nil { return cerr } // if content remains, let's dump information about the namespace, and system for flake debugging. remainingPods := 0 missingTimestamp := 0 if remainingContent { // log information about namespace, and set of namespaces in api server to help flake detection logNamespace(c, namespace) logNamespaces(c, namespace) // if we can, check if there were pods remaining with no timestamp. remainingPods, missingTimestamp, _ = countRemainingPods(c, namespace) } // a timeout waiting for namespace deletion happened! if err != nil { // some content remains in the namespace if remainingContent { // pods remain if remainingPods > 0 { if missingTimestamp != 0 { // pods remained, but were not undergoing deletion (namespace controller is probably culprit) return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v, pods missing deletion timestamp: %v", namespace, err, remainingPods, missingTimestamp) } // but they were all undergoing deletion (kubelet is probably culprit, check NodeLost) return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v", namespace, err, remainingPods) } // other content remains (namespace controller is probably screwed up) return fmt.Errorf("namespace %v was not deleted with limit: %v, namespaced content other than pods remain", namespace, err) } // no remaining content, but namespace was not deleted (namespace controller is probably wedged) return fmt.Errorf("namespace %v was not deleted with limit: %v, namespace is empty but is not yet removed", namespace, err) } Logf("namespace %v deletion completed in %s", namespace, time.Now().Sub(startTime)) return nil } // logNamespaces logs the number of namespaces by phase // namespace is the namespace the test was operating against that failed to delete so it can be grepped in logs func logNamespaces(c clientset.Interface, namespace string) { namespaceList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{}) if err != nil { Logf("namespace: %v, unable to list namespaces: %v", namespace, err) return } numActive := 0 numTerminating := 0 for _, namespace := range namespaceList.Items { if namespace.Status.Phase == v1.NamespaceActive { numActive++ } else { numTerminating++ } } Logf("namespace: %v, total namespaces: %v, active: %v, terminating: %v", namespace, len(namespaceList.Items), numActive, numTerminating) } // logNamespace logs detail about a namespace func logNamespace(c clientset.Interface, namespace string) { ns, err := c.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{}) if err != nil { if apierrs.IsNotFound(err) { Logf("namespace: %v no longer exists", namespace) return } Logf("namespace: %v, unable to get namespace due to error: %v", namespace, err) return } Logf("namespace: %v, DeletionTimetamp: %v, Finalizers: %v, Phase: 
%v", ns.Name, ns.DeletionTimestamp, ns.Spec.Finalizers, ns.Status.Phase) } // countRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp. func countRemainingPods(c clientset.Interface, namespace string) (int, int, error) { // check for remaining pods pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{}) if err != nil { return 0, 0, err } // nothing remains! if len(pods.Items) == 0 { return 0, 0, nil } // stuff remains, log about it logPodStates(pods.Items) // check if there were any pods with missing deletion timestamp numPods := len(pods.Items) missingTimestamp := 0 for _, pod := range pods.Items { if pod.DeletionTimestamp == nil { missingTimestamp++ } } return numPods, missingTimestamp, nil } // isDynamicDiscoveryError returns true if the error is a group discovery error // only for groups expected to be created/deleted dynamically during e2e tests func isDynamicDiscoveryError(err error) bool { if !discovery.IsGroupDiscoveryFailedError(err) { return false } discoveryErr := err.(*discovery.ErrGroupDiscoveryFailed) for gv := range discoveryErr.Groups { switch gv.Group { case "mygroup.example.com": // custom_resource_definition // garbage_collector case "wardle.k8s.io": // aggregator default: Logf("discovery error for unexpected group: %#v", gv) return false } } return true } // hasRemainingContent checks if there is remaining content in the namespace via API discovery func hasRemainingContent(c clientset.Interface, dynamicClient dynamic.DynamicInterface, namespace string) (bool, error) { // some tests generate their own framework.Client rather than the default // TODO: ensure every test call has a configured dynamicClient if dynamicClient == nil { return false, nil } // find out what content is supported on the server // Since extension apiserver is not always available, e.g. metrics server sometimes goes down, // add retry here. resources, err := waitForServerPreferredNamespacedResources(c.Discovery(), 30*time.Second) if err != nil { return false, err } groupVersionResources, err := discovery.GroupVersionResources(resources) if err != nil { return false, err } // TODO: temporary hack for https://github.com/kubernetes/kubernetes/issues/31798 ignoredResources := sets.NewString("bindings") contentRemaining := false // dump how many of resource type is on the server in a log. for gvr := range groupVersionResources { // get a client for this group version... dynamicClient := dynamicClient.Resource(gvr).Namespace(namespace) if err != nil { // not all resource types support list, so some errors here are normal depending on the resource type. 
Logf("namespace: %s, unable to get client - gvr: %v, error: %v", namespace, gvr, err) continue } // get the api resource apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true} if ignoredResources.Has(gvr.Resource) { Logf("namespace: %s, resource: %s, ignored listing per whitelist", namespace, apiResource.Name) continue } unstructuredList, err := dynamicClient.List(metav1.ListOptions{}) if err != nil { // not all resources support list, so we ignore those if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) { continue } // skip unavailable servers if apierrs.IsServiceUnavailable(err) { continue } return false, err } if len(unstructuredList.Items) > 0 { Logf("namespace: %s, resource: %s, items remaining: %v", namespace, apiResource.Name, len(unstructuredList.Items)) contentRemaining = true } } return contentRemaining, nil } func ContainerInitInvariant(older, newer runtime.Object) error { oldPod := older.(*v1.Pod) newPod := newer.(*v1.Pod) if len(oldPod.Spec.InitContainers) == 0 { return nil } if len(oldPod.Spec.InitContainers) != len(newPod.Spec.InitContainers) { return fmt.Errorf("init container list changed") } if oldPod.UID != newPod.UID { return fmt.Errorf("two different pods exist in the condition: %s vs %s", oldPod.UID, newPod.UID) } if err := initContainersInvariants(oldPod); err != nil { return err } if err := initContainersInvariants(newPod); err != nil { return err } oldInit, _, _ := podInitialized(oldPod) newInit, _, _ := podInitialized(newPod) if oldInit && !newInit { // TODO: we may in the future enable resetting PodInitialized = false if the kubelet needs to restart it // from scratch return fmt.Errorf("pod cannot be initialized and then regress to not being initialized") } return nil } func podInitialized(pod *v1.Pod) (ok bool, failed bool, err error) { allInit := true initFailed := false for _, s := range pod.Status.InitContainerStatuses { switch { case initFailed && s.State.Waiting == nil: return allInit, initFailed, fmt.Errorf("container %s is after a failed container but isn't waiting", s.Name) case allInit && s.State.Waiting == nil: return allInit, initFailed, fmt.Errorf("container %s is after an initializing container but isn't waiting", s.Name) case s.State.Terminated == nil: allInit = false case s.State.Terminated.ExitCode != 0: allInit = false initFailed = true case !s.Ready: return allInit, initFailed, fmt.Errorf("container %s initialized but isn't marked as ready", s.Name) } } return allInit, initFailed, nil } func initContainersInvariants(pod *v1.Pod) error { allInit, initFailed, err := podInitialized(pod) if err != nil { return err } if !allInit || initFailed { for _, s := range pod.Status.ContainerStatuses { if s.State.Waiting == nil || s.RestartCount != 0 { return fmt.Errorf("container %s is not waiting but initialization not complete", s.Name) } if s.State.Waiting.Reason != "PodInitializing" { return fmt.Errorf("container %s should have reason PodInitializing: %s", s.Name, s.State.Waiting.Reason) } } } _, c := podutil.GetPodCondition(&pod.Status, v1.PodInitialized) if c == nil { return fmt.Errorf("pod does not have initialized condition") } if c.LastTransitionTime.IsZero() { return fmt.Errorf("PodInitialized condition should always have a transition time") } switch { case c.Status == v1.ConditionUnknown: return fmt.Errorf("PodInitialized condition should never be Unknown") case c.Status == v1.ConditionTrue && (initFailed || !allInit): return fmt.Errorf("PodInitialized condition was True but all not all 
containers initialized") case c.Status == v1.ConditionFalse && (!initFailed && allInit): return fmt.Errorf("PodInitialized condition was False but all containers initialized") } return nil } type InvariantFunc func(older, newer runtime.Object) error func CheckInvariants(events []watch.Event, fns ...InvariantFunc) error { errs := sets.NewString() for i := range events { j := i + 1 if j >= len(events) { continue } for _, fn := range fns { if err := fn(events[i].Object, events[j].Object); err != nil { errs.Insert(err.Error()) } } } if errs.Len() > 0 { return fmt.Errorf("invariants violated:\n* %s", strings.Join(errs.List(), "\n* ")) } return nil } // Waits default amount of time (PodStartTimeout) for the specified pod to become running. // Returns an error if timeout occurs first, or pod goes in to failed state. func WaitForPodRunningInNamespace(c clientset.Interface, pod *v1.Pod) error { if pod.Status.Phase == v1.PodRunning { return nil } return WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, PodStartTimeout) } // Waits default amount of time (PodStartTimeout) for the specified pod to become running. // Returns an error if timeout occurs first, or pod goes in to failed state. func WaitForPodNameRunningInNamespace(c clientset.Interface, podName, namespace string) error { return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, PodStartTimeout) } // Waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running. // The resourceVersion is used when Watching object changes, it tells since when we care // about changes to the pod. Returns an error if timeout occurs first, or pod goes in to failed state. func waitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace string) error { return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, slowPodStartTimeout) } func WaitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error { return wait.PollImmediate(Poll, timeout, podRunning(c, podName, namespace)) } func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc { return func() (bool, error) { pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) if err != nil { return false, err } switch pod.Status.Phase { case v1.PodRunning: return true, nil case v1.PodFailed, v1.PodSucceeded: return false, conditions.ErrPodCompleted } return false, nil } } // Waits default amount of time (DefaultPodDeletionTimeout) for the specified pod to stop running. // Returns an error if timeout occurs first. 
func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string) error { return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, DefaultPodDeletionTimeout) } func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error { return wait.PollImmediate(Poll, timeout, podCompleted(c, podName, namespace)) } func podCompleted(c clientset.Interface, podName, namespace string) wait.ConditionFunc { return func() (bool, error) { pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) if err != nil { return false, err } switch pod.Status.Phase { case v1.PodFailed, v1.PodSucceeded: return true, nil } return false, nil } } func waitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error { return wait.PollImmediate(Poll, timeout, podRunningAndReady(c, podName, namespace)) } func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.ConditionFunc { return func() (bool, error) { pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) if err != nil { return false, err } switch pod.Status.Phase { case v1.PodFailed, v1.PodSucceeded: return false, conditions.ErrPodCompleted case v1.PodRunning: return podutil.IsPodReady(pod), nil } return false, nil } } // WaitForPodNotPending returns an error if it took too long for the pod to go out of pending state. // The resourceVersion is used when Watching object changes, it tells since when we care // about changes to the pod. func WaitForPodNotPending(c clientset.Interface, ns, podName string) error { return wait.PollImmediate(Poll, PodStartTimeout, podNotPending(c, podName, ns)) } func podNotPending(c clientset.Interface, podName, namespace string) wait.ConditionFunc { return func() (bool, error) { pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) if err != nil { return false, err } switch pod.Status.Phase { case v1.PodPending: return false, nil default: return true, nil } } } // waitForPodTerminatedInNamespace returns an error if it takes too long for the pod to terminate, // if the pod Get api returns an error (IsNotFound or other), or if the pod failed (and thus did not // terminate) with an unexpected reason. Typically called to test that the passed-in pod is fully // terminated (reason==""), but may be called to detect if a pod did *not* terminate according to // the supplied reason. func waitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, namespace string) error { return WaitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", PodStartTimeout, func(pod *v1.Pod) (bool, error) { // Only consider Failed pods. Successful pods will be deleted and detected in // waitForPodCondition's Get call returning `IsNotFound` if pod.Status.Phase == v1.PodFailed { if pod.Status.Reason == reason { // short-circuit waitForPodCondition's loop return true, nil } else { return true, fmt.Errorf("Expected pod %q in namespace %q to be terminated with reason %q, got reason: %q", podName, namespace, reason, pod.Status.Reason) } } return false, nil }) } // waitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate. // Unlike `waitForPodTerminatedInNamespace`, the pod's Phase and Reason are ignored. If the pod Get // api returns IsNotFound then the wait stops and nil is returned. 
If the Get api returns an error other // than "not found" then that error is returned and the wait stops. func waitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error { return wait.PollImmediate(Poll, timeout, func() (bool, error) { _, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if apierrs.IsNotFound(err) { return true, nil // done } if err != nil { return true, err // stop wait with error } return false, nil }) } // waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long. func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName string, namespace string, timeout time.Duration) error { return WaitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *v1.Pod) (bool, error) { if pod.Spec.RestartPolicy == v1.RestartPolicyAlways { return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName) } switch pod.Status.Phase { case v1.PodSucceeded: By("Saw pod success") return true, nil case v1.PodFailed: return true, fmt.Errorf("pod %q failed with status: %+v", podName, pod.Status) default: return false, nil } }) } // WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or until podStartupTimeout. func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error { return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, PodStartTimeout) } // WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or until slowPodStartupTimeout. func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error { return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout) } // WaitForRCToStabilize waits till the RC has a matching generation/replica count between spec and status. 
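// Example (hedged sketch): waiting for a replication controller to report a
// stable spec/status. The namespace, RC name and timeout are illustrative
// assumptions:
//
//	if err := WaitForRCToStabilize(c, ns, "my-rc", 5*time.Minute); err != nil {
//		Failf("replication controller %q did not stabilize: %v", "my-rc", err)
//	}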
func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.Duration) error { options := metav1.ListOptions{FieldSelector: fields.Set{ "metadata.name": name, "metadata.namespace": ns, }.AsSelector().String()} w, err := c.CoreV1().ReplicationControllers(ns).Watch(options) if err != nil { return err } _, err = watch.Until(timeout, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Deleted: return false, apierrs.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "") } switch rc := event.Object.(type) { case *v1.ReplicationController: if rc.Name == name && rc.Namespace == ns && rc.Generation <= rc.Status.ObservedGeneration && *(rc.Spec.Replicas) == rc.Status.Replicas { return true, nil } Logf("Waiting for rc %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d", name, rc.Generation, rc.Status.ObservedGeneration, *(rc.Spec.Replicas), rc.Status.Replicas) } return false, nil }) return err } func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error { return wait.PollImmediate(interval, timeout, func() (bool, error) { Logf("Waiting for pod %s to disappear", podName) options := metav1.ListOptions{LabelSelector: label.String()} pods, err := c.CoreV1().Pods(ns).List(options) if err != nil { if testutils.IsRetryableAPIError(err) { return false, nil } return false, err } found := false for _, pod := range pods.Items { if pod.Name == podName { Logf("Pod %s still exists", podName) found = true break } } if !found { Logf("Pod %s no longer exists", podName) return true, nil } return false, nil }) } // WaitForPodNameUnschedulableInNamespace returns an error if it takes too long for the pod to become Pending // and have condition Status equal to Unschedulable, // if the pod Get api returns an error (IsNotFound or other), or if the pod failed with an unexpected reason. // Typically called to test that the passed-in pod is Pending and Unschedulable. func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, namespace string) error { return WaitForPodCondition(c, namespace, podName, "Unschedulable", PodStartTimeout, func(pod *v1.Pod) (bool, error) { // Only consider Failed pods. 
Successful pods will be deleted and detected in // waitForPodCondition's Get call returning `IsNotFound` if pod.Status.Phase == v1.PodPending { for _, cond := range pod.Status.Conditions { if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" { return true, nil } } } if pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed { return true, fmt.Errorf("Expected pod %q in namespace %q to be in phase Pending, but got phase: %v", podName, namespace, pod.Status.Phase) } return false, nil }) } // WaitForService waits until the service appears (exist == true), or disappears (exist == false) func WaitForService(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { _, err := c.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) switch { case err == nil: Logf("Service %s in namespace %s found.", name, namespace) return exist, nil case apierrs.IsNotFound(err): Logf("Service %s in namespace %s disappeared.", name, namespace) return !exist, nil case !testutils.IsRetryableAPIError(err): Logf("Non-retryable failure while getting service.") return false, err default: Logf("Get service %s in namespace %s failed: %v", name, namespace, err) return false, nil } }) if err != nil { stateMsg := map[bool]string{true: "to appear", false: "to disappear"} return fmt.Errorf("error waiting for service %s/%s %s: %v", namespace, name, stateMsg[exist], err) } return nil } // WaitForServiceWithSelector waits until any service with given selector appears (exist == true), or disappears (exist == false) func WaitForServiceWithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval, timeout time.Duration) error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { services, err := c.CoreV1().Services(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) switch { case len(services.Items) != 0: Logf("Service with %s in namespace %s found.", selector.String(), namespace) return exist, nil case len(services.Items) == 0: Logf("Service with %s in namespace %s disappeared.", selector.String(), namespace) return !exist, nil case !testutils.IsRetryableAPIError(err): Logf("Non-retryable failure while listing service.") return false, err default: Logf("List service with %s in namespace %s failed: %v", selector.String(), namespace, err) return false, nil } }) if err != nil { stateMsg := map[bool]string{true: "to appear", false: "to disappear"} return fmt.Errorf("error waiting for service with %s in namespace %s %s: %v", selector.String(), namespace, stateMsg[exist], err) } return nil } //WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum. 
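// Example (hedged sketch): waiting until a service is backed by the expected
// number of endpoint addresses. The service name, expected count, interval and
// timeout are illustrative assumptions:
//
//	err := WaitForServiceEndpointsNum(c, ns, "my-service", 3, Poll, 2*time.Minute)
//	if err != nil {
//		Failf("service %q never reached %d endpoints: %v", "my-service", 3, err)
//	}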
func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error { return wait.Poll(interval, timeout, func() (bool, error) { Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum) list, err := c.CoreV1().Endpoints(namespace).List(metav1.ListOptions{}) if err != nil { return false, err } for _, e := range list.Items { if e.Name == serviceName && countEndpointsNum(&e) == expectNum { return true, nil } } return false, nil }) } func countEndpointsNum(e *v1.Endpoints) int { num := 0 for _, sub := range e.Subsets { num += len(sub.Addresses) } return num } func WaitForEndpoint(c clientset.Interface, ns, name string) error { for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) { endpoint, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{}) if apierrs.IsNotFound(err) { Logf("Endpoint %s/%s is not ready yet", ns, name) continue } Expect(err).NotTo(HaveOccurred()) if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 { Logf("Endpoint %s/%s is not ready yet", ns, name) continue } else { return nil } } return fmt.Errorf("Failed to get endpoints for %s/%s", ns, name) } // Context for checking pods responses by issuing GETs to them (via the API // proxy) and verifying that they answer with there own pod name. type podProxyResponseChecker struct { c clientset.Interface ns string label labels.Selector controllerName string respondName bool // Whether the pod should respond with its own name. pods *v1.PodList } func PodProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) podProxyResponseChecker { return podProxyResponseChecker{c, ns, label, controllerName, respondName, pods} } // CheckAllResponses issues GETs to all pods in the context and verify they // reply with their own pod name. func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) { successes := 0 options := metav1.ListOptions{LabelSelector: r.label.String()} currentPods, err := r.c.CoreV1().Pods(r.ns).List(options) Expect(err).NotTo(HaveOccurred()) for i, pod := range r.pods.Items { // Check that the replica list remains unchanged, otherwise we have problems. if !isElementOf(pod.UID, currentPods) { return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods) } ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout) defer cancel() body, err := r.c.CoreV1().RESTClient().Get(). Context(ctx). Namespace(r.ns). Resource("pods"). SubResource("proxy"). Name(string(pod.Name)). Do(). Raw() if err != nil { if ctx.Err() != nil { // We may encounter errors here because of a race between the pod readiness and apiserver // proxy. So, we log the error and retry if this occurs. Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status) return false, nil } Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status) continue } // The response checker expects the pod's name unless !respondName, in // which case it just checks for a non-empty response. 
got := string(body) what := "" if r.respondName { what = "expected" want := pod.Name if got != want { Logf("Controller %s: Replica %d [%s] expected response %q but got %q", r.controllerName, i+1, pod.Name, want, got) continue } } else { what = "non-empty" if len(got) == 0 { Logf("Controller %s: Replica %d [%s] expected non-empty response", r.controllerName, i+1, pod.Name) continue } } successes++ Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far", r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items)) } if successes < len(r.pods.Items) { return false, nil } return true, nil } // ServerVersionGTE returns true if v is greater than or equal to the server // version. // // TODO(18726): This should be incorporated into client.VersionInterface. func ServerVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) (bool, error) { serverVersion, err := c.ServerVersion() if err != nil { return false, fmt.Errorf("Unable to get server version: %v", err) } sv, err := utilversion.ParseSemantic(serverVersion.GitVersion) if err != nil { return false, fmt.Errorf("Unable to parse server version %q: %v", serverVersion.GitVersion, err) } return sv.AtLeast(v), nil } func SkipUnlessKubectlVersionGTE(v *utilversion.Version) { gte, err := KubectlVersionGTE(v) if err != nil { Failf("Failed to get kubectl version: %v", err) } if !gte { Skipf("Not supported for kubectl versions before %q", v) } } // KubectlVersionGTE returns true if the kubectl version is greater than or // equal to v. func KubectlVersionGTE(v *utilversion.Version) (bool, error) { kv, err := KubectlVersion() if err != nil { return false, err } return kv.AtLeast(v), nil } // KubectlVersion gets the version of kubectl that's currently being used (see // --kubectl-path in e2e.go to use an alternate kubectl). func KubectlVersion() (*utilversion.Version, error) { output := RunKubectlOrDie("version", "--client") matches := gitVersionRegexp.FindStringSubmatch(output) if len(matches) != 2 { return nil, fmt.Errorf("Could not find kubectl version in output %v", output) } // Don't use the full match, as it contains "GitVersion:\"" and a // trailing "\"". Just use the submatch. return utilversion.ParseSemantic(matches[1]) } func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error { By("trying to dial each unique pod") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) return wait.PollImmediate(Poll, podRespondingTimeout, PodProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses) } func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) return PodsCreatedByLabel(c, ns, name, replicas, label) } func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) { timeout := 2 * time.Minute for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { options := metav1.ListOptions{LabelSelector: label.String()} // List the pods, making sure we observe all the replicas. 
pods, err := c.CoreV1().Pods(ns).List(options) if err != nil { return nil, err } created := []v1.Pod{} for _, pod := range pods.Items { if pod.DeletionTimestamp != nil { continue } created = append(created, pod) } Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas) if int32(len(created)) == replicas { pods.Items = created return pods, nil } } return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas) } func podsRunning(c clientset.Interface, pods *v1.PodList) []error { // Wait for the pods to enter the running state. Waiting loops until the pods // are running so non-running pods cause a timeout for this test. By("ensuring each pod is running") e := []error{} error_chan := make(chan error) for _, pod := range pods.Items { go func(p v1.Pod) { error_chan <- WaitForPodRunningInNamespace(c, &p) }(pod) } for range pods.Items { err := <-error_chan if err != nil { e = append(e, err) } } return e } func VerifyPods(c clientset.Interface, ns, name string, wantName bool, replicas int32) error { return podRunningMaybeResponding(c, ns, name, wantName, replicas, true) } func VerifyPodsRunning(c clientset.Interface, ns, name string, wantName bool, replicas int32) error { return podRunningMaybeResponding(c, ns, name, wantName, replicas, false) } func podRunningMaybeResponding(c clientset.Interface, ns, name string, wantName bool, replicas int32, checkResponding bool) error { pods, err := PodsCreated(c, ns, name, replicas) if err != nil { return err } e := podsRunning(c, pods) if len(e) > 0 { return fmt.Errorf("failed to wait for pods running: %v", e) } if checkResponding { err = PodsResponding(c, ns, name, wantName, pods) if err != nil { return fmt.Errorf("failed to wait for pods responding: %v", err) } } return nil } func ServiceResponding(c clientset.Interface, ns, name string) error { By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name)) return wait.PollImmediate(Poll, ServiceRespondingTimeout, func() (done bool, err error) { proxyRequest, errProxy := GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get()) if errProxy != nil { Logf("Failed to get services proxy request: %v:", errProxy) return false, nil } ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout) defer cancel() body, err := proxyRequest.Namespace(ns). Context(ctx). Name(name). Do(). 
Raw() if err != nil { if ctx.Err() != nil { Failf("Failed to GET from service %s: %v", name, err) return true, err } Logf("Failed to GET from service %s: %v:", name, err) return false, nil } got := string(body) if len(got) == 0 { Logf("Service %s: expected non-empty response", name) return false, err // stop polling } Logf("Service %s: found nonempty answer: %s", name, got) return true, nil }) } func RestclientConfig(kubeContext string) (*clientcmdapi.Config, error) { Logf(">>> kubeConfig: %s", TestContext.KubeConfig) if TestContext.KubeConfig == "" { return nil, fmt.Errorf("KubeConfig must be specified to load client config") } c, err := clientcmd.LoadFromFile(TestContext.KubeConfig) if err != nil { return nil, fmt.Errorf("error loading KubeConfig: %v", err.Error()) } if kubeContext != "" { Logf(">>> kubeContext: %s", kubeContext) c.CurrentContext = kubeContext } return c, nil } type ClientConfigGetter func() (*restclient.Config, error) func LoadConfig() (*restclient.Config, error) { if TestContext.NodeE2E { // This is a node e2e test, apply the node e2e configuration return &restclient.Config{Host: TestContext.Host}, nil } c, err := RestclientConfig(TestContext.KubeContext) if err != nil { if TestContext.KubeConfig == "" { return restclient.InClusterConfig() } else { return nil, err } } return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}).ClientConfig() } func LoadInternalClientset() (*internalclientset.Clientset, error) { config, err := LoadConfig() if err != nil { return nil, fmt.Errorf("error creating client: %v", err.Error()) } return internalclientset.NewForConfig(config) } func LoadClientset() (*clientset.Clientset, error) { config, err := LoadConfig() if err != nil { return nil, fmt.Errorf("error creating client: %v", err.Error()) } return clientset.NewForConfig(config) } // randomSuffix provides a random string to append to pods,services,rcs. // TODO: Allow service names to have the same form as names // for pods and replication controllers so we don't // need to use such a function and can instead // use the UUID utility function. func randomSuffix() string { r := rand.New(rand.NewSource(time.Now().UnixNano())) return strconv.Itoa(r.Int() % 10000) } func ExpectNoError(err error, explain ...interface{}) { ExpectNoErrorWithOffset(1, err, explain...) } // ExpectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller // (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f"). func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) { if err != nil { Logf("Unexpected error occurred: %v", err) } ExpectWithOffset(1+offset, err).NotTo(HaveOccurred(), explain...) } func ExpectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) { var err error for i := 0; i < maxRetries; i++ { err = fn() if err == nil { return } Logf("(Attempt %d of %d) Unexpected error occurred: %v", i+1, maxRetries, err) } ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...) } // Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped. 
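// Example (hedged sketch): deleting everything created from a manifest and
// asserting the matching resources are gone. The manifest path, namespace and
// label selector are illustrative assumptions:
//
//	Cleanup("test/fixtures/my-rc.yaml", ns, "name=my-rc")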
func Cleanup(filePath, ns string, selectors ...string) { By("using delete to clean up resources") var nsArg string if ns != "" { nsArg = fmt.Sprintf("--namespace=%s", ns) } RunKubectlOrDie("delete", "--grace-period=0", "-f", filePath, nsArg) AssertCleanup(ns, selectors...) } // Asserts that cleanup of a namespace wrt selectors occurred. func AssertCleanup(ns string, selectors ...string) { var nsArg string if ns != "" { nsArg = fmt.Sprintf("--namespace=%s", ns) } var e error verifyCleanupFunc := func() (bool, error) { e = nil for _, selector := range selectors { resources := RunKubectlOrDie("get", "rc,svc", "-l", selector, "--no-headers", nsArg) if resources != "" { e = fmt.Errorf("Resources left running after stop:\n%s", resources) return false, nil } pods := RunKubectlOrDie("get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}") if pods != "" { e = fmt.Errorf("Pods left unterminated after stop:\n%s", pods) return false, nil } } return true, nil } err := wait.PollImmediate(500*time.Millisecond, 1*time.Minute, verifyCleanupFunc) if err != nil { Failf(e.Error()) } } // KubectlCmd runs the kubectl executable through the wrapper script. func KubectlCmd(args ...string) *exec.Cmd { defaultArgs := []string{} // Reference a --server option so tests can run anywhere. if TestContext.Host != "" { defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+TestContext.Host) } if TestContext.KubeConfig != "" { defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig) // Reference the KubeContext if TestContext.KubeContext != "" { defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+TestContext.KubeContext) } } else { if TestContext.CertDir != "" { defaultArgs = append(defaultArgs, fmt.Sprintf("--certificate-authority=%s", filepath.Join(TestContext.CertDir, "ca.crt")), fmt.Sprintf("--client-certificate=%s", filepath.Join(TestContext.CertDir, "kubecfg.crt")), fmt.Sprintf("--client-key=%s", filepath.Join(TestContext.CertDir, "kubecfg.key"))) } } kubectlArgs := append(defaultArgs, args...) //We allow users to specify path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh" //and so on. cmd := exec.Command(TestContext.KubectlPath, kubectlArgs...) //caller will invoke this and wait on it. return cmd } // kubectlBuilder is used to build, customize and execute a kubectl Command. // Add more functions to customize the builder as needed. type kubectlBuilder struct { cmd *exec.Cmd timeout <-chan time.Time } func NewKubectlCommand(args ...string) *kubectlBuilder { b := new(kubectlBuilder) b.cmd = KubectlCmd(args...) return b } func (b *kubectlBuilder) WithEnv(env []string) *kubectlBuilder { b.cmd.Env = env return b } func (b *kubectlBuilder) WithTimeout(t <-chan time.Time) *kubectlBuilder { b.timeout = t return b } func (b kubectlBuilder) WithStdinData(data string) *kubectlBuilder { b.cmd.Stdin = strings.NewReader(data) return &b } func (b kubectlBuilder) WithStdinReader(reader io.Reader) *kubectlBuilder { b.cmd.Stdin = reader return &b } func (b kubectlBuilder) ExecOrDie() string { str, err := b.Exec() // In case of i/o timeout error, try talking to the apiserver again after 2s before dying. // Note that we're still dying after retrying so that we can get visibility to triage it further. 
if isTimeout(err) { Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.") time.Sleep(2 * time.Second) retryStr, retryErr := RunKubectl("version") Logf("stdout: %q", retryStr) Logf("err: %v", retryErr) } Expect(err).NotTo(HaveOccurred()) return str } func isTimeout(err error) bool { switch err := err.(type) { case net.Error: if err.Timeout() { return true } case *url.Error: if err, ok := err.Err.(net.Error); ok && err.Timeout() { return true } } return false } func (b kubectlBuilder) Exec() (string, error) { var stdout, stderr bytes.Buffer cmd := b.cmd cmd.Stdout, cmd.Stderr = &stdout, &stderr Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately if err := cmd.Start(); err != nil { return "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err) } errCh := make(chan error, 1) go func() { errCh <- cmd.Wait() }() select { case err := <-errCh: if err != nil { var rc int = 127 if ee, ok := err.(*exec.ExitError); ok { rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus()) Logf("rc: %d", rc) } return "", uexec.CodeExitError{ Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err), Code: rc, } } case <-b.timeout: b.cmd.Process.Kill() return "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v\n", cmd, cmd.Stdout, cmd.Stderr) } Logf("stderr: %q", stderr.String()) Logf("stdout: %q", stdout.String()) return stdout.String(), nil } // RunKubectlOrDie is a convenience wrapper over kubectlBuilder func RunKubectlOrDie(args ...string) string { return NewKubectlCommand(args...).ExecOrDie() } // RunKubectl is a convenience wrapper over kubectlBuilder func RunKubectl(args ...string) (string, error) { return NewKubectlCommand(args...).Exec() } // RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin func RunKubectlOrDieInput(data string, args ...string) string { return NewKubectlCommand(args...).WithStdinData(data).ExecOrDie() } // RunKubemciWithKubeconfig is a convenience wrapper over RunKubemciCmd func RunKubemciWithKubeconfig(args ...string) (string, error) { if TestContext.KubeConfig != "" { args = append(args, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig) } return RunKubemciCmd(args...) } // RunKubemciCmd is a convenience wrapper over kubectlBuilder to run kubemci. // It assumes that kubemci exists in PATH. func RunKubemciCmd(args ...string) (string, error) { // kubemci is assumed to be in PATH. kubemci := "kubemci" b := new(kubectlBuilder) args = append(args, "--gcp-project="+TestContext.CloudConfig.ProjectID) b.cmd = exec.Command(kubemci, args...) return b.Exec() } func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) { stdout, err = cmd.StdoutPipe() if err != nil { return } stderr, err = cmd.StderrPipe() if err != nil { return } Logf("Asynchronously running '%s %s'", cmd.Path, strings.Join(cmd.Args, " ")) err = cmd.Start() return } // Rough equivalent of ctrl+c for cleaning up processes. Intended to be run in defer. func TryKill(cmd *exec.Cmd) { if err := cmd.Process.Kill(); err != nil { Logf("ERROR failed to kill command %v! 
The process may leak", cmd) } } // testContainerOutputMatcher runs the given pod in the given namespace and waits // for all of the containers in the podSpec to move into the 'Success' status, and tests // the specified container log against the given expected output using the given matcher. func (f *Framework) testContainerOutputMatcher(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string, matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) { By(fmt.Sprintf("Creating a pod to test %v", scenarioName)) if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) { Failf("Invalid container index: %d", containerIndex) } ExpectNoError(f.MatchContainerOutput(pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher)) } // MatchContainerOutput creates a pod and waits for all it's containers to exit with success. // It then tests that the matcher with each expectedOutput matches the output of the specified container. func (f *Framework) MatchContainerOutput( pod *v1.Pod, containerName string, expectedOutput []string, matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error { ns := pod.ObjectMeta.Namespace if ns == "" { ns = f.Namespace.Name } podClient := f.PodClientNS(ns) createdPod := podClient.Create(pod) defer func() { By("delete the pod") podClient.DeleteSync(createdPod.Name, &metav1.DeleteOptions{}, DefaultPodDeletionTimeout) }() // Wait for client pod to complete. podErr := WaitForPodSuccessInNamespace(f.ClientSet, createdPod.Name, ns) // Grab its logs. Get host first. podStatus, err := podClient.Get(createdPod.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("failed to get pod status: %v", err) } if podErr != nil { // Pod failed. Dump all logs from all containers to see what's wrong for _, container := range podStatus.Spec.Containers { logs, err := GetPodLogs(f.ClientSet, ns, podStatus.Name, container.Name) if err != nil { Logf("Failed to get logs from node %q pod %q container %q: %v", podStatus.Spec.NodeName, podStatus.Name, container.Name, err) continue } Logf("Output of node %q pod %q container %q: %s", podStatus.Spec.NodeName, podStatus.Name, container.Name, logs) } return fmt.Errorf("expected pod %q success: %v", createdPod.Name, podErr) } Logf("Trying to get logs from node %s pod %s container %s: %v", podStatus.Spec.NodeName, podStatus.Name, containerName, err) // Sometimes the actual containers take a second to get started, try to get logs for 60s logs, err := GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName) if err != nil { Logf("Failed to get logs from node %q pod %q container %q. 
%v", podStatus.Spec.NodeName, podStatus.Name, containerName, err) return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err) } for _, expected := range expectedOutput { m := matcher(expected) matches, err := m.Match(logs) if err != nil { return fmt.Errorf("expected %q in container output: %v", expected, err) } else if !matches { return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs)) } } return nil } type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error) func DumpEventsInNamespace(eventsLister EventsLister, namespace string) { By(fmt.Sprintf("Collecting events from namespace %q.", namespace)) events, err := eventsLister(metav1.ListOptions{}, namespace) Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("Found %d events.", len(events.Items))) // Sort events by their first timestamp sortedEvents := events.Items if len(sortedEvents) > 1 { sort.Sort(byFirstTimestamp(sortedEvents)) } for _, e := range sortedEvents { Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message) } // Note that we don't wait for any Cleanup to propagate, which means // that if you delete a bunch of pods right before ending your test, // you may or may not see the killing/deletion/Cleanup events. } func DumpAllNamespaceInfo(c clientset.Interface, namespace string) { DumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) { return c.CoreV1().Events(ns).List(opts) }, namespace) // If cluster is large, then the following logs are basically useless, because: // 1. it takes tens of minutes or hours to grab all of them // 2. there are so many of them that working with them are mostly impossible // So we dump them only if the cluster is relatively small. maxNodesForDump := 20 if nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}); err == nil { if len(nodes.Items) <= maxNodesForDump { dumpAllPodInfo(c) dumpAllNodeInfo(c) } else { Logf("skipping dumping cluster info - cluster too large") } } else { Logf("unable to fetch node list: %v", err) } } // byFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker. type byFirstTimestamp []v1.Event func (o byFirstTimestamp) Len() int { return len(o) } func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } func (o byFirstTimestamp) Less(i, j int) bool { if o[i].FirstTimestamp.Equal(&o[j].FirstTimestamp) { return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name } return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp) } func dumpAllPodInfo(c clientset.Interface) { pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{}) if err != nil { Logf("unable to fetch pod debug info: %v", err) } logPodStates(pods.Items) } func dumpAllNodeInfo(c clientset.Interface) { // It should be OK to list unschedulable Nodes here. 
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { Logf("unable to fetch node list: %v", err) return } names := make([]string, len(nodes.Items)) for ix := range nodes.Items { names[ix] = nodes.Items[ix].Name } DumpNodeDebugInfo(c, names, Logf) } func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) { for _, n := range nodeNames { logFunc("\nLogging node info for node %v", n) node, err := c.CoreV1().Nodes().Get(n, metav1.GetOptions{}) if err != nil { logFunc("Error getting node info %v", err) } logFunc("Node Info: %v", node) logFunc("\nLogging kubelet events for node %v", n) for _, e := range getNodeEvents(c, n) { logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v", e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject) } logFunc("\nLogging pods the kubelet thinks is on node %v", n) podList, err := GetKubeletPods(c, n) if err != nil { logFunc("Unable to retrieve kubelet pods for node %v: %v", n, err) continue } for _, p := range podList.Items { logFunc("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses)) for _, c := range p.Status.InitContainerStatuses { logFunc("\tInit container %v ready: %v, restart count %v", c.Name, c.Ready, c.RestartCount) } for _, c := range p.Status.ContainerStatuses { logFunc("\tContainer %v ready: %v, restart count %v", c.Name, c.Ready, c.RestartCount) } } HighLatencyKubeletOperations(c, 10*time.Second, n, logFunc) // TODO: Log node resource info } } // logNodeEvents logs kubelet events from the given node. This includes kubelet // restart and node unhealthy events. Note that listing events like this will mess // with latency metrics, beware of calling it during a test. func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event { selector := fields.Set{ "involvedObject.kind": "Node", "involvedObject.name": nodeName, "involvedObject.namespace": metav1.NamespaceAll, "source": "kubelet", }.AsSelector().String() options := metav1.ListOptions{FieldSelector: selector} events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(options) if err != nil { Logf("Unexpected error retrieving node events %v", err) return []v1.Event{} } return events.Items } // waitListSchedulableNodes is a wrapper around listing nodes supporting retries. func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) { var nodes *v1.NodeList var err error if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) { nodes, err = c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { if testutils.IsRetryableAPIError(err) { return false, nil } return false, err } return true, nil }) != nil { return nodes, err } return nodes, nil } // waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries. 
func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList { nodes, err := waitListSchedulableNodes(c) if err != nil { ExpectNoError(err, "Non-retryable failure or timed out while listing nodes for e2e cluster.") } return nodes } // Node is schedulable if: // 1) doesn't have "unschedulable" field set // 2) it's Ready condition is set to true // 3) doesn't have NetworkUnavailable condition set to true func isNodeSchedulable(node *v1.Node) bool { nodeReady := IsNodeConditionSetAsExpected(node, v1.NodeReady, true) networkReady := IsNodeConditionUnset(node, v1.NodeNetworkUnavailable) || IsNodeConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false) return !node.Spec.Unschedulable && nodeReady && networkReady } // Test whether a fake pod can be scheduled on "node", given its current taints. func isNodeUntainted(node *v1.Node) bool { fakePod := &v1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ Name: "fake-not-scheduled", Namespace: "fake-not-scheduled", }, Spec: v1.PodSpec{ Containers: []v1.Container{ { Name: "fake-not-scheduled", Image: "fake-not-scheduled", }, }, }, } nodeInfo := schedulercache.NewNodeInfo() nodeInfo.SetNode(node) fit, _, err := predicates.PodToleratesNodeTaints(fakePod, nil, nodeInfo) if err != nil { Failf("Can't test predicates for node %s: %v", node.Name, err) return false } return fit } // GetReadySchedulableNodesOrDie addresses the common use case of getting nodes you can do work on. // 1) Needs to be schedulable. // 2) Needs to be ready. // If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely. func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList) { nodes = waitListSchedulableNodesOrDie(c) // previous tests may have cause failures of some nodes. Let's skip // 'Not Ready' nodes, just in case (there is no need to fail the test). FilterNodes(nodes, func(node v1.Node) bool { return isNodeSchedulable(&node) && isNodeUntainted(&node) }) return nodes } func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error { Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes) var notSchedulable []*v1.Node attempt := 0 return wait.PollImmediate(30*time.Second, timeout, func() (bool, error) { attempt++ notSchedulable = nil opts := metav1.ListOptions{ ResourceVersion: "0", FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(), } nodes, err := c.CoreV1().Nodes().List(opts) if err != nil { Logf("Unexpected error listing nodes: %v", err) if testutils.IsRetryableAPIError(err) { return false, nil } return false, err } for i := range nodes.Items { node := &nodes.Items[i] if !isNodeSchedulable(node) { notSchedulable = append(notSchedulable, node) } } // Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready, // to make it possible e.g. for incorrect deployment of some small percentage // of nodes (which we allow in cluster validation). Some nodes that are not // provisioned correctly at startup will never become ready (e.g. when something // won't install correctly), so we can't expect them to be ready at any point. // // However, we only allow non-ready nodes with some specific reasons. if len(notSchedulable) > 0 { // In large clusters, log them only every 10th pass. 
if len(nodes.Items) >= largeClusterThreshold && attempt%10 == 0 { Logf("Unschedulable nodes:") for i := range notSchedulable { Logf("-> %s Ready=%t Network=%t", notSchedulable[i].Name, IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true), IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false)) } Logf("================================") } } return len(notSchedulable) <= TestContext.AllowedNotReadyNodes, nil }) } func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration { // With SecretManager(ConfigMapManager), we may have to wait up to full sync period + // TTL of secret(configmap) to elapse before the Kubelet projects the update into the // volume and the container picks it up. // So this timeout is based on default Kubelet sync period (1 minute) + maximum TTL for // secret(configmap) that's based on cluster size + additional time as a fudge factor. secretTTL, err := GetNodeTTLAnnotationValue(c) if err != nil { Logf("Couldn't get node TTL annotation (using default value of 0): %v", err) } podLogTimeout := 240*time.Second + secretTTL return podLogTimeout } func GetNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) { nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil || len(nodes.Items) == 0 { return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err) } // Since TTL the kubelet is using is stored in node object, for the timeout // purpose we take it from the first node (all of them should be the same). node := &nodes.Items[0] if node.Annotations == nil { return time.Duration(0), fmt.Errorf("No annotations found on the node") } value, ok := node.Annotations[v1.ObjectTTLAnnotationKey] if !ok { return time.Duration(0), fmt.Errorf("No TTL annotation found on the node") } intValue, err := strconv.Atoi(value) if err != nil { return time.Duration(0), fmt.Errorf("Cannot convert TTL annotation from %#v to int", *node) } return time.Duration(intValue) * time.Second, nil } func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) { ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue})) } func AddOrUpdateLabelOnNodeAndReturnOldValue(c clientset.Interface, nodeName string, labelKey, labelValue string) string { var oldValue string node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) ExpectNoError(err) oldValue = node.Labels[labelKey] ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue})) return oldValue } func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) { By("verifying the node has the label " + labelKey + " " + labelValue) node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) ExpectNoError(err) Expect(node.Labels[labelKey]).To(Equal(labelValue)) } func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint) { ExpectNoError(controller.RemoveTaintOffNode(c, nodeName, nil, &taint)) VerifyThatTaintIsGone(c, nodeName, &taint) } func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Taint) { ExpectNoError(controller.AddOrUpdateTaintOnNode(c, nodeName, &taint)) } // RemoveLabelOffNode is for cleaning up labels temporarily added to node, // won't fail if target label doesn't exist or has been removed. 
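// Example (hedged sketch): pairing AddOrUpdateLabelOnNode (above) with
// RemoveLabelOffNode (below) so a temporary node label is always cleaned up.
// The node name, label key and value are illustrative assumptions:
//
//	AddOrUpdateLabelOnNode(c, nodeName, "e2e-test-label", "true")
//	defer RemoveLabelOffNode(c, nodeName, "e2e-test-label")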
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) { By("removing the label " + labelKey + " off the node " + nodeName) ExpectNoError(testutils.RemoveLabelOffNode(c, nodeName, []string{labelKey})) By("verifying the node doesn't have the label " + labelKey) ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey})) } func VerifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) { By("verifying the node doesn't have the taint " + taint.ToString()) nodeUpdated, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) ExpectNoError(err) if taintutils.TaintExists(nodeUpdated.Spec.Taints, taint) { Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName) } } func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) { By("verifying the node has the taint " + taint.ToString()) if has, err := NodeHasTaint(c, nodeName, taint); !has { ExpectNoError(err) Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName) } } func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) { node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) if err != nil { return false, err } nodeTaints := node.Spec.Taints if len(nodeTaints) == 0 || !taintutils.TaintExists(nodeTaints, taint) { return false, nil } return true, nil } //AddOrUpdateAvoidPodOnNode adds avoidPods annotations to node, will override if it exists func AddOrUpdateAvoidPodOnNode(c clientset.Interface, nodeName string, avoidPods v1.AvoidPods) { err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) { node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) if err != nil { if testutils.IsRetryableAPIError(err) { return false, nil } return false, err } taintsData, err := json.Marshal(avoidPods) ExpectNoError(err) if node.Annotations == nil { node.Annotations = make(map[string]string) } node.Annotations[v1.PreferAvoidPodsAnnotationKey] = string(taintsData) _, err = c.CoreV1().Nodes().Update(node) if err != nil { if !apierrs.IsConflict(err) { ExpectNoError(err) } else { Logf("Conflict when trying to add/update avoidPonds %v to %v", avoidPods, nodeName) } } return true, nil }) ExpectNoError(err) } //RemoveAnnotationOffNode removes AvoidPods annotations from the node. It does not fail if no such annotation exists. 
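// Example (hedged sketch): adding an AvoidPods annotation for the duration of a
// test and removing it afterwards. The node name and the avoidPods value are
// illustrative assumptions:
//
//	AddOrUpdateAvoidPodOnNode(c, nodeName, avoidPods)
//	defer RemoveAvoidPodsOffNode(c, nodeName)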
func RemoveAvoidPodsOffNode(c clientset.Interface, nodeName string) { err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) { node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) if err != nil { if testutils.IsRetryableAPIError(err) { return false, nil } return false, err } if node.Annotations == nil { return true, nil } delete(node.Annotations, v1.PreferAvoidPodsAnnotationKey) _, err = c.CoreV1().Nodes().Update(node) if err != nil { if !apierrs.IsConflict(err) { ExpectNoError(err) } else { Logf("Conflict when trying to remove avoidPods to %v", nodeName) } } return true, nil }) ExpectNoError(err) } func ScaleResource( clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool, kind schema.GroupKind, gr schema.GroupResource, ) error { By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size)) scaler := kubectl.NewScaler(scalesGetter) if err := testutils.ScaleResourceWithRetries(scaler, ns, name, size, gr); err != nil { return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err) } if !wait { return nil } return WaitForControlledPodsRunning(clientset, ns, name, kind) } // Wait up to 10 minutes for pods to become Running. func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind schema.GroupKind) error { rtObject, err := getRuntimeObjectForKind(c, kind, ns, name) if err != nil { return err } selector, err := getSelectorFromRuntimeObject(rtObject) if err != nil { return err } replicas, err := getReplicasFromRuntimeObject(rtObject) if err != nil { return err } err = testutils.WaitForEnoughPodsWithLabelRunning(c, ns, selector, int(replicas)) if err != nil { return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err) } return nil } // Wait up to PodListTimeout for getting pods of the specified controller name and return them. func WaitForControlledPods(c clientset.Interface, ns, name string, kind schema.GroupKind) (pods *v1.PodList, err error) { rtObject, err := getRuntimeObjectForKind(c, kind, ns, name) if err != nil { return nil, err } selector, err := getSelectorFromRuntimeObject(rtObject) if err != nil { return nil, err } return WaitForPodsWithLabel(c, ns, selector) } // Returns true if all the specified pods are scheduled, else returns false. func podsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (bool, error) { ps, err := testutils.NewPodStore(c, ns, label, fields.Everything()) if err != nil { return false, err } defer ps.Stop() pods := ps.List() if len(pods) == 0 { return false, nil } for _, pod := range pods { if pod.Spec.NodeName == "" { return false, nil } } return true, nil } // Wait for all matching pods to become scheduled and at least one // matching pod exists. Return the list of matching pods. 
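// Example (hedged sketch): waiting for all pods matching a selector to be
// scheduled. The label selector is an illustrative assumption:
//
//	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": "my-rc"}))
//	pods, err := WaitForPodsWithLabelScheduled(c, ns, selector)
//	if err != nil {
//		Failf("pods with selector %v were not all scheduled: %v", selector, err)
//	}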
func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) { err = wait.PollImmediate(Poll, podScheduledBeforeTimeout, func() (bool, error) { pods, err = WaitForPodsWithLabel(c, ns, label) if err != nil { return false, err } for _, pod := range pods.Items { if pod.Spec.NodeName == "" { return false, nil } } return true, nil }) return pods, err } // Wait up to PodListTimeout for getting pods with certain label func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) { for t := time.Now(); time.Since(t) < PodListTimeout; time.Sleep(Poll) { options := metav1.ListOptions{LabelSelector: label.String()} pods, err = c.CoreV1().Pods(ns).List(options) if err != nil { if testutils.IsRetryableAPIError(err) { continue } return } if len(pods.Items) > 0 { break } } if pods == nil || len(pods.Items) == 0 { err = fmt.Errorf("Timeout while waiting for pods with label %v", label) } return } // Wait for exact amount of matching pods to become running and ready. // Return the list of matching pods. func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) { var current int err = wait.Poll(Poll, timeout, func() (bool, error) { pods, err := WaitForPodsWithLabel(c, ns, label) if err != nil { Logf("Failed to list pods: %v", err) if testutils.IsRetryableAPIError(err) { return false, nil } return false, err } current = 0 for _, pod := range pods.Items { if flag, err := testutils.PodRunningReady(&pod); err == nil && flag == true { current++ } } if current != num { Logf("Got %v pods running and ready, expect: %v", current, num) return false, nil } return true, nil }) return pods, err } func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) { switch kind { case api.Kind("ReplicationController"): return c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{}) case extensionsinternal.Kind("ReplicaSet"), appsinternal.Kind("ReplicaSet"): return c.ExtensionsV1beta1().ReplicaSets(ns).Get(name, metav1.GetOptions{}) case extensionsinternal.Kind("Deployment"), appsinternal.Kind("Deployment"): return c.ExtensionsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{}) case extensionsinternal.Kind("DaemonSet"): return c.ExtensionsV1beta1().DaemonSets(ns).Get(name, metav1.GetOptions{}) case batchinternal.Kind("Job"): return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{}) default: return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind) } } func getSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) { switch typed := obj.(type) { case *v1.ReplicationController: return labels.SelectorFromSet(typed.Spec.Selector), nil case *extensions.ReplicaSet: return metav1.LabelSelectorAsSelector(typed.Spec.Selector) case *extensions.Deployment: return metav1.LabelSelectorAsSelector(typed.Spec.Selector) case *extensions.DaemonSet: return metav1.LabelSelectorAsSelector(typed.Spec.Selector) case *batch.Job: return metav1.LabelSelectorAsSelector(typed.Spec.Selector) default: return nil, fmt.Errorf("Unsupported kind when getting selector: %v", obj) } } func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) { switch typed := obj.(type) { case *v1.ReplicationController: if typed.Spec.Replicas != nil { return *typed.Spec.Replicas, nil } return 0, nil case *extensions.ReplicaSet: if typed.Spec.Replicas != 
nil { return *typed.Spec.Replicas, nil } return 0, nil case *extensions.Deployment: if typed.Spec.Replicas != nil { return *typed.Spec.Replicas, nil } return 0, nil case *batch.Job: // TODO: currently we use pause pods so that's OK. When we'll want to switch to Pods // that actually finish we need a better way to do this. if typed.Spec.Parallelism != nil { return *typed.Spec.Parallelism, nil } return 0, nil default: return -1, fmt.Errorf("Unsupported kind when getting number of replicas: %v", obj) } } // DeleteResourceAndPods deletes a given resource and all pods it spawned func DeleteResourceAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, scaleClient scaleclient.ScalesGetter, kind schema.GroupKind, ns, name string) error { By(fmt.Sprintf("deleting %v %s in namespace %s", kind, name, ns)) rtObject, err := getRuntimeObjectForKind(clientset, kind, ns, name) if err != nil { if apierrs.IsNotFound(err) { Logf("%v %s not found: %v", kind, name, err) return nil } return err } selector, err := getSelectorFromRuntimeObject(rtObject) if err != nil { return err } ps, err := testutils.NewPodStore(clientset, ns, selector, fields.Everything()) if err != nil { return err } defer ps.Stop() startTime := time.Now() if err := testutils.DeleteResourceUsingReaperWithRetries(internalClientset, kind, ns, name, nil, scaleClient); err != nil { return fmt.Errorf("error while stopping %v: %s: %v", kind, name, err) } deleteTime := time.Now().Sub(startTime) Logf("Deleting %v %s took: %v", kind, name, deleteTime) err = waitForPodsInactive(ps, 100*time.Millisecond, 10*time.Minute) if err != nil { return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err) } terminatePodTime := time.Now().Sub(startTime) - deleteTime Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime) // this is to relieve namespace controller's pressure when deleting the // namespace after a test. err = waitForPodsGone(ps, 100*time.Millisecond, 10*time.Minute) if err != nil { return fmt.Errorf("error while waiting for pods gone %s: %v", name, err) } gcPodTime := time.Now().Sub(startTime) - terminatePodTime Logf("Garbage collecting %v %s pods took: %v", kind, name, gcPodTime) return nil } // DeleteResourceAndWaitForGC deletes only given resource and waits for GC to delete the pods. 
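// NOTE: illustrative usage sketch, not part of the original helpers above.
// It shows one way DeleteResourceAndPods could be driven for a
// ReplicationController; the helper and resource names are hypothetical.
func deleteRCAndPodsExample(c clientset.Interface, ic internalclientset.Interface, scales scaleclient.ScalesGetter, ns string) error {
	// Deletes the RC via the reaper and waits for its pods to terminate and be garbage collected.
	return DeleteResourceAndPods(c, ic, scales, api.Kind("ReplicationController"), ns, "my-rc")
}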
func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns, name string) error { By(fmt.Sprintf("deleting %v %s in namespace %s, will wait for the garbage collector to delete the pods", kind, name, ns)) rtObject, err := getRuntimeObjectForKind(c, kind, ns, name) if err != nil { if apierrs.IsNotFound(err) { Logf("%v %s not found: %v", kind, name, err) return nil } return err } selector, err := getSelectorFromRuntimeObject(rtObject) if err != nil { return err } replicas, err := getReplicasFromRuntimeObject(rtObject) if err != nil { return err } ps, err := testutils.NewPodStore(c, ns, selector, fields.Everything()) if err != nil { return err } defer ps.Stop() falseVar := false deleteOption := &metav1.DeleteOptions{OrphanDependents: &falseVar} startTime := time.Now() if err := testutils.DeleteResourceWithRetries(c, kind, ns, name, deleteOption); err != nil { return err } deleteTime := time.Now().Sub(startTime) Logf("Deleting %v %s took: %v", kind, name, deleteTime) var interval, timeout time.Duration switch { case replicas < 100: interval = 100 * time.Millisecond case replicas < 1000: interval = 1 * time.Second default: interval = 10 * time.Second } if replicas < 5000 { timeout = 10 * time.Minute } else { timeout = time.Duration(replicas/gcThroughput) * time.Second // gcThroughput is pretty strict now, add a bit more to it timeout = timeout + 3*time.Minute } err = waitForPodsInactive(ps, interval, timeout) if err != nil { return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err) } terminatePodTime := time.Now().Sub(startTime) - deleteTime Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime) err = waitForPodsGone(ps, interval, 10*time.Minute) if err != nil { return fmt.Errorf("error while waiting for pods gone %s: %v", name, err) } return nil } // waitForPodsInactive waits until there are no active pods left in the PodStore. // This is to make a fair comparison of deletion time between DeleteRCAndPods // and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas // when the pod is inactvie. func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error { return wait.PollImmediate(interval, timeout, func() (bool, error) { pods := ps.List() for _, pod := range pods { if controller.IsPodActive(pod) { return false, nil } } return true, nil }) } // waitForPodsGone waits until there are no pods left in the PodStore. func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error { return wait.PollImmediate(interval, timeout, func() (bool, error) { if pods := ps.List(); len(pods) == 0 { return true, nil } return false, nil }) } func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) options := metav1.ListOptions{LabelSelector: label.String()} return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { pods, err := c.CoreV1().Pods(ns).List(options) if err != nil { return false, nil } for _, pod := range pods.Items { if !podutil.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) { return false, nil } } return true, nil }) } // Waits for the number of events on the given object to reach a desired count. 
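// NOTE: illustrative usage sketch, not part of the original helpers above.
// In contrast to the DeleteResourceAndPods example earlier, here only the
// controller object is deleted and the garbage collector is left to remove
// the pods. The helper and resource names are hypothetical.
func deleteReplicaSetWithGCExample(c clientset.Interface, ns string) error {
	return DeleteResourceAndWaitForGC(c, extensionsinternal.Kind("ReplicaSet"), ns, "my-replicaset")
}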
func WaitForEvents(c clientset.Interface, ns string, objOrRef runtime.Object, desiredEventsCount int) error { return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, objOrRef) if err != nil { return false, fmt.Errorf("error in listing events: %s", err) } eventsCount := len(events.Items) if eventsCount == desiredEventsCount { return true, nil } if eventsCount < desiredEventsCount { return false, nil } // Number of events has exceeded the desired count. return false, fmt.Errorf("number of events has exceeded the desired count, eventsCount: %d, desiredCount: %d", eventsCount, desiredEventsCount) }) } // Waits for the number of events on the given object to be at least a desired count. func WaitForPartialEvents(c clientset.Interface, ns string, objOrRef runtime.Object, atLeastEventsCount int) error { return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, objOrRef) if err != nil { return false, fmt.Errorf("error in listing events: %s", err) } eventsCount := len(events.Items) if eventsCount >= atLeastEventsCount { return true, nil } return false, nil }) } type updateDSFunc func(*apps.DaemonSet) func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *apps.DaemonSet, err error) { daemonsets := c.AppsV1().DaemonSets(namespace) var updateErr error pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) { if ds, err = daemonsets.Get(name, metav1.GetOptions{}); err != nil { if testutils.IsRetryableAPIError(err) { return false, nil } return false, err } // Apply the update, then attempt to push it to the apiserver. applyUpdate(ds) if ds, err = daemonsets.Update(ds); err == nil { Logf("Updating DaemonSet %s", name) return true, nil } updateErr = err return false, nil }) if pollErr == wait.ErrWaitTimeout { pollErr = fmt.Errorf("couldn't apply the provided updated to DaemonSet %q: %v", name, updateErr) } return ds, pollErr } // NodeAddresses returns the first address of the given type of each node. func NodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string { hosts := []string{} for _, n := range nodelist.Items { for _, addr := range n.Status.Addresses { // Use the first external IP address we find on the node, and // use at most one per node. // TODO(roberthbailey): Use the "preferred" address for the node, once // such a thing is defined (#2462). if addr.Type == addrType { hosts = append(hosts, addr.Address) break } } } return hosts } // NodeSSHHosts returns SSH-able host names for all schedulable nodes - this excludes master node. // It returns an error if it can't find an external IP for every node, though it still returns all // hosts that it found in that case. func NodeSSHHosts(c clientset.Interface) ([]string, error) { nodelist := waitListSchedulableNodesOrDie(c) // TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462). hosts := NodeAddresses(nodelist, v1.NodeExternalIP) // Error if any node didn't have an external IP. if len(hosts) != len(nodelist.Items) { return hosts, fmt.Errorf( "only found %d external IPs on nodes, but found %d nodes. 
Nodelist: %v", len(hosts), len(nodelist.Items), nodelist) } sshHosts := make([]string, 0, len(hosts)) for _, h := range hosts { sshHosts = append(sshHosts, net.JoinHostPort(h, sshPort)) } return sshHosts, nil } type SSHResult struct { User string Host string Cmd string Stdout string Stderr string Code int } // NodeExec execs the given cmd on node via SSH. Note that the nodeName is an sshable name, // eg: the name returned by framework.GetMasterHost(). This is also not guaranteed to work across // cloud providers since it involves ssh. func NodeExec(nodeName, cmd string) (SSHResult, error) { return SSH(cmd, net.JoinHostPort(nodeName, sshPort), TestContext.Provider) } // SSH synchronously SSHs to a node running on provider and runs cmd. If there // is no error performing the SSH, the stdout, stderr, and exit code are // returned. func SSH(cmd, host, provider string) (SSHResult, error) { result := SSHResult{Host: host, Cmd: cmd} // Get a signer for the provider. signer, err := GetSigner(provider) if err != nil { return result, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err) } // RunSSHCommand will default to Getenv("USER") if user == "", but we're // defaulting here as well for logging clarity. result.User = os.Getenv("KUBE_SSH_USER") if result.User == "" { result.User = os.Getenv("USER") } stdout, stderr, code, err := sshutil.RunSSHCommand(cmd, result.User, host, signer) result.Stdout = stdout result.Stderr = stderr result.Code = code return result, err } func LogSSHResult(result SSHResult) { remote := fmt.Sprintf("%s@%s", result.User, result.Host) Logf("ssh %s: command: %s", remote, result.Cmd) Logf("ssh %s: stdout: %q", remote, result.Stdout) Logf("ssh %s: stderr: %q", remote, result.Stderr) Logf("ssh %s: exit code: %d", remote, result.Code) } func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*SSHResult, error) { Logf("Getting external IP address for %s", node.Name) host := "" for _, a := range node.Status.Addresses { if a.Type == v1.NodeExternalIP { host = net.JoinHostPort(a.Address, sshPort) break } } if host == "" { // No external IPs were found, let's try to use internal as plan B for _, a := range node.Status.Addresses { if a.Type == v1.NodeInternalIP { host = net.JoinHostPort(a.Address, sshPort) break } } } if host == "" { return nil, fmt.Errorf("couldn't find any IP address for node %s", node.Name) } Logf("SSH %q on %s(%s)", cmd, node.Name, host) result, err := SSH(cmd, host, provider) LogSSHResult(result) if result.Code != 0 || err != nil { return nil, fmt.Errorf("failed running %q: %v (exit code %d)", cmd, err, result.Code) } return &result, nil } func IssueSSHCommand(cmd, provider string, node *v1.Node) error { _, err := IssueSSHCommandWithResult(cmd, provider, node) if err != nil { return err } return nil } // NewHostExecPodSpec returns the pod spec of hostexec pod func NewHostExecPodSpec(ns, name string) *v1.Pod { immediate := int64(0) pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: ns, }, Spec: v1.PodSpec{ Containers: []v1.Container{ { Name: "hostexec", Image: imageutils.GetE2EImage(imageutils.Hostexec), ImagePullPolicy: v1.PullIfNotPresent, }, }, HostNetwork: true, SecurityContext: &v1.PodSecurityContext{}, TerminationGracePeriodSeconds: &immediate, }, } return pod } // RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec` // inside of a shell. 
func RunHostCmd(ns, name, cmd string) (string, error) { return RunKubectl("exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-c", cmd) } // RunHostCmdOrDie calls RunHostCmd and dies on error. func RunHostCmdOrDie(ns, name, cmd string) string { stdout, err := RunHostCmd(ns, name, cmd) Logf("stdout: %v", stdout) ExpectNoError(err) return stdout } // RunHostCmdWithRetries calls RunHostCmd and retries all errors // until it succeeds or the specified timeout expires. // This can be used with idempotent commands to deflake transient Node issues. func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration) (string, error) { start := time.Now() for { out, err := RunHostCmd(ns, name, cmd) if err == nil { return out, nil } if elapsed := time.Since(start); elapsed > timeout { return out, fmt.Errorf("RunHostCmd still failed after %v: %v", elapsed, err) } Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err) time.Sleep(interval) } } // LaunchHostExecPod launches a hostexec pod in the given namespace and waits // until it's Running func LaunchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod { hostExecPod := NewHostExecPodSpec(ns, name) pod, err := client.CoreV1().Pods(ns).Create(hostExecPod) ExpectNoError(err) err = WaitForPodRunningInNamespace(client, pod) ExpectNoError(err) return pod } // newExecPodSpec returns the pod spec of exec pod func newExecPodSpec(ns, generateName string) *v1.Pod { immediate := int64(0) pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ GenerateName: generateName, Namespace: ns, }, Spec: v1.PodSpec{ TerminationGracePeriodSeconds: &immediate, Containers: []v1.Container{ { Name: "exec", Image: BusyBoxImage, Command: []string{"sh", "-c", "while true; do sleep 5; done"}, }, }, }, } return pod } // CreateExecPodOrFail creates a simple busybox pod in a sleep loop used as a // vessel for kubectl exec commands. // Returns the name of the created pod. func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) string { Logf("Creating new exec pod") execPod := newExecPodSpec(ns, generateName) if tweak != nil { tweak(execPod) } created, err := client.CoreV1().Pods(ns).Create(execPod) Expect(err).NotTo(HaveOccurred()) err = wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) { retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{}) if err != nil { if testutils.IsRetryableAPIError(err) { return false, nil } return false, err } return retrievedPod.Status.Phase == v1.PodRunning, nil }) Expect(err).NotTo(HaveOccurred()) return created.Name } func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort) { By(fmt.Sprintf("Creating pod %s in namespace %s", name, ns)) pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: labels, }, Spec: v1.PodSpec{ Containers: []v1.Container{ { Name: "pause", Image: imageutils.GetPauseImageName(), Ports: containerPorts, // Add a dummy environment variable to work around a docker issue. // https://github.com/docker/docker/issues/14203 Env: []v1.EnvVar{{Name: "FOO", Value: " "}}, }, }, }, } _, err := c.CoreV1().Pods(ns).Create(pod) Expect(err).NotTo(HaveOccurred()) } func DeletePodOrFail(c clientset.Interface, ns, name string) { By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns)) err := c.CoreV1().Pods(ns).Delete(name, nil) Expect(err).NotTo(HaveOccurred()) } // GetSigner returns an ssh.Signer for the provider ("gce", etc.) 
that can be // used to SSH to their nodes. func GetSigner(provider string) (ssh.Signer, error) { // Get the directory in which SSH keys are located. keydir := filepath.Join(os.Getenv("HOME"), ".ssh") // Select the key itself to use. When implementing more providers here, // please also add them to any SSH tests that are disabled because of signer // support. keyfile := "" key := "" switch provider { case "gce", "gke", "kubemark": keyfile = "google_compute_engine" case "aws": // If there is an env. variable override, use that. aws_keyfile := os.Getenv("AWS_SSH_KEY") if len(aws_keyfile) != 0 { return sshutil.MakePrivateKeySignerFromFile(aws_keyfile) } // Otherwise revert to home dir keyfile = "kube_aws_rsa" case "local", "vsphere": keyfile = os.Getenv("LOCAL_SSH_KEY") // maybe? if len(keyfile) == 0 { keyfile = "id_rsa" } case "skeleton": keyfile = os.Getenv("KUBE_SSH_KEY") if len(keyfile) == 0 { keyfile = "id_rsa" } default: return nil, fmt.Errorf("GetSigner(...) not implemented for %s", provider) } if len(key) == 0 { key = filepath.Join(keydir, keyfile) } return sshutil.MakePrivateKeySignerFromFile(key) } // CheckPodsRunningReady returns whether all pods whose names are listed in // podNames in namespace ns are running and ready, using c and waiting at most // timeout. func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool { return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready") } // CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are // listed in podNames in namespace ns are running and ready, or succeeded; use // c and waiting at most timeout. func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool { return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded") } // CheckPodsCondition returns whether all pods whose names are listed in podNames // in namespace ns are in the condition, using c and waiting at most timeout. func CheckPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool { np := len(podNames) Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames) type waitPodResult struct { success bool podName string } result := make(chan waitPodResult, len(podNames)) for _, podName := range podNames { // Launch off pod readiness checkers. go func(name string) { err := WaitForPodCondition(c, ns, name, desc, timeout, condition) result <- waitPodResult{err == nil, name} }(podName) } // Wait for them all to finish. success := true for range podNames { res := <-result if !res.success { Logf("Pod %[1]s failed to be %[2]s.", res.podName, desc) success = false } } Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames) return success } // WaitForNodeToBeReady returns whether node name is ready within timeout. func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool { return WaitForNodeToBe(c, name, v1.NodeReady, true, timeout) } // WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the // readiness condition is anything but ready, e.g false or unknown) within // timeout. 
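// NOTE: illustrative usage sketch, not part of the original helpers above.
// It checks that a fixed, hypothetical set of pods in kube-system is running
// and ready within two minutes using CheckPodsRunningReady.
func checkSystemPodsExample(c clientset.Interface) bool {
	podNames := []string{"pod-a", "pod-b"} // hypothetical pod names
	return CheckPodsRunningReady(c, metav1.NamespaceSystem, podNames, 2*time.Minute)
}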
func WaitForNodeToBeNotReady(c clientset.Interface, name string, timeout time.Duration) bool { return WaitForNodeToBe(c, name, v1.NodeReady, false, timeout) } func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue, silent bool) bool { // Check the node readiness condition (logging all). for _, cond := range node.Status.Conditions { // Ensure that the condition type and the status matches as desired. if cond.Type == conditionType { // For NodeReady condition we need to check Taints as well if cond.Type == v1.NodeReady { hasNodeControllerTaints := false // For NodeReady we need to check if Taints are gone as well taints := node.Spec.Taints for _, taint := range taints { if taint.MatchTaint(nodectlr.UnreachableTaintTemplate) || taint.MatchTaint(nodectlr.NotReadyTaintTemplate) { hasNodeControllerTaints = true break } } if wantTrue { if (cond.Status == v1.ConditionTrue) && !hasNodeControllerTaints { return true } else { msg := "" if !hasNodeControllerTaints { msg = fmt.Sprintf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v", conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message) } else { msg = fmt.Sprintf("Condition %s of node %s is %v, but Node is tainted by NodeController with %v. Failure", conditionType, node.Name, cond.Status == v1.ConditionTrue, taints) } if !silent { Logf(msg) } return false } } else { // TODO: check if the Node is tainted once we enable NC notReady/unreachable taints by default if cond.Status != v1.ConditionTrue { return true } if !silent { Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v", conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message) } return false } } if (wantTrue && (cond.Status == v1.ConditionTrue)) || (!wantTrue && (cond.Status != v1.ConditionTrue)) { return true } else { if !silent { Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v", conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message) } return false } } } if !silent { Logf("Couldn't find condition %v on node %v", conditionType, node.Name) } return false } func IsNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool { return isNodeConditionSetAsExpected(node, conditionType, wantTrue, false) } func IsNodeConditionSetAsExpectedSilent(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool { return isNodeConditionSetAsExpected(node, conditionType, wantTrue, true) } func IsNodeConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool { for _, cond := range node.Status.Conditions { if cond.Type == conditionType { return false } } return true } // WaitForNodeToBe returns whether node "name's" condition state matches wantTrue // within timeout. If wantTrue is true, it will ensure the node condition status // is ConditionTrue; if it's false, it ensures the node condition is in any state // other than ConditionTrue (e.g. not true or unknown). 
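// NOTE: illustrative usage sketch, not part of the original helpers above.
// It combines two condition checks on a single node: Ready must be true and
// MemoryPressure must not be true. The helper name is hypothetical.
func nodeReadyWithoutMemoryPressureExample(node *v1.Node) bool {
	return IsNodeConditionSetAsExpected(node, v1.NodeReady, true) &&
		IsNodeConditionSetAsExpected(node, v1.NodeMemoryPressure, false)
}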
func WaitForNodeToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool { Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{}) if err != nil { Logf("Couldn't get node %s", name) continue } if IsNodeConditionSetAsExpected(node, conditionType, wantTrue) { return true } } Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout) return false } // Checks whether all registered nodes are ready. // TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy, // and figure out how to do it in a configurable way, as we can't expect all setups to run // default test add-ons. func AllNodesReady(c clientset.Interface, timeout time.Duration) error { Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, TestContext.AllowedNotReadyNodes) var notReady []*v1.Node err := wait.PollImmediate(Poll, timeout, func() (bool, error) { notReady = nil // It should be OK to list unschedulable Nodes here. nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { if testutils.IsRetryableAPIError(err) { return false, nil } return false, err } for i := range nodes.Items { node := &nodes.Items[i] if !IsNodeConditionSetAsExpected(node, v1.NodeReady, true) { notReady = append(notReady, node) } } // Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready, // to make it possible e.g. for incorrect deployment of some small percentage // of nodes (which we allow in cluster validation). Some nodes that are not // provisioned correctly at startup will never become ready (e.g. when something // won't install correctly), so we can't expect them to be ready at any point. return len(notReady) <= TestContext.AllowedNotReadyNodes, nil }) if err != nil && err != wait.ErrWaitTimeout { return err } if len(notReady) > TestContext.AllowedNotReadyNodes { msg := "" for _, node := range notReady { msg = fmt.Sprintf("%s, %s", msg, node.Name) } return fmt.Errorf("Not ready nodes: %#v", msg) } return nil } // checks whether all registered nodes are ready and all required Pods are running on them. func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error { Logf("Waiting up to %v for all nodes to be ready", timeout) var notReady []v1.Node var missingPodsPerNode map[string][]string err := wait.PollImmediate(Poll, timeout, func() (bool, error) { notReady = nil // It should be OK to list unschedulable Nodes here. 
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"}) if err != nil { if testutils.IsRetryableAPIError(err) { return false, nil } return false, err } for _, node := range nodes.Items { if !IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) { notReady = append(notReady, node) } } pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: "0"}) if err != nil { return false, err } systemPodsPerNode := make(map[string][]string) for _, pod := range pods.Items { if pod.Namespace == metav1.NamespaceSystem && pod.Status.Phase == v1.PodRunning { if pod.Spec.NodeName != "" { systemPodsPerNode[pod.Spec.NodeName] = append(systemPodsPerNode[pod.Spec.NodeName], pod.Name) } } } missingPodsPerNode = make(map[string][]string) for _, node := range nodes.Items { if !system.IsMasterNode(node.Name) { for _, requiredPod := range requiredPerNodePods { foundRequired := false for _, presentPod := range systemPodsPerNode[node.Name] { if requiredPod.MatchString(presentPod) { foundRequired = true break } } if !foundRequired { missingPodsPerNode[node.Name] = append(missingPodsPerNode[node.Name], requiredPod.String()) } } } } return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil }) if err != nil && err != wait.ErrWaitTimeout { return err } if len(notReady) > 0 { return fmt.Errorf("Not ready nodes: %v", notReady) } if len(missingPodsPerNode) > 0 { return fmt.Errorf("Not running system Pods: %v", missingPodsPerNode) } return nil } // Filters nodes in NodeList in place, removing nodes that do not // satisfy the given condition // TODO: consider merging with pkg/client/cache.NodeLister func FilterNodes(nodeList *v1.NodeList, fn func(node v1.Node) bool) { var l []v1.Node for _, node := range nodeList.Items { if fn(node) { l = append(l, node) } } nodeList.Items = l } // ParseKVLines parses output that looks like lines containing "<key>: <val>" // and returns <val> if <key> is found. Otherwise, it returns the empty string. func ParseKVLines(output, key string) string { delim := ":" key = key + delim for _, line := range strings.Split(output, "\n") { pieces := strings.SplitAfterN(line, delim, 2) if len(pieces) != 2 { continue } k, v := pieces[0], pieces[1] if k == key { return strings.TrimSpace(v) } } return "" } func RestartKubeProxy(host string) error { // TODO: Make it work for all providers. if !ProviderIs("gce", "gke", "aws") { return fmt.Errorf("unsupported provider: %s", TestContext.Provider) } // kubelet will restart the kube-proxy since it's running in a static pod Logf("Killing kube-proxy on node %v", host) result, err := SSH("sudo pkill kube-proxy", host, TestContext.Provider) if err != nil || result.Code != 0 { LogSSHResult(result) return fmt.Errorf("couldn't restart kube-proxy: %v", err) } // wait for kube-proxy to come back up sshCmd := "sudo /bin/sh -c 'pgrep kube-proxy | wc -l'" err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { Logf("Waiting for kubeproxy to come back up with %v on %v", sshCmd, host) result, err := SSH(sshCmd, host, TestContext.Provider) if err != nil { return false, err } if result.Code != 0 { LogSSHResult(result) return false, fmt.Errorf("failed to run command, exited %d", result.Code) } if result.Stdout == "0\n" { return false, nil } Logf("kube-proxy is back up.") return true, nil }) if err != nil { return fmt.Errorf("kube-proxy didn't recover: %v", err) } return nil } func RestartKubelet(host string) error { // TODO: Make it work for all providers and distros. 
supportedProviders := []string{"gce", "aws", "vsphere"} if !ProviderIs(supportedProviders...) { return fmt.Errorf("unsupported provider: %s, supported providers are: %v", TestContext.Provider, supportedProviders) } if ProviderIs("gce") && !NodeOSDistroIs("debian", "gci") { return fmt.Errorf("unsupported node OS distro: %s", TestContext.NodeOSDistro) } var cmd string if ProviderIs("gce") && NodeOSDistroIs("debian") { cmd = "sudo /etc/init.d/kubelet restart" } else if ProviderIs("vsphere") { var sudoPresent bool sshResult, err := SSH("sudo --version", host, TestContext.Provider) if err != nil { return fmt.Errorf("Unable to ssh to host %s with error %v", host, err) } if !strings.Contains(sshResult.Stderr, "command not found") { sudoPresent = true } sshResult, err = SSH("systemctl --version", host, TestContext.Provider) if !strings.Contains(sshResult.Stderr, "command not found") { cmd = "systemctl restart kubelet" } else { cmd = "service kubelet restart" } if sudoPresent { cmd = fmt.Sprintf("sudo %s", cmd) } } else { cmd = "sudo systemctl restart kubelet" } Logf("Restarting kubelet via ssh on host %s with command %s", host, cmd) result, err := SSH(cmd, host, TestContext.Provider) if err != nil || result.Code != 0 { LogSSHResult(result) return fmt.Errorf("couldn't restart kubelet: %v", err) } return nil } func WaitForKubeletUp(host string) error { cmd := "curl http://localhost:" + strconv.Itoa(ports.KubeletReadOnlyPort) + "/healthz" for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { result, err := SSH(cmd, host, TestContext.Provider) if err != nil || result.Code != 0 { LogSSHResult(result) } if result.Stdout == "ok" { return nil } } return fmt.Errorf("waiting for kubelet timed out") } func RestartApiserver(cs clientset.Interface) error { // TODO: Make it work for all providers. if !ProviderIs("gce", "gke", "aws") { return fmt.Errorf("unsupported provider: %s", TestContext.Provider) } if ProviderIs("gce", "aws") { initialRestartCount, err := getApiserverRestartCount(cs) if err != nil { return fmt.Errorf("failed to get apiserver's restart count: %v", err) } if err := sshRestartMaster(); err != nil { return fmt.Errorf("failed to restart apiserver: %v", err) } return waitForApiserverRestarted(cs, initialRestartCount) } // GKE doesn't allow ssh access, so use a same-version master // upgrade to teardown/recreate master. v, err := cs.Discovery().ServerVersion() if err != nil { return err } return masterUpgradeGKE(v.GitVersion[1:]) // strip leading 'v' } func sshRestartMaster() error { if !ProviderIs("gce", "aws") { return fmt.Errorf("unsupported provider: %s", TestContext.Provider) } var command string if ProviderIs("gce") { command = "pidof kube-apiserver | xargs sudo kill" } else { command = "sudo /etc/init.d/kube-apiserver restart" } Logf("Restarting master via ssh, running: %v", command) result, err := SSH(command, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider) if err != nil || result.Code != 0 { LogSSHResult(result) return fmt.Errorf("couldn't restart apiserver: %v", err) } return nil } func WaitForApiserverUp(c clientset.Interface) error { for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { body, err := c.CoreV1().RESTClient().Get().AbsPath("/healthz").Do().Raw() if err == nil && string(body) == "ok" { return nil } } return fmt.Errorf("waiting for apiserver timed out") } // waitForApiserverRestarted waits until apiserver's restart count increased. 
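// NOTE: illustrative usage sketch, not part of the original helpers above.
// It restarts the kubelet on one node and blocks until the read-only healthz
// endpoint answers again; the helper name is hypothetical.
func bounceKubeletExample(host string) error {
	if err := RestartKubelet(host); err != nil {
		return err
	}
	return WaitForKubeletUp(host)
}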
func waitForApiserverRestarted(c clientset.Interface, initialRestartCount int32) error { for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { restartCount, err := getApiserverRestartCount(c) if err != nil { Logf("Failed to get apiserver's restart count: %v", err) continue } if restartCount > initialRestartCount { Logf("Apiserver has restarted.") return nil } Logf("Waiting for apiserver restart count to increase") } return fmt.Errorf("timed out waiting for apiserver to be restarted") } func getApiserverRestartCount(c clientset.Interface) (int32, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{"component": "kube-apiserver"})) listOpts := metav1.ListOptions{LabelSelector: label.String()} pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts) if err != nil { return -1, err } if len(pods.Items) != 1 { return -1, fmt.Errorf("unexpected number of apiserver pod: %d", len(pods.Items)) } for _, s := range pods.Items[0].Status.ContainerStatuses { if s.Name != "kube-apiserver" { continue } return s.RestartCount, nil } return -1, fmt.Errorf("failed to find kube-apiserver container in pod") } func RestartControllerManager() error { // TODO: Make it work for all providers and distros. if !ProviderIs("gce", "aws") { return fmt.Errorf("unsupported provider: %s", TestContext.Provider) } if ProviderIs("gce") && !MasterOSDistroIs("gci") { return fmt.Errorf("unsupported master OS distro: %s", TestContext.MasterOSDistro) } cmd := "pidof kube-controller-manager | xargs sudo kill" Logf("Restarting controller-manager via ssh, running: %v", cmd) result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider) if err != nil || result.Code != 0 { LogSSHResult(result) return fmt.Errorf("couldn't restart controller-manager: %v", err) } return nil } func WaitForControllerManagerUp() error { cmd := "curl http://localhost:" + strconv.Itoa(ports.InsecureKubeControllerManagerPort) + "/healthz" for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider) if err != nil || result.Code != 0 { LogSSHResult(result) } if result.Stdout == "ok" { return nil } } return fmt.Errorf("waiting for controller-manager timed out") } // CheckForControllerManagerHealthy checks that the controller manager does not crash within "duration" func CheckForControllerManagerHealthy(duration time.Duration) error { var PID string cmd := "pidof kube-controller-manager" for start := time.Now(); time.Since(start) < duration; time.Sleep(5 * time.Second) { result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider) if err != nil { // We don't necessarily know that it crashed, pipe could just be broken LogSSHResult(result) return fmt.Errorf("master unreachable after %v", time.Since(start)) } else if result.Code != 0 { LogSSHResult(result) return fmt.Errorf("SSH result code not 0. actually: %v after %v", result.Code, time.Since(start)) } else if result.Stdout != PID { if PID == "" { PID = result.Stdout } else { //its dead return fmt.Errorf("controller manager crashed, old PID: %s, new PID: %s", PID, result.Stdout) } } else { Logf("kube-controller-manager still healthy after %v", time.Since(start)) } } return nil } // NumberOfRegisteredNodes returns number of registered Nodes excluding Master Node. 
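// NOTE: illustrative usage sketch, not part of the original helpers above.
// It restarts the kube-controller-manager on the master and waits for its
// healthz endpoint to report ok again; the helper name is hypothetical.
func bounceControllerManagerExample() error {
	if err := RestartControllerManager(); err != nil {
		return err
	}
	return WaitForControllerManagerUp()
}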
func NumberOfRegisteredNodes(c clientset.Interface) (int, error) { nodes, err := waitListSchedulableNodes(c) if err != nil { Logf("Failed to list nodes: %v", err) return 0, err } return len(nodes.Items), nil } // NumberOfReadyNodes returns number of ready Nodes excluding Master Node. func NumberOfReadyNodes(c clientset.Interface) (int, error) { nodes, err := waitListSchedulableNodes(c) if err != nil { Logf("Failed to list nodes: %v", err) return 0, err } // Filter out not-ready nodes. FilterNodes(nodes, func(node v1.Node) bool { return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) }) return len(nodes.Items), nil } // CheckNodesReady waits up to timeout for the cluster to have the desired size and // no not-ready nodes in it. By cluster size we mean number of Nodes // excluding Master Node. func CheckNodesReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { nodes, err := waitListSchedulableNodes(c) if err != nil { Logf("Failed to list nodes: %v", err) continue } numNodes := len(nodes.Items) // Filter out not-ready nodes. FilterNodes(nodes, func(node v1.Node) bool { return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) }) numReady := len(nodes.Items) if numNodes == size && numReady == size { Logf("Cluster has reached the desired number of ready nodes %d", size) return nodes.Items, nil } Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady) } return nil, fmt.Errorf("timeout waiting %v for number of ready nodes to be %d", timeout, size) } // WaitForReadyNodes waits up to timeout for the cluster to have the desired size and // no not-ready nodes in it. By cluster size we mean number of Nodes // excluding Master Node. func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) error { _, err := CheckNodesReady(c, size, timeout) return err } func GenerateMasterRegexp(prefix string) string { return prefix + "(-...)?" } // WaitForMasters waits until the cluster has the desired number of ready masters in it. func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { Logf("Failed to list nodes: %v", err) continue } // Filter out nodes that are not master replicas FilterNodes(nodes, func(node v1.Node) bool { res, err := regexp.Match(GenerateMasterRegexp(masterPrefix), ([]byte)(node.Name)) if err != nil { Logf("Failed to match regexp to node name: %v", err) return false } return res }) numNodes := len(nodes.Items) // Filter out not-ready nodes. FilterNodes(nodes, func(node v1.Node) bool { return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) }) numReady := len(nodes.Items) if numNodes == size && numReady == size { Logf("Cluster has reached the desired number of masters %d", size) return nil } Logf("Waiting for the number of masters %d, current %d, not ready master nodes %d", size, numNodes, numNodes-numReady) } return fmt.Errorf("timeout waiting %v for the number of masters to be %d", timeout, size) } // GetHostExternalAddress gets the node for a pod and returns the first External // address. Returns an error if the node the pod is on doesn't have an External // address.
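// NOTE: illustrative usage sketch, not part of the original helpers above.
// It records the current number of ready nodes, runs an arbitrary disruptive
// action, and waits up to ten minutes for the cluster to return to its
// original size; the helper name and the timeout are hypothetical.
func waitForClusterToRecoverExample(c clientset.Interface, disrupt func() error) error {
	origSize, err := NumberOfReadyNodes(c)
	if err != nil {
		return err
	}
	if err := disrupt(); err != nil {
		return err
	}
	return WaitForReadyNodes(c, origSize, 10*time.Minute)
}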
func GetHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddress string, err error) { node, err := client.CoreV1().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{}) if err != nil { return "", err } for _, address := range node.Status.Addresses { if address.Type == v1.NodeExternalIP { if address.Address != "" { externalAddress = address.Address break } } } if externalAddress == "" { err = fmt.Errorf("No external address for pod %v on node %v", p.Name, p.Spec.NodeName) } return } type extractRT struct { http.Header } func (rt *extractRT) RoundTrip(req *http.Request) (*http.Response, error) { rt.Header = req.Header return &http.Response{}, nil } // headersForConfig extracts any http client logic necessary for the provided // config. func headersForConfig(c *restclient.Config) (http.Header, error) { extract := &extractRT{} rt, err := restclient.HTTPWrappersForConfig(c, extract) if err != nil { return nil, err } if _, err := rt.RoundTrip(&http.Request{}); err != nil { return nil, err } return extract.Header, nil } // OpenWebSocketForURL constructs a websocket connection to the provided URL, using the client // config, with the specified protocols. func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []string) (*websocket.Conn, error) { tlsConfig, err := restclient.TLSConfigFor(config) if err != nil { return nil, fmt.Errorf("failed to create tls config: %v", err) } if tlsConfig != nil { url.Scheme = "wss" if !strings.Contains(url.Host, ":") { url.Host += ":443" } } else { url.Scheme = "ws" if !strings.Contains(url.Host, ":") { url.Host += ":80" } } headers, err := headersForConfig(config) if err != nil { return nil, fmt.Errorf("failed to load http headers: %v", err) } cfg, err := websocket.NewConfig(url.String(), "http://localhost") if err != nil { return nil, fmt.Errorf("failed to create websocket config: %v", err) } cfg.Header = headers cfg.TlsConfig = tlsConfig cfg.Protocol = protocols return websocket.DialConfig(cfg) } // Looks for the given string in the log of a specific pod container func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) { return LookForString(expectedString, timeout, func() string { return RunKubectlOrDie("logs", podName, container, fmt.Sprintf("--namespace=%v", ns)) }) } // Looks for the given string in a file in a specific pod container func LookForStringInFile(ns, podName, container, file, expectedString string, timeout time.Duration) (result string, err error) { return LookForString(expectedString, timeout, func() string { return RunKubectlOrDie("exec", podName, "-c", container, fmt.Sprintf("--namespace=%v", ns), "--", "cat", file) }) } // Looks for the given string in the output of a command executed in a specific pod container func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) { return LookForString(expectedString, timeout, func() string { // use the first container args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"} args = append(args, command...) return RunKubectlOrDie(args...) }) } // Looks for the given string in the output of fn, repeatedly calling fn until // the timeout is reached or the string is found. Returns last log and possibly // error if the string was not found. 
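// NOTE: illustrative usage sketch, not part of the original helpers above.
// It waits up to one minute for a marker string to show up in a container's
// log via LookForStringInLog; pod, container and marker names are hypothetical.
func waitForLogMarkerExample(ns string) error {
	_, err := LookForStringInLog(ns, "my-pod", "my-container", "server started", time.Minute)
	return err
}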
func LookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) { for t := time.Now(); time.Since(t) < timeout; time.Sleep(Poll) { result = fn() if strings.Contains(result, expectedString) { return } } err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result) return } // getSvcNodePort returns the node port for the given service:port. func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) { svc, err := client.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) if err != nil { return 0, err } for _, p := range svc.Spec.Ports { if p.Port == int32(svcPort) { if p.NodePort != 0 { return int(p.NodePort), nil } } } return 0, fmt.Errorf( "No node port found for service %v, port %v", name, svcPort) } // GetNodePortURL returns the url to a nodeport Service. func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (string, error) { nodePort, err := getSvcNodePort(client, ns, name, svcPort) if err != nil { return "", err } // This list of nodes must not include the master, which is marked // unschedulable, since the master doesn't run kube-proxy. Without // kube-proxy NodePorts won't work. var nodes *v1.NodeList if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) { nodes, err = client.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { if testutils.IsRetryableAPIError(err) { return false, nil } return false, err } return true, nil }) != nil { return "", err } if len(nodes.Items) == 0 { return "", fmt.Errorf("Unable to list nodes in cluster.") } for _, node := range nodes.Items { for _, address := range node.Status.Addresses { if address.Type == v1.NodeExternalIP { if address.Address != "" { return fmt.Sprintf("http://%v:%v", address.Address, nodePort), nil } } } } return "", fmt.Errorf("Failed to find external address for service %v", name) } // TODO(random-liu): Change this to be a member function of the framework. func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { return getPodLogsInternal(c, namespace, podName, containerName, false) } func getPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { return getPodLogsInternal(c, namespace, podName, containerName, true) } // utility function for gomega Eventually func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool) (string, error) { logs, err := c.CoreV1().RESTClient().Get(). Resource("pods"). Namespace(namespace). Name(podName).SubResource("log"). Param("container", containerName). Param("previous", strconv.FormatBool(previous)). Do(). Raw() if err != nil { return "", err } if err == nil && strings.Contains(string(logs), "Internal Error") { return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q.", string(logs)) } return string(logs), err } func GetGCECloud() (*gcecloud.GCECloud, error) { gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud) if !ok { return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider) } return gceCloud, nil } // EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created // are actually cleaned up. Currently only implemented for GCE/GKE. 
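// NOTE: illustrative usage sketch, not part of the original helpers above.
// It resolves the NodePort URL of a service and fetches the logs of one of
// its pods, e.g. when debugging a failed connectivity check; the service,
// pod and container names and the port are hypothetical.
func debugServiceExample(c clientset.Interface, ns string) error {
	nodePortURL, err := GetNodePortURL(c, ns, "my-service", 80)
	if err != nil {
		return err
	}
	logs, err := GetPodLogs(c, ns, "my-service-pod", "server")
	if err != nil {
		return err
	}
	Logf("service URL: %s, pod logs:\n%s", nodePortURL, logs)
	return nil
}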
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error { if TestContext.Provider == "gce" || TestContext.Provider == "gke" { return ensureGCELoadBalancerResourcesDeleted(ip, portRange) } return nil } func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error { gceCloud, err := GetGCECloud() if err != nil { return err } project := TestContext.CloudConfig.ProjectID region, err := gcecloud.GetGCERegion(TestContext.CloudConfig.Zone) if err != nil { return fmt.Errorf("could not get region for zone %q: %v", TestContext.CloudConfig.Zone, err) } return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) { service := gceCloud.ComputeServices().GA list, err := service.ForwardingRules.List(project, region).Do() if err != nil { return false, err } for _, item := range list.Items { if item.PortRange == portRange && item.IPAddress == ip { Logf("found a load balancer: %v", item) return false, nil } } return true, nil }) } // The following helper functions can block/unblock network from source // host to destination host by manipulating iptables rules. // These functions assume they can ssh to the source host. // // Caution: // It is recommended to pass IP addresses instead of hostnames. Using hostnames will cause iptables to // do a DNS lookup to resolve the name to an IP address, which will // slow down the test and cause it to fail if DNS is absent or broken. // // Suggested usage pattern: // func foo() { // ... // defer UnblockNetwork(from, to) // BlockNetwork(from, to) // ... // } // func BlockNetwork(from string, to string) { Logf("block network traffic from %s to %s", from, to) iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to) dropCmd := fmt.Sprintf("sudo iptables --insert %s", iptablesRule) if result, err := SSH(dropCmd, from, TestContext.Provider); result.Code != 0 || err != nil { LogSSHResult(result) Failf("Unexpected error: %v", err) } } func UnblockNetwork(from string, to string) { Logf("Unblock network traffic from %s to %s", from, to) iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to) undropCmd := fmt.Sprintf("sudo iptables --delete %s", iptablesRule) // Undrop command may fail if the rule has never been created. // In such a case we just lose 30 seconds, but the cluster is healthy. // But if the rule had been created and removing it failed, the node is broken and // not coming back. Subsequent tests will run on fewer nodes (some of the tests // may fail). Manual intervention is required in such a case (recreating the // cluster solves the problem too). err := wait.Poll(time.Millisecond*100, time.Second*30, func() (bool, error) { result, err := SSH(undropCmd, from, TestContext.Provider) if result.Code == 0 && err == nil { return true, nil } LogSSHResult(result) if err != nil { Logf("Unexpected error: %v", err) } return false, nil }) if err != nil { Failf("Failed to remove the iptable REJECT rule. Manual intervention is "+ "required on host %s: remove rule %s, if exists", from, iptablesRule) } } func isElementOf(podUID types.UID, pods *v1.PodList) bool { for _, pod := range pods.Items { if pod.UID == podUID { return true } } return false } // timeout for proxy requests. const proxyTimeout = 2 * time.Minute // NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client. func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.Result, error) { // proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
// This will leak a goroutine if proxy hangs. #22165 var result restclient.Result finished := make(chan struct{}) go func() { result = c.CoreV1().RESTClient().Get(). Resource("nodes"). SubResource("proxy"). Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)). Suffix(endpoint). Do() finished <- struct{}{} }() select { case <-finished: return result, nil case <-time.After(proxyTimeout): return restclient.Result{}, nil } } // GetKubeletPods retrieves the list of pods on the kubelet func GetKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) { return getKubeletPods(c, node, "pods") } // GetKubeletRunningPods retrieves the list of running pods on the kubelet. The pods // includes necessary information (e.g., UID, name, namespace for // pods/containers), but do not contain the full spec. func GetKubeletRunningPods(c clientset.Interface, node string) (*v1.PodList, error) { return getKubeletPods(c, node, "runningpods") } func getKubeletPods(c clientset.Interface, node, resource string) (*v1.PodList, error) { result := &v1.PodList{} client, err := NodeProxyRequest(c, node, resource) if err != nil { return &v1.PodList{}, err } if err = client.Into(result); err != nil { return &v1.PodList{}, err } return result, nil } // LaunchWebserverPod launches a pod serving http on port 8080 to act // as the target for networking connectivity checks. The ip address // of the created pod will be returned if the pod is launched // successfully. func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) { containerName := fmt.Sprintf("%s-container", podName) port := 8080 pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, }, Spec: v1.PodSpec{ Containers: []v1.Container{ { Name: containerName, Image: imageutils.GetE2EImage(imageutils.Porter), Env: []v1.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}}, Ports: []v1.ContainerPort{{ContainerPort: int32(port)}}, }, }, NodeName: nodeName, RestartPolicy: v1.RestartPolicyNever, }, } podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) _, err := podClient.Create(pod) ExpectNoError(err) ExpectNoError(f.WaitForPodRunning(podName)) createdPod, err := podClient.Get(podName, metav1.GetOptions{}) ExpectNoError(err) ip = net.JoinHostPort(createdPod.Status.PodIP, strconv.Itoa(port)) Logf("Target pod IP:port is %s", ip) return } type PingCommand string const ( IPv4PingCommand PingCommand = "ping" IPv6PingCommand PingCommand = "ping6" ) // CheckConnectivityToHost launches a pod to test connectivity to the specified // host. An error will be returned if the host is not reachable from the pod. // // An empty nodeName will use the schedule to choose where the pod is executed. 
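// NOTE: illustrative usage sketch, not part of the original helpers above.
// It logs how many pods the kubelet on a given node reports through the
// API server proxy, using GetKubeletPods; the helper name is hypothetical.
func logKubeletPodCountExample(c clientset.Interface, nodeName string) error {
	pods, err := GetKubeletPods(c, nodeName)
	if err != nil {
		return err
	}
	Logf("kubelet on %s reports %d pods", nodeName, len(pods.Items))
	return nil
}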
func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, pingCmd PingCommand, timeout int) error { contName := fmt.Sprintf("%s-container", podName) command := []string{ string(pingCmd), "-c", "3", // send 3 pings "-W", "2", // wait at most 2 seconds for a reply "-w", strconv.Itoa(timeout), host, } pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, }, Spec: v1.PodSpec{ Containers: []v1.Container{ { Name: contName, Image: BusyBoxImage, Command: command, }, }, NodeName: nodeName, RestartPolicy: v1.RestartPolicyNever, }, } podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) _, err := podClient.Create(pod) if err != nil { return err } err = WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name) if err != nil { logs, logErr := GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, contName) if logErr != nil { Logf("Warning: Failed to get logs from pod %q: %v", pod.Name, logErr) } else { Logf("pod %s/%s logs:\n%s", f.Namespace.Name, pod.Name, logs) } } return err } // CoreDump SSHs to the master and all nodes and dumps their logs into dir. // It shells out to cluster/log-dump/log-dump.sh to accomplish this. func CoreDump(dir string) { if TestContext.DisableLogDump { Logf("Skipping dumping logs from cluster") return } var cmd *exec.Cmd if TestContext.LogexporterGCSPath != "" { Logf("Dumping logs from nodes to GCS directly at path: %s", TestContext.LogexporterGCSPath) cmd = exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump", "log-dump.sh"), dir, TestContext.LogexporterGCSPath) } else { Logf("Dumping logs locally to: %s", dir) cmd = exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump", "log-dump.sh"), dir) } cmd.Env = append(os.Environ(), fmt.Sprintf("LOG_DUMP_SYSTEMD_SERVICES=%s", parseSystemdServices(TestContext.SystemdServices))) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { Logf("Error running cluster/log-dump/log-dump.sh: %v", err) } } // parseSystemdServices converts services separator from comma to space. func parseSystemdServices(services string) string { return strings.TrimSpace(strings.Replace(services, ",", " ", -1)) } func UpdatePodWithRetries(client clientset.Interface, ns, name string, update func(*v1.Pod)) (*v1.Pod, error) { for i := 0; i < 3; i++ { pod, err := client.CoreV1().Pods(ns).Get(name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("Failed to get pod %q: %v", name, err) } update(pod) pod, err = client.CoreV1().Pods(ns).Update(pod) if err == nil { return pod, nil } if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) { return nil, fmt.Errorf("Failed to update pod %q: %v", name, err) } } return nil, fmt.Errorf("Too many retries updating Pod %q", name) } func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) { pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) if err != nil { return []*v1.Pod{}, err } ignoreSelector := labels.SelectorFromSet(ignoreLabels) filtered := []*v1.Pod{} for _, p := range pods.Items { if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(p.Labels)) { continue } filtered = append(filtered, &p) } return filtered, nil } // RunCmd runs cmd using args and returns its stdout and stderr. It also outputs // cmd's stdout and stderr to their respective OS streams. func RunCmd(command string, args ...string) (string, string, error) { return RunCmdEnv(nil, command, args...) 
} // RunCmdEnv runs cmd with the provided environment and args and // returns its stdout and stderr. It also outputs cmd's stdout and // stderr to their respective OS streams. func RunCmdEnv(env []string, command string, args ...string) (string, string, error) { Logf("Running %s %v", command, args) var bout, berr bytes.Buffer cmd := exec.Command(command, args...) // We also output to the OS stdout/stderr to aid in debugging in case cmd // hangs and never returns before the test gets killed. // // This creates some ugly output because gcloud doesn't always provide // newlines. cmd.Stdout = io.MultiWriter(os.Stdout, &bout) cmd.Stderr = io.MultiWriter(os.Stderr, &berr) cmd.Env = env err := cmd.Run() stdout, stderr := bout.String(), berr.String() if err != nil { return "", "", fmt.Errorf("error running %s %v; got error %v, stdout %q, stderr %q", command, args, err, stdout, stderr) } return stdout, stderr, nil } // retryCmd runs cmd using args and retries it for up to SingleCallTimeout if // it returns an error. It returns stdout and stderr. func retryCmd(command string, args ...string) (string, string, error) { var err error stdout, stderr := "", "" wait.Poll(Poll, SingleCallTimeout, func() (bool, error) { stdout, stderr, err = RunCmd(command, args...) if err != nil { Logf("Got %v", err) return false, nil } return true, nil }) return stdout, stderr, err } // GetPodsScheduled returns a number of currently scheduled and not scheduled Pods. func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) { for _, pod := range pods.Items { if !masterNodes.Has(pod.Spec.NodeName) { if pod.Spec.NodeName != "" { _, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) Expect(scheduledCondition != nil).To(Equal(true)) Expect(scheduledCondition.Status).To(Equal(v1.ConditionTrue)) scheduledPods = append(scheduledPods, pod) } else { _, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) Expect(scheduledCondition != nil).To(Equal(true)) Expect(scheduledCondition.Status).To(Equal(v1.ConditionFalse)) if scheduledCondition.Reason == "Unschedulable" { notScheduledPods = append(notScheduledPods, pod) } } } } return } // WaitForStableCluster waits until all existing pods are scheduled and returns their amount. func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int { timeout := 10 * time.Minute startTime := time.Now() allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) ExpectNoError(err) // API server returns also Pods that succeeded. We need to filter them out. 
currentPods := make([]v1.Pod, 0, len(allPods.Items)) for _, pod := range allPods.Items { if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed { currentPods = append(currentPods, pod) } } allPods.Items = currentPods scheduledPods, currentlyNotScheduledPods := GetPodsScheduled(masterNodes, allPods) for len(currentlyNotScheduledPods) != 0 { time.Sleep(2 * time.Second) allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) ExpectNoError(err) scheduledPods, currentlyNotScheduledPods = GetPodsScheduled(masterNodes, allPods) if startTime.Add(timeout).Before(time.Now()) { Failf("Timed out after %v waiting for stable cluster.", timeout) break } } return len(scheduledPods) } // GetMasterAndWorkerNodesOrDie will return a list masters and schedulable worker nodes func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList) { nodes := &v1.NodeList{} masters := sets.NewString() all, _ := c.CoreV1().Nodes().List(metav1.ListOptions{}) for _, n := range all.Items { if system.IsMasterNode(n.Name) { masters.Insert(n.Name) } else if isNodeSchedulable(&n) && isNodeUntainted(&n) { nodes.Items = append(nodes.Items, n) } } return masters, nodes } func ListNamespaceEvents(c clientset.Interface, ns string) error { ls, err := c.CoreV1().Events(ns).List(metav1.ListOptions{}) if err != nil { return err } for _, event := range ls.Items { glog.Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message) } return nil } // E2ETestNodePreparer implements testutils.TestNodePreparer interface, which is used // to create/modify Nodes before running a test. type E2ETestNodePreparer struct { client clientset.Interface // Specifies how many nodes should be modified using the given strategy. // Only one strategy can be applied to a single Node, so there needs to // be at least <sum_of_keys> Nodes in the cluster. countToStrategy []testutils.CountToStrategy nodeToAppliedStrategy map[string]testutils.PrepareNodeStrategy } func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy) testutils.TestNodePreparer { return &E2ETestNodePreparer{ client: client, countToStrategy: countToStrategy, nodeToAppliedStrategy: make(map[string]testutils.PrepareNodeStrategy), } } func (p *E2ETestNodePreparer) PrepareNodes() error { nodes := GetReadySchedulableNodesOrDie(p.client) numTemplates := 0 for k := range p.countToStrategy { numTemplates += k } if numTemplates > len(nodes.Items) { return fmt.Errorf("Can't prepare Nodes. 
Got more templates than existing Nodes.") } index := 0 sum := 0 for _, v := range p.countToStrategy { sum += v.Count for ; index < sum; index++ { if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil { glog.Errorf("Aborting node preparation: %v", err) return err } p.nodeToAppliedStrategy[nodes.Items[index].Name] = v.Strategy } } return nil } func (p *E2ETestNodePreparer) CleanupNodes() error { var encounteredError error nodes := GetReadySchedulableNodesOrDie(p.client) for i := range nodes.Items { var err error name := nodes.Items[i].Name strategy, found := p.nodeToAppliedStrategy[name] if found { if err = testutils.DoCleanupNode(p.client, name, strategy); err != nil { glog.Errorf("Skipping cleanup of Node: failed update of %v: %v", name, err) encounteredError = err } } } return encounteredError } func GetClusterID(c clientset.Interface) (string, error) { cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{}) if err != nil || cm == nil { return "", fmt.Errorf("error getting cluster ID: %v", err) } clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster] providerID, providerIDExists := cm.Data[gcecloud.UIDProvider] if !clusterIDExists { return "", fmt.Errorf("cluster ID not set") } if providerIDExists { return providerID, nil } return clusterID, nil } // CleanupGCEResources cleans up GCE Service Type=LoadBalancer resources with // the given name. The name is usually the UUID of the Service prefixed with an // alpha-numeric character ('a') to work around cloudprovider rules. func CleanupGCEResources(c clientset.Interface, loadBalancerName, region, zone string) (retErr error) { gceCloud, err := GetGCECloud() if err != nil { return err } if region == "" { // Attempt to parse region from zone if no region is given. region, err = gcecloud.GetGCERegion(zone) if err != nil { return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err) } } if err := gceCloud.DeleteFirewall(gcecloud.MakeFirewallName(loadBalancerName)); err != nil && !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) { retErr = err } if err := gceCloud.DeleteRegionForwardingRule(loadBalancerName, region); err != nil && !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) { retErr = fmt.Errorf("%v\n%v", retErr, err) } if err := gceCloud.DeleteRegionAddress(loadBalancerName, region); err != nil && !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) { retErr = fmt.Errorf("%v\n%v", retErr, err) } clusterID, err := GetClusterID(c) if err != nil { retErr = fmt.Errorf("%v\n%v", retErr, err) return } hcNames := []string{gcecloud.MakeNodesHealthCheckName(clusterID)} hc, getErr := gceCloud.GetHttpHealthCheck(loadBalancerName) if getErr != nil && !IsGoogleAPIHTTPErrorCode(getErr, http.StatusNotFound) { retErr = fmt.Errorf("%v\n%v", retErr, getErr) return } if hc != nil { hcNames = append(hcNames, hc.Name) } if err := gceCloud.DeleteExternalTargetPoolAndChecks(&v1.Service{}, loadBalancerName, region, clusterID, hcNames...); err != nil && !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) { retErr = fmt.Errorf("%v\n%v", retErr, err) } return } // IsHTTPErrorCode returns true if the error is a google api // error matching the corresponding HTTP error code. func IsGoogleAPIHTTPErrorCode(err error, code int) bool { apiErr, ok := err.(*googleapi.Error) return ok && apiErr.Code == code } // getMaster populates the externalIP, internalIP and hostname fields of the master. // If any of these is unavailable, it is set to "". 
func getMaster(c clientset.Interface) Address { master := Address{} // Populate the internal IP. eps, err := c.CoreV1().Endpoints(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) if err != nil { Failf("Failed to get kubernetes endpoints: %v", err) } if len(eps.Subsets) != 1 || len(eps.Subsets[0].Addresses) != 1 { Failf("There are more than 1 endpoints for kubernetes service: %+v", eps) } master.internalIP = eps.Subsets[0].Addresses[0].IP // Populate the external IP/hostname. url, err := url.Parse(TestContext.Host) if err != nil { Failf("Failed to parse hostname: %v", err) } if net.ParseIP(url.Host) != nil { // TODO: Check that it is external IP (not having a reserved IP address as per RFC1918). master.externalIP = url.Host } else { master.hostname = url.Host } return master } // GetMasterAddress returns the hostname/external IP/internal IP as appropriate for e2e tests on a particular provider // which is the address of the interface used for communication with the kubelet. func GetMasterAddress(c clientset.Interface) string { master := getMaster(c) switch TestContext.Provider { case "gce", "gke": return master.externalIP case "aws": return awsMasterIP default: Failf("This test is not supported for provider %s and should be disabled", TestContext.Provider) } return "" } // GetNodeExternalIP returns node external IP concatenated with port 22 for ssh // e.g. 1.2.3.4:22 func GetNodeExternalIP(node *v1.Node) string { Logf("Getting external IP address for %s", node.Name) host := "" for _, a := range node.Status.Addresses { if a.Type == v1.NodeExternalIP { host = net.JoinHostPort(a.Address, sshPort) break } } if host == "" { Failf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses) } return host } // SimpleGET executes a get on the given url, returns error if non-200 returned. func SimpleGET(c *http.Client, url, host string) (string, error) { req, err := http.NewRequest("GET", url, nil) if err != nil { return "", err } req.Host = host res, err := c.Do(req) if err != nil { return "", err } defer res.Body.Close() rawBody, err := ioutil.ReadAll(res.Body) if err != nil { return "", err } body := string(rawBody) if res.StatusCode != http.StatusOK { err = fmt.Errorf( "GET returned http error %v", res.StatusCode) } return body, err } // PollURL polls till the url responds with a healthy http code. If // expectUnreachable is true, it breaks on first non-healthy http code instead. 
func PollURL(route, host string, timeout time.Duration, interval time.Duration, httpClient *http.Client, expectUnreachable bool) error { var lastBody string pollErr := wait.PollImmediate(interval, timeout, func() (bool, error) { var err error lastBody, err = SimpleGET(httpClient, route, host) if err != nil { Logf("host %v path %v: %v unreachable", host, route, err) return expectUnreachable, nil } Logf("host %v path %v: reached", host, route) return !expectUnreachable, nil }) if pollErr != nil { return fmt.Errorf("Failed to execute a successful GET within %v, Last response body for %v, host %v:\n%v\n\n%v\n", timeout, route, host, lastBody, pollErr) } return nil } func DescribeIng(ns string) { Logf("\nOutput of kubectl describe ing:\n") desc, _ := RunKubectl( "describe", "ing", fmt.Sprintf("--namespace=%v", ns)) Logf(desc) } // NewTestPod returns a pod that has the specified requests and limits func (f *Framework) NewTestPod(name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod { return &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, Spec: v1.PodSpec{ Containers: []v1.Container{ { Name: "pause", Image: GetPauseImageName(f.ClientSet), Resources: v1.ResourceRequirements{ Requests: requests, Limits: limits, }, }, }, }, } } // create empty file at given path on the pod. func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error { _, err := RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath)) return err } // GetAzureCloud returns azure cloud provider func GetAzureCloud() (*azure.Cloud, error) { cloud, ok := TestContext.CloudConfig.Provider.(*azure.Cloud) if !ok { return nil, fmt.Errorf("failed to convert CloudConfig.Provider to Azure: %#v", TestContext.CloudConfig.Provider) } return cloud, nil } func PrintSummaries(summaries []TestDataSummary, testBaseName string) { now := time.Now() for i := range summaries { Logf("Printing summary: %v", summaries[i].SummaryKind()) switch TestContext.OutputPrintType { case "hr": if TestContext.ReportDir == "" { Logf(summaries[i].PrintHumanReadable()) } else { // TODO: learn to extract test name and append it to the kind instead of timestamp. filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".txt") if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintHumanReadable()), 0644); err != nil { Logf("Failed to write file %v with test performance data: %v", filePath, err) } } case "json": fallthrough default: if TestContext.OutputPrintType != "json" { Logf("Unknown output type: %v. Printing JSON", TestContext.OutputPrintType) } if TestContext.ReportDir == "" { Logf("%v JSON\n%v", summaries[i].SummaryKind(), summaries[i].PrintJSON()) Logf("Finished") } else { // TODO: learn to extract test name and append it to the kind instead of timestamp. 
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".json") Logf("Writing to %s", filePath) if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintJSON()), 0644); err != nil { Logf("Failed to write file %v with test performance data: %v", filePath, err) } } } } } func DumpDebugInfo(c clientset.Interface, ns string) { sl, _ := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) for _, s := range sl.Items { desc, _ := RunKubectl("describe", "po", s.Name, fmt.Sprintf("--namespace=%v", ns)) Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc) l, _ := RunKubectl("logs", s.Name, fmt.Sprintf("--namespace=%v", ns), "--tail=100") Logf("\nLast 100 log lines of %v:\n%v", s.Name, l) } } // DsFromManifest reads a .json/yaml file and returns the daemonset in it. func DsFromManifest(url string) (*extensions.DaemonSet, error) { var controller extensions.DaemonSet Logf("Parsing ds from %v", url) var response *http.Response var err error for i := 1; i <= 5; i++ { response, err = http.Get(url) if err == nil && response.StatusCode == 200 { break } time.Sleep(time.Duration(i) * time.Second) } if err != nil { return nil, fmt.Errorf("failed to get url: %v", err) } if response.StatusCode != 200 { return nil, fmt.Errorf("invalid http response status: %v", response.StatusCode) } defer response.Body.Close() data, err := ioutil.ReadAll(response.Body) if err != nil { return nil, fmt.Errorf("failed to read html response body: %v", err) } json, err := utilyaml.ToJSON(data) if err != nil { return nil, fmt.Errorf("failed to parse data to json: %v", err) } err = runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), json, &controller) if err != nil { return nil, fmt.Errorf("failed to decode DaemonSet spec: %v", err) } return &controller, nil } // waitForServerPreferredNamespacedResources waits until server preferred namespaced resources could be successfully discovered. // TODO: Fix https://github.com/kubernetes/kubernetes/issues/55768 and remove the following retry. func waitForServerPreferredNamespacedResources(d discovery.DiscoveryInterface, timeout time.Duration) ([]*metav1.APIResourceList, error) { Logf("Waiting up to %v for server preferred namespaced resources to be successfully discovered", timeout) var resources []*metav1.APIResourceList if err := wait.PollImmediate(Poll, timeout, func() (bool, error) { var err error resources, err = d.ServerPreferredNamespacedResources() if err == nil || isDynamicDiscoveryError(err) { return true, nil } if !discovery.IsGroupDiscoveryFailedError(err) { return false, err } Logf("Error discoverying server preferred namespaced resources: %v, retrying in %v.", err, Poll) return false, nil }); err != nil { return nil, err } return resources, nil } // WaitForPersistentVolumeClaimDeleted waits for a PersistentVolumeClaim to be removed from the system until timeout occurs, whichever comes first. func WaitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error { Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { _, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{}) if err != nil { if apierrs.IsNotFound(err) { Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns) return nil } Logf("Failed to get claim %q in namespace %q, retrying in %v. 
Error: %v", pvcName, ns, Poll, err) } } return fmt.Errorf("PersistentVolumeClaim %s is not removed from the system within %v", pvcName, timeout) } func GetClusterZones(c clientset.Interface) (sets.String, error) { nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err) } // collect values of zone label from all nodes zones := sets.NewString() for _, node := range nodes.Items { if zone, found := node.Labels[kubeletapis.LabelZoneFailureDomain]; found { zones.Insert(zone) } } return zones, nil }
[ "\"KUBE_SSH_USER\"", "\"USER\"", "\"HOME\"", "\"AWS_SSH_KEY\"", "\"LOCAL_SSH_KEY\"", "\"KUBE_SSH_KEY\"" ]
[]
[ "KUBE_SSH_KEY", "LOCAL_SSH_KEY", "AWS_SSH_KEY", "KUBE_SSH_USER", "USER", "HOME" ]
[]
["KUBE_SSH_KEY", "LOCAL_SSH_KEY", "AWS_SSH_KEY", "KUBE_SSH_USER", "USER", "HOME"]
go
6
0
ml/pkg/util/utils.go
package util import ( "net" "os" "strconv" ) // Finds a free port in the current machine/container func FindFreePort() (int, error) { listener, err := net.Listen("tcp", ":0") if err != nil { return 0, err } port := listener.Addr().(*net.TCPAddr).Port err = listener.Close() if err != nil { return 0, err } return port, nil } func IsDebugEnv() bool { d := os.Getenv("DEBUG_ENV") if len(d) == 0 { return false } debug, err := strconv.ParseBool(d) if err != nil { panic(err) } return debug } func LimitParallelism() bool { d := os.Getenv("LIMIT_PARALLELISM") if len(d) == 0 { return false } debug, err := strconv.ParseBool(d) if err != nil { panic(err) } return debug }
[ "\"DEBUG_ENV\"", "\"LIMIT_PARALLELISM\"" ]
[]
[ "DEBUG_ENV", "LIMIT_PARALLELISM" ]
[]
["DEBUG_ENV", "LIMIT_PARALLELISM"]
go
2
0
producer/main.go
package main import ( "fmt" "os" "github.com/confluentinc/confluent-kafka-go/kafka" ) var ( TOPIC string = os.Getenv("TOPIC") BROKERS string = os.Getenv("BROKERS") producer *kafka.Producer ) func init() { if BROKERS == "" { panic("Missing environment variable: BROKERS") } if TOPIC == "" { panic("Missing environment variable: TOPIC") } config := kafka.ConfigMap{ "bootstrap.servers": BROKERS, //"enable.idempotence": true, //"acks": "all", "retries": 10, } var err error producer, err = kafka.NewProducer(&config) if err != nil { panic(err) } } func main() { termChan := make(chan bool, 1) doneChan := make(chan bool) go listenEvents(termChan, doneChan, producer) for i := 0; i < 10; i++ { msg := fmt.Sprintf(`{"msg": "kafka with Golang - %v"}`, i) sendMessage(fmt.Sprintf("msg%v", i), msg) } closeProducer(producer, termChan, doneChan) } func sendMessage(key, data string) { var headers []kafka.Header headers = append(headers, kafka.Header{ Key: "origin", Value: []byte("producer"), }) message := kafka.Message{ TopicPartition: kafka.TopicPartition{ Topic: &TOPIC, Partition: kafka.PartitionAny, }, Headers: headers, Key: []byte(key), Value: []byte(data), } if err := producer.Produce(&message, nil); err != nil { fmt.Printf("Error on producing message! %v", err.Error()) } } func closeProducer(producer *kafka.Producer, termChan, doneChan chan bool) { // Flush the Producer queue timeOut := 10000 if count := producer.Flush(timeOut); count > 0 { fmt.Printf("\nFailed to flush messages. %d message(s) remain\n", count) } else { fmt.Println("All messages flushed from the queue!") } // Stop listening to events and close the producer termChan <- true <-doneChan producer.Close() } // Handle any events that we get func listenEvents(termChan, doneChan chan bool, producer *kafka.Producer) { doTerm := false for !doTerm { select { case ev := <-producer.Events(): switch ev.(type) { case *kafka.Message: km := ev.(*kafka.Message) if km.TopicPartition.Error != nil { fmt.Printf("Failed to send message '%v' to topic '%v'\n\tErr: %v", string(km.Value), string(*km.TopicPartition.Topic), km.TopicPartition.Error) } else { fmt.Printf("Message '%v' delivered to topic '%v' (partition %d at offset %d)\n", string(km.Value), string(*km.TopicPartition.Topic), km.TopicPartition.Partition, km.TopicPartition.Offset) } case kafka.Error: em := ev.(kafka.Error) fmt.Printf("Error:\n\t%v\n", em) } case <-termChan: doTerm = true } } close(doneChan) }
[ "\"TOPIC\"", "\"BROKERS\"" ]
[]
[ "TOPIC", "BROKERS" ]
[]
["TOPIC", "BROKERS"]
go
2
0
tests/__init__.py
import unittest from redis import StrictRedis from logbook import NullHandler from rq import push_connection, pop_connection def find_empty_redis_database(): """Tries to connect to a random Redis database (starting from 4), and will use/connect it when no keys are in there. """ for dbnum in range(4, 17): testconn = StrictRedis(db=dbnum) empty = len(testconn.keys('*')) == 0 if empty: return testconn assert False, 'No empty Redis database found to run tests in.' def slow(f): import os from functools import wraps @wraps(f) def _inner(*args, **kwargs): if os.environ.get('ONLY_RUN_FAST_TESTS'): f(*args, **kwargs) return _inner class RQTestCase(unittest.TestCase): """Base class to inherit test cases from for RQ. It sets up the Redis connection (available via self.testconn), turns off logging to the terminal and flushes the Redis database before and after running each test. Also offers assertQueueContains(queue, that_func) assertion method. """ @classmethod def setUpClass(cls): # Set up connection to Redis testconn = find_empty_redis_database() push_connection(testconn) # Store the connection (for sanity checking) cls.testconn = testconn # Shut up logbook cls.log_handler = NullHandler() cls.log_handler.push_thread() def setUp(self): # Flush beforewards (we like our hygiene) self.testconn.flushdb() def tearDown(self): # Flush afterwards self.testconn.flushdb() # Implement assertIsNotNone for Python runtimes < 2.7 or < 3.1 if not hasattr(unittest.TestCase, 'assertIsNotNone'): def assertIsNotNone(self, value, *args): self.assertNotEqual(value, None, *args) @classmethod def tearDownClass(cls): cls.log_handler.pop_thread() # Pop the connection to Redis testconn = pop_connection() assert testconn == cls.testconn, 'Wow, something really nasty ' \ 'happened to the Redis connection stack. Check your setup.'
[]
[]
[ "ONLY_RUN_FAST_TESTS" ]
[]
["ONLY_RUN_FAST_TESTS"]
python
1
0
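A minimal, hypothetical sketch of the env-var gate that the `slow` helper in the record above implements, using unittest.skipIf instead of the hand-rolled wrapper (which, as written, calls the wrapped test only when ONLY_RUN_FAST_TESTS is set); the test case and assertion below are illustrative only.

import os
import unittest

# Skip tests marked slow whenever ONLY_RUN_FAST_TESTS is present in the environment.
slow = unittest.skipIf(
    bool(os.environ.get('ONLY_RUN_FAST_TESTS')),
    'skipping slow test because ONLY_RUN_FAST_TESTS is set',
)

class ExampleTests(unittest.TestCase):  # hypothetical test case, not part of the suite above
    @slow
    def test_heavy_computation(self):
        self.assertEqual(sum(range(10**6)), 499999500000)

if __name__ == '__main__':
    unittest.main()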
web/tests/functional/report_viewer_api/test_get_run_results.py
# # ------------------------------------------------------------------------- # # Part of the CodeChecker project, under the Apache License v2.0 with # LLVM Exceptions. See LICENSE for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # # ------------------------------------------------------------------------- """ Tests for getting the run results. """ import logging import os import re import unittest import codecs from codechecker_api.codeCheckerDBAccess_v6.ttypes import Encoding, Order, \ ReportFilter, SortMode, SortType, RunSortMode, RunSortType from codechecker_web.shared import convert from libtest.debug_printer import print_run_results from libtest.thrift_client_to_db import get_all_run_results from libtest.result_compare import find_all from libtest import env class RunResults(unittest.TestCase): _ccClient = None def setUp(self): test_workspace = os.environ['TEST_WORKSPACE'] test_class = self.__class__.__name__ print('Running ' + test_class + ' tests in ' + test_workspace) # Get the clang version which is tested. self._clang_to_test = env.clang_to_test() self._testproject_data = env.setup_test_proj_cfg(test_workspace) self.assertIsNotNone(self._testproject_data) self._cc_client = env.setup_viewer_client(test_workspace) self.assertIsNotNone(self._cc_client) # Get the run names which belong to this test. run_names = env.get_run_names(test_workspace) sort_mode = RunSortMode(RunSortType.DATE, Order.ASC) runs = self._cc_client.getRunData(None, None, 0, sort_mode) test_runs = [run for run in runs if run.name in run_names] self._runid = test_runs[0].runId def __check_bug_path_order(self, run_results, order): """ Checks the bug path length order of the run results. :param run_results: Run results. :param order: If it is a negative value, it checks that bug path length of the results order is descending otherwise ascending. """ prev = None for res in run_results: self.assertGreater(res.bugPathLength, 0) if not prev: prev = res continue if order == Order.ASC: self.assertGreaterEqual(res.bugPathLength, prev.bugPathLength) else: self.assertLessEqual(res.bugPathLength, prev.bugPathLength) def test_get_run_results_no_filter(self): """ Get all the run results without any filtering. """ runid = self._runid logging.debug('Get all run results from the db for runid: ' + str(runid)) run_result_count = self._cc_client.getRunResultCount([runid], None, None) self.assertTrue(run_result_count) run_results = get_all_run_results(self._cc_client, runid) print_run_results(run_results) self.assertIsNotNone(run_results) self.assertEqual(run_result_count, len(run_results)) def test_get_run_results_checker_id_and_file_path(self): """ Test if all the bugs are found based on the test project configuration. 
""" runid = self._runid logging.debug('Get all run results from the db for runid: ' + str(runid)) run_result_count = self._cc_client.getRunResultCount([runid], None, None) self.assertTrue(run_result_count) run_results = get_all_run_results(self._cc_client, runid) self.assertIsNotNone(run_results) self.assertEqual(run_result_count, len(run_results)) test_project_results = self._testproject_data[ self._clang_to_test]['bugs'] for r in test_project_results: print(r) not_found = find_all(run_results, test_project_results) print_run_results(run_results) if not_found: print("===================") print('Not found bugs:') for bug in not_found: print(bug) print("===================") self.assertEqual(len(not_found), 0) def test_get_source_file_content(self): """ Test getting the source file content stored to the database. Test unicode support the stored file can be decoded properly compare results form the database to the original file. """ runid = self._runid report_filter = ReportFilter(checkerName=['*'], filepath=['*.c*']) run_result_count = self._cc_client.getRunResultCount([runid], report_filter, None) self.assertTrue(run_result_count) run_results = get_all_run_results(self._cc_client, runid, [], report_filter) self.assertIsNotNone(run_results) for run_res in run_results: self.assertTrue(re.match(r'.*\.c(pp)?$', run_res.checkedFile)) print('Getting the content of ' + run_res.checkedFile) file_data = self._cc_client.getSourceFileData(run_res.fileId, True, None) self.assertIsNotNone(file_data) file_content1 = file_data.fileContent self.assertIsNotNone(file_content1) with codecs.open(run_res.checkedFile, 'r', encoding='utf-8', errors='ignore') as source_file: file_content2 = source_file.read() self.assertEqual(file_content1, file_content2) file_data_b64 = self._cc_client.getSourceFileData( run_res.fileId, True, Encoding.BASE64) self.assertIsNotNone(file_data_b64) file_content1_b64 = convert.from_b64(file_data_b64.fileContent) self.assertIsNotNone(file_content1_b64) self.assertEqual(file_content1_b64, file_content2) print('got ' + str(len(run_results)) + ' files') self.assertEqual(run_result_count, len(run_results)) def test_get_source_file_content_latin1_encoding(self): """ Test if the source file was saved with latin1 encoding. Test if the source file can be read back from the database even if it was not saved with utf-8 encoding. 
""" runid = self._runid report_filter = ReportFilter(checkerName=['*'], filepath=['*call_and_message.cpp*']) run_result_count = self._cc_client.getRunResultCount([runid], report_filter, None) self.assertTrue(run_result_count) run_results = get_all_run_results(self._cc_client, runid, [], report_filter) self.assertIsNotNone(run_results) self.assertIsNotNone(run_results) for run_res in run_results: print('Getting the content of ' + run_res.checkedFile) file_data = self._cc_client.getSourceFileData(run_res.fileId, True, None) self.assertIsNotNone(file_data) file_content1 = file_data.fileContent self.assertIsNotNone(file_content1) with codecs.open(run_res.checkedFile, 'r', encoding='utf-8', errors='ignore') as source_file: file_content2 = source_file.read() self.assertEqual(file_content1, file_content2) file_data_b64 = self._cc_client.getSourceFileData( run_res.fileId, True, Encoding.BASE64) self.assertIsNotNone(file_data_b64) file_content1_b64 = convert.from_b64(file_data_b64.fileContent) self.assertIsNotNone(file_content1_b64) self.assertEqual(file_content1_b64, file_content2) print('got ' + str(len(run_results)) + ' files') self.assertEqual(run_result_count, len(run_results)) def test_get_run_results_severity_sort(self): """ Get the run results and sort them by severity and filename ASC. """ runid = self._runid logging.debug('Get all run results from the db for runid: ' + str(runid)) sort_mode1 = SortMode(SortType.SEVERITY, Order.ASC) sort_mode2 = SortMode(SortType.FILENAME, Order.ASC) sort_types = [sort_mode1, sort_mode2] run_result_count = self._cc_client.getRunResultCount([runid], None, None) self.assertTrue(run_result_count) run_results = get_all_run_results(self._cc_client, runid, sort_types, None) self.assertIsNotNone(run_results) for i in range(run_result_count - 1): bug1 = run_results[i] bug2 = run_results[i + 1] self.assertTrue(bug1.severity <= bug2.severity) self.assertTrue((bug1.severity != bug2.severity) or (bug1.checkedFile <= bug2.checkedFile)) print_run_results(run_results) self.assertEqual(run_result_count, len(run_results)) def test_get_run_results_sorted2(self): """ Get the run results and sort them by file name and checker name ASC. 
""" runid = self._runid logging.debug('Get all run results from the db for runid: ' + str(runid)) sortMode1 = SortMode(SortType.FILENAME, Order.ASC) sortMode2 = SortMode(SortType.CHECKER_NAME, Order.ASC) sort_types = [sortMode1, sortMode2] run_result_count = self._cc_client.getRunResultCount([runid], None, None) self.assertTrue(run_result_count) run_results = get_all_run_results(self._cc_client, runid, sort_types, None) self.assertIsNotNone(run_results) print_run_results(run_results) self.assertEqual(run_result_count, len(run_results)) for i in range(run_result_count - 1): bug1 = run_results[i] bug2 = run_results[i + 1] self.assertTrue(bug1.checkedFile <= bug2.checkedFile) self.assertTrue((bug1.checkedFile != bug2.checkedFile) or (bug1.line <= bug2.line) or (bug1.checkerId <= bug2.checkerId)) def test_bug_path_length(self): runid = self._runid sortMode1 = SortMode(SortType.BUG_PATH_LENGTH, Order.ASC) sortMode2 = SortMode(SortType.BUG_PATH_LENGTH, Order.DESC) simple_filter = ReportFilter() unique_filter = ReportFilter(isUnique=True) run_results = self._cc_client.getRunResults([runid], 100, 0, [sortMode1], simple_filter, None, False) self.__check_bug_path_order(run_results, Order.ASC) run_results = self._cc_client.getRunResults([runid], 100, 0, [sortMode2], unique_filter, None, False) self.__check_bug_path_order(run_results, Order.DESC) def test_report_details(self): """ Get run results and check that report details are correctly set. """ runid = self._runid simple_filter = ReportFilter() run_results = self._cc_client.getRunResults([runid], 100, 0, None, simple_filter, None, True) self.assertTrue(any(res.details for res in run_results)) def test_unqiue_report_details(self): """ Get uniqued run results and check that report details are correctly set. """ runid = self._runid unique_filter = ReportFilter(isUnique=True) run_results = self._cc_client.getRunResults([runid], 100, 0, None, unique_filter, None, True) self.assertTrue(any(res.details for res in run_results))
[]
[]
[ "TEST_WORKSPACE" ]
[]
["TEST_WORKSPACE"]
python
1
0
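A hedged sketch of how TEST_WORKSPACE, read at the top of setUp in the record above, might be supplied to a run of that suite; the temporary directory and the pytest invocation are assumptions, not how the CodeChecker harness actually launches it.

import os
import subprocess
import sys
import tempfile

# Create a scratch workspace and expose it to the test process via TEST_WORKSPACE.
workspace = tempfile.mkdtemp(prefix='report_viewer_api_')
env = dict(os.environ, TEST_WORKSPACE=workspace)

# Running through pytest is an assumption; adjust to the project's real entry point.
subprocess.run(
    [sys.executable, '-m', 'pytest',
     'web/tests/functional/report_viewer_api/test_get_run_results.py'],
    env=env,
    check=False,
)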
src/main/java/io/github/pulsebeat02/emcinstallers/OS.java
/** * MIT License * * <p>Copyright (c) 2021 Brandon Li * * <p>Permission is hereby granted, free of charge, to any person obtaining a copy of this software * and associated documentation files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, publish, distribute, * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * <p>The above copyright notice and this permission notice shall be included in all copies or * substantial portions of the Software. * * <p>THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ package io.github.pulsebeat02.emcinstallers; import java.nio.file.Path; import java.nio.file.Paths; import java.util.Locale; public enum OS { LINUX, FREEBSD, MAC, WINDOWS; private static final String OS_ARCH; private static final OS CURRENT; private static final boolean BITS_64; private static final boolean ARM; private static final Path EXECUTABLES; static { OS_ARCH = System.getProperty("os.arch").toLowerCase(Locale.ROOT); CURRENT = getOperatingSystem0(); BITS_64 = is64Bits0(); ARM = isArm0(); EXECUTABLES = getPath0(); } private static OS getOperatingSystem0() { final String os = System.getProperty("os.name").toLowerCase(); if (os.contains("win")) { return WINDOWS; } else if (os.contains("mac")) { return MAC; } else if (os.contains("freebsd")) { return FREEBSD; } else { return LINUX; } } private static boolean is64Bits0() { if (CURRENT == WINDOWS) { final String arch = System.getenv("PROCESSOR_ARCHITECTURE"); final String wow64Arch = System.getenv("PROCESSOR_ARCHITEW6432"); return arch != null && arch.endsWith("64") || wow64Arch != null && wow64Arch.endsWith("64"); } else { return OS_ARCH.contains("64"); } } private static Path getPath0() { return Paths.get(System.getProperty("user.home"), "static-emc"); } private static boolean isArm0() { return OS_ARCH.contains("arm"); } public static OS getOperatingSystem() { return CURRENT; } public static boolean isBits64() { return BITS_64; } public static boolean isArm() { return ARM; } public static Path getExecutablePath() { return EXECUTABLES; } }
[ "\"PROCESSOR_ARCHITECTURE\"", "\"PROCESSOR_ARCHITEW6432\"" ]
[]
[ "PROCESSOR_ARCHITEW6432", "PROCESSOR_ARCHITECTURE" ]
[]
["PROCESSOR_ARCHITEW6432", "PROCESSOR_ARCHITECTURE"]
java
2
0
autobahn/wamp/test/test_runner.py
############################################################################### # # The MIT License (MIT) # # Copyright (c) Crossbar.io Technologies GmbH # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # ############################################################################### import os import unittest from txaio.testutil import replace_loop if os.environ.get('USE_TWISTED', False): from unittest.mock import patch from zope.interface import implementer from twisted.internet.interfaces import IReactorTime @implementer(IReactorTime) class FakeReactor(object): ''' This just fakes out enough reactor methods so .run() can work. ''' stop_called = False def __init__(self, to_raise): self.stop_called = False self.to_raise = to_raise self.delayed = [] def run(self, *args, **kw): raise self.to_raise def stop(self): self.stop_called = True def callLater(self, delay, func, *args, **kwargs): self.delayed.append((delay, func, args, kwargs)) def connectTCP(self, *args, **kw): raise RuntimeError("ConnectTCP shouldn't get called") class TestWampTwistedRunner(unittest.TestCase): # XXX should figure out *why* but the test_protocol timeout # tests fail if we *don't* patch out this txaio stuff. So, # presumably it's messing up some global state that both tests # implicitly depend on ... @patch('txaio.use_twisted') @patch('txaio.start_logging') @patch('txaio.config') def test_connect_error(self, *args): ''' Ensure the runner doesn't swallow errors and that it exits the reactor properly if there is one. ''' try: from autobahn.twisted.wamp import ApplicationRunner from twisted.internet.error import ConnectionRefusedError # the 'reactor' member doesn't exist until we import it from twisted.internet import reactor # noqa: F401 except ImportError: raise unittest.SkipTest('No twisted') runner = ApplicationRunner('ws://localhost:1', 'realm') exception = ConnectionRefusedError("It's a trap!") with patch('twisted.internet.reactor', FakeReactor(exception)) as mockreactor: self.assertRaises( ConnectionRefusedError, # pass a no-op session-creation method runner.run, lambda _: None, start_reactor=True ) self.assertTrue(mockreactor.stop_called) else: import asyncio from unittest.mock import patch, Mock from autobahn.asyncio.wamp import ApplicationRunner class TestApplicationRunner(unittest.TestCase): ''' Test the autobahn.asyncio.wamp.ApplicationRunner class. 
''' def _assertRaisesRegex(self, exception, error, *args, **kw): try: self.assertRaisesRegex except AttributeError: f = self.assertRaisesRegexp else: f = self.assertRaisesRegex f(exception, error, *args, **kw) def test_explicit_SSLContext(self): ''' Ensure that loop.create_connection is called with the exact SSL context object that is passed (as ssl) to the __init__ method of ApplicationRunner. ''' with replace_loop(Mock()) as loop: with patch.object(asyncio, 'get_event_loop', return_value=loop): loop.run_until_complete = Mock(return_value=(Mock(), Mock())) ssl = {} runner = ApplicationRunner('ws://127.0.0.1:8080/ws', 'realm', ssl=ssl) runner.run('_unused_') self.assertIs(ssl, loop.create_connection.call_args[1]['ssl']) def test_omitted_SSLContext_insecure(self): ''' Ensure that loop.create_connection is called with ssl=False if no ssl argument is passed to the __init__ method of ApplicationRunner and the websocket URL starts with "ws:". ''' with replace_loop(Mock()) as loop: with patch.object(asyncio, 'get_event_loop', return_value=loop): loop.run_until_complete = Mock(return_value=(Mock(), Mock())) runner = ApplicationRunner('ws://127.0.0.1:8080/ws', 'realm') runner.run('_unused_') self.assertIs(False, loop.create_connection.call_args[1]['ssl']) def test_omitted_SSLContext_secure(self): ''' Ensure that loop.create_connection is called with ssl=True if no ssl argument is passed to the __init__ method of ApplicationRunner and the websocket URL starts with "wss:". ''' with replace_loop(Mock()) as loop: with patch.object(asyncio, 'get_event_loop', return_value=loop): loop.run_until_complete = Mock(return_value=(Mock(), Mock())) runner = ApplicationRunner('wss://127.0.0.1:8080/wss', 'realm') runner.run(self.fail) self.assertIs(True, loop.create_connection.call_args[1]['ssl']) def test_conflict_SSL_True_with_ws_url(self): ''' ApplicationRunner must raise an exception if given an ssl value of True but only a "ws:" URL. ''' with replace_loop(Mock()) as loop: loop.run_until_complete = Mock(return_value=(Mock(), Mock())) runner = ApplicationRunner('ws://127.0.0.1:8080/wss', 'realm', ssl=True) error = (r'^ssl argument value passed to ApplicationRunner ' r'conflicts with the "ws:" prefix of the url ' r'argument\. Did you mean to use "wss:"\?$') self._assertRaisesRegex(Exception, error, runner.run, '_unused_') def test_conflict_SSLContext_with_ws_url(self): ''' ApplicationRunner must raise an exception if given an ssl value that is an instance of SSLContext, but only a "ws:" URL. ''' import ssl try: # Try to create an SSLContext, to be as rigorous as we can be # by avoiding making assumptions about the ApplicationRunner # implementation. If we happen to be on a Python that has no # SSLContext, we pass ssl=True, which will simply cause this # test to degenerate to the behavior of # test_conflict_SSL_True_with_ws_url (above). In fact, at the # moment (2015-05-10), none of this matters because the # ApplicationRunner implementation does not check to require # that its ssl argument is either a bool or an SSLContext. But # that may change, so we should be careful. ssl.create_default_context except AttributeError: context = True else: context = ssl.create_default_context() with replace_loop(Mock()) as loop: loop.run_until_complete = Mock(return_value=(Mock(), Mock())) runner = ApplicationRunner('ws://127.0.0.1:8080/wss', 'realm', ssl=context) error = (r'^ssl argument value passed to ApplicationRunner ' r'conflicts with the "ws:" prefix of the url ' r'argument\. 
Did you mean to use "wss:"\?$') self._assertRaisesRegex(Exception, error, runner.run, '_unused_')
[]
[]
[ "USE_TWISTED" ]
[]
["USE_TWISTED"]
python
1
0
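A small sketch of the USE_TWISTED switch used at import time in the record above; the module only checks truthiness of the string, so any non-empty value (including '0') selects the Twisted branch. The function name below is hypothetical.

import os

def networking_backend():
    """Mirror the module-level check: a non-empty USE_TWISTED selects Twisted,
    otherwise the asyncio test classes are defined."""
    return 'twisted' if os.environ.get('USE_TWISTED', False) else 'asyncio'

if __name__ == '__main__':
    # e.g. USE_TWISTED=1 python backend_check.py  ->  twisted
    print(networking_backend())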
cyclopeps/tools/params.py
import os, sys import uuid try: from mpi4py import MPI # MPI Global Variables COMM = MPI.COMM_WORLD RANK = COMM.Get_rank() SIZE = COMM.size except: RANK = 0 SIZE = 1 # Constrict Printing to only rank 0 if RANK != 0: sys.stdout = open(os.devnull,'w') sys.stderr = open(os.devnull,'w') # Temporary directories for calculation TMPDIR = os.environ.get('TMPDIR','.') TMPDIR = os.environ.get('CYCLOPEPS_TMPDIR',TMPDIR) DIRID = str(uuid.uuid1()).replace('/','_') os.mkdir(TMPDIR+DIRID) # Printing Global Variables DEBUG = False DEBUG_MEM = False VERBOSE = 1 VERBOSE_TIME = 3 VERBOSE_MEM = 10 OUTPUT_DIGITS = 5 OUTPUT_COLS = 5 # Eigenproblem parameters DAVIDSON_TOL = 1e-16 DAVIDSON_MAX_ITER = 100 USE_PRECOND = False ARNOLDI_TOL = 1e-8 ARNOLDI_MAX_ITER = 100 # Memory Global Variables try: import psutil _,av,_,_,_,_,_,_,_,_,_ = psutil.virtual_memory() MAX_MEMORY = av except: pass
[]
[]
[ "CYCLOPEPS_TMPDIR", "TMPDIR" ]
[]
["CYCLOPEPS_TMPDIR", "TMPDIR"]
python
2
0
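A minimal sketch of the temporary-directory fallback in the params module above: CYCLOPEPS_TMPDIR overrides TMPDIR, which defaults to the current directory. The os.path.join and makedirs calls are an illustrative variation; the module itself concatenates the two strings and calls os.mkdir.

import os
import uuid

# CYCLOPEPS_TMPDIR wins over TMPDIR; '.' is the last resort.
base = os.environ.get('CYCLOPEPS_TMPDIR', os.environ.get('TMPDIR', '.'))
run_dir = os.path.join(base, str(uuid.uuid1()).replace('/', '_'))
os.makedirs(run_dir, exist_ok=True)
print('scratch directory:', run_dir)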
implementation codes/quartic oscillator/setupC.py
from distutils.core import setup, Extension from math import pi import numpy as np import os, sys, shutil, glob import argparse parser = argparse.ArgumentParser() parser.add_argument('--lambda', default= pi/25., type=float, metavar='\lambda', help='the strength of the quartic anharmonic oscillator') parser.add_argument('--x_max', default=8.5, type=float, metavar='x_{max}', help='the distance from the center to the border of the simulation space') parser.add_argument('--grid_size', default = 0.1, type=float, metavar='h', help='the grid size of the discretized simulation space') parser.add_argument('--mass', default = 1./pi, type=float, metavar='m', help='the mass of the simulated particle') parser.add_argument('--moment', default = 5, type=int, help='the order of the distribution moments to compute in the compiled function "get_moments"') args = parser.parse_args() # Please rewrite the following arguments based on your OS and your prescription of compilation if necessary # Please refer to https://software.intel.com/en-us/articles/intel-mkl-link-line-advisor . Usually Python uses GCC as the default compiler, and then GNU compiler should be selected. The arguments starting with "-I" mean to "include" those directories. link_options = ['-Wl,--start-group', os.environ['MKLROOT']+'/lib/intel64/libmkl_intel_ilp64.a', os.environ['MKLROOT']+'/lib/intel64/libmkl_intel_thread.a', os.environ['MKLROOT']+'/lib/intel64/libmkl_core.a', '-Wl,--end-group', '-liomp5', '-lpthread', '-lm', '-ldl'] compiler_options = ['-DMKL_ILP64','-m64'] ############################################################################## # The following is the compilation program. def compile(x_max, grid_size, mass, lambda_, moment): assert lambda_>= 0., 'quartic oscillator strength \lambda should be positive' assert mass> 0., 'the mass should be positive' assert x_max> 0., 'the size of the simulation space (2 * x_max) should be positive' assert grid_size> 0., 'the simulation grid size should be positive' assert moment >= 1, 'the order of distribution moments should be larger than 1' # It invokes the native "distutils.core" of Python by setting the commandline arguments stored in sys.argv to the desired one ("build") # set the "build" command original_args_exist = False if len(sys.argv)>=2: original_args=sys.argv[1:] sys.argv = [sys.argv[0], "build"] original_args_exist = True else: sys.argv.append("build") os.environ["MKL_NUM_THREADS"] = "1" package_name = 'simulation' module1 = Extension(package_name,language='c++', define_macros = [('X_MAX', str(x_max)), ('GRID_SIZE', repr(grid_size)), ('MASS',repr(mass)), ('LAMBDA', repr(lambda_)), ('MOMENT', str(moment))], # pass the defining parameters include_dirs = [np.get_include(), os.path.join(os.environ['MKLROOT'],'include')], sources = ['simulation_quart.cpp'], extra_compile_args = compiler_options+['-Ofast','-funroll-loops', '-march=native', '-flto','-fuse-linker-plugin','--param', 'ipcp-unit-growth=2000', '-std=c++14','-fno-stack-protector','-fmerge-all-constants'], extra_link_args = link_options+['-Ofast','-fdelete-null-pointer-checks','-funroll-loops', '-march=native', '-fwhole-program','-flto','-fuse-linker-plugin','--param', 'ipcp-unit-growth=2000','-std=c++14','-fno-stack-protector','-fmerge-all-constants']) setup (name = package_name, version = '1.0', description = 'do simulation steps', author = 'Wang Zhikang', ext_modules = [module1]) # copy the compiled C module to the root to import compiled_files = glob.glob('build/**/*') for compiled_file in compiled_files: if 'temp' not 
in compiled_file: shutil.move(compiled_file, os.path.basename(compiled_file), copy_function=shutil.copy2) # restore the original commandline arguments if original_args_exist: sys.argv = [sys.argv[0]]+original_args else: sys.argv.pop(1) compile(x_max=args.x_max, grid_size=args.grid_size, mass=args.mass, lambda_=args.__dict__['lambda'], moment=args.moment)
[]
[]
[ "MKL_NUM_THREADS", "MKLROOT" ]
[]
["MKL_NUM_THREADS", "MKLROOT"]
python
2
0
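Because the build script above indexes os.environ['MKLROOT'] directly (raising KeyError when it is unset) and pins MKL_NUM_THREADS to 1, a pre-flight check along these lines can fail with a clearer message; the wording and the setdefault call are assumptions, not part of the script.

import os
import sys

# The extension links MKL static libraries found under $MKLROOT.
mklroot = os.environ.get('MKLROOT')
if not mklroot:
    sys.exit('MKLROOT is not set; load the Intel MKL environment before building.')

# The script itself assigns MKL_NUM_THREADS = "1" unconditionally before compiling.
os.environ.setdefault('MKL_NUM_THREADS', '1')
print('Using MKL from', mklroot)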
snakeobjects/cli.py
import os import sys from typing import List, Optional helpData = { "version": "prints the version", "help": '''sobjects help [command] Shows description of the command line interface. Without an argument, prints help for all the commands. If one argument is given, it should be one of the available commands and help for the given command is shown.''', "describe": '''Prints basic information about the project and the pipeline that are used.''', "prepareTest": '''sobjects prepareTest [<arguments to build_object_graph.py>] Uses the build_object_graph.py in the pipeline directory to create a new object graph and prints statistics of the current and the new object graph. The project is not modified at all.''', "buildObjectGraph": '''sobjects buildObjectGraph [<arguments to build_object_graph.py>] Uses the build_object_graph.py in the pipeline directory to create a new object graph and stores it on the file OG.json in the project directory.''', "createSnakefile": '''sobjects createSnakefile First, the command finds the list of implemented object types by obtaining the names (without the .snakefile suffix) of *.snakefile files in the pipeline directory. The createSnakefile command then checks if the object graph of the current project uses object types that have no corresponding <object type>.snakefile. In such cases, it creates dummy *.snakefiles for the new object types and extends the list of object types. (The check for new object types in the current project is an esoteric feature helpful for pipeline developers during the development or extension of pipelines.) The command then creates a Snakefile in the pipeline directory based on the complete list of the object types. ''', "createSymbolicLinks": '''sobjects createSymbolicLinks Uses the objectGraph OG.json to create object directories for objects that have symbolic links in object's parameters.''', "prepare": '''sobjects prepare [<arguments to build_object_graph.py>] This is a short-cut command equivallent to: sobjects buildObjectGraph [<arguments to build_object_graph.py>] sobjects createSnakefile sobjects createSymlinks''', "run": '''sobjects run [<arguments to snakemake>] Creates targets for objects in the object graph by running snakemake. The <arguments to snakemake> determine which targets will be created and what resources will be used.''', "cleanProject": '''sobjects cleanProject [ -f] Will ask user to remove OG.json, .snakemake, and all objects directories. With -f option all is removed silently.''', "submit": '''sobjects submit [<arguments to snakemake>] Creates targets for objects in the object graph by running snakemake with profile specified in default_snakemake_args directive of so_project.yaml. 
The <arguments to snakemake> determine which targets will be created and what resources will be used.''', "printEnv": '''sobjects printEnv Prints out on the standart output the shell commands defining shell environment variables, such as PATH, PYTHONPATH, etc.''', "graph": '''sobject graph [-w width] [-p penwidth] [-a arrowsize] [-l legend] [-o out] [-i id] [-s shape] optional arguments: -h, --help show this help message and exit -w width, --width width width of node, default is 0.75 -p penwidth, --penwidth penwidth thickness of edges, default is 1.0 -a arrowsize, --arrowsize arrowsize multiplicative scale factor for arrowheads, default is 1.0 -l legend, --legend legend Name of the output legend file, default is no legend -o out, --out out name of the output file, default is stdout -t text, --text text place text in nodes: [|oId|oType:oId|params], default no text -s shape, --shape shape shape of the node, default is circle, for all shape names see https://www.graphviz.org/doc/info/shapes.html''' } def load_yaml(file_name): import yaml CF = open(file_name, 'r') config = yaml.safe_load(CF) CF.close() return config def get_arg_value(args, arg): try: i = args.index(arg) except ValueError: return None if i + 1 >= len(args): return None return args[i + 1] def cli(args: Optional[List[str]] = None): if not args: args = sys.argv[1:] command = args[0] if command == "jobscript.sh": import importlib.resources as importlib_resources print(importlib_resources.read_text(__package__, 'jobscript.sh'), end='') return if "READTHEDOCS" in os.environ: from _version import get_versions __version__ = get_versions()['version'] else: from snakeobjects import __version__ if command in ["help", "-h", "--help"]: print("Snakeobjects %s\n" % (__version__)) if len(args) == 1: print("Available commands are:\n\t", "\n\t".join(helpData.keys()), "\n", sep="") print("Typical sequence of commands is descripe, prepareTest, prepare, run:\n") for cmd, hs in helpData.items(): print(cmd) print('#' * len(cmd)) print(hs) print() elif len(args) == 2: hCmd = args[1] if hCmd in helpData: print(helpData[hCmd]) else: print("The command", hCmd, "is unknown") return 1 else: print("Help accepts at most one argument.") return 1 return if command == "version": print(__version__) return from snakeobjects import Project, ObjectGraph, load_object_graph, graph import importlib.resources as importlib_resources import yaml from importlib.util import spec_from_file_location, module_from_spec proj = Project() if command == "buildObjectGraph": proj.buildObjectGraph(args[1:]) proj.save_object_graph() elif command in ["createSnakefile"]: proj.write_main_snakefile() elif command == "createSymbolicLinks": proj.create_symbolic_links() elif command in ["prepare", "prepareTest"]: print("# WORKING ON PROJECT", proj.directory) print("# WITH PIPELINE", proj.pipeline.get_definition()) proj.buildObjectGraph(args[1:]) if command == "prepareTest": print("Current graph stats") print("+++++++++++++++++++") proj.objectGraph.print_stats() print("\n") print("New graph stats") print("+++++++++++++++") proj.objectGraph.print_stats() else: proj.save_object_graph() proj.write_main_snakefile() proj.create_symbolic_links() proj.objectGraph.print_stats() elif command == "printEnv": proj.set_environment(update_environment=False) elif command == "run": print("# WORKING ON PROJECT", proj.directory) print("# WITH PIPELINE", proj.pipeline.get_definition()) sargs = ['snakemake', '-s', str(proj.pipeline.get_main_snakefile_path()), '-d', proj.directory] if "default_snakemake_args" in 
proj.parameters: sargs += proj.parameters["default_snakemake_args"].split() sargs += args[1:] if not os.path.exists(proj.directory + '/OG.json'): print("OG.json doesn't exist in " + proj.directory + ", do 'sobjects prepare' first.") exit(1) proj.set_environment() print("RUNNING:", " ".join(sargs)) default_remote_provider = get_arg_value(sargs, '--default-remote-provider') default_remote_prefix = get_arg_value(sargs, '--default-remote-prefix') if default_remote_provider and default_remote_prefix: from snakeobjects.remoteProjects import upload_project_files_to_remote upload_project_files_to_remote(default_remote_provider, default_remote_prefix) if ("--kubernetes" in sargs or "--google-lifesciences" in sargs): if default_remote_provider and default_remote_prefix: os.environ['SO_REMOTE'] = f"{default_remote_provider}:{default_remote_prefix}" sargs += ["--envvars SO_REMOTE "] os.execvp('snakemake', sargs) elif command == "submit": print("# WORKING ON PROJECT", proj.directory) print("# WITH PIPELINE", proj.pipeline.get_definition()) from snakeobjects.Project import ProjectException if not os.path.exists(proj.directory + '/OG.json'): print("OG.json doesn't exist in " + proj.directory + ", do 'sobjects prepare' first.") exit(1) sargs = [] if "default_snakemake_args" in proj.parameters: sargs += proj.parameters["default_snakemake_args"].split() else: raise ProjectException("No profile specified") sargs += args[1:] profile = sargs[sargs.index('--profile') + 1] if not os.path.exists(profile): raise ProjectException("Profile not found %s" % profile) if not os.path.exists(profile + "/config.yaml"): raise ProjectException("No config.yaml in %s" % profile) pr_config = load_yaml(profile + "/config.yaml") if not "cluster" in pr_config: ProjectException("cluster in not specified in %s" % profile + "/config.yaml") cmd = pr_config["cluster"] proj.set_environment(sargs) if os.system('sobjects jobscript.sh >$SO_PROJECT/jobscript.sh'): raise ProjectException("sobjects jobscript.sh failed") with open(proj.directory + '/jobscript.sh', 'a') as js: for k, v in pr_config.items(): if not k in 'jobname jobscript cluster cluster-status'.split(' '): js.write('--' + str(k) + ' ' + str(v) + ' ') js.write(' '.join(args[1:])) os.system("%s/%s" % (profile, cmd) + " $SO_PROJECT/jobscript.sh") #os.execvp('python', [profile + "/" +cmd, "$SO_PROJECT/jobscript.sh"]) elif command == "describe": print("# WORKING ON PROJECT", proj.directory) print("# WITH PIPELINE", proj.pipeline.get_definition()) print("Project parameters:") for k, v in proj.parameters.items(): print(f"\t{k}: {v}") proj.objectGraph.print_stats() elif command == "graph": print(args, file=sys.stderr) graph.driver(proj.objectGraph, args) elif command == "cleanProject": print("# WORKING ON PROJECT", proj.directory) print("# WITH PIPELINE", proj.pipeline.get_definition()) import shutil sm = proj.directory + '/.snakemake' og = proj.directory + '/OG.json' if "-f" in sys.argv: val = input( f'\033[91m \nDO YOU REALLY WANT TO DELETE EVERYTHING IN {proj.directory} ? (Y/n):\033[00m') if val == 'Y': if os.path.exists(sm): shutil.rmtree(os.path.abspath(sm)) if os.path.exists(og): os.remove(os.path.abspath(og)) for ot in proj.objectGraph.get_object_types(): if os.path.exists(proj.directory + '/' + ot): shutil.rmtree(os.path.abspath(proj.directory + '/' + ot)) return 0 else: return 0 if os.path.exists(sm): val = input('Delete .snakemake? (y/n):') if val == 'y': shutil.rmtree(os.path.abspath(sm)) if os.path.exists(og): val = input('Delete OG.json? 
(y/n):') if val == 'y': os.remove(os.path.abspath(og)) for ot in proj.objectGraph.get_object_types(): if os.path.exists(proj.directory + '/' + ot): val = input(f'Delete {proj.directory+"/"+ot}? (y/n):') if val == 'y': shutil.rmtree(os.path.abspath(proj.directory + '/' + ot)) else: print("Don't know the command:", command) return 1 if __name__ == '__main__': import sys # print("BBBBBBB") cli(sys.argv[1:])
[]
[]
[ "SO_REMOTE" ]
[]
["SO_REMOTE"]
python
1
0
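A short sketch of reading back the SO_REMOTE value that the `run` command above exports as '<provider>:<prefix>' for Kubernetes and Google Life Sciences executions; the helper name is hypothetical.

import os

def parse_so_remote():
    """Split SO_REMOTE back into (provider, prefix); returns (None, None) when unset."""
    value = os.environ.get('SO_REMOTE')
    if not value:
        return None, None
    provider, _, prefix = value.partition(':')
    return provider, prefix

if __name__ == '__main__':
    print(parse_so_remote())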
support/db/dbtest/db.go
package dbtest import ( "crypto/rand" "encoding/hex" "fmt" "os" "strconv" "strings" "testing" "github.com/jmoiron/sqlx" "github.com/lib/pq" "github.com/xdbfoundation/go/support/db/sqlutils" "github.com/xdbfoundation/go/support/errors" "github.com/stretchr/testify/require" ) // DB represents an ephemeral database that starts blank and can be used // to run tests against. type DB struct { Dialect string DSN string dbName string t *testing.T closer func() closed bool } // randomName returns a new psuedo-random name that is sufficient for naming a // test database. In the event that reading from the source of randomness // fails, a panic will occur. func randomName() string { raw := make([]byte, 6) _, err := rand.Read(raw) if err != nil { err = errors.Wrap(err, "read from rand failed") panic(err) } enc := hex.EncodeToString(raw) return fmt.Sprintf("test_%s", enc) } // Close closes and deletes the database represented by `db` func (db *DB) Close() { if db.closed { return } db.closer() db.closed = true } // Load executes all of the statements in the provided sql script against the // test database, panicking if any fail. The receiver is returned allowing for // chain-style calling within your test functions. func (db *DB) Load(sql string) *DB { conn := db.Open() defer conn.Close() tx, err := conn.Begin() require.NoError(db.t, err) defer tx.Rollback() for i, cmd := range sqlutils.AllStatements(sql) { _, err = tx.Exec(cmd) require.NoError(db.t, err, "failed execing statement: %d", i) } err = tx.Commit() require.NoError(db.t, err) return db } // Open opens a sqlx connection to the db. func (db *DB) Open() *sqlx.DB { conn, err := sqlx.Open(db.Dialect, db.DSN) require.NoError(db.t, err) return conn } func (db *DB) Version() (major int) { conn := db.Open() defer conn.Close() versionFull := "" err := conn.Get(&versionFull, "SHOW server_version") require.NoError(db.t, err) version := strings.Fields(versionFull) parts := strings.Split(version[0], ".") major, err = strconv.Atoi(parts[0]) require.NoError(db.t, err) return major } func execStatement(t *testing.T, pguser, query string) { db, err := sqlx.Open("postgres", fmt.Sprintf("postgres://%s@localhost/?sslmode=disable", pguser)) require.NoError(t, err) _, err = db.Exec(query) require.NoError(t, err) require.NoError(t, db.Close()) } // Postgres provisions a new, blank database with a random name on the localhost // of the running process. It assumes that you have postgres running on the // default port, have the command line postgres tools installed, and that the // current user has access to the server. It panics on the event of a failure. func Postgres(t *testing.T) *DB { var result DB result.dbName = randomName() result.Dialect = "postgres" result.t = t t.Log("Test Database:", result.dbName) pgUser := os.Getenv("PGUSER") if len(pgUser) == 0 { pgUser = "postgres" } // create the db execStatement(t, pgUser, "CREATE DATABASE "+pq.QuoteIdentifier(result.dbName)) result.DSN = fmt.Sprintf("postgres://%s@localhost/%s?sslmode=disable&timezone=UTC", pgUser, result.dbName) result.closer = func() { execStatement(t, pgUser, "DROP DATABASE "+pq.QuoteIdentifier(result.dbName)) } return &result }
[ "\"PGUSER\"" ]
[]
[ "PGUSER" ]
[]
["PGUSER"]
go
1
0
contrib/dokku-installer.py
#!/usr/bin/env python3 import cgi import json import os import re import shutil try: import SimpleHTTPServer import SocketServer except ImportError: import http.server as SimpleHTTPServer import socketserver as SocketServer import subprocess import sys import threading VERSION = 'v0.23.7' def bytes_to_string(b): if type(b) == bytes: encoding = sys.stdout.encoding if encoding is None: encoding = 'utf-8' b = b.decode(encoding) b = b.strip() return b def string_to_bytes(s): if type(s) == str: encoding = sys.stdout.encoding if encoding is None: encoding = 'utf-8' s = s.encode(encoding) return s hostname = '' try: command = "bash -c '[[ $(dig +short $HOSTNAME) ]] && echo $HOSTNAME || wget -q -O - icanhazip.com'" hostname = bytes_to_string(subprocess.check_output(command, shell=True)) except subprocess.CalledProcessError: pass key_file = os.getenv('KEY_FILE', None) if os.path.isfile('/home/ec2-user/.ssh/authorized_keys'): key_file = '/home/ec2-user/.ssh/authorized_keys' elif os.path.isfile('/home/ubuntu/.ssh/authorized_keys'): key_file = '/home/ubuntu/.ssh/authorized_keys' else: key_file = '/root/.ssh/authorized_keys' admin_keys = [] if os.path.isfile(key_file): try: command = "cat {0}".format(key_file) admin_keys = bytes_to_string(subprocess.check_output(command, shell=True)).strip().split("\n") except subprocess.CalledProcessError: pass ufw_display = 'block' try: command = "sudo ufw status" ufw_output = bytes_to_string(subprocess.check_output(command, shell=True).strip()) if "inactive" in ufw_output: ufw_display = 'none' except subprocess.CalledProcessError: ufw_display = 'none' nginx_dir = '/etc/nginx' nginx_init = '/etc/init.d/nginx' try: command = "test -x /usr/bin/openresty" subprocess.check_output(command, shell=True) nginx_dir = '/usr/local/openresty/nginx/conf' nginx_init = '/etc/init.d/openresty' except subprocess.CalledProcessError: pass def check_boot(): if 'onboot' not in sys.argv: return init_dir = os.getenv('INIT_DIR', '/etc/init') systemd_dir = os.getenv('SYSTEMD_DIR', '/etc/systemd/system') nginx_conf_dir = os.getenv('NGINX_CONF_DIR', '{0}/conf.d'.format(nginx_dir)) if os.path.exists(init_dir): with open('{0}/dokku-installer.conf'.format(init_dir), 'w') as f: f.write("start on runlevel [2345]\n") f.write("exec {0} selfdestruct\n".format(os.path.abspath(__file__))) if os.path.exists(systemd_dir): with open('{0}/dokku-installer.service'.format(systemd_dir), 'w') as f: f.write("[Unit]\n") f.write("Description=Dokku web-installer\n") f.write("\n") f.write("[Service]\n") f.write("ExecStart={0} selfdestruct\n".format(os.path.abspath(__file__))) f.write("\n") f.write("[Install]\n") f.write("WantedBy=multi-user.target\n") f.write("WantedBy=graphical.target\n") if os.path.exists(nginx_conf_dir): with open('{0}/dokku-installer.conf'.format(nginx_conf_dir), 'w') as f: f.write("upstream dokku-installer { server 127.0.0.1:2000; }\n") f.write("server {\n") f.write(" listen 80;\n") f.write(" location / {\n") f.write(" proxy_pass http://dokku-installer;\n") f.write(" }\n") f.write("}\n") subprocess.call('rm -f {0}/sites-enabled/*'.format(nginx_dir), shell=True) sys.exit(0) class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): def write_content(self, content): try: self.wfile.write(content) except TypeError: self.wfile.write(string_to_bytes(content)) def do_GET(self): content = PAGE.replace('{VERSION}', VERSION) content = content.replace('{UFW_DISPLAY}', ufw_display) content = content.replace('{HOSTNAME}', hostname) content = content.replace('{AUTHORIZED_KEYS_LOCATION}', key_file) content 
= content.replace('{ADMIN_KEYS}', "\n".join(admin_keys)) self.send_response(200) self.end_headers() self.write_content(content) def do_POST(self): if self.path not in ['/setup', '/setup/']: return params = cgi.FieldStorage(fp=self.rfile, headers=self.headers, environ={ 'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': self.headers['Content-Type']}) dokku_root = os.getenv('DOKKU_ROOT', '/home/dokku') dokku_user = os.getenv('DOKKU_SYSTEM_GROUP', 'dokku') dokku_group = os.getenv('DOKKU_SYSTEM_USER', 'dokku') vhost_enable = 'false' vhost_filename = '{0}/VHOST'.format(dokku_root) if 'vhost' in params and params['vhost'].value == 'true': vhost_enable = 'true' with open(vhost_filename, 'w') as f: f.write(params['hostname'].value.strip("/")) shutil.chown(vhost_filename, dokku_user, dokku_group) else: try: os.remove(vhost_filename) except OSError: pass hostname_filename = '{0}/HOSTNAME'.format(dokku_root) with open(hostname_filename, 'w') as f: f.write(params['hostname'].value.strip("/")) shutil.chown(hostname_filename, dokku_user, dokku_group) for (index, key) in enumerate(params['keys'].value.splitlines(), 1): user = 'admin' if self.admin_user_exists() is not None: user = 'web-admin' if self.web_admin_user_exists() is not None: index = int(self.web_admin_user_exists()) + 1 elif self.web_admin_user_exists() is None: index = 1 elif self.admin_user_exists() is None: pass else: index = int(self.admin_user_exists()) + 1 user = user + str(index) command = ['sshcommand', 'acl-add', 'dokku', user] proc = subprocess.Popen(command, stdin=subprocess.PIPE) try: proc.stdin.write(key) except TypeError: proc.stdin.write(string_to_bytes(key)) proc.stdin.close() proc.wait() set_debconf_selection('boolean', 'nginx_enable', 'true') set_debconf_selection('boolean', 'skip_key_file', 'true') set_debconf_selection('boolean', 'vhost_enable', vhost_enable) set_debconf_selection('boolean', 'web_config', 'false') set_debconf_selection('string', 'hostname', params['hostname'].value) if 'selfdestruct' in sys.argv: DeleteInstallerThread() content = json.dumps({'status': 'ok'}) self.send_response(200) self.end_headers() self.write_content(content) def web_admin_user_exists(self): return self.user_exists('web-admin(\d+)') def admin_user_exists(self): return self.user_exists('admin(\d+)') def user_exists(self, name): command = 'dokku ssh-keys:list' pattern = re.compile(r'NAME="' + name + '"') proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) max_num = 0 exists = False for line in proc.stdout: m = pattern.search(bytes_to_string(line)) if m: # User of the form `user` or `user#` exists exists = True max_num = max(max_num, int(m.group(1))) if exists: return max_num else: return None def set_debconf_selection(debconf_type, key, value): found = False with open('/etc/os-release', 'r') as f: for line in f: if 'debian' in line: found = True if not found: return ps = subprocess.Popen(['echo', 'dokku dokku/{0} {1} {2}'.format( key, debconf_type, value )], stdout=subprocess.PIPE) try: subprocess.check_output(['debconf-set-selections'], stdin=ps.stdout) except subprocess.CalledProcessError: pass ps.wait() class DeleteInstallerThread(object): def __init__(self, interval=1): thread = threading.Thread(target=self.run, args=()) thread.daemon = True thread.start() def run(self): command = "rm {0}/conf.d/dokku-installer.conf && {1} stop && {1} start".format(nginx_dir, nginx_init) try: subprocess.call(command, shell=True) except: pass command = "rm -f /etc/init/dokku-installer.conf /etc/systemd/system/dokku-installer.service && (stop 
dokku-installer || systemctl stop dokku-installer.service)" try: subprocess.call(command, shell=True) except: pass def main(): check_boot() port = int(os.getenv('PORT', 2000)) httpd = SocketServer.TCPServer(("", port), GetHandler) print("Listening on 0.0.0.0:{0}, CTRL+C to stop".format(port)) httpd.serve_forever() PAGE = """ <html> <head> <meta charset="utf-8" /> <title>Dokku Setup</title> <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" integrity="sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO" crossorigin="anonymous"> <style> .bd-callout { padding: 1.25rem; margin-top: 1.25rem; margin-bottom: 1.25rem; border: 1px solid #eee; border-left-width: .25rem; border-radius: .25rem; } .bd-callout p:last-child { margin-bottom: 0; } .bd-callout-info { border-left-color: #5bc0de; } pre { font-size: 80%; margin-bottom: 0; } h1 small { font-size: 50%; } h5 { font-size: 1rem; } .container { width: 640px; } .result { padding-left: 20px; } input.form-control, textarea.form-control { background-color: #fafbfc; font-size: 14px; } input.form-control::placeholder, textarea.form-control::placeholder { color: #adb2b8 } </style> </head> <body> <div class="container"> <form id="form" role="form"> <h1 class="pt-3">Dokku Setup <small class="text-muted">{VERSION}</small></h1> <div class="alert alert-warning small" role="alert"> <strong>Warning:</strong> The SSH key filled out here can grant root access to the server. Please complete the setup as soon as possible. </div> <div class="row"> <div class="col"> <h3>Admin Access</h3> <div class="form-group"> <label for="key">Public SSH Keys</label><br /> <textarea class="form-control" name="keys" rows="5" id="key" placeholder="Begins with 'ssh-rsa', 'ssh-dss', 'ssh-ed25519', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384', or 'ecdsa-sha2-nistp521'">{ADMIN_KEYS}</textarea> <small class="form-text text-muted">Public keys allow users to ssh onto the server as the <code>dokku</code> user, as well as remotely execute Dokku commands. They are currently auto-populated from: <code>{AUTHORIZED_KEYS_LOCATION}</code>, and can be changed later via the <a href="http://dokku.viewdocs.io/dokku/deployment/user-management/" target="_blank"><code>dokku ssh-keys</code></a> plugin.</small> </div> </div> </div> <div class="row"> <div class="col"> <h3>Hostname Configuration</h3> <div class="form-group"> <label for="hostname">Hostname</label> <input class="form-control" type="text" id="hostname" name="hostname" value="{HOSTNAME}" placeholder="A hostname or ip address such as {HOSTNAME}" /> <small class="form-text text-muted">This will be used as the default host for all applications, and can be changed later via the <a href="http://dokku.viewdocs.io/dokku/configuration/domains/" target="_blank"><code>dokku domains:set-global</code></a> command.</small> </div> <div class="form-check"> <input class="form-check-input" type="checkbox" id="vhost" name="vhost" value="true"> <label class="form-check-label" for="vhost">Use virtualhost naming for apps</label> <small class="form-text text-muted">When enabled, Nginx will be run on port 80 and proxy requests to apps based on hostname.</small> <small class="form-text text-muted">When disabled, a specific port will be setup for each application on first deploy, and requests to that port will be proxied to the relevant app.</small> </div> <div class="alert alert-warning small mt-3 d-{UFW_DISPLAY}" role="alert"> <strong>Warning:</strong> UFW is active. 
To allow traffic to specific ports, run <code>sudo ufw allow PORT</code> for the port in question. </div> <div class="bd-callout bd-callout-info"> <h5>What will app URLs look like?</h5> <pre><code id="example">http://hostname:port</code></pre> </div> </div> </div> <button type="button" onclick="setup()" class="btn btn-primary">Finish Setup</button> <span class="result"></span> </form> </div> <div id="error-output"></div> <script> var $ = document.querySelector.bind(document) function setup() { if ($("#key").value.trim() == "") { alert("Your admin public key cannot be blank.") return } if ($("#hostname").value.trim() == "") { alert("Your hostname cannot be blank.") return } var data = new FormData($("#form")) var inputs = [].slice.call(document.querySelectorAll("input, textarea, button")) inputs.forEach(function (input) { input.disabled = true }) var result = $(".result") fetch("/setup", {method: "POST", body: data}) .then(function(response) { if (response.ok) { return response.json() } else { throw new Error('Server returned error') } }) .then(function(response) { result.classList.add("text-success"); result.textContent = "Success! Redirecting in 3 seconds. .." setTimeout(function() { window.location.href = "http://dokku.viewdocs.io/dokku~{VERSION}/deployment/application-deployment/"; }, 3000); }) .catch(function (error) { result.classList.add("text-danger"); result.textContent = "Could not send the request" }) } function update() { if ($("#vhost").matches(":checked") && $("#hostname").value.match(/^(\d{1,3}\.){3}\d{1,3}$/)) { alert("In order to use virtualhost naming, the hostname must not be an IP but a valid domain name.") $("#vhost").checked = false; } if ($("#vhost").matches(':checked')) { $("#example").textContent = "http://<app-name>."+$("#hostname").value } else { $("#example").textContent = "http://"+$("#hostname").value+":<app-port>" } } $("#vhost").addEventListener("change", update); $("#hostname").addEventListener("input", update); update(); </script> </body> </html> """ if __name__ == "__main__": main()
[]
[]
[ "PORT", "INIT_DIR", "DOKKU_SYSTEM_USER", "KEY_FILE", "SYSTEMD_DIR", "DOKKU_SYSTEM_GROUP", "NGINX_CONF_DIR", "DOKKU_ROOT" ]
[]
["PORT", "INIT_DIR", "DOKKU_SYSTEM_USER", "KEY_FILE", "SYSTEMD_DIR", "DOKKU_SYSTEM_GROUP", "NGINX_CONF_DIR", "DOKKU_ROOT"]
python
8
0
Task2-CameraShotSegmentation/CALF-detection/src/main.py
import os import logging from datetime import datetime import time import numpy as np from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter import torch from dataset import SoccerNet, SoccerNetClips, SoccerNetClipsTesting from model import Model from train import trainer, test from loss import SegmentationLoss, SpottingLoss # for reproducibility torch.manual_seed(0) np.random.seed(0) def main(args, model_save_path): logging.info("Parameters:") for arg in vars(args): logging.info(arg.rjust(15) + " : " + str(getattr(args, arg))) # create dataset # dataset_Train = SoccerNet(path=args.SoccerNet_path, split="train", version=args.version, framerate=args.framerate) # dataset_Valid = SoccerNet(path=args.SoccerNet_path, split="valid", version=args.version, framerate=args.framerate) # dataset_Test = SoccerNet(path=args.SoccerNet_path, split="test", version=args.version, framerate=args.framerate) if not args.test_only: dataset_Train = SoccerNetClips(path=args.SoccerNet_path, features=args.features, split="train", version=args.version, framerate=args.framerate, chunk_size=args.chunk_size*args.framerate, receptive_field=args.receptive_field*args.framerate) dataset_Valid = SoccerNetClips(path=args.SoccerNet_path, features=args.features, split="valid", version=args.version, framerate=args.framerate, chunk_size=args.chunk_size*args.framerate, receptive_field=args.receptive_field*args.framerate) dataset_Test = SoccerNetClipsTesting(path=args.SoccerNet_path, features=args.features, split="test", version=args.version, framerate=args.framerate, chunk_size=args.chunk_size*args.framerate, receptive_field=args.receptive_field*args.framerate, advanced_test=args.advanced_test) # create model model = Model(weights=args.load_weights, chunk_size=args.chunk_size*args.framerate, dim_capsule=args.dim_capsule, receptive_field=args.receptive_field*args.framerate, num_detections=dataset_Test.num_detections, framerate=args.framerate).cuda() logging.info(model) total_params = sum(p.numel() for p in model.parameters() if p.requires_grad) parameters_per_layer = [p.numel() for p in model.parameters() if p.requires_grad] logging.info("Total number of parameters: " + str(total_params)) # create dataloader if not args.test_only: train_loader = torch.utils.data.DataLoader(dataset_Train, batch_size=args.batch_size, shuffle=True, num_workers=args.max_num_worker, pin_memory=True) val_loader = torch.utils.data.DataLoader(dataset_Valid, batch_size=args.batch_size, shuffle=False, num_workers=args.max_num_worker, pin_memory=True) test_loader = torch.utils.data.DataLoader(dataset_Test, batch_size=1, shuffle=False, num_workers=1, pin_memory=True) # training parameters if not args.test_only: criterion_segmentation = SegmentationLoss(K=dataset_Train.K_parameters) criterion_spotting = SpottingLoss(lambda_coord=args.lambda_coord, lambda_noobj=args.lambda_noobj) optimizer = torch.optim.Adam(model.parameters(), lr=args.LR, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False) if args.scheduler == "ExponentialDecay": scheduler = [args.LR, args.LR/1000] elif args.scheduler == "ReduceLRonPlateau": scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', verbose=True, patience=args.patience) # start training trainer(train_loader, val_loader, test_loader, model, optimizer, scheduler, [criterion_segmentation, criterion_spotting], [args.loss_weight_segmentation, args.loss_weight_detection], max_epochs=args.max_epochs, model_save_path=model_save_path) best_model_path = os.path.join(model_save_path, "model.pth.tar") # 
print("loding?") if os.path.exists(best_model_path): print(f"loading {best_model_path}") checkpoint = torch.load(best_model_path) model.load_state_dict(checkpoint['state_dict']) average_mAP = test(test_loader, model, "best", model_save_path) logging.info("Best Performance at end of training " + str(average_mAP)) if __name__ == '__main__': parser = ArgumentParser(description='context aware loss function', formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument('--SoccerNet_path', required=False, type=str, default="path/to/SoccerNet/", help='Path for SoccerNet' ) parser.add_argument('--features', required=False, type=str, default="ResNET_PCA512.npy", help='Video features' ) parser.add_argument('--max_epochs', required=False, type=int, default=1000, help='Maximum number of epochs' ) parser.add_argument('--load_weights', required=False, type=str, default=None, help='weights to load' ) parser.add_argument('--model_name', required=False, type=str, default="CALF", help='named of the model to save' ) parser.add_argument('--test_only', required=False, action='store_true', help='Perform testing only' ) parser.add_argument('--advanced_test', required=False, type=str, default="abrupt", help='Perform testing only' ) parser.add_argument('--version', required=False, type=int, default=1, help='Version of the dataset' ) parser.add_argument('--num_features', required=False, type=int, default=512, help='Number of input features' ) parser.add_argument('--dim_capsule', required=False, type=int, default=16, help='Dimension of the capsule network' ) parser.add_argument('--framerate', required=False, type=int, default=2, help='Framerate of the input features' ) parser.add_argument('--chunk_size', required=False, type=int, default=120, help='Size of the chunk (in seconds)' ) parser.add_argument('--receptive_field', required=False, type=int, default=40, help='Temporal receptive field of the network (in seconds)' ) parser.add_argument('--num_detections', required=False, type=int, default=5, help='Maximal number of detections per chunk' ) parser.add_argument("--lambda_coord", required=False, type=float, default=5.0, help="Weight of the coordinates of the event in the detection loss") parser.add_argument("--lambda_noobj", required=False, type=float, default=0.5, help="Weight of the no object detection in the detection loss") parser.add_argument("--loss_weight_segmentation", required=False, type=float, default=0.002, help="Weight of the segmentation loss compared to the detection loss") parser.add_argument("--loss_weight_detection", required=False, type=float, default=1.0, help="Weight of the detection loss") parser.add_argument("--scheduler", required=False, type=str, default="ExponentialDecay", help="define scheduler") parser.add_argument('--batch_size', required=False, type=int, default=1, help='Batch size' ) parser.add_argument('--LR', required=False, type=float, default=1e-04, help='Learning Rate' ) parser.add_argument('--patience', required=False, type=int, default=25, help='Batch size' ) parser.add_argument('--GPU', required=False, type=int, default=-1, help='ID of the GPU to use' ) parser.add_argument('--max_num_worker', required=False, type=int, default=4, help='number of worker to load data') parser.add_argument('--loglevel', required=False, type=str, default='INFO', help='logging level') args = parser.parse_args() start_time = datetime.now().strftime('%Y-%m-%d %H-%M-%S') model_save_path = os.path.join("models", args.model_name) os.makedirs(model_save_path, exist_ok=True) log_path = 
os.path.join(model_save_path, f"log.txt") numeric_level = getattr(logging, args.loglevel.upper(), None) if not isinstance(numeric_level, int): raise ValueError('Invalid log level: %s' % args.loglevel) logging.basicConfig( level=numeric_level, format= "%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s", handlers=[ logging.FileHandler(log_path), logging.StreamHandler() ]) if args.GPU >= 0: os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = str(args.GPU) start=time.time() logging.info('Starting main function') main(args, model_save_path) logging.info(f'Total Execution Time is {time.time()-start} seconds')
[]
[]
[ "CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
python
2
0
anchore_engine/services/policy_engine/__init__.py
import time import sys import pkg_resources import os import retrying from sqlalchemy.exc import IntegrityError # anchore modules import anchore_engine.clients.services.common import anchore_engine.subsys.servicestatus import anchore_engine.subsys.metrics from anchore_engine.subsys import logger from anchore_engine.configuration import localconfig from anchore_engine.clients.services import simplequeue, internal_client_for from anchore_engine.clients.services.simplequeue import SimpleQueueClient from anchore_engine.service import ApiService, LifeCycleStages # from anchore_engine.subsys.logger import enable_bootstrap_logging # enable_bootstrap_logging() feed_sync_queuename = 'feed_sync_tasks' system_user_auth = None feed_sync_msg = { 'task_type': 'feed_sync', 'enabled': True } try: FEED_SYNC_RETRIES = int(os.getenv('ANCHORE_FEED_SYNC_CHECK_RETRIES', 5)) except: logger.exception('Error parsing env value ANCHORE_FEED_SYNC_CHECK_RETRIES into int, using default value of 5') FEED_SYNC_RETRIES = 5 try: FEED_SYNC_RETRY_BACKOFF = int(os.getenv('ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF', 5)) except: logger.exception('Error parsing env value ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF into int, using default value of 5') FEED_SYNC_RETRY_BACKOFF = 5 try: feed_config_check_retries = int(os.getenv('FEED_CLIENT_CHECK_RETRIES', 3)) except: logger.exception('Error parsing env value FEED_CLIENT_CHECK_RETRIES into int, using default value of 3') feed_config_check_retries = 3 try: feed_config_check_backoff = int(os.getenv('FEED_CLIENT_CHECK_BACKOFF', 5)) except: logger.exception('Error parsing env FEED_CLIENT_CHECK_BACKOFF value into int, using default value of 5') feed_config_check_backoff = 5 # service funcs (must be here) def _check_feed_client_credentials(): from anchore_engine.services.policy_engine.engine.feeds.client import get_client sleep_time = feed_config_check_backoff last_ex = None for i in range(feed_config_check_retries): if i > 0: logger.info("Waiting for {} seconds to try feeds client config check again".format(sleep_time)) time.sleep(sleep_time) sleep_time += feed_config_check_backoff try: logger.info('Checking feeds client credentials. Attempt {} of {}'.format(i + 1, feed_config_check_retries)) client = get_client() client = None logger.info('Feeds client credentials ok') return True except Exception as e: logger.warn("Could not verify feeds endpoint and/or config. Got exception: {}".format(e)) last_ex = e else: if last_ex: raise last_ex else: raise Exception('Exceeded retries for feeds client config check. Failing check') def _system_creds(): global system_user_auth if not system_user_auth: config = localconfig.get_config() system_user_auth = config['system_user_auth'] return system_user_auth def process_preflight(): """ Execute the preflight functions, aborting service startup if any throw uncaught exceptions or return False return value :return: """ preflight_check_functions = [_init_db_content] for fn in preflight_check_functions: try: fn() except Exception as e: logger.exception('Preflight checks failed with error: {}. 
Aborting service startup'.format(e)) sys.exit(1) def _init_distro_mappings(): from anchore_engine.db import session_scope, DistroMapping initial_mappings = [ DistroMapping(from_distro='alpine', to_distro='alpine', flavor='ALPINE'), DistroMapping(from_distro='busybox', to_distro='busybox', flavor='BUSYB'), DistroMapping(from_distro='centos', to_distro='centos', flavor='RHEL'), DistroMapping(from_distro='debian', to_distro='debian', flavor='DEB'), DistroMapping(from_distro='fedora', to_distro='centos', flavor='RHEL'), DistroMapping(from_distro='ol', to_distro='ol', flavor='RHEL'), DistroMapping(from_distro='rhel', to_distro='centos', flavor='RHEL'), DistroMapping(from_distro='ubuntu', to_distro='ubuntu', flavor='DEB'), DistroMapping(from_distro='amzn', to_distro='amzn', flavor='RHEL'), #DistroMapping(from_distro='java', to_distro='snyk', flavor='JAVA'), #DistroMapping(from_distro='gem', to_distro='snyk', flavor='RUBY'), #DistroMapping(from_distro='npm', to_distro='snyk', flavor='NODEJS'), #DistroMapping(from_distro='python', to_distro='snyk', flavor='PYTHON'), ] # set up any data necessary at system init try: logger.info('Checking policy engine db initialization. Checking initial set of distro mappings') with session_scope() as dbsession: distro_mappings = dbsession.query(DistroMapping).all() for i in initial_mappings: if not [x for x in distro_mappings if x.from_distro == i.from_distro]: logger.info('Adding missing mapping: {}'.format(i)) dbsession.add(i) logger.info('Distro mapping initialization complete') except Exception as err: if isinstance(err, IntegrityError): logger.warn("another process has already initialized, continuing") else: raise Exception("unable to initialize default distro mappings - exception: " + str(err)) return True def _init_db_content(): """ Initialize the policy engine db with any data necessary at startup. 
:return: """ return _init_distro_mappings() def do_feed_sync(msg): if 'FeedsUpdateTask' not in locals(): from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask if 'get_selected_feeds_to_sync' not in locals(): from anchore_engine.services.policy_engine.engine.feeds.sync import get_selected_feeds_to_sync handler_success = False timer = time.time() logger.info("FIRING: feed syncer") try: feeds = get_selected_feeds_to_sync(localconfig.get_config()) logger.info('Syncing configured feeds: {}'.format(feeds)) result = FeedsUpdateTask.run_feeds_update(json_obj=msg.get('data')) if result is not None: handler_success = True else: logger.warn('Feed sync task marked as disabled, so skipping') except ValueError as e: logger.warn('Received msg of wrong type') except Exception as err: logger.warn("failure in feed sync handler - exception: " + str(err)) if handler_success: anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer, function='do_feed_sync', status="success") else: anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer, function='do_feed_sync', status="fail") def handle_feed_sync(*args, **kwargs): """ Initiates a feed sync in the system in response to a message from the queue :param args: :param kwargs: :return: """ system_user = _system_creds() logger.info('init args: {}'.format(kwargs)) cycle_time = kwargs['mythread']['cycle_timer'] while True: config = localconfig.get_config() feed_sync_enabled = config.get('feeds', {}).get('sync_enabled', True) if feed_sync_enabled: logger.info("Feed sync task executor activated") try: run_feed_sync(system_user) except Exception as e: logger.error('Caught escaped error in feed sync handler: {}'.format(e)) finally: logger.info('Feed sync task executor complete') else: logger.info("sync_enabled is set to false in config - skipping feed sync") time.sleep(cycle_time) return True @retrying.retry(stop_max_attempt_number=FEED_SYNC_RETRIES, wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000, wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000) def run_feed_sync(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready(['simplequeue']) if not all_ready: logger.info("simplequeue service not yet ready, will retry") raise Exception('Simplequeue service not yet ready') else: try: # This has its own retry on the queue fetch, so wrap with catch block to ensure we don't double-retry on task exec simplequeue.run_target_with_queue_ttl(None, queue=feed_sync_queuename, target=do_feed_sync, max_wait_seconds=30, visibility_timeout=180, retries=FEED_SYNC_RETRIES, backoff_time=FEED_SYNC_RETRY_BACKOFF) except Exception as err: logger.warn("failed to process task this cycle: " + str(err)) def handle_feed_sync_trigger(*args, **kwargs): """ Checks to see if there is a task for a feed sync in the queue and if not, adds one. Interval for firing this should be longer than the expected feed sync duration. :param args: :param kwargs: :return: """ system_user = _system_creds() logger.info('init args: {}'.format(kwargs)) cycle_time = kwargs['mythread']['cycle_timer'] while True: config = localconfig.get_config() feed_sync_enabled = config.get('feeds', {}).get('sync_enabled', True) if feed_sync_enabled: logger.info('Feed Sync task creator activated') try: push_sync_task(system_user) logger.info('Feed Sync Trigger done, waiting for next cycle.') except Exception as e: logger.error('Error caught in feed sync trigger handler after all retries. 
Will wait for next cycle') finally: logger.info('Feed Sync task creator complete') else: logger.info("sync_enabled is set to false in config - skipping feed sync trigger") time.sleep(cycle_time) return True @retrying.retry(stop_max_attempt_number=FEED_SYNC_RETRIES, wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000, wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000) def push_sync_task(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready(['simplequeue']) if not all_ready: logger.info("simplequeue service not yet ready, will retry") raise Exception("Simplequeue service not yet ready") else: #q_client = SimpleQueueClient(user=system_user[0], password=system_user[1]) q_client = internal_client_for(SimpleQueueClient, userId=None) if not q_client.is_inqueue(name=feed_sync_queuename, inobj=feed_sync_msg): try: q_client.enqueue(name=feed_sync_queuename, inobj=feed_sync_msg) except: logger.error('Could not enqueue message for a feed sync') raise class PolicyEngineService(ApiService): __service_name__ = 'policy_engine' __spec_dir__ = pkg_resources.resource_filename(__name__, 'swagger') __monitors__ = { 'service_heartbeat': {'handler': anchore_engine.subsys.servicestatus.handle_service_heartbeat, 'taskType': 'handle_service_heartbeat', 'args': [__service_name__], 'cycle_timer': 60, 'min_cycle_timer': 60, 'max_cycle_timer': 60, 'last_queued': 0, 'last_return': False, 'initialized': False}, 'feed_sync_checker': {'handler': handle_feed_sync_trigger, 'taskType': 'handle_feed_sync_trigger', 'args': [], 'cycle_timer': 600, 'min_cycle_timer': 300, 'max_cycle_timer': 100000, 'last_queued': 0, 'last_return': False, 'initialized': False}, 'feed_sync': {'handler': handle_feed_sync, 'taskType': 'handle_feed_sync', 'args': [], 'cycle_timer': 3600, 'min_cycle_timer': 1800, 'max_cycle_timer': 100000, 'last_queued': 0, 'last_return': False, 'initialized': False} } __lifecycle_handlers__ = { LifeCycleStages.pre_register: [(process_preflight, None)] } #def _register_instance_handlers(self): # super()._register_instance_handlers() # self.register_handler(LifeCycleStages.pre_register, process_preflight, None)
[]
[]
[ "FEED_CLIENT_CHECK_RETRIES", "ANCHORE_FEED_SYNC_CHECK_RETRIES", "ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF", "FEED_CLIENT_CHECK_BACKOFF" ]
[]
["FEED_CLIENT_CHECK_RETRIES", "ANCHORE_FEED_SYNC_CHECK_RETRIES", "ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF", "FEED_CLIENT_CHECK_BACKOFF"]
python
4
0
tests/settings.py
import os

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '@s8$swhj9du^aglt5+@ut^)wepr+un1m7r*+ixcq(-5i^st=y^'
SELENIUM_HEADLESS = True if os.environ.get('SELENIUM_HEADLESS', False) else False

DEBUG = True
ALLOWED_HOSTS = []

INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # test project
    'test_project',
    'openwisp_utils.admin_theme',
    'django.contrib.sites',
    # admin
    'django.contrib.admin',
    # rest framework
    'rest_framework',
    'drf_yasg',
]

EXTENDED_APPS = ('openwisp_controller', 'django_loci')

# Just for testing purposes
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'openwisp_utils.staticfiles.DependencyFinder',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'urls'

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'OPTIONS': {
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
                'openwisp_utils.loaders.DependencyLoader',
            ],
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'openwisp_utils.admin_theme.context_processor.menu_groups',
                'openwisp_utils.admin_theme.context_processor.admin_theme_settings',
                'test_project.context_processors.test_theme_helper',
            ],
        },
    }
]

DATABASES = {
    'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'openwisp_utils.db'}
}

OPENWISP_ADMIN_SITE_CLASS = 'test_project.site.CustomAdminSite'

SITE_ID = 1

EMAIL_PORT = '1025'

LOGIN_REDIRECT_URL = 'admin:index'
ACCOUNT_LOGOUT_REDIRECT_URL = LOGIN_REDIRECT_URL

# during development only
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

# only for automated test purposes
REST_FRAMEWORK = {
    'DEFAULT_THROTTLE_CLASSES': [
        'test_project.api.throttling.CustomScopedRateThrottle'
    ],
    'DEFAULT_THROTTLE_RATES': {'anon': '20/hour'},
}

CACHES = {'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}}

OPENWISP_TEST_ADMIN_MENU_ITEMS = [{'model': 'test_project.Project'}]

OPENWISP_ADMIN_THEME_LINKS = [
    {
        'type': 'text/css',
        'href': 'admin/css/openwisp.css',
        'rel': 'stylesheet',
        'media': 'all',
    },
    {
        'type': 'text/css',
        'href': 'menu-test.css',
        'rel': 'stylesheet',
        'media': 'all',
    },
    # custom css for testing menu icons
    {
        'type': 'image/x-icon',
        'href': 'ui/openwisp/images/favicon.png',
        'rel': 'icon',
    },
]

OPENWISP_ADMIN_THEME_JS = ['dummy.js']

# local settings must be imported before test runner otherwise they'll be ignored
try:
    from local_settings import *
except ImportError:
    pass
[]
[]
[ "SELENIUM_HEADLESS" ]
[]
["SELENIUM_HEADLESS"]
python
1
0
toolium/driver_wrapper.py
# -*- coding: utf-8 -*- u""" Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. This file is part of Toolium. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import logging.config import os import screeninfo from toolium.config_driver import ConfigDriver from toolium.config_parser import ExtendedConfigParser from toolium.driver_wrappers_pool import DriverWrappersPool from toolium.utils.driver_utils import Utils from toolium.utils.path_utils import get_valid_filename class DriverWrapper(object): """Wrapper with the webdriver and the configuration needed to execute tests :type driver: selenium.webdriver.remote.webdriver.WebDriver or appium.webdriver.webdriver.WebDriver :type config: toolium.config_parser.ExtendedConfigParser or configparser.ConfigParser :type utils: toolium.utils.driver_utils.Utils :type app_strings: dict :type session_id: str :type remote_node: str :type remote_node_video_enabled: bool :type logger: logging.Logger :type config_properties_filenames: str :type config_log_filename: str :type output_log_filename: str :type visual_baseline_directory: str :type baseline_name: str """ driver = None #: webdriver instance config = ExtendedConfigParser() #: driver configuration utils = None #: test utils instance app_strings = None #: mobile application strings session_id = None #: remote webdriver session id server_type = None #: remote server type remote_node = None #: remote grid node remote_node_video_enabled = False #: True if the remote grid node has the video recorder enabled logger = None #: logger instance # Configuration and output files config_properties_filenames = None #: configuration filenames separated by commas config_log_filename = None #: configuration log file output_log_filename = None #: output log file visual_baseline_directory = None #: folder with the baseline images baseline_name = None #: baseline name def __init__(self): if not DriverWrappersPool.is_empty(): # Copy config object and other properties from default driver default_wrapper = DriverWrappersPool.get_default_wrapper() self.config = default_wrapper.config.deepcopy() self.logger = default_wrapper.logger self.config_properties_filenames = default_wrapper.config_properties_filenames self.config_log_filename = default_wrapper.config_log_filename self.output_log_filename = default_wrapper.output_log_filename self.visual_baseline_directory = default_wrapper.visual_baseline_directory self.baseline_name = default_wrapper.baseline_name # Create utils instance and add wrapper to the pool self.utils = Utils(self) DriverWrappersPool.add_wrapper(self) def configure_logger(self, tc_config_log_filename=None, tc_output_log_filename=None): """Configure selenium instance logger :param tc_config_log_filename: test case specific logging config file :param tc_output_log_filename: test case specific output logger file """ # Get config logger filename config_log_filename = DriverWrappersPool.get_configured_value('Config_log_filename', tc_config_log_filename, 'logging.conf') config_log_filename = os.path.join(DriverWrappersPool.config_directory, 
config_log_filename) # Configure logger only if logging filename has changed if self.config_log_filename != config_log_filename: # Get output logger filename output_log_filename = DriverWrappersPool.get_configured_value('Output_log_filename', tc_output_log_filename, 'toolium.log') output_log_filename = os.path.join(DriverWrappersPool.output_directory, output_log_filename) output_log_filename = output_log_filename.replace('\\', '\\\\') try: logging.config.fileConfig(config_log_filename, {'logfilename': output_log_filename}, False) except Exception as exc: print("[WARN] Error reading logging config file '{}': {}".format(config_log_filename, exc)) self.config_log_filename = config_log_filename self.output_log_filename = output_log_filename self.logger = logging.getLogger(__name__) def configure_properties(self, tc_config_prop_filenames=None, behave_properties=None): """Configure selenium instance properties :param tc_config_prop_filenames: test case specific properties filenames :param behave_properties: dict with behave user data properties """ prop_filenames = DriverWrappersPool.get_configured_value('Config_prop_filenames', tc_config_prop_filenames, 'properties.cfg;local-properties.cfg') prop_filenames = [os.path.join(DriverWrappersPool.config_directory, filename) for filename in prop_filenames.split(';')] prop_filenames = ';'.join(prop_filenames) # Configure config only if properties filename has changed if self.config_properties_filenames != prop_filenames: # Initialize the config object self.config = ExtendedConfigParser.get_config_from_file(prop_filenames) self.config_properties_filenames = prop_filenames # Override properties with system properties self.config.update_properties(os.environ) # Override properties with behave userdata properties if behave_properties: self.config.update_properties(behave_properties) # Modify config properties before driver creation self.finalize_properties_configuration() def finalize_properties_configuration(self): # Override method if config properties (self.config object) need custom modifications before driver creation pass def configure_visual_baseline(self): """Configure baseline directory""" # Get baseline name and translate config variables baseline_name = self.config.get_optional('VisualTests', 'baseline_name', '{Driver_type}') baseline_name = self.config.translate_config_variables(baseline_name) # Configure baseline directory if baseline name has changed if self.baseline_name != baseline_name: self.baseline_name = baseline_name self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory, get_valid_filename(baseline_name)) def update_visual_baseline(self): """Configure baseline directory after driver is created""" # Update baseline with real platformVersion value if '{PlatformVersion}' in self.baseline_name: try: platform_version = self.driver.desired_capabilities['platformVersion'] except KeyError: platform_version = None self.baseline_name = self.baseline_name.replace('{PlatformVersion}', str(platform_version)) self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory, self.baseline_name) # Update baseline with real version value if '{Version}' in self.baseline_name: try: splitted_version = self.driver.desired_capabilities['version'].split('.') version = '.'.join(splitted_version[:2]) except KeyError: version = None self.baseline_name = self.baseline_name.replace('{Version}', str(version)) self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory, 
self.baseline_name) # Update baseline with remote node value if '{RemoteNode}' in self.baseline_name: self.baseline_name = self.baseline_name.replace('{RemoteNode}', str(self.remote_node)) self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory, self.baseline_name) def configure(self, tc_config_files, is_selenium_test=True, behave_properties=None): """Configure initial selenium instance using logging and properties files for Selenium or Appium tests :param tc_config_files: test case specific config files :param is_selenium_test: true if test is a selenium or appium test case :param behave_properties: dict with behave user data properties """ # Configure config and output directories DriverWrappersPool.configure_common_directories(tc_config_files) # Configure logger self.configure_logger(tc_config_files.config_log_filename, tc_config_files.output_log_filename) # Initialize the config object self.configure_properties(tc_config_files.config_properties_filenames, behave_properties) # Configure visual directories if is_selenium_test: driver_info = self.config.get('Driver', 'type') DriverWrappersPool.configure_visual_directories(driver_info) self.configure_visual_baseline() def connect(self, maximize=True): """Set up the selenium driver and connect to the server :param maximize: True if the driver should be maximized :returns: selenium driver """ if not self.config.get('Driver', 'type') or self.config.get('Driver', 'type') in ['api', 'no_driver']: return None self.driver = ConfigDriver(self.config, self.utils).create_driver() # Save session id and remote node to download video after the test execution self.session_id = self.driver.session_id self.server_type, self.remote_node = self.utils.get_remote_node() if self.server_type == 'grid': self.remote_node_video_enabled = self.utils.is_remote_video_enabled(self.remote_node) else: self.remote_node_video_enabled = True if self.server_type in ['ggr', 'selenoid'] else False # Save app_strings in mobile tests if self.is_mobile_test() and not self.is_web_test() and self.config.getboolean_optional('Driver', 'appium_app_strings'): self.app_strings = self.driver.app_strings() if self.is_maximizable(): # Bounds and screen bounds_x, bounds_y = self.get_config_window_bounds() self.driver.set_window_position(bounds_x, bounds_y) self.logger.debug('Window bounds: %s x %s', bounds_x, bounds_y) # Maximize browser if maximize: # Set window size or maximize window_width = self.config.get_optional('Driver', 'window_width') window_height = self.config.get_optional('Driver', 'window_height') if window_width and window_height: self.driver.set_window_size(window_width, window_height) else: self.driver.maximize_window() # Log window size window_size = self.utils.get_window_size() self.logger.debug('Window size: %s x %s', window_size['width'], window_size['height']) # Update baseline self.update_visual_baseline() # Discard previous logcat logs self.utils.discard_logcat_logs() # Set implicitly wait timeout self.utils.set_implicitly_wait() return self.driver def get_config_window_bounds(self): """Reads bounds from config and, if monitor is specified, modify the values to match with the specified monitor :return: coords X and Y where set the browser window. 
""" bounds_x = int(self.config.get_optional('Driver', 'bounds_x') or 0) bounds_y = int(self.config.get_optional('Driver', 'bounds_y') or 0) monitor_index = int(self.config.get_optional('Driver', 'monitor') or -1) if monitor_index > -1: try: monitor = screeninfo.get_monitors()[monitor_index] bounds_x += monitor.x bounds_y += monitor.y except NotImplementedError: self.logger.warning('Current environment doesn\'t support get_monitors') return bounds_x, bounds_y def is_android_test(self): """Check if actual test must be executed in an Android mobile :returns: True if test must be executed in an Android mobile """ return self.utils.get_driver_name() == 'android' def is_ios_test(self): """Check if actual test must be executed in an iOS mobile :returns: True if test must be executed in an iOS mobile """ return self.utils.get_driver_name() in ('ios', 'iphone') def is_mobile_test(self): """Check if actual test must be executed in a mobile :returns: True if test must be executed in a mobile """ return self.is_android_test() or self.is_ios_test() def is_web_test(self): """Check if actual test must be executed in a browser :returns: True if test must be executed in a browser """ appium_browser_name = self.config.get_optional('AppiumCapabilities', 'browserName') return not self.is_mobile_test() or appium_browser_name not in (None, '') def is_android_web_test(self): """Check if actual test must be executed in a browser of an Android mobile :returns: True if test must be executed in a browser of an Android mobile """ return self.is_android_test() and self.is_web_test() def is_ios_web_test(self): """Check if actual test must be executed in a browser of an iOS mobile :returns: True if test must be executed in a browser of an iOS mobile """ return self.is_ios_test() and self.is_web_test() def is_maximizable(self): """Check if the browser is maximizable :returns: True if the browser is maximizable """ return not self.is_mobile_test() def should_reuse_driver(self, scope, test_passed, context=None): """Check if the driver should be reused :param scope: execution scope (function, module, class or session) :param test_passed: True if the test has passed :param context: behave context :returns: True if the driver should be reused """ reuse_driver = self.config.getboolean_optional('Driver', 'reuse_driver') reuse_driver_session = self.config.getboolean_optional('Driver', 'reuse_driver_session') restart_driver_after_failure = (self.config.getboolean_optional('Driver', 'restart_driver_after_failure') or self.config.getboolean_optional('Driver', 'restart_driver_fail')) if context and scope == 'function': reuse_driver = reuse_driver or (hasattr(context, 'reuse_driver_from_tags') and context.reuse_driver_from_tags) return (((reuse_driver and scope == 'function') or (reuse_driver_session and scope != 'session')) and (test_passed or not restart_driver_after_failure)) def get_driver_platform(self): """ Get driver platform where tests are running :return: platform name """ platform = '' if 'platform' in self.driver.desired_capabilities: platform = self.driver.desired_capabilities['platform'] elif 'platformName' in self.driver.desired_capabilities: platform = self.driver.desired_capabilities['platformName'] return platform
[]
[]
[]
[]
[]
python
0
0
monitor/monitor/asgi.py
""" ASGI config for monitor project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'monitor.settings') application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
cattle.go
package main

import (
	"fmt"
	"os"

	"github.com/rancher/go-rancher/client"
)

type CattleClient struct {
	rancherClient *client.RancherClient
}

func NewCattleClientFromEnvironment() (*CattleClient, error) {
	var cattleURL string
	var cattleAccessKey string
	var cattleSecretKey string

	if env := os.Getenv("CATTLE_URL"); len(env) > 0 {
		cattleURL = env
	} else {
		return nil, fmt.Errorf("Environment variable 'CATTLE_URL' is not set")
	}

	if env := os.Getenv("CATTLE_ACCESS_KEY"); len(env) > 0 {
		cattleAccessKey = env
	} else {
		return nil, fmt.Errorf("Environment variable 'CATTLE_ACCESS_KEY' is not set")
	}

	if env := os.Getenv("CATTLE_SECRET_KEY"); len(env) > 0 {
		cattleSecretKey = env
	} else {
		return nil, fmt.Errorf("Environment variable 'CATTLE_SECRET_KEY' is not set")
	}

	apiClient, err := client.NewRancherClient(&client.ClientOpts{
		Url:       cattleURL,
		AccessKey: cattleAccessKey,
		SecretKey: cattleSecretKey,
	})
	if err != nil {
		return nil, err
	}

	return &CattleClient{
		rancherClient: apiClient,
	}, nil
}

func (c *CattleClient) UpdateServiceFqdn(serviceName, stackName, fqdn string) error {
	event := &client.ExternalDnsEvent{
		EventType:   "dns.update",
		ExternalId:  fqdn,
		ServiceName: serviceName,
		StackName:   stackName,
		Fqdn:        fqdn,
	}
	_, err := c.rancherClient.ExternalDnsEvent.Create(event)
	return err
}

func (c *CattleClient) TestConnect() error {
	opts := &client.ListOpts{}
	_, err := c.rancherClient.ExternalDnsEvent.List(opts)
	return err
}
[ "\"CATTLE_URL\"", "\"CATTLE_ACCESS_KEY\"", "\"CATTLE_SECRET_KEY\"" ]
[]
[ "CATTLE_SECRET_KEY", "CATTLE_URL", "CATTLE_ACCESS_KEY" ]
[]
["CATTLE_SECRET_KEY", "CATTLE_URL", "CATTLE_ACCESS_KEY"]
go
3
0
server/server/server.go
package server import ( "database/sql" "fmt" "net" "net/http" "os" "runtime" "sync" "syscall" "time" "github.com/google/uuid" "github.com/gorilla/mux" "github.com/pkg/errors" "github.com/mattermost/focalboard/server/api" "github.com/mattermost/focalboard/server/app" "github.com/mattermost/focalboard/server/auth" appModel "github.com/mattermost/focalboard/server/model" "github.com/mattermost/focalboard/server/services/audit" "github.com/mattermost/focalboard/server/services/config" "github.com/mattermost/focalboard/server/services/metrics" "github.com/mattermost/focalboard/server/services/scheduler" "github.com/mattermost/focalboard/server/services/store" "github.com/mattermost/focalboard/server/services/store/mattermostauthlayer" "github.com/mattermost/focalboard/server/services/store/sqlstore" "github.com/mattermost/focalboard/server/services/telemetry" "github.com/mattermost/focalboard/server/services/webhook" "github.com/mattermost/focalboard/server/web" "github.com/mattermost/focalboard/server/ws" "github.com/oklog/run" "github.com/mattermost/mattermost-server/v6/shared/mlog" "github.com/mattermost/mattermost-server/v6/shared/filestore" "github.com/mattermost/mattermost-server/v6/utils" ) const ( cleanupSessionTaskFrequency = 10 * time.Minute updateMetricsTaskFrequency = 15 * time.Minute minSessionExpiryTime = int64(60 * 60 * 24 * 31) // 31 days MattermostAuthMod = "mattermost" ) type Server struct { config *config.Configuration wsAdapter ws.Adapter webServer *web.Server store store.Store filesBackend filestore.FileBackend telemetry *telemetry.Service logger *mlog.Logger cleanUpSessionsTask *scheduler.ScheduledTask metricsServer *metrics.Service metricsService *metrics.Metrics metricsUpdaterTask *scheduler.ScheduledTask auditService *audit.Audit servicesStartStopMutex sync.Mutex localRouter *mux.Router localModeServer *http.Server api *api.API } func New(cfg *config.Configuration, singleUserToken string, db store.Store, logger *mlog.Logger, serverID string, wsAdapter ws.Adapter) (*Server, error) { authenticator := auth.New(cfg, db) // if no ws adapter is provided, we spin up a websocket server if wsAdapter == nil { wsAdapter = ws.NewServer(authenticator, singleUserToken, cfg.AuthMode == MattermostAuthMod, logger) } filesBackendSettings := filestore.FileBackendSettings{} filesBackendSettings.DriverName = cfg.FilesDriver filesBackendSettings.Directory = cfg.FilesPath filesBackendSettings.AmazonS3AccessKeyId = cfg.FilesS3Config.AccessKeyID filesBackendSettings.AmazonS3SecretAccessKey = cfg.FilesS3Config.SecretAccessKey filesBackendSettings.AmazonS3Bucket = cfg.FilesS3Config.Bucket filesBackendSettings.AmazonS3PathPrefix = cfg.FilesS3Config.PathPrefix filesBackendSettings.AmazonS3Region = cfg.FilesS3Config.Region filesBackendSettings.AmazonS3Endpoint = cfg.FilesS3Config.Endpoint filesBackendSettings.AmazonS3SSL = cfg.FilesS3Config.SSL filesBackendSettings.AmazonS3SignV2 = cfg.FilesS3Config.SignV2 filesBackendSettings.AmazonS3SSE = cfg.FilesS3Config.SSE filesBackendSettings.AmazonS3Trace = cfg.FilesS3Config.Trace filesBackend, appErr := filestore.NewFileBackend(filesBackendSettings) if appErr != nil { logger.Error("Unable to initialize the files storage", mlog.Err(appErr)) return nil, errors.New("unable to initialize the files storage") } webhookClient := webhook.NewClient(cfg, logger) // Init metrics instanceInfo := metrics.InstanceInfo{ Version: appModel.CurrentVersion, BuildNum: appModel.BuildNumber, Edition: appModel.Edition, InstallationID: os.Getenv("MM_CLOUD_INSTALLATION_ID"), } 
metricsService := metrics.NewMetrics(instanceInfo) // Init audit auditService, errAudit := audit.NewAudit() if errAudit != nil { return nil, fmt.Errorf("unable to create the audit service: %w", errAudit) } if err := auditService.Configure(cfg.AuditCfgFile, cfg.AuditCfgJSON); err != nil { return nil, fmt.Errorf("unable to initialize the audit service: %w", err) } appServices := app.Services{ Auth: authenticator, Store: db, FilesBackend: filesBackend, Webhook: webhookClient, Metrics: metricsService, Logger: logger, } app := app.New(cfg, wsAdapter, appServices) focalboardAPI := api.NewAPI(app, singleUserToken, cfg.AuthMode, logger, auditService) // Local router for admin APIs localRouter := mux.NewRouter() focalboardAPI.RegisterAdminRoutes(localRouter) // Init workspace if _, err := app.GetRootWorkspace(); err != nil { logger.Error("Unable to get root workspace", mlog.Err(err)) return nil, err } webServer := web.NewServer(cfg.WebPath, cfg.ServerRoot, cfg.Port, cfg.UseSSL, cfg.LocalOnly, logger) // if the adapter is a routed service, register it before the API if routedService, ok := wsAdapter.(web.RoutedService); ok { webServer.AddRoutes(routedService) } webServer.AddRoutes(focalboardAPI) settings, err := db.GetSystemSettings() if err != nil { return nil, err } // Init telemetry telemetryID := settings["TelemetryID"] if len(telemetryID) == 0 { telemetryID = uuid.New().String() if err = db.SetSystemSetting("TelemetryID", uuid.New().String()); err != nil { return nil, err } } telemetryOpts := telemetryOptions{ app: app, cfg: cfg, telemetryID: telemetryID, serverID: serverID, logger: logger, singleUser: len(singleUserToken) > 0, } telemetryService := initTelemetry(telemetryOpts) server := Server{ config: cfg, wsAdapter: wsAdapter, webServer: webServer, store: db, filesBackend: filesBackend, telemetry: telemetryService, metricsServer: metrics.NewMetricsServer(cfg.PrometheusAddress, metricsService, logger), metricsService: metricsService, auditService: auditService, logger: logger, localRouter: localRouter, api: focalboardAPI, } server.initHandlers() return &server, nil } func NewStore(config *config.Configuration, logger *mlog.Logger) (store.Store, error) { sqlDB, err := sql.Open(config.DBType, config.DBConfigString) if err != nil { logger.Error("connectDatabase failed", mlog.Err(err)) return nil, err } err = sqlDB.Ping() if err != nil { logger.Error(`Database Ping failed`, mlog.Err(err)) return nil, err } var db store.Store db, err = sqlstore.New(config.DBType, config.DBConfigString, config.DBTablePrefix, logger, sqlDB) if err != nil { return nil, err } if config.AuthMode == MattermostAuthMod { layeredStore, err2 := mattermostauthlayer.New(config.DBType, db.(*sqlstore.SQLStore).DBHandle(), db, logger) if err2 != nil { return nil, err2 } db = layeredStore } return db, nil } func (s *Server) Start() error { s.logger.Info("Server.Start") s.webServer.Start() s.servicesStartStopMutex.Lock() defer s.servicesStartStopMutex.Unlock() if s.config.EnableLocalMode { if err := s.startLocalModeServer(); err != nil { return err } } if s.config.AuthMode != MattermostAuthMod { s.cleanUpSessionsTask = scheduler.CreateRecurringTask("cleanUpSessions", func() { secondsAgo := minSessionExpiryTime if secondsAgo < s.config.SessionExpireTime { secondsAgo = s.config.SessionExpireTime } if err := s.store.CleanUpSessions(secondsAgo); err != nil { s.logger.Error("Unable to clean up the sessions", mlog.Err(err)) } }, cleanupSessionTaskFrequency) } metricsUpdater := func() { blockCounts, err := s.store.GetBlockCountsByType() 
if err != nil { s.logger.Error("Error updating metrics", mlog.String("group", "blocks"), mlog.Err(err)) return } s.logger.Log(mlog.LvlFBMetrics, "Block metrics collected", mlog.Map("block_counts", blockCounts)) for blockType, count := range blockCounts { s.metricsService.ObserveBlockCount(blockType, count) } workspaceCount, err := s.store.GetWorkspaceCount() if err != nil { s.logger.Error("Error updating metrics", mlog.String("group", "workspaces"), mlog.Err(err)) return } s.logger.Log(mlog.LvlFBMetrics, "Workspace metrics collected", mlog.Int64("workspace_count", workspaceCount)) s.metricsService.ObserveWorkspaceCount(workspaceCount) } // metricsUpdater() Calling this immediately causes integration unit tests to fail. s.metricsUpdaterTask = scheduler.CreateRecurringTask("updateMetrics", metricsUpdater, updateMetricsTaskFrequency) if s.config.Telemetry { firstRun := utils.MillisFromTime(time.Now()) s.telemetry.RunTelemetryJob(firstRun) } var group run.Group if s.config.PrometheusAddress != "" { group.Add(func() error { if err := s.metricsServer.Run(); err != nil { return errors.Wrap(err, "PromServer Run") } return nil }, func(error) { _ = s.metricsServer.Shutdown() }) if err := group.Run(); err != nil { return err } } return nil } func (s *Server) Shutdown() error { if err := s.webServer.Shutdown(); err != nil { return err } s.stopLocalModeServer() s.servicesStartStopMutex.Lock() defer s.servicesStartStopMutex.Unlock() if s.cleanUpSessionsTask != nil { s.cleanUpSessionsTask.Cancel() } if s.metricsUpdaterTask != nil { s.metricsUpdaterTask.Cancel() } if err := s.telemetry.Shutdown(); err != nil { s.logger.Warn("Error occurred when shutting down telemetry", mlog.Err(err)) } if err := s.auditService.Shutdown(); err != nil { s.logger.Warn("Error occurred when shutting down audit service", mlog.Err(err)) } defer s.logger.Info("Server.Shutdown") return s.store.Shutdown() } func (s *Server) Config() *config.Configuration { return s.config } func (s *Server) Logger() *mlog.Logger { return s.logger } // Local server func (s *Server) startLocalModeServer() error { s.localModeServer = &http.Server{ Handler: s.localRouter, ConnContext: api.SetContextConn, } // TODO: Close and delete socket file on shutdown if err := syscall.Unlink(s.config.LocalModeSocketLocation); err != nil { s.logger.Error("Unable to unlink socket.", mlog.Err(err)) } socket := s.config.LocalModeSocketLocation unixListener, err := net.Listen("unix", socket) if err != nil { return err } if err = os.Chmod(socket, 0600); err != nil { return err } go func() { s.logger.Info("Starting unix socket server") err = s.localModeServer.Serve(unixListener) if err != nil && !errors.Is(err, http.ErrServerClosed) { s.logger.Error("Error starting unix socket server", mlog.Err(err)) } }() return nil } func (s *Server) stopLocalModeServer() { if s.localModeServer != nil { _ = s.localModeServer.Close() s.localModeServer = nil } } func (s *Server) GetRootRouter() *mux.Router { return s.webServer.Router() } type telemetryOptions struct { app *app.App cfg *config.Configuration telemetryID string serverID string logger *mlog.Logger singleUser bool } func initTelemetry(opts telemetryOptions) *telemetry.Service { telemetryService := telemetry.New(opts.telemetryID, opts.logger) telemetryService.RegisterTracker("server", func() (telemetry.Tracker, error) { return map[string]interface{}{ "version": appModel.CurrentVersion, "build_number": appModel.BuildNumber, "build_hash": appModel.BuildHash, "edition": appModel.Edition, "operating_system": runtime.GOOS, 
"server_id": opts.serverID, }, nil }) telemetryService.RegisterTracker("config", func() (telemetry.Tracker, error) { return map[string]interface{}{ "serverRoot": opts.cfg.ServerRoot == config.DefaultServerRoot, "port": opts.cfg.Port == config.DefaultPort, "useSSL": opts.cfg.UseSSL, "dbType": opts.cfg.DBType, "single_user": opts.singleUser, }, nil }) telemetryService.RegisterTracker("activity", func() (telemetry.Tracker, error) { m := make(map[string]interface{}) var count int var err error if count, err = opts.app.GetRegisteredUserCount(); err != nil { return nil, err } m["registered_users"] = count if count, err = opts.app.GetDailyActiveUsers(); err != nil { return nil, err } m["daily_active_users"] = count if count, err = opts.app.GetWeeklyActiveUsers(); err != nil { return nil, err } m["weekly_active_users"] = count if count, err = opts.app.GetMonthlyActiveUsers(); err != nil { return nil, err } m["monthly_active_users"] = count return m, nil }) telemetryService.RegisterTracker("blocks", func() (telemetry.Tracker, error) { blockCounts, err := opts.app.GetBlockCountsByType() if err != nil { return nil, err } m := make(map[string]interface{}) for k, v := range blockCounts { m[k] = v } return m, nil }) telemetryService.RegisterTracker("workspaces", func() (telemetry.Tracker, error) { count, err := opts.app.GetWorkspaceCount() if err != nil { return nil, err } m := map[string]interface{}{ "workspaces": count, } return m, nil }) return telemetryService }
[ "\"MM_CLOUD_INSTALLATION_ID\"" ]
[]
[ "MM_CLOUD_INSTALLATION_ID" ]
[]
["MM_CLOUD_INSTALLATION_ID"]
go
1
0
backend/manage.py
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_incredible_app_29251.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
[]
[]
[]
[]
[]
python
0
0
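Note: the manage.py entry above pins DJANGO_SETTINGS_MODULE with os.environ.setdefault before handing control to Django. A minimal sketch of the same pattern as it would typically appear in the project's WSGI entry point (that file is not part of this record; the module path is copied from the record's settings string):

import os

from django.core.wsgi import get_wsgi_application

# Same setdefault pattern as in manage.py: only set the variable when the
# caller has not already exported DJANGO_SETTINGS_MODULE.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_incredible_app_29251.settings')

application = get_wsgi_application()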
nematus/translate.py
#!/usr/bin/env python # -*- coding: utf-8 -*- ''' Translates a source file using a translation model. ''' import sys import numpy import json import os import logging from multiprocessing import Process, Queue from collections import defaultdict from Queue import Empty from util import load_dict, load_config, seqs2words from compat import fill_options from hypgraph import HypGraphRenderer from settings import TranslationSettings class Translation(object): #TODO move to separate file? """ Models a translated segment. """ def __init__(self, source_words, target_words, sentence_id=None, score=0, alignment=None, target_probs=None, hyp_graph=None, hypothesis_id=None): self.source_words = source_words self.target_words = target_words self.sentence_id = sentence_id self.score = score self.alignment = alignment #TODO: assertion of length? self.target_probs = target_probs #TODO: assertion of length? self.hyp_graph = hyp_graph self.hypothesis_id = hypothesis_id def get_alignment(self): return self.alignment def get_alignment_text(self): """ Returns this translation's alignment rendered as a string. Columns in header: sentence id ||| target words ||| score ||| source words ||| number of source words ||| number of target words """ columns = [ self.sentence_id, " ".join(self.target_words), self.score, " ".join(self.source_words), len(self.source_words) + 1, len(self.target_words) + 1 ] header = "{0} ||| {1} ||| {2} ||| {3} ||| {4} {5}\n".format(*columns) matrix = [] for target_word_alignment in self.alignment: current_weights = [] for weight in target_word_alignment: current_weights.append(str(weight)) matrix.append(" ".join(current_weights)) return header + "\n".join(matrix) def get_alignment_json(self, as_string=True): """ Returns this translation's alignment as a JSON serializable object (@param as_string False) or a JSON formatted string (@param as_string True). """ source_tokens = self.source_words + ["</s>"] target_tokens = self.target_words + ["</s>"] if self.hypothesis_id is not None: tid = self.sentence_id + self.hypothesis_id else: tid = self.sentence_id links = [] for target_index, target_word_alignment in enumerate(self.alignment): for source_index, weight in enumerate(target_word_alignment): links.append( (target_tokens[target_index], source_tokens[source_index], str(weight), self.sentence_id, tid) ) return json.dumps(links, ensure_ascii=False, indent=2) if as_string else links def get_target_probs(self): """ Returns this translation's word probabilities as a string. """ return " ".join("{0}".format(prob) for prob in self.target_probs) def save_hyp_graph(self, filename, word_idict_trg, detailed=True, highlight_best=True): """ Writes this translation's search graph to disk. """ if self.hyp_graph: renderer = HypGraphRenderer(self.hyp_graph) renderer.wordify(word_idict_trg) renderer.save(filename, detailed, highlight_best) else: pass #TODO: Warning if no search graph has been constructed during decoding? class QueueItem(object): """ Models items in a queue. """ def __init__(self, **kwargs): self.__dict__.update(kwargs) class Translator(object): def __init__(self, settings): """ Loads translation models. 
""" self._models = settings.models self._num_processes = settings.num_processes self._device_list = settings.device_list self._verbose = settings.verbose self._retrieved_translations = defaultdict(dict) # load model options self._load_model_options() # load and invert dictionaries self._build_dictionaries() # set up queues self._init_queues() # init worker processes self._init_processes() def _load_model_options(self): """ Loads config options for each model. """ options = [] for model in self._models: m = load_config(model) if not 'concatenate_lm_decoder' in m: m['concatenate_lm_decoder'] = False options.append(m) # backward compatibility fill_options(options[-1]) self._options = options def _build_dictionaries(self): """ Builds and inverts source and target dictionaries, taken from the first model since all of them must have the same vocabulary. """ dictionaries = self._options[0]['dictionaries'] dictionaries_source = dictionaries[:-1] dictionary_target = dictionaries[-1] # load and invert source dictionaries word_dicts = [] word_idicts = [] for dictionary in dictionaries_source: word_dict = load_dict(dictionary) if self._options[0]['n_words_src']: for key, idx in word_dict.items(): if idx >= self._options[0]['n_words_src']: del word_dict[key] word_idict = dict() for kk, vv in word_dict.iteritems(): word_idict[vv] = kk word_idict[0] = '<eos>' word_idict[1] = 'UNK' word_dicts.append(word_dict) word_idicts.append(word_idict) self._word_dicts = word_dicts self._word_idicts = word_idicts # load and invert target dictionary word_dict_trg = load_dict(dictionary_target) word_idict_trg = dict() for kk, vv in word_dict_trg.iteritems(): word_idict_trg[vv] = kk word_idict_trg[0] = '<eos>' word_idict_trg[1] = 'UNK' self._word_idict_trg = word_idict_trg def _init_queues(self): """ Sets up shared queues for inter-process communication. """ self._input_queue = Queue() self._output_queue = Queue() def shutdown(self): """ Executed from parent process to terminate workers, method: "poison pill". """ for process in self._processes: self._input_queue.put(None) def _init_processes(self): """ Starts child (worker) processes. """ processes = [None] * self._num_processes for process_id in xrange(self._num_processes): deviceid = '' if self._device_list is not None and len(self._device_list) != 0: deviceid = self._device_list[process_id % len(self._device_list)].strip() processes[process_id] = Process( target=self._start_worker, args=(process_id, deviceid) ) processes[process_id].start() self._processes = processes ### MODEL LOADING AND TRANSLATION IN CHILD PROCESS ### def _load_theano(self): """ Loads models, sets theano shared variables and builds samplers. This entails irrevocable binding to a specific GPU. 
""" from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams from theano import shared from nmt import (build_sampler, gen_sample) from theano_util import (numpy_floatX, load_params, init_theano_params) trng = RandomStreams(1234) use_noise = shared(numpy_floatX(0.)) fs_init = [] fs_next = [] for model, option in zip(self._models, self._options): param_list = numpy.load(model).files param_list = dict.fromkeys( [key for key in param_list if not key.startswith('adam_')], 0) params = load_params(model, param_list) tparams = init_theano_params(params) # always return alignment at this point f_init, f_next = build_sampler( tparams, option, use_noise, trng, return_alignment=True) fs_init.append(f_init) fs_next.append(f_next) return trng, fs_init, fs_next, gen_sample def _set_device(self, device_id): """ Modifies environment variable to change the THEANO device. """ if device_id != '': try: theano_flags = os.environ['THEANO_FLAGS'].split(',') exist = False for i in xrange(len(theano_flags)): if theano_flags[i].strip().startswith('device'): exist = True theano_flags[i] = '%s=%s' % ('device', device_id) break if exist is False: theano_flags.append('%s=%s' % ('device', device_id)) os.environ['THEANO_FLAGS'] = ','.join(theano_flags) except KeyError: # environment variable does not exist at all os.environ['THEANO_FLAGS'] = 'device=%s' % device_id def _load_models(self, process_id, device_id): """ Modifies environment variable to change the THEANO device, then loads models and returns them. """ logging.debug("Process '%s' - Loading models on device %s\n" % (process_id, device_id)) # modify environment flag 'device' self._set_device(device_id) # build and return models return self._load_theano() def _start_worker(self, process_id, device_id): """ Function executed by each worker once started. Do not execute in the parent process. """ # load theano functionality trng, fs_init, fs_next, gen_sample = self._load_models(process_id, device_id) # listen to queue in while loop, translate items while True: input_item = self._input_queue.get() if input_item is None: break idx = input_item.idx request_id = input_item.request_id output_item = self._translate(process_id, input_item, trng, fs_init, fs_next, gen_sample) self._output_queue.put((request_id, idx, output_item)) return def _translate(self, process_id, input_item, trng, fs_init, fs_next, gen_sample): """ Actual translation (model sampling). """ # unpack input item attributes normalization_alpha = input_item.normalization_alpha nbest = input_item.nbest idx = input_item.idx # logging logging.debug('{0} - {1}\n'.format(process_id, idx)) # sample given an input sequence and obtain scores sample, score, word_probs, alignment, hyp_graph = self._sample(input_item, trng, fs_init, fs_next, gen_sample) # normalize scores according to sequence lengths if normalization_alpha: adjusted_lengths = numpy.array([len(s) ** normalization_alpha for s in sample]) score = score / adjusted_lengths if nbest is True: output_item = sample, score, word_probs, alignment, hyp_graph else: # return translation with lowest score only sidx = numpy.argmin(score) output_item = sample[sidx], score[sidx], word_probs[ sidx], alignment[sidx], hyp_graph return output_item def _sample(self, input_item, trng, fs_init, fs_next, gen_sample): """ Sample from model. 
""" # unpack input item attributes return_hyp_graph = input_item.return_hyp_graph return_alignment = input_item.return_alignment suppress_unk = input_item.suppress_unk k = input_item.k seq = input_item.seq max_ratio = input_item.max_ratio maxlen = 200 #TODO: should be configurable if max_ratio: maxlen = int(max_ratio * len(seq)) return gen_sample(fs_init, fs_next, numpy.array(seq).T.reshape( [len(seq[0]), len(seq), 1]), self._options[0], trng=trng, k=k, maxlen=maxlen, stochastic=False, argmax=False, return_alignment=return_alignment, suppress_unk=suppress_unk, return_hyp_graph=return_hyp_graph) ### WRITING TO AND READING FROM QUEUES ### def _send_jobs(self, input_, translation_settings): """ """ source_sentences = [] for idx, line in enumerate(input_): if translation_settings.char_level: words = list(line.decode('utf-8').strip()) else: words = line.strip().split() x = [] for w in words: w = [self._word_dicts[i][f] if f in self._word_dicts[i] else 1 for (i,f) in enumerate(w.split('|'))] if len(w) != self._options[0]['factors']: logging.warning('Expected {0} factors, but input word has {1}\n'.format(self._options[0]['factors'], len(w))) for midx in xrange(self._num_processes): self._processes[midx].terminate() sys.exit(1) x.append(w) x += [[0]*self._options[0]['factors']] input_item = QueueItem(verbose=self._verbose, return_hyp_graph=translation_settings.get_search_graph, return_alignment=translation_settings.get_alignment, k=translation_settings.beam_width, suppress_unk=translation_settings.suppress_unk, normalization_alpha=translation_settings.normalization_alpha, nbest=translation_settings.n_best, max_ratio=translation_settings.max_ratio, seq=x, idx=idx, request_id=translation_settings.request_id) self._input_queue.put(input_item) source_sentences.append(words) return idx+1, source_sentences def _retrieve_jobs(self, num_samples, request_id, timeout=5): """ """ while len(self._retrieved_translations[request_id]) < num_samples: resp = None while resp is None: try: resp = self._output_queue.get(True, timeout) # if queue is empty after 5s, check if processes are still alive except Empty: for midx in xrange(self._num_processes): if not self._processes[midx].is_alive() and self._processes[midx].exitcode != 0: # kill all other processes and raise exception if one dies self._input_queue.cancel_join_thread() self._output_queue.cancel_join_thread() for idx in xrange(self._num_processes): self._processes[idx].terminate() logging.error("Translate worker process {0} crashed with exitcode {1}".format(self._processes[midx].pid, self._processes[midx].exitcode)) sys.exit(1) request_id, idx, output_item = resp self._retrieved_translations[request_id][idx] = output_item #print self._retrieved_translations for idx in xrange(num_samples): yield self._retrieved_translations[request_id][idx] # then remove all entries with this request ID from the dictionary del self._retrieved_translations[request_id] ### EXPOSED TRANSLATION FUNCTIONS ### def translate(self, source_segments, translation_settings): """ Returns the translation of @param source_segments. 
""" logging.info('Translating {0} segments...\n'.format(len(source_segments))) n_samples, source_sentences = self._send_jobs(source_segments, translation_settings) translations = [] for i, trans in enumerate(self._retrieve_jobs(n_samples, translation_settings.request_id)): samples, scores, word_probs, alignment, hyp_graph = trans # n-best list if translation_settings.n_best is True: order = numpy.argsort(scores) n_best_list = [] for j in order: current_alignment = None if not translation_settings.get_alignment else alignment[j] translation = Translation(sentence_id=i, source_words=source_sentences[i], target_words=seqs2words(samples[j], self._word_idict_trg, join=False), score=scores[j], alignment=current_alignment, target_probs=word_probs[j], hyp_graph=hyp_graph, hypothesis_id=j) n_best_list.append(translation) translations.append(n_best_list) # single-best translation else: current_alignment = None if not translation_settings.get_alignment else alignment translation = Translation(sentence_id=i, source_words=source_sentences[i], target_words=seqs2words(samples, self._word_idict_trg, join=False), score=scores, alignment=current_alignment, target_probs=word_probs, hyp_graph=hyp_graph) translations.append(translation) return translations def translate_file(self, input_object, translation_settings): """ """ source_segments = input_object.readlines() return self.translate(source_segments, translation_settings) def translate_string(self, segment, translation_settings): """ Translates a single segment """ if not segment.endswith('\n'): segment += '\n' source_segments = [segment] return self.translate(source_segments, translation_settings) def translate_list(self, segments, translation_settings): """ Translates a list of segments """ source_segments = [s + '\n' if not s.endswith('\n') else s for s in segments] return self.translate(source_segments, translation_settings) ### FUNCTIONS FOR WRITING THE RESULTS ### def write_alignment(self, translation, translation_settings): """ Writes alignments to a file. """ output_file = translation_settings.output_alignment if translation_settings.json_alignment: output_file.write(translation.get_alignment_json() + "\n") else: output_file.write(translation.get_alignment_text() + "\n\n") def write_translation(self, output_file, translation, translation_settings): """ Writes a single translation to a file or STDOUT. """ output_items = [] # sentence ID only for nbest if translation_settings.n_best is True: output_items.append(str(translation.sentence_id)) # translations themselves output_items.append(" ".join(translation.target_words)) # write scores for nbest? if translation_settings.n_best is True: output_items.append(str(translation.score)) # write probabilities? if translation_settings.get_word_probs: output_items.append(translation.get_target_probs()) if translation_settings.n_best is True: output_file.write(" ||| ".join(output_items) + "\n") else: output_file.write("\n".join(output_items) + "\n") # write alignments to file? if translation_settings.get_alignment: self.write_alignment(translation, translation_settings) # construct hypgraph? if translation_settings.get_search_graph: translation.save_hyp_graph( translation_settings.search_graph_filename, self._word_idict_trg, detailed=True, highlight_best=True ) def write_translations(self, output_file, translations, translation_settings): """ Writes translations to a file or STDOUT. 
""" if translation_settings.n_best is True: for nbest_list in translations: for translation in nbest_list: self.write_translation(output_file, translation, translation_settings) else: for translation in translations: self.write_translation(output_file, translation, translation_settings) def main(input_file, output_file, translation_settings): """ Translates a source language file (or STDIN) into a target language file (or STDOUT). """ translator = Translator(translation_settings) translations = translator.translate_file(input_file, translation_settings) translator.write_translations(output_file, translations, translation_settings) logging.info('Done') translator.shutdown() if __name__ == "__main__": # parse console arguments translation_settings = TranslationSettings(from_console_arguments=True) input_file = translation_settings.input output_file = translation_settings.output # start logging level = logging.DEBUG if translation_settings.verbose else logging.WARNING logging.basicConfig(level=level, format='%(levelname)s: %(message)s') main(input_file, output_file, translation_settings)
[]
[]
[ "THEANO_FLAGS" ]
[]
["THEANO_FLAGS"]
python
1
0
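Note: Translator._set_device above rewrites THEANO_FLAGS so each worker process binds to its own device before Theano is imported. A minimal Python 3 re-expression of that flag-rewriting logic (the record's code targets Python 2; the function name here is illustrative):

import os

def set_theano_device(device_id):
    # Point THEANO_FLAGS at a specific device while preserving any other
    # flags, mirroring _set_device in the record above.
    if not device_id:
        return
    flags = [f for f in os.environ.get('THEANO_FLAGS', '').split(',') if f.strip()]
    for i, flag in enumerate(flags):
        if flag.strip().startswith('device'):
            flags[i] = 'device=%s' % device_id
            break
    else:
        flags.append('device=%s' % device_id)
    os.environ['THEANO_FLAGS'] = ','.join(flags)

# e.g. call set_theano_device('cuda0') in a worker before importing theano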
manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py
# Copyright 2020-2021 The Kubeflow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from http.server import BaseHTTPRequestHandler, HTTPServer import json import os import base64 def main(): settings = get_settings_from_env() server = server_factory(**settings) server.serve_forever() def get_settings_from_env(controller_port=None, visualization_server_image=None, frontend_image=None, visualization_server_tag=None, frontend_tag=None, disable_istio_sidecar=None, minio_access_key=None, minio_secret_key=None, kfp_default_pipeline_root=None): """ Returns a dict of settings from environment variables relevant to the controller Environment settings can be overridden by passing them here as arguments. Settings are pulled from the all-caps version of the setting name. The following defaults are used if those environment variables are not set to enable backwards compatibility with previous versions of this script: visualization_server_image: gcr.io/ml-pipeline/visualization-server visualization_server_tag: value of KFP_VERSION environment variable frontend_image: gcr.io/ml-pipeline/frontend frontend_tag: value of KFP_VERSION environment variable disable_istio_sidecar: Required (no default) minio_access_key: Required (no default) minio_secret_key: Required (no default) """ settings = dict() settings["controller_port"] = \ controller_port or \ os.environ.get("CONTROLLER_PORT", "8080") settings["visualization_server_image"] = \ visualization_server_image or \ os.environ.get("VISUALIZATION_SERVER_IMAGE", "gcr.io/ml-pipeline/visualization-server") settings["frontend_image"] = \ frontend_image or \ os.environ.get("FRONTEND_IMAGE", "gcr.io/ml-pipeline/frontend") # Look for specific tags for each image first, falling back to # previously used KFP_VERSION environment variable for backwards # compatibility settings["visualization_server_tag"] = \ visualization_server_tag or \ os.environ.get("VISUALIZATION_SERVER_TAG") or \ os.environ["KFP_VERSION"] settings["frontend_tag"] = \ frontend_tag or \ os.environ.get("FRONTEND_TAG") or \ os.environ["KFP_VERSION"] settings["disable_istio_sidecar"] = \ disable_istio_sidecar if disable_istio_sidecar is not None \ else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true" settings["minio_access_key"] = \ minio_access_key or \ base64.b64encode(bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8') settings["minio_secret_key"] = \ minio_secret_key or \ base64.b64encode(bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8') # KFP_DEFAULT_PIPELINE_ROOT is optional settings["kfp_default_pipeline_root"] = \ kfp_default_pipeline_root or \ os.environ.get("KFP_DEFAULT_PIPELINE_ROOT") return settings def server_factory(visualization_server_image, visualization_server_tag, frontend_image, frontend_tag, disable_istio_sidecar, minio_access_key, minio_secret_key, kfp_default_pipeline_root=None, url="", controller_port=8080): """ Returns an HTTPServer populated with Handler with customized settings """ class Controller(BaseHTTPRequestHandler): def sync(self, parent, 
children): # parent is a namespace namespace = parent.get("metadata", {}).get("name") pipeline_enabled = parent.get("metadata", {}).get( "labels", {}).get("pipelines.kubeflow.org/enabled") if pipeline_enabled != "true": return {"status": {}, "children": []} desired_configmap_count = 1 desired_resources = [] if kfp_default_pipeline_root: desired_configmap_count = 2 desired_resources += [{ "apiVersion": "v1", "kind": "ConfigMap", "metadata": { "name": "kfp-launcher", "namespace": namespace, }, "data": { "defaultPipelineRoot": kfp_default_pipeline_root, }, }] # Compute status based on observed state. desired_status = { "kubeflow-pipelines-ready": len(children["Secret.v1"]) == 1 and len(children["ConfigMap.v1"]) == desired_configmap_count and len(children["Deployment.apps/v1"]) == 2 and len(children["Service.v1"]) == 2 and len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 and "True" or "False" } # Generate the desired child object(s). desired_resources += [ { "apiVersion": "v1", "kind": "ConfigMap", "metadata": { "name": "metadata-grpc-configmap", "namespace": namespace, }, "data": { "METADATA_GRPC_SERVICE_HOST": "metadata-grpc-service.kubeflow", "METADATA_GRPC_SERVICE_PORT": "8080", }, }, # Visualization server related manifests below { "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { "labels": { "app": "ml-pipeline-visualizationserver" }, "name": "ml-pipeline-visualizationserver", "namespace": namespace, }, "spec": { "selector": { "matchLabels": { "app": "ml-pipeline-visualizationserver" }, }, "template": { "metadata": { "labels": { "app": "ml-pipeline-visualizationserver" }, "annotations": disable_istio_sidecar and { "sidecar.istio.io/inject": "false" } or {}, }, "spec": { "containers": [{ "image": f"{visualization_server_image}:{visualization_server_tag}", "imagePullPolicy": "IfNotPresent", "name": "ml-pipeline-visualizationserver", "ports": [{ "containerPort": 8888 }], "resources": { "requests": { "cpu": "50m", "memory": "200Mi" }, "limits": { "cpu": "500m", "memory": "1Gi" }, } }], "serviceAccountName": "default-editor", }, }, }, }, { "apiVersion": "networking.istio.io/v1alpha3", "kind": "DestinationRule", "metadata": { "name": "ml-pipeline-visualizationserver", "namespace": namespace, }, "spec": { "host": "ml-pipeline-visualizationserver", "trafficPolicy": { "tls": { "mode": "ISTIO_MUTUAL" } } } }, { "apiVersion": "security.istio.io/v1beta1", "kind": "AuthorizationPolicy", "metadata": { "name": "ml-pipeline-visualizationserver", "namespace": namespace, }, "spec": { "selector": { "matchLabels": { "app": "ml-pipeline-visualizationserver" } }, "rules": [{ "from": [{ "source": { "principals": ["cluster.local/ns/kubeflow/sa/ml-pipeline"] } }] }] } }, { "apiVersion": "v1", "kind": "Service", "metadata": { "name": "ml-pipeline-visualizationserver", "namespace": namespace, }, "spec": { "ports": [{ "name": "http", "port": 8888, "protocol": "TCP", "targetPort": 8888, }], "selector": { "app": "ml-pipeline-visualizationserver", }, }, }, # Artifact fetcher related resources below. 
{ "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { "labels": { "app": "ml-pipeline-ui-artifact" }, "name": "ml-pipeline-ui-artifact", "namespace": namespace, }, "spec": { "selector": { "matchLabels": { "app": "ml-pipeline-ui-artifact" } }, "template": { "metadata": { "labels": { "app": "ml-pipeline-ui-artifact" }, "annotations": disable_istio_sidecar and { "sidecar.istio.io/inject": "false" } or {}, }, "spec": { "containers": [{ "name": "ml-pipeline-ui-artifact", "image": f"{frontend_image}:{frontend_tag}", "imagePullPolicy": "IfNotPresent", "ports": [{ "containerPort": 3000 }], "env": [ { "name": "MINIO_ACCESS_KEY", "valueFrom": { "secretKeyRef": { "key": "accesskey", "name": "mlpipeline-minio-artifact" } } }, { "name": "MINIO_SECRET_KEY", "valueFrom": { "secretKeyRef": { "key": "secretkey", "name": "mlpipeline-minio-artifact" } } } ], "resources": { "requests": { "cpu": "10m", "memory": "70Mi" }, "limits": { "cpu": "100m", "memory": "500Mi" }, } }], "serviceAccountName": "default-editor" } } } }, { "apiVersion": "v1", "kind": "Service", "metadata": { "name": "ml-pipeline-ui-artifact", "namespace": namespace, "labels": { "app": "ml-pipeline-ui-artifact" } }, "spec": { "ports": [{ "name": "http", # name is required to let istio understand request protocol "port": 80, "protocol": "TCP", "targetPort": 3000 }], "selector": { "app": "ml-pipeline-ui-artifact" } } }, ] print('Received request:\n', json.dumps(parent, sort_keys=True)) print('Desired resources except secrets:\n', json.dumps(desired_resources, sort_keys=True)) # Moved after the print argument because this is sensitive data. desired_resources.append({ "apiVersion": "v1", "kind": "Secret", "metadata": { "name": "mlpipeline-minio-artifact", "namespace": namespace, }, "data": { "accesskey": minio_access_key, "secretkey": minio_secret_key, }, }) return {"status": desired_status, "children": desired_resources} def do_POST(self): # Serve the sync() function as a JSON webhook. observed = json.loads( self.rfile.read(int(self.headers.get("content-length")))) desired = self.sync(observed["parent"], observed["children"]) self.send_response(200) self.send_header("Content-type", "application/json") self.end_headers() self.wfile.write(bytes(json.dumps(desired), 'utf-8')) return HTTPServer((url, int(controller_port)), Controller) if __name__ == "__main__": main()
[]
[]
[ "CONTROLLER_PORT", "KFP_VERSION", "VISUALIZATION_SERVER_IMAGE", "MINIO_SECRET_KEY", "FRONTEND_IMAGE", "KFP_DEFAULT_PIPELINE_ROOT", "FRONTEND_TAG", "DISABLE_ISTIO_SIDECAR", "VISUALIZATION_SERVER_TAG", "MINIO_ACCESS_KEY" ]
[]
["CONTROLLER_PORT", "KFP_VERSION", "VISUALIZATION_SERVER_IMAGE", "MINIO_SECRET_KEY", "FRONTEND_IMAGE", "KFP_DEFAULT_PIPELINE_ROOT", "FRONTEND_TAG", "DISABLE_ISTIO_SIDECAR", "VISUALIZATION_SERVER_TAG", "MINIO_ACCESS_KEY"]
python
10
0
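Note: get_settings_from_env above resolves each image tag by preferring an explicit argument, then a tag-specific environment variable, then the legacy KFP_VERSION variable. A minimal sketch of that fallback chain in isolation (variable names taken from the record):

import os

def resolve_tag(explicit_tag=None):
    # Prefer the argument, then VISUALIZATION_SERVER_TAG, then fall back to
    # the required legacy KFP_VERSION (raises KeyError if neither is set).
    return (
        explicit_tag
        or os.environ.get('VISUALIZATION_SERVER_TAG')
        or os.environ['KFP_VERSION']
    )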
src/magplan/views/posts.py
import datetime import io import os from typing import List, Optional from zipfile import ZIP_DEFLATED, ZipFile import html2text from django.conf import settings from django.contrib import messages from django.contrib.auth.decorators import login_required from django.core.mail import EmailMultiAlternatives from django.db import transaction from django.http import HttpRequest, HttpResponseForbidden from django.shortcuts import HttpResponse, redirect, render from django.shortcuts import get_object_or_404 from django.template import Context, Template from django.template.loader import render_to_string from django.urls import reverse from magplan.conf import settings as config from magplan.forms import ( CommentModelForm, PostBaseModelForm, PostExtendedModelForm, PostMetaForm, ) from magplan.models import ( Attachment, Comment, Idea, Post, Stage, User, ) from magplan.tasks.send_post_comment_notification import send_post_comment_notification from magplan.tasks.upload_post_to_wp import upload_post_to_wp from slugify import slugify IMAGE_MIME_TYPE_JPEG= 'image/jpeg' IMAGE_MIME_TYPES = { 'image/gif', IMAGE_MIME_TYPE_JPEG, 'image/png', } def _get_arbitrary_chunk(post: Post) -> str: """Render instance specific template code Used to render some arbitrary HTML code in a context of Post instance. Useful to provide sensitive HTML template, which can't be committed into Git repository directly or may vary for each particular instance. :param post: Post instance to use in template :return: Rendered template string """ instance_template = Template(config.PLAN_POSTS_INSTANCE_CHUNK) instance_chunk = instance_template.render(Context({"post": post})) return instance_chunk def _create_system_comment( action_type, user, post, changelog=None, attachments=None, stage=None ) -> Comment: """Create auto-generated system comment with post changes logs :param action_type: :param user: :param post: :param changelog: :return: """ if not config.SYSTEM_USER_ID: return None if not attachments: attachments = () if not stage: stage = post.stage system_user = User.objects.get(id=config.SYSTEM_USER_ID) comment = Comment() comment.commentable = post comment.type = Comment.TYPE_SYSTEM comment.user = system_user # Depending on action type, fill different fields meta = { "action": action_type, "user": {"id": user.id, "str": user.__str__()}, "files": [], } if action_type == Comment.SYSTEM_ACTION_CHANGE_META: meta["changelog"] = changelog elif action_type == Comment.SYSTEM_ACTION_UPDATE: if len(attachments) > 0: meta["files"] = [ {"id": a.id, "str": a.original_filename} for a in attachments ] elif action_type == Comment.SYSTEM_ACTION_SET_STAGE: meta["stage"] = {"id": post.stage.id, "str": post.stage.__str__()} # Assign builded meta to comment and save comment.meta["comment"] = meta comment.save() return comment def _generate_changelog_for_form(form: PostMetaForm) -> List[str]: """Iterate over all changed attributes and stage changes in logs :param form: Django form :return: list, where each element is changelog line """ changelog = [] changed_fields = form.changed_data.copy() if "wp_id" in changed_fields: changed_fields.remove("wp_id") __ = lambda form, field: ( ", ".join([str(i) for i in form.initial.get(field)]), ", ".join([str(i) for i in form.cleaned_data.get(field)]), ) _ = lambda form, field: (form.initial.get(field), form.cleaned_data.get(field)) for changed_field in changed_fields: log = None if changed_field == "issues": log = '* выпуски сменились с "{0}" на "{1}"'.format( *__(form, changed_field) ) elif changed_field == 
"editor": # Initial ForeignKey value is stored as int. Populate it args = _(form, changed_field) init_editor = str(User.objects.get(id=args[0])) new_args = (init_editor, args[1]) log = '* редактор cменился с "{0}" на "{1}"'.format(*new_args) elif changed_field == "finished_at": log = '* дедлайн этапа cменился с "{0}" на "{1}"'.format( *_(form, changed_field) ) elif changed_field == "published_at": log = '* дата публикации сменилась с "{0}" на "{1}"'.format( *_(form, changed_field) ) if log: changelog.append(log) return changelog def _authorize_stage_change(user: User, post: Post, new_stage_id: int) -> bool: """Check if user is authorized to set stage for post :param user: User instance :param post: Post instance :param new_stage_id: Stage to to set for post :return: True if authorized, otherwise False """ legit_stages = (post.stage.prev_stage_id, post.stage.next_stage_id) if new_stage_id in legit_stages and post.assignee == user: return True if user.has_perm("magplan.edit_extended_post_attrs"): return True return False def _save_attachments( files: List, post: Post, user: User, featured_image_file: Optional = None ) -> List[Attachment]: attachments = [] with transaction.atomic(): for file in files: # Delete files with the same filename, # uploaded for current post. Emulates overwrite without # custom FileSystemStorage Attachment.objects.filter( post=post, original_filename=file.name ).delete() attachment = Attachment( post=post, user=user, original_filename=file.name ) # save original filename # Slugify original filename and save with safe one filename, extension = os.path.splitext(file.name) file.name = "%s%s" % (slugify(filename), extension) # Assign file object with slugified filename as name, # original is copied by value attachment.file = file if file.content_type in IMAGE_MIME_TYPES: attachment.type = Attachment.TYPE_IMAGE elif file.content_type == "application/pdf": attachment.type = Attachment.TYPE_PDF else: attachment.type = Attachment.TYPE_FILE attachment.save() attachments.append(attachment) # This can be spoofed on client_side if featured_image_file and featured_image_file.content_type == IMAGE_MIME_TYPE_JPEG: # Delete any previously uploaded featured images Attachment.objects.filter( post=post, type=Attachment.TYPE_FEATURED_IMAGE ).delete() attachment = Attachment( post=post, user=user, original_filename=featured_image_file.name, type=Attachment.TYPE_FEATURED_IMAGE, file=featured_image_file, ) # save original filename attachment.save() return attachments @login_required def show(request, post_id): post = Post.objects.prefetch_related( "editor", "authors", "stage", "section", "issues", "comments__user" ).get(id=post_id) post_meta_form = PostMetaForm( initial={ 'wp_id': post.meta.get('wpid') }, instance=post ) api_issues_search_url = reverse('api_issues_search') return render( request, "magplan/posts/show.html", { "post": post, "stages": Stage.on_current_site.order_by("sort").all(), "instance_chunk": _get_arbitrary_chunk(post), "comment_form": CommentModelForm(), "meta_form": post_meta_form, "TYPE_CHOICES": Comment.TYPE_CHOICES, "SYSTEM_ACTION_CHOICES": Comment.SYSTEM_ACTION_CHOICES, 'api_issues_search_url': api_issues_search_url, }, ) @login_required def create(request): if request.method == "POST": form: Post = PostBaseModelForm(request.POST) # Set post site scope if form.is_valid(): post = form.save(commit=False) post.editor = request.user.user post.stage = Stage.objects.get(slug="waiting") post.save() form.save_m2m() idea = Idea.objects.get(id=request.POST.get("idea_id", 
None)) idea.post = post idea.save() return redirect("posts_show", post.id) else: return HttpResponse(status=405) @login_required def edit(request, post_id): post = Post.objects.prefetch_related( "editor", "authors", "stage", "section", "issues" ).get(id=post_id) if request.method == "POST": form = PostExtendedModelForm(request.POST, request.FILES, instance=post) attachments_files = request.FILES.getlist("attachments") featured_image_files = request.FILES.getlist('featured_image') attachments = _save_attachments( attachments_files, post, request.user.user, featured_image_file=( featured_image_files[0] if featured_image_files else None ) ) if form.is_valid(): post.imprint_updater(request.user.user) form.save() _create_system_comment( Comment.SYSTEM_ACTION_UPDATE, request.user.user, post, attachments=attachments, ) messages.add_message( request, messages.SUCCESS, "Пост «%s» успешно отредактирован" % post ) return redirect("posts_edit", post_id) else: messages.add_message( request, messages.ERROR, "При обновлении поста произошла ошибка ввода" ) else: form = PostExtendedModelForm(instance=post) api_authors_search_url = reverse('api_authors_search') return render(request, "magplan/posts/edit.html", { "post": post, "form": form, 'api_authors_search_url': api_authors_search_url }) @login_required def edit_meta(request, post_id): try: post = Post.objects.get(id=post_id) except Post.DoesNotExist: return HttpResponse(status=401) if request.method == "POST": form = PostMetaForm(request.POST, instance=post) if not form.is_valid(): return HttpResponse(status=403) # Manually set new Wordpress ID as it's ignored by form form.instance.meta["wpid"] = form.cleaned_data.get("wp_id") published_at = form.cleaned_data.get("published_at") if published_at: dt = published_at.replace(hour=10, minute=0, second=0) form.instance.published_at = dt form.save() # Create system comment with changelog changelog = _generate_changelog_for_form(form) if len(changelog) > 0: _create_system_comment( Comment.SYSTEM_ACTION_CHANGE_META, request.user.user, post, changelog ) messages.add_message(request, messages.INFO, f"Пост {post} успешно обновлен!") return redirect("posts_show", post_id) @login_required def set_stage(request, post_id, system=Comment.TYPE_SYSTEM): post = Post.objects.prefetch_related("stage__n_stage", "stage__p_stage").get( id=post_id ) if request.method == "POST" and request.POST.get("new_stage_id"): if not _authorize_stage_change( request.user.user, post, int(request.POST.get("new_stage_id")) ): return HttpResponseForbidden() stage = Stage.objects.get(id=request.POST.get("new_stage_id", None)) # set deadline to current stage durtion. 
If no duration, append 1 day duration = stage.duration if stage.duration else 1 post.finished_at = post.finished_at + +datetime.timedelta(days=duration) post.stage = stage post.imprint_updater(request.user.user) post.save() messages.add_message( request, messages.INFO, "Текущий этап статьи «%s» обновлен" % post ) # Create system comment _create_system_comment( Comment.SYSTEM_ACTION_SET_STAGE, request.user.user, post, stage=post.stage ) # TODO: extract method # Send email if stage allows it if post.assignee != request.user.user and stage.skip_notification is False: subject = f"На вас назначена статья «{post}»" html_content = render_to_string( "email/assigned_to_you.html", {"post": post, "APP_URL": os.environ.get("APP_URL", None)}, ) text_content = html2text.html2text(html_content) msg = EmailMultiAlternatives( subject, text_content, config.PLAN_EMAIL_FROM, [post.assignee.email] ) msg.attach_alternative(html_content, "text/html") msg.send() return redirect("posts_show", post_id) return redirect("posts_show", post.id) @login_required def comments(request, post_id): post = Post.objects.prefetch_related( "editor", "authors", "stage", "section", "issues", "comments__user" ).get(id=post_id) if request.method == "POST": form = CommentModelForm(request.POST) comment = form.save(commit=False) comment.commentable = post comment.user = request.user.user if form.is_valid(): form.save() # Send notification to users with 'recieve_post_email_updates' permission send_post_comment_notification.delay(comment.id) return redirect("posts_show", post_id) return render( request, "magplan/posts/show.html", { "post": post, "stages": Stage.objects.order_by("sort").all(), "form": CommentModelForm(), }, ) @login_required def attachment_delete(request, post_id): if request.method == "POST": try: attachemnt = Attachment.objects.get( id=request.POST.get("attachment_id", None), post_id=post_id ) attachemnt.delete() except: # TODO: fix too broad exception pass return HttpResponse(status=204) @login_required def download_content(request: HttpRequest, post_id: int) -> HttpResponse: """Get all files of requested type for post_id and stream to client as ZIP archive :param request: Django request object :param post_id: post_id :return: Django HttpResponse with file """ if request.method == "GET": s = io.BytesIO() zipfile = ZipFile(s, "w", ZIP_DEFLATED) attachments = Attachment.objects.filter( post_id=post_id, type=Attachment.TYPE_IMAGE ).all() for attachment in attachments: fs_path = "%s/%s" % (settings.MEDIA_ROOT, attachment.file.name) filename = attachment.original_filename try: zipfile.write(fs_path, arcname=filename) except Exception as e: # TODO: handle not found files pass zipfile.close() resp = HttpResponse(s.getvalue(), content_type="application/x-zip-compressed") resp["Content-Disposition"] = f"attachment; filename=content_{post_id}.zip" return resp def send_to_wp(request: HttpRequest, post_id: int) -> HttpResponse: if request.method == "GET": post = get_object_or_404(Post, id=post_id) upload_post_to_wp.delay(post.id) messages.add_message( request, messages.INFO, "Пост «%s» отправлен в Wordpress" % post ) return redirect("posts_show", post.id)
[]
[]
[ "APP_URL" ]
[]
["APP_URL"]
python
1
0
qa/pull-tester/rpc-tests.py
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Run Regression Test Suite This module calls down into individual test cases via subprocess. It will forward all unrecognized arguments onto the individual test scripts, other than: - `-extended`: run the "extended" test suite in addition to the basic one. - `-win`: signal that this is running in a Windows environment, and we should run the tests. - `--coverage`: this generates a basic coverage report for the RPC interface. For a description of arguments recognized by test scripts, see `qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`. """ import os import time import shutil import sys import subprocess import tempfile import re sys.path.append("qa/pull-tester/") from tests_config import * BOLD = ("","") if os.name == 'posix': # primitive formatting on supported # terminal via ANSI escape sequences: BOLD = ('\033[0m', '\033[1m') RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/' #If imported values are not defined then set to zero (or disabled) if 'ENABLE_WALLET' not in vars(): ENABLE_WALLET=0 if 'ENABLE_BITCOIND' not in vars(): ENABLE_BITCOIND=0 if 'ENABLE_UTILS' not in vars(): ENABLE_UTILS=0 if 'ENABLE_ZMQ' not in vars(): ENABLE_ZMQ=0 ENABLE_COVERAGE=0 #Create a set to store arguments and create the passon string opts = set() passon_args = [] PASSON_REGEX = re.compile("^--") PARALLEL_REGEX = re.compile('^-parallel=') print_help = False run_parallel = 4 for arg in sys.argv[1:]: if arg == "--help" or arg == "-h" or arg == "-?": print_help = True break if arg == '--coverage': ENABLE_COVERAGE = 1 elif PASSON_REGEX.match(arg): passon_args.append(arg) elif PARALLEL_REGEX.match(arg): run_parallel = int(arg.split(sep='=', maxsplit=1)[1]) else: opts.add(arg) #Set env vars if "BITCOIND" not in os.environ: os.environ["OCEAND"] = BUILDDIR+ '/src/oceand' + EXEEXT if EXEEXT == ".exe" and "-win" not in opts: # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9 # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964 print("Win tests currently disabled by default. Use -win option to enable") sys.exit(0) if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1): print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled") sys.exit(0) # python3-zmq may not be installed. Handle this gracefully and with some helpful info if ENABLE_ZMQ: try: import zmq except ImportError: print("ERROR: \"import zmq\" failed. 
Set ENABLE_ZMQ=0 or " "to run zmq tests, see dependency info in /qa/README.md.") # ENABLE_ZMQ=0 raise testScripts = [ # longest test should go first, to favor running tests in parallel 'wallet-hd.py', #'walletbackup.py', # vv Tests less than 5m vv 'p2p-fullblocktest.py', 'fundrawtransaction.py', # TODO fix mininode #'p2p-compactblocks.py', 'segwit.py', # vv Tests less than 2m vv 'wallet.py', 'whitelisting.py', 'policytransactions.py', 'onboard.py', 'onboard_cit.py', 'onboardmanual.py', 'onboardmanual_cit.py', 'recoverencryptionkeys.py', 'fixedfee.py', 'hardfork.py', 'hardfork_disable.py', 'hardfork_policy.py', 'contractfork.py', # Accounts not supported #'wallet-accounts.py', # TODO fix mininode #'p2p-segwit.py', 'listtransactions.py', # vv Tests less than 60s vv 'sendheaders.py', 'zapwallettxes.py', 'importmulti.py', # Mempool stuff different due to CT sizes #'mempool_limit.py', 'merkle_blocks.py', 'receivedby.py', 'abandonconflict.py', # Previous ISM/BIP9 always enforced #'bip68-112-113-p2p.py', 'rawtransactions.py', 'reindex.py', # vv Tests less than 30s vv 'mempool_resurrect_test.py', 'txn_doublespend.py --mineblock', 'txn_clone.py', 'getchaintips.py', 'rest.py', 'mempool_spendcoinbase.py', 'mempool_reorg.py', 'httpbasics.py', 'multi_rpc.py', 'proxy_test.py', 'signrawtransactions.py', 'nodehandling.py', 'decodescript.py', 'blockchain.py', #'disablewallet.py', 'keypool.py', 'p2p-mempool.py', 'prioritise_transaction.py', 'invalidblockrequest.py', 'invalidtxrequest.py', 'rpc_getblockstats.py', 'confidential_transactions.py', 'unconfidential_transactions.py', 'asset_stats.py', 'raw_issuance.py', 'mempool_accept.py', 'preciousblock.py', #'p2p-segwit.py', #'importprunedfunds.py', 'signmessages.py', 'contractintx.py', #'nulldummy.py', # TODO reactivate this #'import-rescan.py', # TODO fix priority assumptions in test #'bumpfee.py', 'replace-by-fee.py', 'rpcnamedargs.py', 'listsinceblock.py', 'p2p-leaktests.py', 'redemption.py', 'requests.py', 'request-bids.py', 'request-auction.py', 'validBurn.py', 'sendany.py', 'splittransactions.py' ] if ENABLE_ZMQ: testScripts.append('zmq_test.py') testScriptsExt = [ 'pruning.py', # vv Tests less than 20m vv # 'smartfees.py', # vv Tests less than 5m vv 'maxuploadtarget.py', 'mempool_packages.py', # vv Tests less than 2m vv #'bip68-sequence.py', 'getblocktemplate_longpoll.py', 'p2p-timeouts.py', # vv Tests less than 60s vv #'bip9-softforks.py', 'p2p-feefilter.py', # 'rpcbind_test.py', # vv Tests less than 30s vv #'bip65-cltv.py', #'bip65-cltv-p2p.py', #'bipdersig-p2p.py', #'bipdersig.py', 'getblocktemplate_proposals.py', 'txn_doublespend.py', 'txn_clone.py --mineblock', 'forknotify.py', 'invalidateblock.py', 'maxblocksinflight.py', 'p2p-acceptblock.py', ] def runtests(): test_list = [] if '-extended' in opts: test_list = testScripts + testScriptsExt elif len(opts) == 0 or (len(opts) == 1 and "-win" in opts): test_list = testScripts else: for t in testScripts + testScriptsExt: if t in opts or re.sub(".py$", "", t) in opts: test_list.append(t) if print_help: # Only print help of the first script and exit subprocess.check_call((RPC_TESTS_DIR + test_list[0]).split() + ['-h']) sys.exit(0) coverage = None if ENABLE_COVERAGE: coverage = RPCCoverage() print("Initializing coverage directory at %s\n" % coverage.dir) flags = ["--srcdir=%s/src" % BUILDDIR] + passon_args flags.append("--cachedir=%s/qa/cache" % BUILDDIR) if coverage: flags.append(coverage.flag) if len(test_list) > 1 and run_parallel > 1: # Populate cache subprocess.check_output([RPC_TESTS_DIR + 
'create_cache.py'] + flags) #Run Tests max_len_name = len(max(test_list, key=len)) time_sum = 0 time0 = time.time() job_queue = RPCTestHandler(run_parallel, test_list, flags) results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0] all_passed = True for _ in range(len(test_list)): (name, stdout, stderr, passed, duration) = job_queue.get_next() all_passed = all_passed and passed time_sum += duration print('\n' + BOLD[1] + name + BOLD[0] + ":") print('' if passed else stdout + '\n', end='') print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='') results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration) print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration)) results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0] print(results) print("\nRuntime: %s s" % (int(time.time() - time0))) if coverage: coverage.report_rpc_coverage() print("Cleaning up coverage data") coverage.cleanup() sys.exit(not all_passed) class RPCTestHandler: """ Trigger the testscrips passed in via the list. """ def __init__(self, num_tests_parallel, test_list=None, flags=None): assert(num_tests_parallel >= 1) self.num_jobs = num_tests_parallel self.test_list = test_list self.flags = flags self.num_running = 0 # In case there is a graveyard of zombie bitcoinds, we can apply a # pseudorandom offset to hopefully jump over them. # (625 is PORT_RANGE/MAX_NODES) self.portseed_offset = int(time.time() * 1000) % 625 self.jobs = [] def get_next(self): while self.num_running < self.num_jobs and self.test_list: # Add tests self.num_running += 1 t = self.test_list.pop(0) port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)] log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16) log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16) self.jobs.append((t, time.time(), subprocess.Popen((RPC_TESTS_DIR + t).split() + self.flags + port_seed, universal_newlines=True, stdout=log_stdout, stderr=log_stderr), log_stdout, log_stderr)) if not self.jobs: raise IndexError('pop from empty list') while True: # Return first proc that finishes time.sleep(.5) for j in self.jobs: (name, time0, proc, log_out, log_err) = j if proc.poll() is not None: log_out.seek(0), log_err.seek(0) [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)] log_out.close(), log_err.close() passed = stderr == "" and proc.returncode == 0 self.num_running -= 1 self.jobs.remove(j) return name, stdout, stderr, passed, int(time.time() - time0) print('.', end='', flush=True) class RPCCoverage(object): """ Coverage reporting utilities for pull-tester. Coverage calculation works by having each test script subprocess write coverage files into a particular directory. These files contain the RPC commands invoked during testing, as well as a complete listing of RPC commands per `bitcoin-cli help` (`rpc_interface.txt`). After all tests complete, the commands run are combined and diff'd against the complete list to calculate uncovered RPC commands. See also: qa/rpc-tests/test_framework/coverage.py """ def __init__(self): self.dir = tempfile.mkdtemp(prefix="coverage") self.flag = '--coveragedir=%s' % self.dir def report_rpc_coverage(self): """ Print out RPC commands that were unexercised by tests. 
""" uncovered = self._get_uncovered_rpc_commands() if uncovered: print("Uncovered RPC commands:") print("".join((" - %s\n" % i) for i in sorted(uncovered))) else: print("All RPC commands covered.") def cleanup(self): return shutil.rmtree(self.dir) def _get_uncovered_rpc_commands(self): """ Return a set of currently untested RPC commands. """ # This is shared from `qa/rpc-tests/test-framework/coverage.py` REFERENCE_FILENAME = 'rpc_interface.txt' COVERAGE_FILE_PREFIX = 'coverage.' coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME) coverage_filenames = set() all_cmds = set() covered_cmds = set() if not os.path.isfile(coverage_ref_filename): raise RuntimeError("No coverage reference found") with open(coverage_ref_filename, 'r') as f: all_cmds.update([i.strip() for i in f.readlines()]) for root, dirs, files in os.walk(self.dir): for filename in files: if filename.startswith(COVERAGE_FILE_PREFIX): coverage_filenames.add(os.path.join(root, filename)) for filename in coverage_filenames: with open(filename, 'r') as f: covered_cmds.update([i.strip() for i in f.readlines()]) return all_cmds - covered_cmds if __name__ == '__main__': runtests()
[]
[]
[ "OCEAND" ]
[]
["OCEAND"]
python
1
0
tests/test_examples.py
import os from subprocess import PIPE, Popen import numpy as np import pytest import vtk import vtki from vtki import examples from vtki.plotting import running_xserver TEST_DOWNLOADS = False try: if os.environ['TEST_DOWNLOADS'] == 'True': TEST_DOWNLOADS = True except KeyError: pass @pytest.mark.skipif(not running_xserver(), reason="Requires X11") def test_docexample_advancedplottingwithnumpy(): import vtki import numpy as np # Make a grid x, y, z = np.meshgrid(np.linspace(-5, 5, 20), np.linspace(-5, 5, 20), np.linspace(-5, 5, 5)) points = np.empty((x.size, 3)) points[:, 0] = x.ravel('F') points[:, 1] = y.ravel('F') points[:, 2] = z.ravel('F') # Compute a direction for the vector field direction = np.sin(points)**3 # plot using the plotting class plotter = vtki.Plotter(off_screen=True) plotter.add_arrows(points, direction, 0.5) plotter.set_background([0, 0, 0]) # RGB set to black plotter.plot(auto_close=False) np.any(plotter.screenshot()) plotter.close() @pytest.mark.skipif(not running_xserver(), reason="Requires X11") def test_creatingagifmovie(tmpdir, off_screen=True): if tmpdir: filename = str(tmpdir.mkdir("tmpdir").join('wave.gif')) else: filename = '/tmp/wave.gif' x = np.arange(-10, 10, 0.25) y = np.arange(-10, 10, 0.25) x, y = np.meshgrid(x, y) r = np.sqrt(x**2 + y**2) z = np.sin(r) # Create and structured surface grid = vtki.StructuredGrid(x, y, z) # Make copy of points pts = grid.points.copy() # Start a plotter object and set the scalars to the Z height plotter = vtki.Plotter(off_screen=off_screen) plotter.add_mesh(grid, scalars=z.ravel()) plotter.plot(auto_close=False) # Open a gif plotter.open_gif(filename) # Update Z and write a frame for each updated position nframe = 5 for phase in np.linspace(0, 2*np.pi, nframe + 1)[:nframe]: z = np.sin(r + phase) pts[:, -1] = z.ravel() plotter.update_coordinates(pts) plotter.update_scalars(z.ravel()) plotter.write_frame() # Close movie and delete object plotter.close() @pytest.mark.skipif(not running_xserver(), reason="Requires X11") def test_plot_wave(): points = examples.plot_wave(wavetime=0.1, off_screen=True) assert isinstance(points, np.ndarray) @pytest.mark.skipif(not running_xserver(), reason="Requires X11") def test_beam_example(): examples.beam_example(off_screen=True) @pytest.mark.skipif(not running_xserver(), reason="Requires X11") def test_plot_ants_plane(): examples.plot_ants_plane(off_screen=True) def test_load_ant(): """ Load ply ant mesh """ mesh = examples.load_ant() assert mesh.n_points def test_load_airplane(): """ Load ply airplane mesh """ mesh = examples.load_airplane() assert mesh.n_points def test_load_sphere(): """ Loads sphere ply mesh """ mesh = examples.load_sphere() assert mesh.n_points def test_load_channels(): """ Loads geostat training image """ mesh = examples.load_channels() assert mesh.n_points if TEST_DOWNLOADS: def test_download_masonry_texture(): data = examples.download_masonry_texture() assert isinstance(data, vtk.vtkTexture) def test_download_usa_texture(): data = examples.download_usa_texture() assert isinstance(data, vtk.vtkTexture) def test_download_usa(): data = examples.download_usa() assert np.any(data.points) def test_download_st_helens(): data = examples.download_st_helens() assert data.n_points def test_download_bunny(): data = examples.download_bunny() assert data.n_points def test_download_cow(): data = examples.download_cow() assert data.n_points def test_download_faults(): data = examples.download_faults() assert data.n_points def test_download_tensors(): data = 
examples.download_tensors() assert data.n_points def test_download_head(): data = examples.download_head() assert data.n_points def test_download_bolt_nut(): data = examples.download_bolt_nut() assert isinstance(data, vtki.MultiBlock) def test_download_clown(): data = examples.download_clown() assert data.n_points def test_download_exodus(): data = examples.download_exodus() assert data.n_blocks # End of download tests
[]
[]
[ "TEST_DOWNLOADS" ]
[]
["TEST_DOWNLOADS"]
python
1
0
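
The TEST_DOWNLOADS gate in tests/test_examples.py above wraps the download tests in a plain "if TEST_DOWNLOADS:" block driven by os.environ. As a usage note, the same gating can be expressed per-test with a pytest skip marker; the sketch below is illustrative only — vtki.examples and download_bunny() come from the record above, while the marker-based arrangement is an assumption, not how that suite is actually organised.

import os
import pytest

# Downloads run only when the TEST_DOWNLOADS environment variable is the string 'True',
# mirroring the convention used in the record above.
TEST_DOWNLOADS = os.environ.get("TEST_DOWNLOADS") == "True"

@pytest.mark.skipif(not TEST_DOWNLOADS, reason="set TEST_DOWNLOADS=True to enable download tests")
def test_download_bunny_gated():
    from vtki import examples
    data = examples.download_bunny()
    assert data.n_points
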
source/sam_spot_bot_function/app.py
import boto3 import json import os from sam_spot_bot_create_job.bot_dao import BotDao # Global variables are reused across execution contexts (if available) session = boto3.Session() def lambda_handler(event, context): """ Sample json in API request body - { "name": name, "file_types": file_types, "bot_image": bot_image, "bot_image_cmd": bot_image_cmd, "endpoint_name": endpoint_name, "endpoint_ecr_image_path": endpoint_ecr_image_path, "instance_type": instance_type, "model_s3_path": model_s3_path, "create_date": create_date, "update_date": update_date } """ print("Received event: " + json.dumps(event, indent=2)) print("All ENV " + str(os.environ)) method = event["httpMethod"] request_body = json.loads(event["body"]) botDao = BotDao() if method == "POST": botDao.create_one_bot(**request_body) return { "statusCode": 201, "body": "Created" } elif method == "PUT": botDao.update_bot_by_name(**request_body) return { "statusCode": 205, "body": "Reset Content" } elif method == "DELETE": botDao.delete_bot_by_name(request_body["name"]) return { "statusCode": 202, "body": "Accepted" } elif method == "GET": bot = botDao.get_bot_def(request_body["name"]) return { "statusCode": 200, "body": json.dumps(bot) } return { "statusCode": 405, "body": "Method not allowed." }
[]
[]
[]
[]
[]
python
0
0
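
lambda_handler in source/sam_spot_bot_function/app.py above routes on event["httpMethod"] and event["body"]; note that the routing only works with string equality (==), not identity (is). A minimal stand-alone sketch of that dispatch pattern with a hand-built API Gateway-style event follows — dispatch() and the demo event are hypothetical, and BotDao / AWS calls are deliberately left out.

import json

def dispatch(event):
    # Route on the HTTP method carried in an API Gateway-style event.
    method = event["httpMethod"]          # a string, so compare with ==, never 'is'
    body = json.loads(event["body"])
    if method == "POST":
        return {"statusCode": 201, "body": "Created"}
    if method == "GET":
        return {"statusCode": 200, "body": json.dumps({"name": body["name"]})}
    return {"statusCode": 405, "body": "Method not allowed."}

# Hypothetical event whose body follows the docstring shape in the record above.
event = {"httpMethod": "GET", "body": json.dumps({"name": "demo-bot"})}
print(dispatch(event))  # -> {'statusCode': 200, 'body': '{"name": "demo-bot"}'}
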
distributed/tests/test_utils.py
import asyncio import contextvars import functools import io import os import queue import socket import traceback import warnings from array import array from collections import deque from time import sleep import pytest from tornado.ioloop import IOLoop import dask from distributed.compatibility import MACOS, WINDOWS from distributed.metrics import time from distributed.utils import ( All, Log, Logs, LoopRunner, TimeoutError, _maybe_complex, ensure_ip, ensure_memoryview, format_dashboard_link, get_ip_interface, get_traceback, is_kernel, is_valid_xml, iscoroutinefunction, log_errors, nbytes, offload, open_port, parse_ports, read_block, recursive_to_dict, seek_delimiter, set_thread_state, sync, thread_state, truncate_exception, warn_on_duration, ) from distributed.utils_test import ( _UnhashableCallable, captured_logger, div, gen_test, has_ipv6, inc, throws, ) @gen_test() async def test_All(): async def throws(): 1 / 0 async def slow(): await asyncio.sleep(10) async def inc(x): return x + 1 results = await All([inc(i) for i in range(10)]) assert results == list(range(1, 11)) start = time() for tasks in [[throws(), slow()], [slow(), throws()]]: try: await All(tasks) assert False except ZeroDivisionError: pass end = time() assert end - start < 10 def test_sync_error(loop_in_thread): loop = loop_in_thread try: result = sync(loop, throws, 1) except Exception as exc: f = exc assert "hello" in str(exc) tb = get_traceback() L = traceback.format_tb(tb) assert any("throws" in line for line in L) def function1(x): return function2(x) def function2(x): return throws(x) try: result = sync(loop, function1, 1) except Exception as exc: assert "hello" in str(exc) tb = get_traceback() L = traceback.format_tb(tb) assert any("function1" in line for line in L) assert any("function2" in line for line in L) def test_sync_timeout(loop_in_thread): loop = loop_in_thread with pytest.raises(TimeoutError): sync(loop_in_thread, asyncio.sleep, 0.5, callback_timeout=0.05) with pytest.raises(TimeoutError): sync(loop_in_thread, asyncio.sleep, 0.5, callback_timeout="50ms") def test_sync_closed_loop(): async def get_loop(): return IOLoop.current() loop = asyncio.run(get_loop()) loop.close() with pytest.raises(RuntimeError) as exc_info: sync(loop, inc, 1) exc_info.match("IOLoop is clos(ed|ing)") def test_is_kernel(): pytest.importorskip("IPython") assert is_kernel() is False # @pytest.mark.leaking('fds') # def test_zzz_leaks(l=[]): # import os, subprocess # l.append(b"x" * (17 * 1024**2)) # os.open(__file__, os.O_RDONLY) # subprocess.Popen('sleep 100', shell=True, stdin=subprocess.DEVNULL) def test_ensure_ip(): assert ensure_ip("localhost") in ("127.0.0.1", "::1") assert ensure_ip("123.123.123.123") == "123.123.123.123" assert ensure_ip("8.8.8.8") == "8.8.8.8" if has_ipv6(): assert ensure_ip("2001:4860:4860::8888") == "2001:4860:4860::8888" assert ensure_ip("::1") == "::1" @pytest.mark.skipif(WINDOWS, reason="TODO") def test_get_ip_interface(): iface = "lo0" if MACOS else "lo" assert get_ip_interface(iface) == "127.0.0.1" with pytest.raises(ValueError, match=f"'__notexist'.+network interface.+'{iface}'"): get_ip_interface("__notexist") def test_truncate_exception(): e = ValueError("a" * 1000) assert len(str(e)) >= 1000 f = truncate_exception(e, 100) assert type(f) == type(e) assert len(str(f)) < 200 assert "aaaa" in str(f) e = ValueError("a") assert truncate_exception(e) is e def test_get_traceback(): def a(x): return div(x, 0) def b(x): return a(x) def c(x): return b(x) try: c(1) except Exception as e: tb = get_traceback() 
assert type(tb).__name__ == "traceback" def test_maybe_complex(): assert not _maybe_complex(1) assert not _maybe_complex("x") assert _maybe_complex((inc, 1)) assert _maybe_complex([(inc, 1)]) assert _maybe_complex([(inc, 1)]) assert _maybe_complex({"x": (inc, 1)}) def test_read_block(): delimiter = b"\n" data = delimiter.join([b"123", b"456", b"789"]) f = io.BytesIO(data) assert read_block(f, 1, 2) == b"23" assert read_block(f, 0, 1, delimiter=b"\n") == b"123\n" assert read_block(f, 0, 2, delimiter=b"\n") == b"123\n" assert read_block(f, 0, 3, delimiter=b"\n") == b"123\n" assert read_block(f, 0, 5, delimiter=b"\n") == b"123\n456\n" assert read_block(f, 0, 8, delimiter=b"\n") == b"123\n456\n789" assert read_block(f, 0, 100, delimiter=b"\n") == b"123\n456\n789" assert read_block(f, 1, 1, delimiter=b"\n") == b"" assert read_block(f, 1, 5, delimiter=b"\n") == b"456\n" assert read_block(f, 1, 8, delimiter=b"\n") == b"456\n789" for ols in [[(0, 3), (3, 3), (6, 3), (9, 2)], [(0, 4), (4, 4), (8, 4)]]: out = [read_block(f, o, l, b"\n") for o, l in ols] assert b"".join(filter(None, out)) == data def test_seek_delimiter_endline(): f = io.BytesIO(b"123\n456\n789") # if at zero, stay at zero seek_delimiter(f, b"\n", 5) assert f.tell() == 0 # choose the first block for bs in [1, 5, 100]: f.seek(1) seek_delimiter(f, b"\n", blocksize=bs) assert f.tell() == 4 # handle long delimiters well, even with short blocksizes f = io.BytesIO(b"123abc456abc789") for bs in [1, 2, 3, 4, 5, 6, 10]: f.seek(1) seek_delimiter(f, b"abc", blocksize=bs) assert f.tell() == 6 # End at the end f = io.BytesIO(b"123\n456") f.seek(5) seek_delimiter(f, b"\n", 5) assert f.tell() == 7 @pytest.mark.parametrize( "data", [ b"", bytearray(), b"1", bytearray(b"1"), memoryview(b"1"), memoryview(bytearray(b"1")), array("B", b"1"), array("I", range(5)), memoryview(b"123456")[1:-1], memoryview(b"123456")[::2], memoryview(array("I", range(5)))[1:-1], memoryview(array("I", range(5)))[::2], memoryview(b"123456").cast("B", (2, 3)), memoryview(b"0123456789").cast("B", (5, 2))[1:-1], memoryview(b"0123456789").cast("B", (5, 2))[::2], ], ) def test_ensure_memoryview(data): data_mv = memoryview(data) result = ensure_memoryview(data) assert isinstance(result, memoryview) assert result.contiguous assert result.ndim == 1 assert result.format == "B" assert result == bytes(data_mv) if data_mv.nbytes and data_mv.contiguous: assert result.readonly == data_mv.readonly if isinstance(data, memoryview): if data.ndim == 1 and data.format == "B": assert id(result) == id(data) else: assert id(data) != id(result) else: assert id(result.obj) != id(data_mv.obj) assert not result.readonly @pytest.mark.parametrize( "dt, nitems, shape, strides", [ ("i8", 12, (12,), (8,)), ("i8", 12, (3, 4), (32, 8)), ("i8", 12, (4, 3), (8, 32)), ("i8", 12, (3, 2), (32, 16)), ("i8", 12, (2, 3), (16, 32)), ], ) def test_ensure_memoryview_ndarray(dt, nitems, shape, strides): np = pytest.importorskip("numpy") data = np.ndarray( shape, dtype=dt, buffer=np.arange(nitems, dtype=dt), strides=strides ) result = ensure_memoryview(data) assert isinstance(result, memoryview) assert result.ndim == 1 assert result.format == "B" assert result.contiguous def test_ensure_memoryview_pyarrow_buffer(): pa = pytest.importorskip("pyarrow") buf = pa.py_buffer(b"123") result = ensure_memoryview(buf) assert isinstance(result, memoryview) def test_nbytes(): np = pytest.importorskip("numpy") def check(obj, expected): assert nbytes(obj) == expected assert nbytes(memoryview(obj)) == expected check(b"123", 3) 
check(bytearray(b"4567"), 4) multi_dim = np.ones(shape=(10, 10)) scalar = np.array(1) check(multi_dim, multi_dim.nbytes) check(scalar, scalar.nbytes) def test_open_port(): port = open_port() s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(("", port)) s.close() def test_set_thread_state(): with set_thread_state(x=1): assert thread_state.x == 1 assert not hasattr(thread_state, "x") def assert_running(loop): """ Raise if the given IOLoop is not running. """ q = queue.Queue() loop.add_callback(q.put, 42) assert q.get(timeout=1) == 42 def assert_not_running(loop): """ Raise if the given IOLoop is running. """ q = queue.Queue() try: loop.add_callback(q.put, 42) except RuntimeError: # On AsyncIOLoop, can't add_callback() after the loop is closed pass else: with pytest.raises(queue.Empty): q.get(timeout=0.02) def test_loop_runner(loop_in_thread): # Implicit loop loop = IOLoop() loop.make_current() runner = LoopRunner() assert runner.loop not in (loop, loop_in_thread) assert not runner.is_started() assert_not_running(runner.loop) runner.start() assert runner.is_started() assert_running(runner.loop) runner.stop() assert not runner.is_started() assert_not_running(runner.loop) # Explicit loop loop = IOLoop() runner = LoopRunner(loop=loop) assert runner.loop is loop assert not runner.is_started() assert_not_running(loop) runner.start() assert runner.is_started() assert_running(loop) runner.stop() assert not runner.is_started() assert_not_running(loop) # Explicit loop, already started runner = LoopRunner(loop=loop_in_thread) assert not runner.is_started() assert_running(loop_in_thread) runner.start() assert runner.is_started() assert_running(loop_in_thread) runner.stop() assert not runner.is_started() assert_running(loop_in_thread) # Implicit loop, asynchronous=True loop = IOLoop() loop.make_current() runner = LoopRunner(asynchronous=True) assert runner.loop is loop assert not runner.is_started() assert_not_running(runner.loop) runner.start() assert runner.is_started() assert_not_running(runner.loop) runner.stop() assert not runner.is_started() assert_not_running(runner.loop) # Explicit loop, asynchronous=True loop = IOLoop() runner = LoopRunner(loop=loop, asynchronous=True) assert runner.loop is loop assert not runner.is_started() assert_not_running(runner.loop) runner.start() assert runner.is_started() assert_not_running(runner.loop) runner.stop() assert not runner.is_started() assert_not_running(runner.loop) def test_two_loop_runners(loop_in_thread): # Loop runners tied to the same loop should cooperate # ABCCBA loop = IOLoop() a = LoopRunner(loop=loop) b = LoopRunner(loop=loop) assert_not_running(loop) a.start() assert_running(loop) c = LoopRunner(loop=loop) b.start() assert_running(loop) c.start() assert_running(loop) c.stop() assert_running(loop) b.stop() assert_running(loop) a.stop() assert_not_running(loop) # ABCABC loop = IOLoop() a = LoopRunner(loop=loop) b = LoopRunner(loop=loop) assert_not_running(loop) a.start() assert_running(loop) b.start() assert_running(loop) c = LoopRunner(loop=loop) c.start() assert_running(loop) a.stop() assert_running(loop) b.stop() assert_running(loop) c.stop() assert_not_running(loop) # Explicit loop, already started a = LoopRunner(loop=loop_in_thread) b = LoopRunner(loop=loop_in_thread) assert_running(loop_in_thread) a.start() assert_running(loop_in_thread) b.start() assert_running(loop_in_thread) a.stop() assert_running(loop_in_thread) b.stop() assert_running(loop_in_thread) @gen_test() async def test_loop_runner_gen(): runner = 
LoopRunner(asynchronous=True) assert runner.loop is IOLoop.current() assert not runner.is_started() await asyncio.sleep(0.01) runner.start() assert runner.is_started() await asyncio.sleep(0.01) runner.stop() assert not runner.is_started() await asyncio.sleep(0.01) @gen_test() async def test_all_quiet_exceptions(): class CustomError(Exception): pass async def throws(msg): raise CustomError(msg) with captured_logger("") as sio: with pytest.raises(CustomError): await All([throws("foo") for _ in range(5)]) with pytest.raises(CustomError): await All([throws("bar") for _ in range(5)], quiet_exceptions=CustomError) assert "bar" not in sio.getvalue() assert "foo" in sio.getvalue() def test_warn_on_duration(): with warnings.catch_warnings(record=True) as record: with warn_on_duration("10s", "foo"): pass assert not record with pytest.warns(UserWarning, match=r"foo") as record: with warn_on_duration("1ms", "foo"): sleep(0.100) assert record assert any("foo" in str(rec.message) for rec in record) def test_logs(): log = Log("Hello") assert isinstance(log, str) d = Logs({"123": log, "456": Log("World!")}) assert isinstance(d, dict) text = d._repr_html_() assert is_valid_xml("<div>" + text + "</div>") assert "Hello" in text assert "456" in text def test_is_valid_xml(): assert is_valid_xml("<a>foo</a>") with pytest.raises(Exception): assert is_valid_xml("<a>foo") def test_format_dashboard_link(): with dask.config.set({"distributed.dashboard.link": "foo"}): assert format_dashboard_link("host", 1234) == "foo" assert "host" in format_dashboard_link("host", 1234) assert "1234" in format_dashboard_link("host", 1234) try: os.environ["host"] = "hello" assert "hello" not in format_dashboard_link("host", 1234) finally: del os.environ["host"] def test_parse_ports(): assert parse_ports(None) == [None] assert parse_ports(23) == [23] assert parse_ports("45") == [45] assert parse_ports("100:103") == [100, 101, 102, 103] assert parse_ports([100, 101, 102, 103]) == [100, 101, 102, 103] out = parse_ports((100, 101, 102, 103)) assert out == [100, 101, 102, 103] assert isinstance(out, list) with pytest.raises(ValueError, match="port_stop must be greater than port_start"): parse_ports("103:100") with pytest.raises(TypeError): parse_ports(100.5) with pytest.raises(TypeError): parse_ports([100, 100.5]) with pytest.raises(ValueError): parse_ports("foo") with pytest.raises(ValueError): parse_ports("100.5") @gen_test() async def test_offload(): assert (await offload(inc, 1)) == 2 assert (await offload(lambda x, y: x + y, 1, y=2)) == 3 @gen_test() async def test_offload_preserves_contextvars(): var = contextvars.ContextVar("var") async def set_var(v: str): var.set(v) r = await offload(var.get) assert r == v await asyncio.gather(set_var("foo"), set_var("bar")) def test_serialize_for_cli_deprecated(): with pytest.warns(FutureWarning, match="serialize_for_cli is deprecated"): from distributed.utils import serialize_for_cli assert serialize_for_cli is dask.config.serialize def test_deserialize_for_cli_deprecated(): with pytest.warns(FutureWarning, match="deserialize_for_cli is deprecated"): from distributed.utils import deserialize_for_cli assert deserialize_for_cli is dask.config.deserialize def test_parse_bytes_deprecated(): with pytest.warns(FutureWarning, match="parse_bytes is deprecated"): from distributed.utils import parse_bytes assert parse_bytes is dask.utils.parse_bytes def test_format_bytes_deprecated(): with pytest.warns(FutureWarning, match="format_bytes is deprecated"): from distributed.utils import format_bytes assert 
format_bytes is dask.utils.format_bytes def test_format_time_deprecated(): with pytest.warns(FutureWarning, match="format_time is deprecated"): from distributed.utils import format_time assert format_time is dask.utils.format_time def test_funcname_deprecated(): with pytest.warns(FutureWarning, match="funcname is deprecated"): from distributed.utils import funcname assert funcname is dask.utils.funcname def test_parse_timedelta_deprecated(): with pytest.warns(FutureWarning, match="parse_timedelta is deprecated"): from distributed.utils import parse_timedelta assert parse_timedelta is dask.utils.parse_timedelta def test_typename_deprecated(): with pytest.warns(FutureWarning, match="typename is deprecated"): from distributed.utils import typename assert typename is dask.utils.typename def test_tmpfile_deprecated(): with pytest.warns(FutureWarning, match="tmpfile is deprecated"): from distributed.utils import tmpfile assert tmpfile is dask.utils.tmpfile def test_iscoroutinefunction_unhashable_input(): # Ensure iscoroutinefunction can handle unhashable callables assert not iscoroutinefunction(_UnhashableCallable()) def test_iscoroutinefunction_nested_partial(): async def my_async_callable(x, y, z): pass assert iscoroutinefunction( functools.partial(functools.partial(my_async_callable, 1), 2) ) def test_recursive_to_dict(): class C: def __init__(self, x): self.x = x def __repr__(self): return "<C>" def _to_dict(self, *, exclude): assert exclude == ["foo"] return ["C:", recursive_to_dict(self.x, exclude=exclude)] class D: def __repr__(self): return "<D>" class E: def __init__(self): self.x = 1 # Public attribute; dump self._y = 2 # Private attribute; don't dump self.foo = 3 # In exclude; don't dump @property def z(self): # Public property; dump return 4 def f(self): # Callable; don't dump return 5 def _to_dict(self, *, exclude): # Output: {"x": 1, "z": 4} return recursive_to_dict(self, exclude=exclude, members=True) inp = [ 1, 1.1, True, False, None, "foo", b"bar", C, C(1), D(), (1, 2), [3, 4], {5, 6}, frozenset([7, 8]), deque([9, 10]), {3: 4, 1: 2}.keys(), {3: 4, 1: 2}.values(), E(), ] expect = [ 1, 1.1, True, False, None, "foo", "b'bar'", "<class 'test_utils.test_recursive_to_dict.<locals>.C'>", ["C:", 1], "<D>", [1, 2], [3, 4], list({5, 6}), list(frozenset([7, 8])), [9, 10], [3, 1], [4, 2], {"x": 1, "z": 4}, ] assert recursive_to_dict(inp, exclude=["foo"]) == expect # Test recursion a = [] c = C(a) a += [c, c] # The blocklist of already-seen objects is reentrant: a is converted to string when # found inside itself; c must *not* be converted to string the second time it's # found, because it's outside of itself. assert recursive_to_dict(a, exclude=["foo"]) == [ ["C:", "[<C>, <C>]"], ["C:", "[<C>, <C>]"], ] def test_recursive_to_dict_no_nest(): class Person: def __init__(self, name): self.name = name self.children = [] self.pets = [] ... def _to_dict_no_nest(self, exclude=()): return recursive_to_dict(self.__dict__, exclude=exclude) def __repr__(self): return self.name class Pet: def __init__(self, name): self.name = name self.owners = [] ... 
def _to_dict_no_nest(self, exclude=()): return recursive_to_dict(self.__dict__, exclude=exclude) def __repr__(self): return self.name alice = Person("Alice") bob = Person("Bob") charlie = Pet("Charlie") alice.children.append(bob) alice.pets.append(charlie) bob.pets.append(charlie) charlie.owners[:] = [alice, bob] info = {"people": [alice, bob], "pets": [charlie]} expect = { "people": [ {"name": "Alice", "children": ["Bob"], "pets": ["Charlie"]}, {"name": "Bob", "children": [], "pets": ["Charlie"]}, ], "pets": [ {"name": "Charlie", "owners": ["Alice", "Bob"]}, ], } assert recursive_to_dict(info) == expect @gen_test() async def test_log_errors(): class CustomError(Exception): pass # Use the logger of the caller module with captured_logger("test_utils") as caplog: # Context manager with log_errors(): pass with log_errors(): with log_errors(): pass with log_errors(pdb=True): pass with pytest.raises(CustomError): with log_errors(): raise CustomError("err1") with pytest.raises(CustomError): with log_errors(): with log_errors(): raise CustomError("err2") # Bare decorator @log_errors def _(): return 123 assert _() == 123 @log_errors def _(): raise CustomError("err3") with pytest.raises(CustomError): _() @log_errors def inner(): raise CustomError("err4") @log_errors def outer(): inner() with pytest.raises(CustomError): outer() # Decorator with parameters @log_errors() def _(): return 456 assert _() == 456 @log_errors() def _(): with log_errors(): raise CustomError("err5") with pytest.raises(CustomError): _() @log_errors(pdb=True) def _(): return 789 assert _() == 789 # Decorate async function @log_errors async def _(): return 123 assert await _() == 123 @log_errors async def _(): raise CustomError("err6") with pytest.raises(CustomError): await _() assert [row for row in caplog.getvalue().splitlines() if row.startswith("err")] == [ "err1", "err2", "err2", "err3", "err4", "err4", "err5", "err5", "err6", ] # Test unroll_stack with captured_logger("distributed.utils") as caplog: with pytest.raises(CustomError): with log_errors(unroll_stack=0): raise CustomError("err7") assert caplog.getvalue().startswith("err7\n")
[]
[]
[ "host" ]
[]
["host"]
python
1
0
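
test_format_dashboard_link in distributed/tests/test_utils.py above sets os.environ["host"] and restores it in a try/finally block. The small context-manager sketch below shows that set-and-restore pattern in reusable form; temp_env is a hypothetical helper added here for illustration, not something the distributed test suite provides.

import os
from contextlib import contextmanager

@contextmanager
def temp_env(name, value):
    # Set an environment variable for the duration of a with-block, then restore it.
    old = os.environ.get(name)
    os.environ[name] = value
    try:
        yield
    finally:
        if old is None:
            del os.environ[name]
        else:
            os.environ[name] = old

with temp_env("host", "hello"):
    assert os.environ["host"] == "hello"
print("restored:", os.environ.get("host"))
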
src/cmd/go/internal/modload/init.go
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package modload import ( "bytes" "encoding/json" "errors" "fmt" "go/build" "internal/lazyregexp" "io/ioutil" "os" "path" "path/filepath" "runtime/debug" "strconv" "strings" "cmd/go/internal/base" "cmd/go/internal/cfg" "cmd/go/internal/load" "cmd/go/internal/lockedfile" "cmd/go/internal/modconv" "cmd/go/internal/modfetch" "cmd/go/internal/mvs" "cmd/go/internal/search" "golang.org/x/mod/modfile" "golang.org/x/mod/module" "golang.org/x/mod/semver" ) var ( mustUseModules = false initialized bool modRoot string Target module.Version // targetPrefix is the path prefix for packages in Target, without a trailing // slash. For most modules, targetPrefix is just Target.Path, but the // standard-library module "std" has an empty prefix. targetPrefix string // targetInGorootSrc caches whether modRoot is within GOROOT/src. // The "std" module is special within GOROOT/src, but not otherwise. targetInGorootSrc bool gopath string CmdModInit bool // running 'go mod init' CmdModModule string // module argument for 'go mod init' allowMissingModuleImports bool ) // ModFile returns the parsed go.mod file. // // Note that after calling ImportPaths or LoadBuildList, // the require statements in the modfile.File are no longer // the source of truth and will be ignored: edits made directly // will be lost at the next call to WriteGoMod. // To make permanent changes to the require statements // in go.mod, edit it before calling ImportPaths or LoadBuildList. func ModFile() *modfile.File { Init() if modFile == nil { die() } return modFile } func BinDir() string { Init() return filepath.Join(gopath, "bin") } // Init determines whether module mode is enabled, locates the root of the // current module (if any), sets environment variables for Git subprocesses, and // configures the cfg, codehost, load, modfetch, and search packages for use // with modules. func Init() { if initialized { return } initialized = true // Keep in sync with WillBeEnabled. We perform extra validation here, and // there are lots of diagnostics and side effects, so we can't use // WillBeEnabled directly. env := cfg.Getenv("GO111MODULE") switch env { default: base.Fatalf("go: unknown environment setting GO111MODULE=%s", env) case "auto", "": mustUseModules = false case "on": mustUseModules = true case "off": mustUseModules = false return } // Disable any prompting for passwords by Git. // Only has an effect for 2.3.0 or later, but avoiding // the prompt in earlier versions is just too hard. // If user has explicitly set GIT_TERMINAL_PROMPT=1, keep // prompting. // See golang.org/issue/9341 and golang.org/issue/12706. if os.Getenv("GIT_TERMINAL_PROMPT") == "" { os.Setenv("GIT_TERMINAL_PROMPT", "0") } // Disable any ssh connection pooling by Git. // If a Git subprocess forks a child into the background to cache a new connection, // that child keeps stdout/stderr open. After the Git subprocess exits, // os /exec expects to be able to read from the stdout/stderr pipe // until EOF to get all the data that the Git subprocess wrote before exiting. // The EOF doesn't come until the child exits too, because the child // is holding the write end of the pipe. // This is unfortunate, but it has come up at least twice // (see golang.org/issue/13453 and golang.org/issue/16104) // and confuses users when it does. 
// If the user has explicitly set GIT_SSH or GIT_SSH_COMMAND, // assume they know what they are doing and don't step on it. // But default to turning off ControlMaster. if os.Getenv("GIT_SSH") == "" && os.Getenv("GIT_SSH_COMMAND") == "" { os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no") } if CmdModInit { // Running 'go mod init': go.mod will be created in current directory. modRoot = base.Cwd } else { modRoot = findModuleRoot(base.Cwd) if modRoot == "" { if cfg.ModFile != "" { base.Fatalf("go: cannot find main module, but -modfile was set.\n\t-modfile cannot be used to set the module root directory.") } if !mustUseModules { // GO111MODULE is 'auto', and we can't find a module root. // Stay in GOPATH mode. return } } else if search.InDir(modRoot, os.TempDir()) == "." { // If you create /tmp/go.mod for experimenting, // then any tests that create work directories under /tmp // will find it and get modules when they're not expecting them. // It's a bit of a peculiar thing to disallow but quite mysterious // when it happens. See golang.org/issue/26708. modRoot = "" fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in system temp root %v\n", os.TempDir()) } } if cfg.ModFile != "" && !strings.HasSuffix(cfg.ModFile, ".mod") { base.Fatalf("go: -modfile=%s: file does not have .mod extension", cfg.ModFile) } // We're in module mode. Install the hooks to make it work. list := filepath.SplitList(cfg.BuildContext.GOPATH) if len(list) == 0 || list[0] == "" { base.Fatalf("missing $GOPATH") } gopath = list[0] if _, err := os.Stat(filepath.Join(gopath, "go.mod")); err == nil { base.Fatalf("$GOPATH/go.mod exists but should not") } cfg.ModulesEnabled = true load.ModBinDir = BinDir load.ModLookup = Lookup load.ModPackageModuleInfo = PackageModuleInfo load.ModImportPaths = ImportPaths load.ModPackageBuildInfo = PackageBuildInfo load.ModInfoProg = ModInfoProg load.ModImportFromFiles = ImportFromFiles load.ModDirImportPath = DirImportPath if modRoot == "" { // We're in module mode, but not inside a module. // // Commands like 'go build', 'go run', 'go list' have no go.mod file to // read or write. They would need to find and download the latest versions // of a potentially large number of modules with no way to save version // information. We can succeed slowly (but not reproducibly), but that's // not usually a good experience. // // Instead, we forbid resolving import paths to modules other than std and // cmd. Users may still build packages specified with .go files on the // command line, but they'll see an error if those files import anything // outside std. // // This can be overridden by calling AllowMissingModuleImports. // For example, 'go get' does this, since it is expected to resolve paths. // // See golang.org/issue/32027. } else { modfetch.GoSumFile = strings.TrimSuffix(ModFilePath(), ".mod") + ".sum" search.SetModRoot(modRoot) } } func init() { load.ModInit = Init } // WillBeEnabled checks whether modules should be enabled but does not // initialize modules by installing hooks. If Init has already been called, // WillBeEnabled returns the same result as Enabled. // // This function is needed to break a cycle. The main package needs to know // whether modules are enabled in order to install the module or GOPATH version // of 'go get', but Init reads the -modfile flag in 'go get', so it shouldn't // be called until the command is installed and flags are parsed. Instead of // calling Init and Enabled, the main package can call this function. 
func WillBeEnabled() bool { if modRoot != "" || mustUseModules { return true } if initialized { return false } // Keep in sync with Init. Init does extra validation and prints warnings or // exits, so it can't call this function directly. env := cfg.Getenv("GO111MODULE") switch env { case "on": return true case "auto", "": break default: return false } if CmdModInit { // Running 'go mod init': go.mod will be created in current directory. return true } if modRoot := findModuleRoot(base.Cwd); modRoot == "" { // GO111MODULE is 'auto', and we can't find a module root. // Stay in GOPATH mode. return false } else if search.InDir(modRoot, os.TempDir()) == "." { // If you create /tmp/go.mod for experimenting, // then any tests that create work directories under /tmp // will find it and get modules when they're not expecting them. // It's a bit of a peculiar thing to disallow but quite mysterious // when it happens. See golang.org/issue/26708. return false } return true } // Enabled reports whether modules are (or must be) enabled. // If modules are enabled but there is no main module, Enabled returns true // and then the first use of module information will call die // (usually through MustModRoot). func Enabled() bool { Init() return modRoot != "" || mustUseModules } // ModRoot returns the root of the main module. // It calls base.Fatalf if there is no main module. func ModRoot() string { if !HasModRoot() { die() } return modRoot } // HasModRoot reports whether a main module is present. // HasModRoot may return false even if Enabled returns true: for example, 'get' // does not require a main module. func HasModRoot() bool { Init() return modRoot != "" } // ModFilePath returns the effective path of the go.mod file. Normally, this // "go.mod" in the directory returned by ModRoot, but the -modfile flag may // change its location. ModFilePath calls base.Fatalf if there is no main // module, even if -modfile is set. func ModFilePath() string { if !HasModRoot() { die() } if cfg.ModFile != "" { return cfg.ModFile } return filepath.Join(modRoot, "go.mod") } // printStackInDie causes die to print a stack trace. // // It is enabled by the testgo tag, and helps to diagnose paths that // unexpectedly require a main module. var printStackInDie = false func die() { if printStackInDie { debug.PrintStack() } if cfg.Getenv("GO111MODULE") == "off" { base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'") } if dir, name := findAltConfig(base.Cwd); dir != "" { rel, err := filepath.Rel(base.Cwd, dir) if err != nil { rel = dir } cdCmd := "" if rel != "." { cdCmd = fmt.Sprintf("cd %s && ", rel) } base.Fatalf("go: cannot find main module, but found %s in %s\n\tto create a module there, run:\n\t%sgo mod init", name, dir, cdCmd) } base.Fatalf("go: cannot find main module; see 'go help modules'") } // InitMod sets Target and, if there is a main module, parses the initial build // list from its go.mod file. If InitMod is called by 'go mod init', InitMod // will populate go.mod in memory, possibly importing dependencies from a // legacy configuration file. For other commands, InitMod may make other // adjustments in memory, like adding a go directive. WriteGoMod should be // called later to write changes out to disk. // // As a side-effect, InitMod sets a default for cfg.BuildMod if it does not // already have an explicit value. 
func InitMod() { if len(buildList) > 0 { return } Init() if modRoot == "" { Target = module.Version{Path: "command-line-arguments"} targetPrefix = "command-line-arguments" buildList = []module.Version{Target} return } if CmdModInit { // Running go mod init: do legacy module conversion legacyModInit() modFileToBuildList() return } gomod := ModFilePath() data, err := lockedfile.Read(gomod) if err != nil { base.Fatalf("go: %v", err) } var fixed bool f, err := modfile.Parse(gomod, data, fixVersion(&fixed)) if err != nil { // Errors returned by modfile.Parse begin with file:line. base.Fatalf("go: errors parsing go.mod:\n%s\n", err) } modFile = f index = indexModFile(data, f, fixed) if len(f.Syntax.Stmt) == 0 || f.Module == nil { // Empty mod file. Must add module path. path, err := findModulePath(modRoot) if err != nil { base.Fatalf("go: %v", err) } f.AddModuleStmt(path) } if len(f.Syntax.Stmt) == 1 && f.Module != nil { // Entire file is just a module statement. // Populate require if possible. legacyModInit() } modFileToBuildList() setDefaultBuildMod() if cfg.BuildMod == "vendor" { readVendorList() checkVendorConsistency() } } // fixVersion returns a modfile.VersionFixer implemented using the Query function. // // It resolves commit hashes and branch names to versions, // canonicalizes versions that appeared in early vgo drafts, // and does nothing for versions that already appear to be canonical. // // The VersionFixer sets 'fixed' if it ever returns a non-canonical version. func fixVersion(fixed *bool) modfile.VersionFixer { return func(path, vers string) (resolved string, err error) { defer func() { if err == nil && resolved != vers { *fixed = true } }() // Special case: remove the old -gopkgin- hack. if strings.HasPrefix(path, "gopkg.in/") && strings.Contains(vers, "-gopkgin-") { vers = vers[strings.Index(vers, "-gopkgin-")+len("-gopkgin-"):] } // fixVersion is called speculatively on every // module, version pair from every go.mod file. // Avoid the query if it looks OK. _, pathMajor, ok := module.SplitPathVersion(path) if !ok { return "", &module.ModuleError{ Path: path, Err: &module.InvalidVersionError{ Version: vers, Err: fmt.Errorf("malformed module path %q", path), }, } } if vers != "" && module.CanonicalVersion(vers) == vers { if err := module.CheckPathMajor(vers, pathMajor); err == nil { return vers, nil } } info, err := Query(path, vers, "", nil) if err != nil { return "", err } return info.Version, nil } } // AllowMissingModuleImports allows import paths to be resolved to modules // when there is no module root. Normally, this is forbidden because it's slow // and there's no way to make the result reproducible, but some commands // like 'go get' are expected to do this. func AllowMissingModuleImports() { allowMissingModuleImports = true } // modFileToBuildList initializes buildList from the modFile. func modFileToBuildList() { Target = modFile.Module.Mod targetPrefix = Target.Path if rel := search.InDir(base.Cwd, cfg.GOROOTsrc); rel != "" { targetInGorootSrc = true if Target.Path == "std" { targetPrefix = "" } } list := []module.Version{Target} for _, r := range modFile.Require { list = append(list, r.Mod) } buildList = list } // setDefaultBuildMod sets a default value for cfg.BuildMod // if it is currently empty. func setDefaultBuildMod() { if cfg.BuildMod != "" { // Don't override an explicit '-mod=' argument. 
return } cfg.BuildMod = "mod" if cfg.CmdName == "get" || strings.HasPrefix(cfg.CmdName, "mod ") { // Don't set -mod implicitly for commands whose purpose is to // manipulate the build list. return } if modRoot == "" { return } if fi, err := os.Stat(filepath.Join(modRoot, "vendor")); err == nil && fi.IsDir() { modGo := "unspecified" if index.goVersion != "" { if semver.Compare("v"+index.goVersion, "v1.14") >= 0 { // The Go version is at least 1.14, and a vendor directory exists. // Set -mod=vendor by default. cfg.BuildMod = "vendor" cfg.BuildModReason = "Go version in go.mod is at least 1.14 and vendor directory exists." return } else { modGo = index.goVersion } } // Since a vendor directory exists, we have a non-trivial reason for // choosing -mod=mod, although it probably won't be used for anything. // Record the reason anyway for consistency. // It may be overridden if we switch to mod=readonly below. cfg.BuildModReason = fmt.Sprintf("Go version in go.mod is %s.", modGo) } p := ModFilePath() if fi, err := os.Stat(p); err == nil && !hasWritePerm(p, fi) { cfg.BuildMod = "readonly" cfg.BuildModReason = "go.mod file is read-only." } } func legacyModInit() { if modFile == nil { path, err := findModulePath(modRoot) if err != nil { base.Fatalf("go: %v", err) } fmt.Fprintf(os.Stderr, "go: creating new go.mod: module %s\n", path) modFile = new(modfile.File) modFile.AddModuleStmt(path) addGoStmt() // Add the go directive before converted module requirements. } for _, name := range altConfigs { cfg := filepath.Join(modRoot, name) data, err := ioutil.ReadFile(cfg) if err == nil { convert := modconv.Converters[name] if convert == nil { return } fmt.Fprintf(os.Stderr, "go: copying requirements from %s\n", base.ShortPath(cfg)) cfg = filepath.ToSlash(cfg) if err := modconv.ConvertLegacyConfig(modFile, cfg, data); err != nil { base.Fatalf("go: %v", err) } if len(modFile.Syntax.Stmt) == 1 { // Add comment to avoid re-converting every time it runs. modFile.AddComment("// go: no requirements found in " + name) } return } } } // addGoStmt adds a go directive to the go.mod file if it does not already include one. // The 'go' version added, if any, is the latest version supported by this toolchain. func addGoStmt() { if modFile.Go != nil && modFile.Go.Version != "" { return } tags := build.Default.ReleaseTags version := tags[len(tags)-1] if !strings.HasPrefix(version, "go") || !modfile.GoVersionRE.MatchString(version[2:]) { base.Fatalf("go: unrecognized default version %q", version) } if err := modFile.AddGoStmt(version[2:]); err != nil { base.Fatalf("go: internal error: %v", err) } } var altConfigs = []string{ "Gopkg.lock", "GLOCKFILE", "Godeps/Godeps.json", "dependencies.tsv", "glide.lock", "vendor.conf", "vendor.yml", "vendor/manifest", "vendor/vendor.json", ".git/config", } func findModuleRoot(dir string) (root string) { if dir == "" { panic("dir not set") } dir = filepath.Clean(dir) // Look for enclosing go.mod. for { if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() { return dir } d := filepath.Dir(dir) if d == dir { break } dir = d } return "" } func findAltConfig(dir string) (root, name string) { if dir == "" { panic("dir not set") } dir = filepath.Clean(dir) if rel := search.InDir(dir, cfg.BuildContext.GOROOT); rel != "" { // Don't suggest creating a module from $GOROOT/.git/config // or a config file found in any parent of $GOROOT (see #34191). 
return "", "" } for { for _, name := range altConfigs { if fi, err := os.Stat(filepath.Join(dir, name)); err == nil && !fi.IsDir() { return dir, name } } d := filepath.Dir(dir) if d == dir { break } dir = d } return "", "" } func findModulePath(dir string) (string, error) { if CmdModModule != "" { // Running go mod init x/y/z; return x/y/z. if err := module.CheckImportPath(CmdModModule); err != nil { return "", err } return CmdModModule, nil } // TODO(bcmills): once we have located a plausible module path, we should // query version control (if available) to verify that it matches the major // version of the most recent tag. // See https://golang.org/issue/29433, https://golang.org/issue/27009, and // https://golang.org/issue/31549. // Cast about for import comments, // first in top-level directory, then in subdirectories. list, _ := ioutil.ReadDir(dir) for _, info := range list { if info.Mode().IsRegular() && strings.HasSuffix(info.Name(), ".go") { if com := findImportComment(filepath.Join(dir, info.Name())); com != "" { return com, nil } } } for _, info1 := range list { if info1.IsDir() { files, _ := ioutil.ReadDir(filepath.Join(dir, info1.Name())) for _, info2 := range files { if info2.Mode().IsRegular() && strings.HasSuffix(info2.Name(), ".go") { if com := findImportComment(filepath.Join(dir, info1.Name(), info2.Name())); com != "" { return path.Dir(com), nil } } } } } // Look for Godeps.json declaring import path. data, _ := ioutil.ReadFile(filepath.Join(dir, "Godeps/Godeps.json")) var cfg1 struct{ ImportPath string } json.Unmarshal(data, &cfg1) if cfg1.ImportPath != "" { return cfg1.ImportPath, nil } // Look for vendor.json declaring import path. data, _ = ioutil.ReadFile(filepath.Join(dir, "vendor/vendor.json")) var cfg2 struct{ RootPath string } json.Unmarshal(data, &cfg2) if cfg2.RootPath != "" { return cfg2.RootPath, nil } // Look for path in GOPATH. for _, gpdir := range filepath.SplitList(cfg.BuildContext.GOPATH) { if gpdir == "" { continue } if rel := search.InDir(dir, filepath.Join(gpdir, "src")); rel != "" && rel != "." { return filepath.ToSlash(rel), nil } } msg := `cannot determine module path for source directory %s (outside GOPATH, module path must be specified) Example usage: 'go mod init example.com/m' to initialize a v0 or v1 module 'go mod init example.com/m/v2' to initialize a v2 module Run 'go help mod init' for more information. ` return "", fmt.Errorf(msg, dir) } var ( importCommentRE = lazyregexp.New(`(?m)^package[ \t]+[^ \t\r\n/]+[ \t]+//[ \t]+import[ \t]+(\"[^"]+\")[ \t]*\r?\n`) ) func findImportComment(file string) string { data, err := ioutil.ReadFile(file) if err != nil { return "" } m := importCommentRE.FindSubmatch(data) if m == nil { return "" } path, err := strconv.Unquote(string(m[1])) if err != nil { return "" } return path } var allowWriteGoMod = true // DisallowWriteGoMod causes future calls to WriteGoMod to do nothing at all. func DisallowWriteGoMod() { allowWriteGoMod = false } // AllowWriteGoMod undoes the effect of DisallowWriteGoMod: // future calls to WriteGoMod will update go.mod if needed. // Note that any past calls have been discarded, so typically // a call to AlowWriteGoMod should be followed by a call to WriteGoMod. func AllowWriteGoMod() { allowWriteGoMod = true } // MinReqs returns a Reqs with minimal additional dependencies of Target, // as will be written to go.mod. 
func MinReqs() mvs.Reqs { var retain []string for _, m := range buildList[1:] { _, explicit := index.require[m] if explicit || loaded.direct[m.Path] { retain = append(retain, m.Path) } } min, err := mvs.Req(Target, retain, Reqs()) if err != nil { base.Fatalf("go: %v", err) } return &mvsReqs{buildList: append([]module.Version{Target}, min...)} } // WriteGoMod writes the current build list back to go.mod. func WriteGoMod() { // If we're using -mod=vendor we basically ignored // go.mod, so definitely don't try to write back our // incomplete view of the world. if !allowWriteGoMod || cfg.BuildMod == "vendor" { return } // If we aren't in a module, we don't have anywhere to write a go.mod file. if modRoot == "" { return } if cfg.BuildMod != "readonly" { addGoStmt() } if loaded != nil { reqs := MinReqs() min, err := reqs.Required(Target) if err != nil { base.Fatalf("go: %v", err) } var list []*modfile.Require for _, m := range min { list = append(list, &modfile.Require{ Mod: m, Indirect: !loaded.direct[m.Path], }) } modFile.SetRequire(list) } modFile.Cleanup() dirty := index.modFileIsDirty(modFile) if dirty && cfg.BuildMod == "readonly" { // If we're about to fail due to -mod=readonly, // prefer to report a dirty go.mod over a dirty go.sum if cfg.BuildModReason != "" { base.Fatalf("go: updates to go.mod needed, disabled by -mod=readonly\n\t(%s)", cfg.BuildModReason) } else { base.Fatalf("go: updates to go.mod needed, disabled by -mod=readonly") } } // Always update go.sum, even if we didn't change go.mod: we may have // downloaded modules that we didn't have before. modfetch.WriteGoSum(keepSums()) if !dirty && cfg.CmdName != "mod tidy" { // The go.mod file has the same semantic content that it had before // (but not necessarily the same exact bytes). // Ignore any intervening edits. return } new, err := modFile.Format() if err != nil { base.Fatalf("go: %v", err) } defer func() { // At this point we have determined to make the go.mod file on disk equal to new. index = indexModFile(new, modFile, false) }() // Make a best-effort attempt to acquire the side lock, only to exclude // previous versions of the 'go' command from making simultaneous edits. if unlock, err := modfetch.SideLock(); err == nil { defer unlock() } errNoChange := errors.New("no update needed") err = lockedfile.Transform(ModFilePath(), func(old []byte) ([]byte, error) { if bytes.Equal(old, new) { // The go.mod file is already equal to new, possibly as the result of some // other process. return nil, errNoChange } if index != nil && !bytes.Equal(old, index.data) { // The contents of the go.mod file have changed. In theory we could add all // of the new modules to the build list, recompute, and check whether any // module in *our* build list got bumped to a different version, but that's // a lot of work for marginal benefit. Instead, fail the command: if users // want to run concurrent commands, they need to start with a complete, // consistent module definition. return nil, fmt.Errorf("existing contents have changed since last read") } return new, nil }) if err != nil && err != errNoChange { base.Fatalf("go: updating go.mod: %v", err) } } // keepSums returns a set of module sums to preserve in go.sum. The set // includes entries for all modules used to load packages (according to // the last load function like ImportPaths, LoadALL, etc.). It also contains // entries for go.mod files needed for MVS (the version of these entries // ends with "/go.mod"). 
func keepSums() map[module.Version]bool { // Walk the module graph and keep sums needed by MVS. modkey := func(m module.Version) module.Version { return module.Version{Path: m.Path, Version: m.Version + "/go.mod"} } keep := make(map[module.Version]bool) replaced := make(map[module.Version]bool) reqs := Reqs() var walk func(module.Version) walk = func(m module.Version) { // If we build using a replacement module, keep the sum for the replacement, // since that's the code we'll actually use during a build. // // TODO(golang.org/issue/29182): Perhaps we should keep both sums, and the // sums for both sets of transitive requirements. r := Replacement(m) if r.Path == "" { keep[modkey(m)] = true } else { replaced[m] = true keep[modkey(r)] = true } list, _ := reqs.Required(m) for _, r := range list { if !keep[modkey(r)] && !replaced[r] { walk(r) } } } walk(Target) // Add entries for modules that provided packages loaded with ImportPaths, // LoadALL, or similar functions. if loaded != nil { for _, pkg := range loaded.pkgs { m := pkg.mod if r := Replacement(m); r.Path != "" { keep[r] = true } else { keep[m] = true } } } return keep } func TrimGoSum() { modfetch.TrimGoSum(keepSums()) }
[ "\"GIT_TERMINAL_PROMPT\"", "\"GIT_SSH\"", "\"GIT_SSH_COMMAND\"" ]
[]
[ "GIT_SSH", "GIT_SSH_COMMAND", "GIT_TERMINAL_PROMPT" ]
[]
["GIT_SSH", "GIT_SSH_COMMAND", "GIT_TERMINAL_PROMPT"]
go
3
0
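
Init() in src/cmd/go/internal/modload/init.go above only assigns GIT_TERMINAL_PROMPT and GIT_SSH_COMMAND when the user has not already set them, so explicit user configuration always wins. The sketch below restates that default-only-if-unset pattern in Python, to stay consistent with the other examples in this document; set_env_default is a hypothetical helper and nothing here invokes Git.

import os

def set_env_default(name, value):
    # Provide a default for an environment variable without overriding the user's setting.
    if os.environ.get(name, "") == "":
        os.environ[name] = value

set_env_default("GIT_TERMINAL_PROMPT", "0")
# As in the Go record, ControlMaster is only disabled when neither GIT_SSH nor
# GIT_SSH_COMMAND has been set explicitly.
if os.environ.get("GIT_SSH", "") == "" and os.environ.get("GIT_SSH_COMMAND", "") == "":
    set_env_default("GIT_SSH_COMMAND", "ssh -o ControlMaster=no")

print(os.environ["GIT_TERMINAL_PROMPT"], os.environ.get("GIT_SSH_COMMAND"))
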
go/ComputerVision/ComputerVisionQuickstart.go
// <snippet_imports> package main /* Import the required libraries. If this is your first time running a Go program, * you will need to 'go get' the azure-sdk-for-go and go-autorest packages. */ import ( "context" "encoding/json" "fmt" "github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v2.0/computervision" "github.com/Azure/go-autorest/autorest" "io" "log" "os" "strings" "time" ) // </snippet_imports> /* The examples in this quickstart are for the Computer Vision API for Microsoft * Cognitive Services with the following tasks: * - Describing images * - Categorizing images * - Tagging images * - Detecting faces * - Detecting adult or racy content * - Detecting the color scheme * - Detecting domain-specific content (celebrities/landmarks) * - Detecting image types (clip art/line drawing) * - Detecting objects * - Detecting brands * - Recognizing printed and handwritten text with the batch read API * - Recognizing printed text with OCR */ // <snippet_context> // Declare global so don't have to pass it to all of the tasks. var computerVisionContext context.Context // </snippet_context> func main() { // <snippet_client> /* Prerequisites for the Computer Vision client: * Set environment variables for COMPUTER_VISION_SUBSCRIPTION_KEY and COMPUTER_VISION_ENDPOINT, * then restart your command shell or your IDE for changes to take effect. */ computerVisionKey := os.Getenv("COMPUTER_VISION_SUBSCRIPTION_KEY") if (computerVisionKey == "") { log.Fatal("\n\nPlease set a COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\n" + "**You may need to restart your shell or IDE after it's set.**\n") } endpointURL := os.Getenv("COMPUTER_VISION_ENDPOINT") if (endpointURL == "") { log.Fatal("\n\nPlease set a COMPUTER_VISION_ENDPOINT environment variable.\n" + "**You may need to restart your shell or IDE after it's set.**") } computerVisionClient := computervision.New(endpointURL); computerVisionClient.Authorizer = autorest.NewCognitiveServicesAuthorizer(computerVisionKey) computerVisionContext = context.Background() // END - Configure the Computer Vision client // </snippet_client> // Analyze a local image localImagePath := "resources\\faces.jpg" workingDirectory, err := os.Getwd() if err != nil { log.Fatal(err) } fmt.Printf("\nLocal image path:\n%v\n", workingDirectory + "\\" + localImagePath) DescribeLocalImage(computerVisionClient, localImagePath) CategorizeLocalImage(computerVisionClient, localImagePath) TagLocalImage(computerVisionClient, localImagePath) DetectFacesLocalImage(computerVisionClient, localImagePath) DetectAdultOrRacyContentLocalImage(computerVisionClient, localImagePath) DetectColorSchemeLocalImage(computerVisionClient, localImagePath) DetectDomainSpecificContentLocalImage(computerVisionClient, localImagePath) DetectImageTypesLocalImage(computerVisionClient, localImagePath) DetectObjectsLocalImage(computerVisionClient, localImagePath) // END - Analyze a local image // Brand detection on a local image fmt.Println("\nGetting new local image for brand detection ... \n") localImagePath = "resources\\gray-shirt-logo.jpg" workingDirectory, err = os.Getwd() if err != nil { log.Fatal(err) } fmt.Printf("Local image path:\n%v\n", workingDirectory + "\\" + localImagePath) DetectBrandsLocalImage(computerVisionClient, localImagePath) // END - Brand detection // Text recognition on a local image with the Read API fmt.Println("\nGetting new local image for text recognition of handwriting with the Read API... 
\n") localImagePath = "resources\\handwritten_text.jpg" workingDirectory, err = os.Getwd() if err != nil { log.Fatal(err) } fmt.Printf("Local image path:\n%v\n", workingDirectory + "\\" + localImagePath) RecognizeTextReadAPILocalImage(computerVisionClient, localImagePath) // END - Text recognition on a local image with the Read API // Text recognition on a local image with OCR fmt.Println("\nGetting new local image for text recognition with OCR... \n") localImagePath = "resources\\printed_text.jpg" workingDirectory, err = os.Getwd() if err != nil { log.Fatal(err) } fmt.Printf("Local image path:\n%v\n", workingDirectory + "\\" + localImagePath) ExtractTextOCRLocalImage(computerVisionClient, localImagePath) // END - Text recognition on a local image with OCR // <snippet_analyze_url> // Analyze a remote image remoteImageURL := "https://github.com/Azure-Samples/cognitive-services-sample-data-files/raw/master/ComputerVision/Images/landmark.jpg" fmt.Printf("\nRemote image path: \n%v\n", remoteImageURL) // </snippet_analyze_url> // <snippet_brand_url> // Brand detection on a remote image remoteImageBrandsURL = "https://docs.microsoft.com/en-us/azure/cognitive-services/computer-vision/images/gray-shirt-logo.jpg" fmt.Printf("Remote image path for brands: \n%v\n", remoteImageBrandsURL) // </snippet_brand_url> // <snippet_analyze> DescribeRemoteImage(computerVisionClient, remoteImageURL) CategorizeRemoteImage(computerVisionClient, remoteImageURL) TagRemoteImage(computerVisionClient, remoteImageURL) DetectObjectsRemoteImage(computerVisionClient, remoteImageURL) DetectBrandsRemoteImage(computerVisionClient, remoteImageURL) DetectFacesRemoteImage(computerVisionClient, remoteImageURL) DetectAdultOrRacyContentRemoteImage(computerVisionClient, remoteImageURL) DetectColorSchemeRemoteImage(computerVisionClient, remoteImageURL) DetectDomainSpecificContentRemoteImage(computerVisionClient, remoteImageURL) DetectImageTypesRemoteImage(computerVisionClient, remoteImageURL) // END - Analyze a remote image // </snippet_analyze> // <snippet_readinmain> // Text recognition on a remote image fmt.Println("\nGetting new remote image for text recognition of printed text with the Read API... \n") remoteImageURL = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/printed_text.jpg" fmt.Printf("Remote image path: \n%v\n", remoteImageURL) RecognizeTextReadAPIRemoteImage(computerVisionClient, remoteImageURL) // </snippet_readinmain> // END - Text recognition on a remote image // Text recognition on a remote image with OCR remoteImageURL = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/printed_text.jpg" ExtractTextOCRRemoteImage(computerVisionClient, remoteImageURL) // END - Text recognition on a remote image with OCR } /* Describe a local image by: * 1. Instantiating a ReadCloser, which is required by AnalyzeImageInStream. * 2. Opening the ReadCloser instance for reading. * 3. Calling the Computer Vision service's AnalyzeImageInStream with the: * - context * - image * - the number of descriptions to return * - "" to specify the default language ("en") as the output language * 4. Displaying the image captions and their confidence values. 
*/ func DescribeLocalImage(client computervision.BaseClient, localImagePath string) { var localImage io.ReadCloser localImage, err := os.Open(localImagePath) if err != nil { log.Fatal(err) } maxNumberDescriptionCandidates := new(int32) *maxNumberDescriptionCandidates = 1 localImageDescription, err := client.DescribeImageInStream( computerVisionContext, localImage, maxNumberDescriptionCandidates, "") if err != nil { log.Fatal(err) } fmt.Println("\nCaptions from local image: ") if len(*localImageDescription.Captions) == 0 { fmt.Println("No captions detected.") } else { for _, caption := range *localImageDescription.Captions { fmt.Printf("'%v' with confidence %.2f%%\n", *caption.Text, *caption.Confidence * 100) } } } // END - Describe a local image /* Describe a remote image file by: * 1. Saving the URL as an ImageURL type for passing to AnalyzeImage. * 2. Defining what to extract from the image by initializing an array of VisualFeatureTypes. * 3. Calling the Computer Vision service's AnalyzeImage with the: * - context * - image * - features to extract * - an empty slice for the Details enumeration * - "" to specify the default language ("en") as the output language * 4. Displaying the image captions and their confidence values. */ // <snippet_analyze_describe> func DescribeRemoteImage(client computervision.BaseClient, remoteImageURL string) { var remoteImage computervision.ImageURL remoteImage.URL = &remoteImageURL maxNumberDescriptionCandidates := new(int32) *maxNumberDescriptionCandidates = 1 remoteImageDescription, err := client.DescribeImage( computerVisionContext, remoteImage, maxNumberDescriptionCandidates, "") if err != nil { log.Fatal(err) } fmt.Println("\nCaptions from remote image: ") if len(*remoteImageDescription.Captions) == 0 { fmt.Println("No captions detected.") } else { for _, caption := range *remoteImageDescription.Captions { fmt.Printf("'%v' with confidence %.2f%%\n", *caption.Text, *caption.Confidence * 100) } } } // </snippet_analyze_describe> // END - Describe a remote image /* Categorize a local image by: * 1. Instantiating a ReadCloser, which is required by AnalyzeImageInStream. * 2. Opening the ReadCloser instance for reading. * 3. Defining what to extract from the image by initializing an array of VisualFeatureTypes. * 4. Calling the Computer Vision service's AnalyzeImageInStream with the: * - context * - image * - features to extract * - an empty slice for the Details enumeration * - "" to specify the default language ("en") as the output language * 5. Displaying the image categories and their confidence values. */ func CategorizeLocalImage(client computervision.BaseClient, localImagePath string) { var localImage io.ReadCloser localImage, err := os.Open(localImagePath) if err != nil { log.Fatal(err) } features := []computervision.VisualFeatureTypes{computervision.VisualFeatureTypesCategories} imageAnalysis, err := client.AnalyzeImageInStream( computerVisionContext, localImage, features, []computervision.Details{}, "") if err != nil { log.Fatal(err) } fmt.Println("\nCategories from local image: ") if len(*imageAnalysis.Categories) == 0 { fmt.Println("No categories detected.") } else { for _, category := range *imageAnalysis.Categories { fmt.Printf("'%v' with confidence %.2f%%\n", *category.Name, *category.Score * 100) } } } // END - Categorize a local image /* Categorize a remote image by: * 1. Saving the URL as an ImageURL type for passing to AnalyzeImage. * 2. Defining what to extract from the image by initializing an array of VisualFeatureTypes. * 3. 
Calling the Computer Vision service's AnalyzeImage with the: * - context * - image * - features to extract * - an empty slice for the Details enumeration * - "" to specify the default language ("en") as the output language * 4. Displaying the image categories and their confidence values. */ // <snippet_analyze_categorize> func CategorizeRemoteImage(client computervision.BaseClient, remoteImageURL string) { var remoteImage computervision.ImageURL remoteImage.URL = &remoteImageURL features := []computervision.VisualFeatureTypes{computervision.VisualFeatureTypesCategories} imageAnalysis, err := client.AnalyzeImage( computerVisionContext, remoteImage, features, []computervision.Details{}, "") if err != nil { log.Fatal(err) } fmt.Println("\nCategories from remote image: ") if len(*imageAnalysis.Categories) == 0 { fmt.Println("No categories detected.") } else { for _, category := range *imageAnalysis.Categories { fmt.Printf("'%v' with confidence %.2f%%\n", *category.Name, *category.Score * 100) } } } // </snippet_analyze_categorize> // END - Categorize a remote image /* Tag a local image by: * 1. Instantiating a ReadCloser, which is required by TagImageInStream. * 2. Opening the ReadCloser instance for reading. * 3. Calling the Computer Vision service's AnalyzeImageInStream with the: * - context * - image * - "" to specify the default language ("en") as the output language * 4. Displaying the image categories and their confidence values. */ func TagLocalImage(client computervision.BaseClient, localImagePath string) { var localImage io.ReadCloser localImage, err := os.Open(localImagePath) if err != nil { log.Fatal(err) } localImageTags, err := client.TagImageInStream( computerVisionContext, localImage, "") if err != nil { log.Fatal(err) } fmt.Println("\nTags in the local image: ") if len(*localImageTags.Tags) == 0 { fmt.Println("No tags detected.") } else { for _, tag := range *localImageTags.Tags { fmt.Printf("'%v' with confidence %.2f%%\n", *tag.Name, *tag.Confidence * 100) } } } // END - Tag a local image /* Tag a remote image file by: * 1. Saving the URL as an ImageURL type for passing to AnalyzeImage. * 2. Calling the Computer Vision service's TagImage with the: * - context * - image * - "" to specify the default language ("en") as the output language * 3. Displaying the image categories and their confidence values. */ // <snippet_tags> func TagRemoteImage(client computervision.BaseClient, remoteImageURL string) { var remoteImage computervision.ImageURL remoteImage.URL = &remoteImageURL remoteImageTags, err := client.TagImage( computerVisionContext, remoteImage, "") if err != nil { log.Fatal(err) } fmt.Println("\nTags in the remote image: ") if len(*remoteImageTags.Tags) == 0 { fmt.Println("No tags detected.") } else { for _, tag := range *remoteImageTags.Tags { fmt.Printf("'%v' with confidence %.2f%%\n", *tag.Name, *tag.Confidence * 100) } } } // </snippet_tags> // END - Tag a remote image /* Detect faces in a local image by: * 1. Instantiating a ReadCloser, which is required by AnalyzeImageInStream. * 2. Opening the ReadCloser instance for reading. * 3. Defining what to extract from the image by initializing an array of VisualFeatureTypes. * 4. Calling the Computer Vision service's AnalyzeImageInStream with the: * - context * - image * - features to extract * - an empty slice for the Details enumeration * - "" to specify the default language ("en") as the output language * 5. Displaying the faces and their bounding boxes. 
*/ func DetectFacesLocalImage(client computervision.BaseClient, localImagePath string) { var localImage io.ReadCloser localImage, err := os.Open(localImagePath) if err != nil { log.Fatal(err) } features := []computervision.VisualFeatureTypes{computervision.VisualFeatureTypesFaces} imageAnalysis, err := client.AnalyzeImageInStream( computerVisionContext, localImage, features, []computervision.Details{}, "") if err != nil { log.Fatal(err) } fmt.Println("\nDetecting faces in a local image ...") if len(*imageAnalysis.Faces) == 0 { fmt.Println("No faces detected.") } else { for _, face := range *imageAnalysis.Faces { fmt.Printf("'%v' of age %v at location (%v, %v), (%v, %v)\n", face.Gender, *face.Age, *face.FaceRectangle.Left, *face.FaceRectangle.Top, *face.FaceRectangle.Left + *face.FaceRectangle.Width, *face.FaceRectangle.Top + *face.FaceRectangle.Height) } } } // END - Detect faces in a local image /* Detect faces in a remote image file by: * 1. Saving the URL as an ImageURL type for passing to AnalyzeImage. * 2. Defining what to extract from the image by initializing an array of VisualFeatureTypes. * 3. Calling the Computer Vision service's AnalyzeImage with the: * - context * - image * - features to extract * - an empty slice for the Details enumeration * - "" to specify the default language ("en") as the output language * 4. Displaying the image categories and their confidence values. */ // <snippet_faces> func DetectFacesRemoteImage(client computervision.BaseClient, remoteImageURL string) { var remoteImage computervision.ImageURL remoteImage.URL = &remoteImageURL features := []computervision.VisualFeatureTypes{computervision.VisualFeatureTypesFaces} imageAnalysis, err := client.AnalyzeImage( computerVisionContext, remoteImage, features, []computervision.Details{}, "") if err != nil { log.Fatal(err) } fmt.Println("\nDetecting faces in a remote image ...") if len(*imageAnalysis.Faces) == 0 { fmt.Println("No faces detected.") } else { for _, face := range *imageAnalysis.Faces { fmt.Printf("'%v' of age %v at location (%v, %v), (%v, %v)\n", face.Gender, *face.Age, *face.FaceRectangle.Left, *face.FaceRectangle.Top, *face.FaceRectangle.Left + *face.FaceRectangle.Width, *face.FaceRectangle.Top + *face.FaceRectangle.Height) } } } // </snippet_faces> // END - Detect faces in a remote image /* Detect adult or racy content in a local image by: * 1. Instantiating a ReadCloser, which is required by AnalyzeImageInStream. * 2. Opening the ReadCloser instance for reading. * 3. Defining what to extract from the image by initializing an array of VisualFeatureTypes. * 4. Calling the Computer Vision service's AnalyzeImageInStream with the: * - context * - image * - features to extract * - an empty slice for the Details enumeration * - "" to specify the default language ("en") as the output language * 5. Displaying the faces and their bounding boxes. 
*/ func DetectAdultOrRacyContentLocalImage(client computervision.BaseClient, localImagePath string) { var localImage io.ReadCloser localImage, err := os.Open(localImagePath) if err != nil { log.Fatal(err) } features := []computervision.VisualFeatureTypes{computervision.VisualFeatureTypesAdult} imageAnalysis, err := client.AnalyzeImageInStream( computerVisionContext, localImage, features, []computervision.Details{}, "") if err != nil { log.Fatal(err) } fmt.Println("\nAnalyzing local image for adult or racy content: "); fmt.Printf("Is adult content: %v with confidence %.2f%%\n", *imageAnalysis.Adult.IsAdultContent, *imageAnalysis.Adult.AdultScore * 100) fmt.Printf("Has racy content: %v with confidence %.2f%%\n", *imageAnalysis.Adult.IsRacyContent, *imageAnalysis.Adult.RacyScore * 100) } // END - Detect adult or racy content in a local image /* Detect adult or racy content in a remote image file by: * 1. Saving the URL as an ImageURL type for passing to AnalyzeImage. * 2. Defining what to extract from the image by initializing an array of VisualFeatureTypes. * 3. Calling the Computer Vision service's AnalyzeImage with the: * - context * - image * - features to extract * - an empty slice for the Details enumeration * - "" to specify the default language ("en") as the output language * 4. Displaying the image categories and their confidence values. */ // <snippet_adult> func DetectAdultOrRacyContentRemoteImage(client computervision.BaseClient, remoteImageURL string) { var remoteImage computervision.ImageURL remoteImage.URL = &remoteImageURL features := []computervision.VisualFeatureTypes{computervision.VisualFeatureTypesAdult} imageAnalysis, err := client.AnalyzeImage( computerVisionContext, remoteImage, features, []computervision.Details{}, "") if err != nil { log.Fatal(err) } fmt.Println("\nAnalyzing remote image for adult or racy content: "); fmt.Printf("Is adult content: %v with confidence %.2f%%\n", *imageAnalysis.Adult.IsAdultContent, *imageAnalysis.Adult.AdultScore * 100) fmt.Printf("Has racy content: %v with confidence %.2f%%\n", *imageAnalysis.Adult.IsRacyContent, *imageAnalysis.Adult.RacyScore * 100) } // </snippet_adult> // END - Detect adult or racy content in a remote image /* Detect the color scheme of a local image by: * 1. Instantiating a ReadCloser, which is required by AnalyzeImageInStream. * 2. Opening the ReadCloser instance for reading. * 3. Defining what to extract from the image by initializing an array of VisualFeatureTypes. * 4. Calling the Computer Vision service's AnalyzeImageInStream with the: * - context * - image * - features to extract * - an empty slice for the Details enumeration * - "" to specify the default language ("en") as the output language * 5. Displaying the faces and their bounding boxes. 
*/ func DetectColorSchemeLocalImage(client computervision.BaseClient, localImagePath string) { var localImage io.ReadCloser localImage, err := os.Open(localImagePath) if err != nil { log.Fatal(err) } features := []computervision.VisualFeatureTypes{computervision.VisualFeatureTypesColor} imageAnalysis, err := client.AnalyzeImageInStream( computerVisionContext, localImage, features, []computervision.Details{}, "") if err != nil { log.Fatal(err) } fmt.Println("\nColor scheme of the local image: "); fmt.Printf("Is black and white: %v\n", *imageAnalysis.Color.IsBWImg) fmt.Printf("Accent color: 0x%v\n", *imageAnalysis.Color.AccentColor) fmt.Printf("Dominant background color: %v\n", *imageAnalysis.Color.DominantColorBackground) fmt.Printf("Dominant foreground color: %v\n", *imageAnalysis.Color.DominantColorForeground) fmt.Printf("Dominant colors: %v\n", strings.Join(*imageAnalysis.Color.DominantColors, ", ")) } // END - Detect the color scheme of a local image /* Detect the color scheme of a remote image file by: * 1. Saving the URL as an ImageURL type for passing to AnalyzeImage. * 2. Defining what to extract from the image by initializing an array of VisualFeatureTypes. * 3. Calling the Computer Vision service's AnalyzeImage with the: * - context * - image * - features to extract * - an empty slice for the Details enumeration * - "" to specify the default language ("en") as the output language * 4. Displaying the image categories and their confidence values. */ // <snippet_color> func DetectColorSchemeRemoteImage(client computervision.BaseClient, remoteImageURL string) { var remoteImage computervision.ImageURL remoteImage.URL = &remoteImageURL features := []computervision.VisualFeatureTypes{computervision.VisualFeatureTypesColor} imageAnalysis, err := client.AnalyzeImage( computerVisionContext, remoteImage, features, []computervision.Details{}, "") if err != nil { log.Fatal(err) } fmt.Println("\nColor scheme of the remote image: "); fmt.Printf("Is black and white: %v\n", *imageAnalysis.Color.IsBWImg) fmt.Printf("Accent color: 0x%v\n", *imageAnalysis.Color.AccentColor) fmt.Printf("Dominant background color: %v\n", *imageAnalysis.Color.DominantColorBackground) fmt.Printf("Dominant foreground color: %v\n", *imageAnalysis.Color.DominantColorForeground) fmt.Printf("Dominant colors: %v\n", strings.Join(*imageAnalysis.Color.DominantColors, ", ")) } // </snippet_color> // END - Detect the color scheme of a remote image /* Detect domain-specific content (celebrities, landmarks) in a local image by: * 1. Instantiating a ReadCloser, which is required by AnalyzeImageInStream. * 2. Opening the ReadCloser instance for reading. * 3. Calling the Computer Vision service's AnalyzeImageByDomainInStream with the: * - context * - domain-specific content to extract * - image * - "" to specify the default language ("en") as the output language * 4. Decoding the data returned from AnalyzeImageByDomainInStream. * 5. Displaying the celebrities/landmarks and their bounding boxes. */ func DetectDomainSpecificContentLocalImage(client computervision.BaseClient, localImagePath string) { var localImage io.ReadCloser localImage, err := os.Open(localImagePath) if err != nil { log.Fatal(err) } fmt.Println("\nDetecting domain-specific content in the local image ...") celebrities, err := client.AnalyzeImageByDomainInStream( computerVisionContext, "celebrities", localImage, "") if err != nil { log.Fatal(err) } fmt.Println("\nCelebrities: ") // Marshal the output from AnalyzeImageByDomainInStream into JSON. 
data, err := json.MarshalIndent(celebrities.Result, "", "\t") // Define structs for which to unmarshal the JSON. type Celebrities struct { Name string `json:"name"` } type CelebrityResult struct { Celebrities []Celebrities `json:"celebrities"` } var celebrityResult CelebrityResult // Unmarshal the data. err = json.Unmarshal(data, &celebrityResult) if err != nil { log.Fatal(err) } // Check if any celebrities detected if len(celebrityResult.Celebrities) == 0 { fmt.Println("No celebrities detected.") } else { for _, celebrity := range celebrityResult.Celebrities { fmt.Printf("name: %v\n", celebrity.Name) } } fmt.Println("\nLandmarks: ") localImage, err = os.Open(localImagePath) if err != nil { log.Fatal(err) } landmarks, err := client.AnalyzeImageByDomainInStream( computerVisionContext, "landmarks", localImage, "") if err != nil { log.Fatal(err) } // Marshal the output from AnalyzeImageByDomainInStream into JSON. data, err = json.MarshalIndent(landmarks.Result, "", "\t") // Define structs for which to unmarshal the JSON. type Landmarks struct { Name string `json:"name"` } type LandmarkResult struct { Landmarks []Landmarks `json:"landmarks"` } var landmarkResult LandmarkResult // Unmarshal the data. err = json.Unmarshal(data, &landmarkResult) if err != nil { log.Fatal(err) } // Check if any landmarks detected if len(landmarkResult.Landmarks) == 0 { fmt.Println("No landmarks detected.") } else { for _, landmark := range landmarkResult.Landmarks { fmt.Printf("name: %v\n", landmark.Name) } } } // END - Detect domain-specific content in a local image /* Detect domain-specific content (celebrities, landmarks) in remote image file by: * 1. Saving the URL as an ImageURL type for passing to AnalyzeImage. * 2. Calling the Computer Vision service's AnalyzeImageByDomain with the: * - context * - domain-specific content to extract * - image * - "" to specify the default language ("en") as the output language * 3. Decoding the data returned from AnalyzeImageByDomain. * 4. Displaying the celebrities/landmarks and their bounding boxes. */ // <snippet_celebs> func DetectDomainSpecificContentRemoteImage(client computervision.BaseClient, remoteImageURL string) { var remoteImage computervision.ImageURL remoteImage.URL = &remoteImageURL fmt.Println("\nDetecting domain-specific content in the local image ...") celebrities, err := client.AnalyzeImageByDomain( computerVisionContext, "celebrities", remoteImage, "") if err != nil { log.Fatal(err) } fmt.Println("\nCelebrities: ") // Marshal the output from AnalyzeImageByDomain into JSON. data, err := json.MarshalIndent(celebrities.Result, "", "\t") // Define structs for which to unmarshal the JSON. type Celebrities struct { Name string `json:"name"` } type CelebrityResult struct { Celebrities []Celebrities `json:"celebrities"` } var celebrityResult CelebrityResult // Unmarshal the data. err = json.Unmarshal(data, &celebrityResult) if err != nil { log.Fatal(err) } // Check if any celebrities detected if len(celebrityResult.Celebrities) == 0 { fmt.Println("No celebrities detected.") } else { for _, celebrity := range celebrityResult.Celebrities { fmt.Printf("name: %v\n", celebrity.Name) } } // </snippet_celebs> // <snippet_landmarks> fmt.Println("\nLandmarks: ") landmarks, err := client.AnalyzeImageByDomain( computerVisionContext, "landmarks", remoteImage, "") if err != nil { log.Fatal(err) } // Marshal the output from AnalyzeImageByDomain into JSON. data, err = json.MarshalIndent(landmarks.Result, "", "\t") // Define structs for which to unmarshal the JSON. 
type Landmarks struct { Name string `json:"name"` } type LandmarkResult struct { Landmarks []Landmarks `json:"landmarks"` } var landmarkResult LandmarkResult // Unmarshal the data. err = json.Unmarshal(data, &landmarkResult) if err != nil { log.Fatal(err) } // Check if any celebrities detected if len(landmarkResult.Landmarks) == 0 { fmt.Println("No landmarks detected.") } else { for _, landmark := range landmarkResult.Landmarks { fmt.Printf("name: %v\n", landmark.Name) } } } // </snippet_landmarks> // END - Detect domain-specific content in a remote image /* Detect the image type (clip art, line drawing) of a local image by: * 1. Instantiating a ReadCloser, which is required by AnalyzeImageInStream. * 2. Opening the ReadCloser instance for reading. * 3. Defining what to extract from the image by initializing an array of VisualFeatureTypes. * 4. Calling the Computer Vision service's AnalyzeImageInStream with the: * - context * - image * - features to extract * - an empty slice for the Details enumeration * - "" to specify the default language ("en") as the output language * 5. Displaying the faces and their bounding boxes. */ func DetectImageTypesLocalImage(client computervision.BaseClient, localImagePath string) { var localImage io.ReadCloser localImage, err := os.Open(localImagePath) if err != nil { log.Fatal(err) } features := []computervision.VisualFeatureTypes{computervision.VisualFeatureTypesImageType} imageAnalysis, err := client.AnalyzeImageInStream( computerVisionContext, localImage, features, []computervision.Details{}, "") if err != nil { log.Fatal(err) } fmt.Println("\nImage type of local image:") fmt.Println("\nClip art type: ") switch *imageAnalysis.ImageType.ClipArtType { case 0: fmt.Println("Image is not clip art.") case 1: fmt.Println("Image is ambiguously clip art.") case 2: fmt.Println("Image is normal clip art.") case 3: fmt.Println("Image is good clip art.") } fmt.Println("\nLine drawing type: ") if *imageAnalysis.ImageType.LineDrawingType == 1 { fmt.Println("Image is a line drawing.") } else { fmt.Println("Image is not a line drawing.") } } // END - Detect image type of a local image /* Detect the image type (clip art, line drawing) of a remote image by: * 1. Saving the URL as an ImageURL type for passing to AnalyzeImage. * 2. Defining what to extract from the image by initializing an array of VisualFeatureTypes. * 3. Calling the Computer Vision service's AnalyzeImage with the: * - context * - image * - features to extract * - an enumeration specifying the domain-specific details to return * - "" to specify the default language ("en") as the output language * 4. Displaying the image categories and their confidence values. 
*/ // <snippet_type> func DetectImageTypesRemoteImage(client computervision.BaseClient, remoteImageURL string) { var remoteImage computervision.ImageURL remoteImage.URL = &remoteImageURL features := []computervision.VisualFeatureTypes{computervision.VisualFeatureTypesImageType} imageAnalysis, err := client.AnalyzeImage( computerVisionContext, remoteImage, features, []computervision.Details{}, "") if err != nil { log.Fatal(err) } fmt.Println("\nImage type of remote image:") fmt.Println("\nClip art type: ") switch *imageAnalysis.ImageType.ClipArtType { case 0: fmt.Println("Image is not clip art.") case 1: fmt.Println("Image is ambiguously clip art.") case 2: fmt.Println("Image is normal clip art.") case 3: fmt.Println("Image is good clip art.") } fmt.Println("\nLine drawing type: ") if *imageAnalysis.ImageType.LineDrawingType == 1 { fmt.Println("Image is a line drawing.") } else { fmt.Println("Image is not a line drawing.") } } // </snippet_type> // END - Detect image type of a remote image /* Detect objects in a local image by: * 1. Instantiating a ReadCloser, which is required by DetectObjectsInStream. * 2. Opening the ReadCloser instance for reading. * 3. Calling the Computer Vision service's DetectObjectsInStream with the: * - context * - image * 4. Displaying the objects and their bounding boxes. */ func DetectObjectsLocalImage(client computervision.BaseClient, localImagePath string) { var localImage io.ReadCloser localImage, err := os.Open(localImagePath) if err != nil { log.Fatal(err) } imageAnalysis, err := client.DetectObjectsInStream( computerVisionContext, localImage, ) if err != nil { log.Fatal(err) } fmt.Println("\nDetecting objects in local image: ") if len(*imageAnalysis.Objects) == 0 { fmt.Println("No objects detected.") } else { for _, object := range *imageAnalysis.Objects { fmt.Printf("'%v' with confidence %.2f%% at location (%v, %v), (%v, %v)\n", *object.Object, *object.Confidence * 100, *object.Rectangle.X, *object.Rectangle.X + *object.Rectangle.W, *object.Rectangle.Y, *object.Rectangle.Y + *object.Rectangle.H) } } } // END - Detect objects in local image /* Detect objects in a remote image by: * 1. Saving the URL as an ImageURL type for passing to DetectObjects. * 2. Calling the Computer Vision service's DetectObjects with the: * - context * - image * 3. Displaying the objects and their bounding boxes. */ // <snippet_objects> func DetectObjectsRemoteImage(client computervision.BaseClient, remoteImageURL string) { var remoteImage computervision.ImageURL remoteImage.URL = &remoteImageURL imageAnalysis, err := client.DetectObjects( computerVisionContext, remoteImage, ) if err != nil { log.Fatal(err) } fmt.Println("\nDetecting objects in remote image: ") if len(*imageAnalysis.Objects) == 0 { fmt.Println("No objects detected.") } else { for _, object := range *imageAnalysis.Objects { fmt.Printf("'%v' with confidence %.2f%% at location (%v, %v), (%v, %v)\n", *object.Object, *object.Confidence * 100, *object.Rectangle.X, *object.Rectangle.X + *object.Rectangle.W, *object.Rectangle.Y, *object.Rectangle.Y + *object.Rectangle.H) } } } // </snippet_objects> // END - Detect objects in remote image /* Detect brands in a local image by: * 1. Instantiating a ReadCloser, which is required by AnalyzeImageInStream. * 2. Opening the ReadCloser instance for reading. * 3. Defining what to extract from the image by initializing an array of VisualFeatureTypes. * 4. 
Calling the Computer Vision service's AnalyzeImageInStream with the: * - context * - image * - features to extract * - an empty slice for the Details enumeration * - "" to specify the default language ("en") as the output language * 5. Displaying the brands, confidence values, and their bounding boxes. */ func DetectBrandsLocalImage(client computervision.BaseClient, localImagePath string) { var localImage io.ReadCloser localImage, err := os.Open(localImagePath) if err != nil { log.Fatal(err) } features := []computervision.VisualFeatureTypes{computervision.VisualFeatureTypesBrands} imageAnalysis, err := client.AnalyzeImageInStream( computerVisionContext, localImage, features, []computervision.Details{}, "en") if err != nil { log.Fatal(err) } fmt.Println("\nDetecting brands in local image: ") if len(*imageAnalysis.Brands) == 0 { fmt.Println("No brands detected.") } else { for _, brand := range *imageAnalysis.Brands { fmt.Printf("'%v' with confidence %.2f%% at location (%v, %v), (%v, %v)\n", *brand.Name, *brand.Confidence * 100, *brand.Rectangle.X, *brand.Rectangle.X + *brand.Rectangle.W, *brand.Rectangle.Y, *brand.Rectangle.Y + *brand.Rectangle.H) } } } // END - Detect brands in local image /* Detect brands in a remote image by: * 1. Saving the URL as an ImageURL type for passing to AnalyzeImage. * 2. Defining what to extract from the image by initializing an array of VisualFeatureTypes. * 3. Calling the Computer Vision service's AnalyzeImage with the: * - context * - image * - features to extract * - an enumeration specifying the domain-specific details to return * - "" to specify the default language ("en") as the output language * 5. Displaying the brands, confidence values, and their bounding boxes. */ // <snippet_brands> func DetectBrandsRemoteImage(client computervision.BaseClient, remoteImageURL string) { var remoteImage computervision.ImageURL remoteImage.URL = &remoteImageURL features := []computervision.VisualFeatureTypes{computervision.VisualFeatureTypesBrands} imageAnalysis, err := client.AnalyzeImage( computerVisionContext, remoteImage, features, []computervision.Details{}, "en") if err != nil { log.Fatal(err) } fmt.Println("\nDetecting brands in remote image: ") if len(*imageAnalysis.Brands) == 0 { fmt.Println("No brands detected.") } else { for _, brand := range *imageAnalysis.Brands { fmt.Printf("'%v' with confidence %.2f%% at location (%v, %v), (%v, %v)\n", *brand.Name, *brand.Confidence * 100, *brand.Rectangle.X, *brand.Rectangle.X + *brand.Rectangle.W, *brand.Rectangle.Y, *brand.Rectangle.Y + *brand.Rectangle.H) } } } // </snippet_brands> // END - Detect brands in remote image /* Recognize text with the Read API in a local image by: * 1. Instantiating a ReadCloser, which is required by BatchReadFileInStream. * 2. Opening the ReadCloser instance for reading. * 3. Specifying whether the text to recognize is handwritten or printed. * 4. Calling the Computer Vision service's BatchReadFileInStream with the: * - context * - image * - text recognition mode * 5. Extracting the Operation-Location URL value from the BatchReadFileInStream * response * 6. Waiting for the operation to complete. * 7. Displaying the results. 
*/ func RecognizeTextReadAPILocalImage(client computervision.BaseClient, localImagePath string) { var localImage io.ReadCloser localImage, err := os.Open(localImagePath) if err != nil { log.Fatal(err) } textRecognitionMode := computervision.Handwritten // When you use the Read Document interface, the response contains a field // called "Operation-Location", which contains the URL to use for your // GetReadOperationResult to access OCR results. textHeaders, err := client.BatchReadFileInStream( computerVisionContext, localImage, textRecognitionMode) if err != nil { log.Fatal(err) } // Use ExtractHeader from the autorest library to get the Operation-Location URL operationLocation := autorest.ExtractHeaderValue("Operation-Location", textHeaders.Response) numberOfCharsInOperationId := 36 operationId := string(operationLocation[len(operationLocation)-numberOfCharsInOperationId : len(operationLocation)]) readOperationResult, err := client.GetReadOperationResult(computerVisionContext, operationId) if err != nil { log.Fatal(err) } // Wait for the operation to complete. i := 0 maxRetries := 10 fmt.Println("\nRecognizing text in a local image with the batch Read API ... \n") for readOperationResult.Status != computervision.Failed && readOperationResult.Status != computervision.Succeeded { if i >= maxRetries { break } i++ fmt.Printf("Server status: %v, waiting %v seconds...\n", readOperationResult.Status, i) time.Sleep(1 * time.Second) readOperationResult, err = client.GetReadOperationResult(computerVisionContext, operationId) if err != nil { log.Fatal(err) } } // Display the results. fmt.Println() for _, recResult := range *(readOperationResult.RecognitionResults) { for _, line := range *recResult.Lines { fmt.Println(*line.Text) } } } // END - Recognize text with the Read API in a local image /* Recognize text with the Read API in a remote image by: * 1. Saving the URL as an ImageURL type for passing to BatchReadFile. * 2. Specifying whether the text to recognize is handwritten or printed. * 3. Calling the Computer Vision service's BatchReadFile with the: * - context * - image * - text recognition mode * 4. Extracting the Operation-Location URL value from the BatchReadFile * response * 5. Waiting for the operation to complete. * 6. Displaying the results. */ // <snippet_read_call> func RecognizeTextReadAPIRemoteImage(client computervision.BaseClient, remoteImageURL string) { var remoteImage computervision.ImageURL remoteImage.URL = &remoteImageURL textRecognitionMode := computervision.Printed // When you use the Read Document interface, the response contains a field // called "Operation-Location", which contains the URL to use for your // GetReadOperationResult to access OCR results. textHeaders, err := client.BatchReadFile( computerVisionContext, remoteImage, textRecognitionMode) if err != nil { log.Fatal(err) } // Use ExtractHeader from the autorest library to get the Operation-Location URL operationLocation := autorest.ExtractHeaderValue("Operation-Location", textHeaders.Response) numberOfCharsInOperationId := 36 operationId := string(operationLocation[len(operationLocation)-numberOfCharsInOperationId : len(operationLocation)]) // </snippet_read_call> // <snippet_read_response> readOperationResult, err := client.GetReadOperationResult(computerVisionContext, operationId) if err != nil { log.Fatal(err) } // Wait for the operation to complete. i := 0 maxRetries := 10 fmt.Println("\nRecognizing text in a remote image with the batch Read API ... 
\n") for readOperationResult.Status != computervision.Failed && readOperationResult.Status != computervision.Succeeded { if i >= maxRetries { break } i++ fmt.Printf("Server status: %v, waiting %v seconds...\n", readOperationResult.Status, i) time.Sleep(1 * time.Second) readOperationResult, err = client.GetReadOperationResult(computerVisionContext, operationId) if err != nil { log.Fatal(err) } } // </snippet_read_response> // <snippet_read_display> // Display the results. fmt.Println() for _, recResult := range *(readOperationResult.RecognitionResults) { for _, line := range *recResult.Lines { fmt.Println(*line.Text) } } // </snippet_read_display> } /* Extract text with OCR from a local image by: * 1. Instantiating a ReadCloser, which is required by AnalyzeImageInStream. * 2. Opening the ReadCloser instance for reading. * 3. Calling the Computer Vision service's RecognizePrintedTextInStream with the: * - context * - whether to detect the text orientation * - image * - language * 4. Displaying the brands, confidence values, and their bounding boxes. */ func ExtractTextOCRLocalImage(client computervision.BaseClient, localImagePath string) { var localImage io.ReadCloser localImage, err := os.Open(localImagePath) if err != nil { log.Fatal(err) } fmt.Println("\nRecognizing text in a local image with OCR ... \n") ocrResult, err := client.RecognizePrintedTextInStream(computerVisionContext, true, localImage, computervision.En) if err != nil { log.Fatal(err) } fmt.Printf("Text angle: %.4f\n", *ocrResult.TextAngle) for _, region := range *ocrResult.Regions { for _, line := range *region.Lines { fmt.Printf("\nBounding box: %v\n", *line.BoundingBox) s := "" for _, word := range *line.Words { s += *word.Text + " " } fmt.Printf("Text: %v\n", s) } } } /* Extract text with OCR from a local image by: * 1. Saving the URL as an ImageURL type for passing to AnalyzeImage. * 2. Calling the Computer Vision service's RecognizePrintedTextInStream with the: * - context * - whether to detect the text orientation * - image * - language * 3. Displaying the brands, confidence values, and their bounding boxes. */ func ExtractTextOCRRemoteImage(client computervision.BaseClient, remoteImageURL string) { var remoteImage computervision.ImageURL remoteImage.URL = &remoteImageURL fmt.Println("\nRecognizing text in a remote image with OCR ... \n") ocrResult, err := client.RecognizePrintedText(computerVisionContext, true, remoteImage, computervision.En) if err != nil { log.Fatal(err) } fmt.Printf("Text angle: %.4f\n", *ocrResult.TextAngle) for _, region := range *ocrResult.Regions { for _, line := range *region.Lines { fmt.Printf("\nBounding box: %v\n", *line.BoundingBox) s := "" for _, word := range *line.Words { s += *word.Text + " " } fmt.Printf("Text: %v\n", s) } } }
[ "\"COMPUTER_VISION_SUBSCRIPTION_KEY\"", "\"COMPUTER_VISION_ENDPOINT\"" ]
[]
[ "COMPUTER_VISION_ENDPOINT", "COMPUTER_VISION_SUBSCRIPTION_KEY" ]
[]
["COMPUTER_VISION_ENDPOINT", "COMPUTER_VISION_SUBSCRIPTION_KEY"]
go
2
0
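The least obvious part of the Go sample above is the asynchronous Read API flow: BatchReadFile/BatchReadFileInStream return only an Operation-Location header, the operation ID is the trailing 36 characters of that URL, and the results are fetched by polling GetReadOperationResult until the status is Succeeded or Failed, capped at ten retries with a one-second sleep. The Python sketch below restates only that control flow; submit_read and get_read_result are hypothetical stand-ins for the SDK calls and the status strings are assumptions, so this is an illustration of the loop, not Azure SDK code.

import time

def poll_read_result(submit_read, get_read_result, image, max_retries=10):
    # Mirrors the polling loop in RecognizeTextReadAPIRemoteImage above.
    operation_location = submit_read(image)      # value of the Operation-Location header
    operation_id = operation_location[-36:]      # same 36-character suffix the Go code slices off
    result = get_read_result(operation_id)
    retries = 0
    while result["status"] not in ("Succeeded", "Failed") and retries < max_retries:
        retries += 1
        time.sleep(1)                            # the Go sample waits one second per attempt
        result = get_read_result(operation_id)
    return result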
neo_baka_chat/train/__main__.py
import logging
import os

import yaml
from comet_ml import Experiment

from neo_baka_chat.oss import OSS
from neo_baka_chat.train.loader import Config, load_session

ALIYUN_ACCESSKEY_ID = os.environ["ALIYUN_ACCESSKEY_ID"]
ALIYUN_ACCESSKEY_SECRET = os.environ["ALIYUN_ACCESSKEY_SECRET"]
ALIYUN_REGION = os.environ["ALIYUN_REGION"]
OSS_BUCKET = os.environ["OSS_BUCKET"]
COMET_KEY = os.environ.get("COMET_KEY")
COMET_PROJECT = os.environ.get("COMET_PROJECT")
CONFIG_PATH = os.environ["CONFIG_PATH"]


def main():
    logging.warning("Connecting to OSS.")
    bucket = OSS(ALIYUN_ACCESSKEY_ID, ALIYUN_ACCESSKEY_SECRET, ALIYUN_REGION, OSS_BUCKET)

    logging.warning("Fetching configuration.")
    _config = yaml.safe_load(bucket.loads(CONFIG_PATH, is_json=False))
    config = Config(**_config)
    session = load_session(config, bucket)

    logging.warning("Training.")
    experiment = Experiment(COMET_KEY, project_name=COMET_PROJECT) if COMET_KEY else None
    result = session.train(experiment, config.mixed_precision)

    logging.warning("Uploading model.")
    meta = session.hparams.dumps()
    meta.update({"epoch": result.epoch, "loss": result.loss, "mixed_precision": result.mixed_precision})
    model = bucket.create_model(config.save_model_prefix)
    model.put("vocab.json", session.corpus.vocab.dumps())
    model.put("meta.json", meta)
    model.put("weights.pth", result.dump_state())

    logging.warning("Complete.")


main()
[]
[]
[ "OSS_BUCKET", "ALIYUN_ACCESSKEY_ID", "CONFIG_PATH", "COMET_KEY", "ALIYUN_REGION", "ALIYUN_ACCESSKEY_SECRET", "COMET_PROJECT" ]
[]
["OSS_BUCKET", "ALIYUN_ACCESSKEY_ID", "CONFIG_PATH", "COMET_KEY", "ALIYUN_REGION", "ALIYUN_ACCESSKEY_SECRET", "COMET_PROJECT"]
python
7
0
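All seven variables listed in this row's environment field are read at import time, and the five Aliyun/OSS ones use direct indexing, so a missing variable surfaces as a bare KeyError. A small guard such as the following (a sketch, not part of the project) makes that failure explicit before any work starts:

import os

REQUIRED_VARS = [
    "ALIYUN_ACCESSKEY_ID", "ALIYUN_ACCESSKEY_SECRET", "ALIYUN_REGION",
    "OSS_BUCKET", "CONFIG_PATH",
]  # COMET_KEY and COMET_PROJECT stay optional: the script reads them with os.environ.get()

missing = [name for name in REQUIRED_VARS if name not in os.environ]
if missing:
    raise SystemExit("Missing required environment variables: " + ", ".join(missing))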
tests/test_service.py
import os

from nitrado import Service, initialize_client


def set_client():
    url = "https://api.nitrado.net/"
    key = os.getenv('NITRADO_KEY')
    initialize_client(key, url)


def test_services():
    set_client()
    services = Service.all()
    assert len(services) > 0


def test_logs():
    set_client()
    service = Service.all()[0]
    logs = service.logs()
    assert type(logs) == list


def test_tasks():
    set_client()
    service = Service.all()[0]
    tasks = service.tasks()
    assert type(tasks) == list


def test_notifications():
    set_client()
    service = Service.all()[0]
    notif = service.notifications()
    assert type(notif) == list


def tests():
    test_services()
    test_notifications()
    test_logs()
    test_tasks()


if __name__ == "__main__":
    tests()
    print("passing")
[]
[]
[ "NITRADO_KEY" ]
[]
["NITRADO_KEY"]
python
1
0
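Every test above re-runs set_client() and compares types with ==. If the file is executed under pytest (which its naming suggests, though the __main__ block does not require it), the setup could live in a shared fixture instead; the following is a possible refactor sketch, not code from the repository:

import os

import pytest
from nitrado import Service, initialize_client


@pytest.fixture(scope="module", autouse=True)
def _init_client():
    # Same initialisation as set_client(), performed once for the whole module.
    initialize_client(os.getenv("NITRADO_KEY"), "https://api.nitrado.net/")


def test_logs_are_a_list():
    service = Service.all()[0]
    assert isinstance(service.logs(), list)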
main.go
// Copyright 2015 - 2017 Ka-Hing Cheung // Copyright 2015 - 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "strconv" goofys "github.com/kahing/goofys/api" . "github.com/kahing/goofys/internal" "fmt" "os" "os/signal" "strings" "sync" "syscall" "time" "golang.org/x/net/context" "github.com/jacobsa/fuse" "github.com/jinzhu/copier" "github.com/kardianos/osext" "github.com/urfave/cli" daemon "github.com/sevlyar/go-daemon" ) var log = GetLogger("main") func registerSIGINTHandler(fs *Goofys, flags *FlagStorage) { // Register for SIGINT. signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, os.Interrupt, syscall.SIGTERM, syscall.SIGUSR1) // Start a goroutine that will unmount when the signal is received. go func() { for { s := <-signalChan if s == syscall.SIGUSR1 { log.Infof("Received %v", s) fs.SigUsr1() continue } if len(flags.Cache) == 0 { log.Infof("Received %v, attempting to unmount...", s) err := TryUnmount(flags.MountPoint) if err != nil { log.Errorf("Failed to unmount in response to %v: %v", s, err) } else { log.Printf("Successfully unmounted %v in response to %v", flags.MountPoint, s) return } } else { log.Infof("Received %v", s) // wait for catfs to die and cleanup } } }() } var waitedForSignal os.Signal func waitForSignal(wg *sync.WaitGroup) { signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, syscall.SIGUSR1, syscall.SIGUSR2) wg.Add(1) go func() { waitedForSignal = <-signalChan wg.Done() }() } func kill(pid int, s os.Signal) (err error) { p, err := os.FindProcess(pid) if err != nil { return err } defer p.Release() err = p.Signal(s) if err != nil { return err } return } // Start serving requests, returning a fuse.MountedFileSystem that can be joined // to wait for unmounting. func fuseServe( ctx context.Context, bucketName string, dev uintptr, flags *FlagStorage, ready chan error) (fs *Goofys, mfs *fuse.MountedFileSystem, err error) { // XXX really silly copy here! in goofys.Mount we will copy it // back to FlagStorage. But I don't see a easier way to expose // Config in the api package var config goofys.Config copier.Copy(&config, *flags) return goofys.FuseServe(ctx, bucketName, dev, &config, ready) } func massagePath() { for _, e := range os.Environ() { if strings.HasPrefix(e, "PATH=") { return } } // mount -a seems to run goofys without PATH // usually fusermount is in /bin os.Setenv("PATH", "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin") } func massageArg0() { var err error os.Args[0], err = osext.Executable() if err != nil { panic(fmt.Sprintf("Unable to discover current executable: %v", err)) } } var Version = "use `make build' to fill version hash correctly" func main() { VersionHash = Version massagePath() app := NewApp() var flags *FlagStorage var child *os.Process app.Action = func(c *cli.Context) (err error) { // We should get two arguments exactly. Otherwise error out. 
if len(c.Args()) != 2 { fmt.Fprintf( os.Stderr, "Error: %s takes exactly two arguments.\n\n", app.Name) cli.ShowAppHelp(c) os.Exit(1) } // Populate and parse flags. bucketName := c.Args()[0] flags = PopulateFlags(c) if flags == nil { cli.ShowAppHelp(c) err = fmt.Errorf("invalid arguments") return } defer func() { time.Sleep(time.Second) flags.Cleanup() }() // Mount the file system. var dev uintptr ready := make(chan error, 1) fuseMounted, err := fuse.IsMounted(flags.MountPoint) if !fuseMounted { mountCfg := goofys.GetMountConfig(context.Background(), bucketName, flags) dev, err = fuse.FuseMount(flags.MountPoint, mountCfg, ready) if err != nil { return err } err = os.Setenv("GOOFYS_FUSE_FD", strconv.Itoa(int(dev))) if err != nil { return err } } else { // XXX: We do Reborn() after starting FuseMount, so for OSX we need other way // to safely wait for mount completion. ready <- nil } if !flags.Foreground { var wg sync.WaitGroup waitForSignal(&wg) massageArg0() ctx := new(daemon.Context) child, err = ctx.Reborn() if err != nil { panic(fmt.Sprintf("unable to daemonize: %v", err)) } InitLoggers(!flags.Foreground && child == nil) if child != nil { // attempt to wait for child to notify parent wg.Wait() if waitedForSignal == syscall.SIGUSR1 { return } else { return fuse.EINVAL } } else { // kill our own waiting goroutine kill(os.Getpid(), syscall.SIGUSR1) wg.Wait() defer ctx.Release() devFd, err := strconv.Atoi(os.Getenv("GOOFYS_FUSE_FD")) if err != nil { return err } dev = uintptr(devFd) } } else { InitLoggers(!flags.Foreground) } // Start the fuse server. var mfs *fuse.MountedFileSystem var fs *Goofys fs, mfs, err = fuseServe( context.Background(), bucketName, dev, flags, ready) if err != nil { if !flags.Foreground { kill(os.Getppid(), syscall.SIGUSR2) } log.Fatalf("Mounting file system: %v", err) // fatal also terminates itself } else { if !flags.Foreground { kill(os.Getppid(), syscall.SIGUSR1) } log.Println("File system has been successfully mounted.") // Let the user unmount with Ctrl-C // (SIGINT). But if cache is on, catfs will // receive the signal and we would detect that exiting registerSIGINTHandler(fs, flags) // Wait for the file system to be unmounted. err = mfs.Join(context.Background()) if err != nil { err = fmt.Errorf("MountedFileSystem.Join: %v", err) return } log.Println("Successfully exiting.") } return } err := app.Run(MassageMountFlags(os.Args)) if err != nil { if flags != nil && !flags.Foreground && child != nil { log.Fatalln("Unable to mount file system, see syslog for details") } os.Exit(1) } }
[ "\"GOOFYS_FUSE_FD\"" ]
[]
[ "GOOFYS_FUSE_FD" ]
[]
["GOOFYS_FUSE_FD"]
go
1
0
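The hardest part of this main.go to follow in flattened form is the daemonisation handshake: after ctx.Reborn(), the parent process blocks in waitForSignal() until the mounted child reports the result by sending SIGUSR1 (success) or SIGUSR2 (failure) to os.Getppid(). The sketch below restates just that handshake with Python's os/signal modules; mount_filesystem is a hypothetical stand-in for the FUSE mount, and this illustrates the pattern only, it is not goofys code.

import os
import signal
import sys

# Block the handshake signals before forking so a fast child cannot race the parent.
signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGUSR1, signal.SIGUSR2})
pid = os.fork()

if pid > 0:
    # Parent: wait for the child's report, mirroring waitForSignal()/wg.Wait() in main.go.
    sig = signal.sigwait({signal.SIGUSR1, signal.SIGUSR2})
    sys.exit(0 if sig == signal.SIGUSR1 else 1)

# Child: perform the mount, then tell the parent how it went.
try:
    mount_filesystem()  # hypothetical stand-in for fuseServe()/MountedFileSystem setup
    os.kill(os.getppid(), signal.SIGUSR1)
except Exception:
    os.kill(os.getppid(), signal.SIGUSR2)
    raise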
data_validation/__main__.py
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import logging import json from yaml import dump, load, Dumper, Loader from data_validation import ( cli_tools, clients, consts, jellyfish_distance, state_manager, ) from data_validation.config_manager import ConfigManager from data_validation.data_validation import DataValidation def _get_arg_config_file(args): """Return String yaml config file path.""" if not args.config_file: raise ValueError("YAML Config File was not supplied.") return args.config_file def _get_yaml_config_from_file(config_file_path): """Return Dict of yaml validation data.""" with open(config_file_path, "r") as yaml_file: yaml_configs = load(yaml_file.read(), Loader=Loader) return yaml_configs def get_aggregate_config(args, config_manager): """Return list of formated aggregation objects. Args: config_manager (ConfigManager): Validation config manager instance. """ aggregate_configs = [config_manager.build_config_count_aggregate()] if args.count: col_args = None if args.count == "*" else cli_tools.get_arg_list(args.count) aggregate_configs += config_manager.build_config_column_aggregates( "count", col_args, None ) if args.sum: col_args = None if args.sum == "*" else cli_tools.get_arg_list(args.sum) aggregate_configs += config_manager.build_config_column_aggregates( "sum", col_args, consts.NUMERIC_DATA_TYPES ) if args.avg: col_args = None if args.avg == "*" else cli_tools.get_arg_list(args.avg) aggregate_configs += config_manager.build_config_column_aggregates( "avg", col_args, consts.NUMERIC_DATA_TYPES ) if args.min: col_args = None if args.min == "*" else cli_tools.get_arg_list(args.min) aggregate_configs += config_manager.build_config_column_aggregates( "min", col_args, consts.NUMERIC_DATA_TYPES ) if args.max: col_args = None if args.max == "*" else cli_tools.get_arg_list(args.max) aggregate_configs += config_manager.build_config_column_aggregates( "max", col_args, consts.NUMERIC_DATA_TYPES ) return aggregate_configs def build_config_from_args(args, config_manager): """Return config manager object ready to execute. Args: config_manager (ConfigManager): Validation config manager instance. """ config_manager.append_aggregates(get_aggregate_config(args, config_manager)) if args.primary_keys and not args.grouped_columns: if not args.grouped_columns and not config_manager.use_random_rows(): logging.warning( "No Grouped columns or Random Rows specified, ignoring primary keys." 
) if args.grouped_columns: grouped_columns = cli_tools.get_arg_list(args.grouped_columns) config_manager.append_query_groups( config_manager.build_config_grouped_columns(grouped_columns) ) if args.primary_keys: primary_keys = cli_tools.get_arg_list(args.primary_keys, default_value=[]) config_manager.append_primary_keys( config_manager.build_config_grouped_columns(primary_keys) ) # TODO(GH#18): Add query filter config logic return config_manager def build_config_managers_from_args(args): """Return a list of config managers ready to execute.""" configs = [] if args.type is None: validate_cmd = args.validate_cmd.capitalize() if validate_cmd == "Schema": config_type = consts.SCHEMA_VALIDATION elif validate_cmd == "Column": # TODO: We need to discuss how GroupedColumn and Row are differentiated. if args.grouped_columns: config_type = consts.GROUPED_COLUMN_VALIDATION else: config_type = consts.COLUMN_VALIDATION else: raise ValueError(f"Unknown Validation Type: {validate_cmd}") else: config_type = args.type result_handler_config = None if args.bq_result_handler: result_handler_config = cli_tools.get_result_handler( args.bq_result_handler, args.service_account ) elif args.result_handler_config: result_handler_config = cli_tools.get_result_handler( args.result_handler_config, args.service_account ) # Schema validation will not accept filters, labels, or threshold as flags filter_config, labels, threshold = [], [], 0.0 if config_type != consts.SCHEMA_VALIDATION: if args.filters: filter_config = cli_tools.get_filters(args.filters) if args.threshold: threshold = args.threshold labels = cli_tools.get_labels(args.labels) mgr = state_manager.StateManager() source_client = clients.get_data_client(mgr.get_connection_config(args.source_conn)) target_client = clients.get_data_client(mgr.get_connection_config(args.target_conn)) format = args.format if args.format else "table" is_filesystem = source_client._source_type == "FileSystem" tables_list = cli_tools.get_tables_list( args.tables_list, default_value=[], is_filesystem=is_filesystem ) for table_obj in tables_list: config_manager = ConfigManager.build_config_manager( config_type, args.source_conn, args.target_conn, table_obj, labels, threshold, format, use_random_rows=args.use_random_row, random_row_batch_size=args.random_row_batch_size, source_client=source_client, target_client=target_client, result_handler_config=result_handler_config, filter_config=filter_config, verbose=args.verbose, ) if config_type != consts.SCHEMA_VALIDATION: config_manager = build_config_from_args(args, config_manager) configs.append(config_manager) return configs def build_config_managers_from_yaml(args): """Returns List[ConfigManager] instances ready to be executed.""" config_managers = [] config_file_path = _get_arg_config_file(args) yaml_configs = _get_yaml_config_from_file(config_file_path) mgr = state_manager.StateManager() source_conn = mgr.get_connection_config(yaml_configs[consts.YAML_SOURCE]) target_conn = mgr.get_connection_config(yaml_configs[consts.YAML_TARGET]) source_client = clients.get_data_client(source_conn) target_client = clients.get_data_client(target_conn) for config in yaml_configs[consts.YAML_VALIDATIONS]: config[consts.CONFIG_SOURCE_CONN] = source_conn config[consts.CONFIG_TARGET_CONN] = target_conn config[consts.CONFIG_RESULT_HANDLER] = yaml_configs[consts.YAML_RESULT_HANDLER] config_manager = ConfigManager( config, source_client, target_client, verbose=args.verbose ) config_managers.append(config_manager) return config_managers def 
_compare_match_tables(source_table_map, target_table_map, score_cutoff=0.8): """Return dict config object from matching tables.""" # TODO(dhercher): evaluate if improved comparison and score cutoffs should be used. table_configs = [] target_keys = target_table_map.keys() for source_key in source_table_map: target_key = jellyfish_distance.extract_closest_match( source_key, target_keys, score_cutoff=score_cutoff ) if target_key is None: continue table_config = { consts.CONFIG_SCHEMA_NAME: source_table_map[source_key][ consts.CONFIG_SCHEMA_NAME ], consts.CONFIG_TABLE_NAME: source_table_map[source_key][ consts.CONFIG_TABLE_NAME ], consts.CONFIG_TARGET_SCHEMA_NAME: target_table_map[target_key][ consts.CONFIG_SCHEMA_NAME ], consts.CONFIG_TARGET_TABLE_NAME: target_table_map[target_key][ consts.CONFIG_TABLE_NAME ], } table_configs.append(table_config) return table_configs def get_table_map(client, allowed_schemas=None): """Return dict with searchable keys for table matching.""" table_map = {} table_objs = clients.get_all_tables(client, allowed_schemas=allowed_schemas) for table_obj in table_objs: table_key = ".".join([t for t in table_obj if t]) table_map[table_key] = { consts.CONFIG_SCHEMA_NAME: table_obj[0], consts.CONFIG_TABLE_NAME: table_obj[1], } return table_map def find_tables_using_string_matching(args): """Return JSON String with matched tables for use in validations.""" score_cutoff = args.score_cutoff or 0.8 mgr = state_manager.StateManager() source_client = clients.get_data_client(mgr.get_connection_config(args.source_conn)) target_client = clients.get_data_client(mgr.get_connection_config(args.target_conn)) allowed_schemas = cli_tools.get_arg_list(args.allowed_schemas) source_table_map = get_table_map(source_client, allowed_schemas=allowed_schemas) target_table_map = get_table_map(target_client) table_configs = _compare_match_tables( source_table_map, target_table_map, score_cutoff=score_cutoff ) return json.dumps(table_configs) def run_raw_query_against_connection(args): """Return results of raw query for adhoc usage.""" mgr = state_manager.StateManager() client = clients.get_data_client(mgr.get_connection_config(args.conn)) with client.raw_sql(args.query, results=True) as cur: return cur.fetchall() def convert_config_to_yaml(args, config_managers): """Return dict objects formatted for yaml validations. Args: config_managers (list[ConfigManager]): List of config manager instances. """ yaml_config = { consts.YAML_SOURCE: args.source_conn, consts.YAML_TARGET: args.target_conn, consts.YAML_RESULT_HANDLER: config_managers[0].result_handler_config, consts.YAML_VALIDATIONS: [], } for config_manager in config_managers: yaml_config[consts.YAML_VALIDATIONS].append( config_manager.get_yaml_validation_block() ) return yaml_config def run_validation(config_manager, verbose=False): """Run a single validation. Args: config_manager (ConfigManager): Validation config manager instance. verbose (bool): Validation setting to log queries run. """ validator = DataValidation( config_manager.config, validation_builder=None, result_handler=None, verbose=verbose, ) validator.execute() def run_validations(args, config_managers): """Run and manage a series of validations. Args: config_managers (list[ConfigManager]): List of config manager instances. """ # TODO(issue/31): Add parallel execution logic for config_manager in config_managers: run_validation(config_manager, verbose=args.verbose) def store_yaml_config_file(args, config_managers): """Build a YAML config file from the supplied configs. 
Args: config_managers (list[ConfigManager]): List of config manager instances. """ config_file_path = _get_arg_config_file(args) yaml_configs = convert_config_to_yaml(args, config_managers) yaml_config_str = dump(yaml_configs, Dumper=Dumper) with open(config_file_path, "w") as yaml_file: yaml_file.write(yaml_config_str) def run(args): """ """ config_managers = build_config_managers_from_args(args) if args.config_file: store_yaml_config_file(args, config_managers) else: run_validations(args, config_managers) def run_connections(args): """ Run commands related to connection management.""" if args.connect_cmd == "list": cli_tools.list_connections() elif args.connect_cmd == "add": conn = cli_tools.get_connection_config_from_args(args) # Test getting a client to validate connection details _ = clients.get_data_client(conn) cli_tools.store_connection(args.connection_name, conn) else: raise ValueError(f"Connections Argument '{args.connect_cmd}' is not supported") def validate(args): """ Run commands related to data validation.""" if args.validate_cmd == "column" or args.validate_cmd == "schema": run(args) else: raise ValueError(f"Validation Argument '{args.validate_cmd}' is not supported") def main(): # Create Parser and Get Deployment Info args = cli_tools.get_parsed_args() if args.command == "run": run(args) elif args.command == "connections": run_connections(args) elif args.command == "run-config": config_managers = build_config_managers_from_yaml(args) run_validations(args, config_managers) elif args.command == "find-tables": print(find_tables_using_string_matching(args)) elif args.command == "query": print(run_raw_query_against_connection(args)) elif args.command == "validate": validate(args) elif args.command == "deploy": from data_validation import app app.app.run(debug=True, host="0.0.0.0", port=int(os.environ.get("PORT", 8080))) else: raise ValueError(f"Positional Argument '{args.command}' is not supported") if __name__ == "__main__": main()
[]
[]
[ "PORT" ]
[]
["PORT"]
python
1
0
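Flattened out, the command dispatch is easy to miss: the run command builds ConfigManager objects from CLI flags and either executes them or stores them as a YAML file, while run-config replays a previously stored file. The sketch below condenses those two paths using the same functions defined above; it assumes it runs inside this module and adds no behaviour of its own.

from data_validation import cli_tools

args = cli_tools.get_parsed_args()

if args.command == "run":
    config_managers = build_config_managers_from_args(args)
    if args.config_file:
        store_yaml_config_file(args, config_managers)   # persist for a later run-config
    else:
        run_validations(args, config_managers)
elif args.command == "run-config":
    # Re-load connections and validation blocks from the stored YAML and execute them.
    run_validations(args, build_config_managers_from_yaml(args))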
cli/commands/metadata_apply_test.go
package commands

import (
    "net/url"
    "os"
    "testing"
    "time"

    "github.com/briandowns/spinner"
    "github.com/hasura/graphql-engine/cli"
    "github.com/hasura/graphql-engine/cli/version"
    "github.com/sirupsen/logrus/hooks/test"
)

func testMetadataApply(t *testing.T, metadataFile string, endpoint *url.URL) {
    logger, _ := test.NewNullLogger()
    opts := &metadataApplyOptions{
        EC: &cli.ExecutionContext{
            Logger:       logger,
            Spinner:      spinner.New(spinner.CharSets[7], 100*time.Millisecond),
            MetadataFile: []string{metadataFile},
            ServerConfig: &cli.ServerConfig{
                Endpoint:       endpoint.String(),
                AdminSecret:    os.Getenv("HASURA_GRAPHQL_TEST_ADMIN_SECRET"),
                ParsedEndpoint: endpoint,
            },
        },
        actionType: "apply",
    }

    opts.EC.Version = version.New()
    v, err := version.FetchServerVersion(opts.EC.ServerConfig.Endpoint)
    if err != nil {
        t.Fatalf("getting server version failed: %v", err)
    }
    opts.EC.Version.SetServerVersion(v)

    err = opts.run()
    if err != nil {
        t.Fatalf("failed applying metadata: %v", err)
    }
}
[ "\"HASURA_GRAPHQL_TEST_ADMIN_SECRET\"" ]
[]
[ "HASURA_GRAPHQL_TEST_ADMIN_SECRET" ]
[]
["HASURA_GRAPHQL_TEST_ADMIN_SECRET"]
go
1
0
manage.py
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'send_email_microservice.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
[]
[]
[]
[]
[]
python
0
0
experiments_ovary_centres/run_center_clustering.py
""" The clustering is already part of the center prediction scipt. The path to the image and segmentation serves just for visualisation, for the own clustering they are not needed. Copyright (C) 2017 Jiri Borovec <[email protected]> """ import logging import os import sys from functools import partial import matplotlib import numpy as np import pandas as pd from sklearn import cluster if os.environ.get('DISPLAY', '') == '' and matplotlib.rcParams['backend'] != 'agg': print('No display found. Using non-interactive Agg backend.') matplotlib.use('Agg') import matplotlib.pylab as plt sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root import run_center_candidate_training as run_train import imsegm.utilities.data_io as tl_data import imsegm.utilities.drawing as tl_visu import imsegm.utilities.experiments as tl_expt # import run_center_prediction as run_pred # Set experiment folders FOLDER_CENTER = 'centers' FOLDER_CLUSTER_VISUAL = 'centers_clustering' LIST_SUBDIRS = [FOLDER_CENTER, FOLDER_CLUSTER_VISUAL] IMAGE_EXTENSIONS = ['.png', '.jpg'] # subfigure size for visualisations MAX_FIGURE_SIZE = 12 FOLDER_EXPERIMENT = 'detect-centers-predict_%s' NAME_YAML_PARAMS = 'config_clustering.yaml' # The asumtion is that the max distance is about 3 * sampling distance CLUSTER_PARAMS = { 'DBSCAN_max_dist': 50, 'DBSCAN_min_samples': 1, } DEFAULT_PARAMS = run_train.CENTER_PARAMS DEFAULT_PARAMS.update(CLUSTER_PARAMS) DEFAULT_PARAMS.update({ 'path_images': os.path.join(run_train.PATH_IMAGES, 'image', '*.jpg'), 'path_segms': os.path.join(run_train.PATH_IMAGES, 'segm', '*.png'), 'path_centers': os.path.join( DEFAULT_PARAMS['path_output'], FOLDER_EXPERIMENT % DEFAULT_PARAMS['name'], 'candidates', '*.csv' ) }) def cluster_center_candidates(points, max_dist=100, min_samples=1): """ cluster center candidates by given density clustering :param list(list(float)) points: points :param float max_dist: maximal distance among points :param int min_samples: minimal number od samples :return tuple(ndarray, list(int)): """ points = np.array(points) if not list(points): return points, [] dbscan = cluster.DBSCAN(eps=max_dist, min_samples=min_samples) dbscan.fit(points) labels = dbscan.labels_.copy() centers = [] for i in range(max(labels) + 1): clust = points[labels == i] if len(clust) > 0: center = np.mean(clust, axis=0) centers.append(center) return np.array(centers), labels def export_draw_image_centers_clusters( path_out, name, img, centres, points=None, clust_labels=None, segm=None, fig_suffix='', max_fig_size=MAX_FIGURE_SIZE, ): """ draw visualisation of clustered center candidates and export it :param str path_out: :param str name: :param ndarray img: :param centres: :param list(list(float)) points: :param list(int) clust_labels: :param ndarray segm: :param str fig_suffix: :param int max_fig_size: """ # if the output dos nor exist, leave if not os.path.isdir(path_out): return size = None if img is not None: size = np.array(img.shape[:2][::-1], dtype=float) elif segm is not None: size = np.array(segm.shape[:2][::-1], dtype=float) if size is not None: fig_size = (size / size.max() * max_fig_size) else: fig_size = (max_fig_size, max_fig_size) fig, ax = plt.subplots(figsize=fig_size) if img.ndim == 3: img = img[:, :, 0] tl_visu.draw_image_clusters_centers(ax, img, centres, points, clust_labels, segm) fig.tight_layout(pad=0) fig.savefig(os.path.join(path_out, name + fig_suffix + '.png')) plt.close(fig) def cluster_points_draw_export(dict_row, params, path_out=None): """ cluster points into centers and 
export visualisations :param dict dict_row: :param dict params: :param str path_out: :return dict: """ if not all(n in dict_row for n in ['path_points', 'path_image', 'path_segm']): raise ValueError('missing some required fields: %r' % dict_row) name = os.path.splitext(os.path.basename(dict_row['path_points']))[0] points = tl_data.load_landmarks_csv(dict_row['path_points']) if not list(points): logging.debug('no points to cluster for "%s"', name) points = tl_data.swap_coord_x_y(points) centres, clust_labels = cluster_center_candidates( points, max_dist=params['DBSCAN_max_dist'], min_samples=params['DBSCAN_min_samples'] ) path_csv = os.path.join(path_out, FOLDER_CENTER, name + '.csv') tl_data.save_landmarks_csv(path_csv, tl_data.swap_coord_x_y(centres)) path_visu = os.path.join(path_out, FOLDER_CLUSTER_VISUAL) img, segm = None, None if dict_row['path_image'] is not None and os.path.isfile(dict_row['path_image']): img = tl_data.io_imread(dict_row['path_image']) if dict_row['path_segm'] is not None and os.path.isfile(dict_row['path_segm']): segm = tl_data.io_imread(dict_row['path_segm']) export_draw_image_centers_clusters(path_visu, name, img, centres, points, clust_labels, segm) dict_row.update({'image': name, 'path_centers': path_csv, 'nb_centres': len(centres)}) return dict_row # def load_centers_images_segm(path_pattern_csv, path_images, path_segms): # list_csv = sorted(glob.glob(path_pattern_csv)) # logging.info('found %i csv files', len(list_csv)) # # filter only csv files win specific format # # list_csv = [p for p in list_csv # # if re.match(PATTERN_NAME_CSV_CENTERS, os.path.basename(p)) is not None] # # logging.info('filtered to %i center files', len(list_csv)) # # def add_img_path(name, key, path_dir): # for im_ext in IMAGE_EXTENSIONS: # path_img = os.path.join(path_dir, name + im_ext) # if os.path.exists(path_img): # d[key] = path_img # break # else: # d[key] = None # # df_paths = pd.DataFrame() # for path_csv in list_csv: # d = {'path_points': path_csv} # name = os.path.splitext(os.path.basename(path_csv))[0] # add_img_path(name, 'path_image', os.path.dirname(path_images)) # add_img_path(name, 'path_segm', os.path.dirname(path_segms)) # df_paths = df_paths.append(d, ignore_index=True) # return df_paths def main(params): """ PIPELINE candidate clustering :param dict(str,any) params: """ params['path_expt'] = os.path.join(params['path_output'], FOLDER_EXPERIMENT % params['name']) tl_expt.save_config_yaml(os.path.join(params['path_expt'], NAME_YAML_PARAMS), params) tl_expt.create_subfolders(params['path_expt'], LIST_SUBDIRS) list_paths = [params[k] for k in ['path_images', 'path_segms', 'path_centers']] df_paths = tl_data.find_files_match_names_across_dirs(list_paths) df_paths.columns = ['path_image', 'path_segm', 'path_points'] df_paths.index = range(1, len(df_paths) + 1) path_cover = os.path.join(params['path_expt'], run_train.NAME_CSV_TRIPLES) df_paths.to_csv(path_cover) logging.info('run clustering...') df_paths_new = pd.DataFrame() _wrapper_clustering = partial(cluster_points_draw_export, params=params, path_out=params['path_expt']) rows = (dict(row) for idx, row in df_paths.iterrows()) iterate = tl_expt.WrapExecuteSequence(_wrapper_clustering, rows, nb_workers=params['nb_workers']) for dict_center in iterate: df_paths_new = df_paths_new.append(dict_center, ignore_index=True) df_paths_new.set_index('image', inplace=True) df_paths_new.to_csv(path_cover) if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG) logging.info('running...') cli_params = 
run_train.arg_parse_params(DEFAULT_PARAMS) main(cli_params) logging.info('DONE')
[]
[]
[ "DISPLAY" ]
[]
["DISPLAY"]
python
1
0
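The core idea in cluster_center_candidates above is to run DBSCAN over the detected candidate points and average each resulting cluster into a single centre. A minimal, self-contained Python sketch of that idea follows; the synthetic points and the helper name cluster_centers are illustrative only and are not part of the imsegm code:

# Sketch only: DBSCAN-based grouping of candidate points into centres.
import numpy as np
from sklearn.cluster import DBSCAN

def cluster_centers(points, max_dist=50, min_samples=1):
    """Group nearby candidate points with DBSCAN and average each cluster."""
    points = np.asarray(points, dtype=float)
    if len(points) == 0:
        return points, np.array([])
    labels = DBSCAN(eps=max_dist, min_samples=min_samples).fit(points).labels_
    centers = [points[labels == i].mean(axis=0)
               for i in range(labels.max() + 1) if np.any(labels == i)]
    return np.array(centers), labels

if __name__ == '__main__':
    # two synthetic blobs -> two centres, roughly (0, 0) and (200, 200)
    pts = np.vstack([np.random.normal(0, 5, (20, 2)),
                     np.random.normal(200, 5, (20, 2))])
    centers, labels = cluster_centers(pts)
    print(centers)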
tests/fixtures.py
""" Most of the code here was initially stolen from C-lightning's test suite. Credits to Rusty Russell and Christian Decker from Blockstream who wrote most of the file i originally copied! (MIT licensed) """ from concurrent import futures from ephemeral_port_reserve import reserve from test_framework.bitcoind import BitcoinD from test_framework.revaultd import ManagerRevaultd, StakeholderRevaultd from test_framework.revault_network import RevaultNetwork from test_framework.utils import ( get_participants, POSTGRES_USER, POSTGRES_PASS, POSTGRES_HOST, POSTGRES_IS_SETUP, EXECUTOR_WORKERS, ) import os import pytest import shutil import tempfile import time __attempts = {} @pytest.fixture(autouse=True) def set_backtrace(): prev = os.getenv("RUST_BACKTRACE", "0") os.environ["RUST_BACKTRACE"] = "1" yield os.environ["RUST_BACKTRACE"] = prev @pytest.fixture(scope="session") def test_base_dir(): d = os.getenv("TEST_DIR", "/tmp") directory = tempfile.mkdtemp(prefix="revaultd-tests-", dir=d) print("Running tests in {}".format(directory)) yield directory content = os.listdir(directory) if content == []: shutil.rmtree(directory) else: print(f"Leaving base dir '{directory}' as it still contains {content}") # Taken from https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): # execute all other hooks to obtain the report object outcome = yield rep = outcome.get_result() # set a report attribute for each phase of a call, which can # be "setup", "call", "teardown" setattr(item, "rep_" + rep.when, rep) @pytest.fixture def directory(request, test_base_dir, test_name): """Return a per-test specific directory. This makes a unique test-directory even if a test is rerun multiple times. """ global __attempts # Auto set value if it isn't in the dict yet __attempts[test_name] = __attempts.get(test_name, 0) + 1 directory = os.path.join( test_base_dir, "{}_{}".format(test_name, __attempts[test_name]) ) if not os.path.exists(directory): os.makedirs(directory) yield directory # test_base_dir is at the session scope, so we can't use request.node as mentioned in # the doc linked in the hook above. 
if request.session.testsfailed == 0: try: shutil.rmtree(directory) except Exception: files = [ os.path.join(dp, f) for dp, _, fn in os.walk(directory) for f in fn ] print("Directory still contains files:", files) raise else: print(f"Test failed, leaving directory '{directory}' intact") @pytest.fixture def test_name(request): yield request.function.__name__ @pytest.fixture def executor(test_name): ex = futures.ThreadPoolExecutor( max_workers=EXECUTOR_WORKERS, thread_name_prefix=test_name ) yield ex ex.shutdown(wait=False) @pytest.fixture def bitcoind(directory): bitcoind = BitcoinD(bitcoin_dir=directory) bitcoind.startup() bitcoind.rpc.createwallet(bitcoind.rpc.wallet_name, False, False, "", True) while bitcoind.rpc.getbalance() < 50: bitcoind.rpc.generatetoaddress(1, bitcoind.rpc.getnewaddress()) while bitcoind.rpc.getblockcount() <= 1: time.sleep(0.1) yield bitcoind bitcoind.cleanup() @pytest.fixture def revaultd_stakeholder(bitcoind, directory): datadir = os.path.join(directory, "revaultd") os.makedirs(datadir, exist_ok=True) (stks, cosigs, mans, _, _, _) = get_participants(2, 3) stk_config = { "keychain": stks[0], "watchtowers": [{"host": "127.0.0.1:1", "noise_key": os.urandom(32)}], } csv = 35 coordinator_noise_key = ( "d91563973102454a7830137e92d0548bc83b4ea2799f1df04622ca1307381402" ) revaultd = StakeholderRevaultd( datadir, stks, cosigs, mans, csv, os.urandom(32), coordinator_noise_key, reserve(), bitcoind, stk_config=stk_config, ) revaultd.start() yield revaultd revaultd.cleanup() @pytest.fixture def revaultd_manager(bitcoind, directory): datadir = os.path.join(directory, "revaultd") os.makedirs(datadir, exist_ok=True) (stks, cosigs, mans, _, _, _) = get_participants(2, 3) man_config = { "keychain": mans[0], "cosigners": [{"host": "127.0.0.1:1", "noise_key": os.urandom(32)}], } csv = 35 coordinator_noise_key = ( "d91563973102454a7830137e92d0548bc83b4ea2799f1df04622ca1307381402" ) revaultd = ManagerRevaultd( datadir, stks, cosigs, mans, csv, os.urandom(32), coordinator_noise_key, reserve(), bitcoind, man_config=man_config, ) revaultd.start() yield revaultd revaultd.cleanup() @pytest.fixture def revault_network(directory, bitcoind): if not POSTGRES_IS_SETUP: raise ValueError( "Please set the POSTGRES_USER, POSTGRES_PASS and " "POSTGRES_HOST environment variables." ) factory = RevaultNetwork( directory, bitcoind, POSTGRES_USER, POSTGRES_PASS, POSTGRES_HOST ) yield factory factory.cleanup()
[]
[]
[ "TEST_DIR", "RUST_BACKTRACE" ]
[]
["TEST_DIR", "RUST_BACKTRACE"]
python
2
0
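The set_backtrace fixture above illustrates a common pytest pattern: temporarily force an environment variable for the duration of every test and restore it afterwards. A minimal sketch of the same pattern, assuming a RUST_BACKTRACE-style variable; unlike the original it also restores the "unset" state explicitly:

# Sketch only: save/override/restore an environment variable around each test.
import os
import pytest

@pytest.fixture(autouse=True)
def force_backtrace():
    prev = os.environ.get("RUST_BACKTRACE")     # remember the original value (or None)
    os.environ["RUST_BACKTRACE"] = "1"          # force backtraces while the test runs
    yield
    if prev is None:
        os.environ.pop("RUST_BACKTRACE", None)  # variable was unset before the test
    else:
        os.environ["RUST_BACKTRACE"] = prev

def test_backtrace_enabled():
    assert os.environ["RUST_BACKTRACE"] == "1"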
launch/test/launch/test_logging.py
# Copyright 2019 Open Source Robotics Foundation, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the launch.logging module.""" import logging import os import pathlib import re from unittest import mock from launch.frontend.parse_substitution import parse_substitution import launch.logging from launch.substitutions import TextSubstitution import pytest @pytest.fixture def log_dir(tmpdir_factory): """Test fixture that generates a temporary directory for log files.""" return str(tmpdir_factory.mktemp('logs')) def test_bad_logging_launch_config(): """Tests that setup throws at bad configuration.""" launch.logging.reset() with pytest.raises(ValueError): launch.logging.launch_config.log_dir = 'not/a/real/dir' with pytest.raises(ValueError): launch.logging.launch_config.set_screen_format('default', screen_style='%') with pytest.raises(ValueError): launch.logging.launch_config.set_log_format(log_format='default', log_style='%') def test_output_loggers_bad_configuration(log_dir): """Tests that output loggers setup throws at bad configuration.""" launch.logging.launch_config.reset() launch.logging.launch_config.log_dir = log_dir with pytest.raises(ValueError): launch.logging.get_output_loggers('some-proc', 'not_an_alias') with pytest.raises(ValueError): launch.logging.get_output_loggers('some-proc', {'garbage': {'log'}}) with pytest.raises(ValueError): launch.logging.get_output_loggers('some-proc', {'stdout': {'garbage'}}) @pytest.mark.parametrize('config,checks', [ ('screen', {'stdout': {'screen'}, 'stderr': {'screen'}}), ('log', {'stdout': {'log'}, 'stderr': {'log', 'screen'}}), ('both', {'both': {'log', 'screen'}}), ('own_log', { 'stdout': {'own_log'}, 'stderr': {'own_log'}, 'both': {'own_log'}, }), ('full', { 'stdout': {'log', 'own_log', 'screen'}, 'stderr': {'log', 'own_log', 'screen'}, 'both': {'own_log'}, }), ( {'stdout': {'screen', 'log'}, 'stderr': {'own_log'}}, { 'stdout': {'screen', 'log'}, 'stderr': {'own_log'} }, ) ]) def test_output_loggers_configuration(capsys, log_dir, config, checks): checks = {'stdout': set(), 'stderr': set(), 'both': set(), **checks} launch.logging.reset() launch.logging.launch_config.log_dir = log_dir logger = launch.logging.get_logger('some-proc') logger.addHandler(launch.logging.launch_config.get_screen_handler()) logger.addHandler(launch.logging.launch_config.get_log_file_handler()) logger.setLevel(logging.ERROR) stdout_logger, stderr_logger = launch.logging.get_output_loggers('some-proc', config) logger.debug('oops') logger.error('baz') stdout_logger.info('foo') stderr_logger.info('bar') capture = capsys.readouterr() lines = list(reversed(capture.out.splitlines())) assert '[ERROR] [some-proc]: baz' == lines.pop() if 'screen' in (checks['stdout'] | checks['both']): assert 'foo' == lines.pop() if 'screen' in (checks['stderr'] | checks['both']): assert 'bar' == lines.pop() assert 0 == len(lines) assert 0 == len(capture.err) launch.logging.launch_config.get_log_file_handler().flush() main_log_path = launch.logging.launch_config.get_log_file_path() assert 
os.path.exists(main_log_path) assert 0 != os.stat(main_log_path).st_size with open(main_log_path, 'r') as f: lines = list(reversed(f.readlines())) assert re.match(r'[0-9]+\.[0-9]+ \[ERROR\] \[some-proc\]: baz', lines.pop()) is not None if 'log' in (checks['stdout'] | checks['both']): assert re.match(r'[0-9]+\.[0-9]+ foo', lines.pop()) is not None if 'log' in (checks['stderr'] | checks['both']): assert re.match(r'[0-9]+\.[0-9]+ bar', lines.pop()) is not None assert 0 == len(lines) if 'own_log' in (checks['stdout'] | checks['both']): launch.logging.launch_config.get_log_file_handler('some-proc-stdout.log').flush() own_log_path = launch.logging.launch_config.get_log_file_path('some-proc-stdout.log') assert os.path.exists(own_log_path) assert 0 != os.stat(own_log_path).st_size with open(own_log_path, 'r') as f: lines = f.read().splitlines() assert 1 == len(lines) assert 'foo' == lines[0] else: own_log_path = launch.logging.launch_config.get_log_file_path('some-proc-stdout.log') assert (not os.path.exists(own_log_path) or 0 == os.stat(own_log_path).st_size) if 'own_log' in (checks['stderr'] | checks['both']): launch.logging.launch_config.get_log_file_handler('some-proc-stderr.log').flush() own_log_path = launch.logging.launch_config.get_log_file_path('some-proc-stderr.log') assert os.path.exists(own_log_path) assert 0 != os.stat(own_log_path).st_size with open(own_log_path, 'r') as f: lines = f.read().splitlines() assert 1 == len(lines) assert 'bar' == lines[0] else: own_log_path = launch.logging.launch_config.get_log_file_path('some-proc-stderr.log') assert (not os.path.exists(own_log_path) or 0 == os.stat(own_log_path).st_size) if 'own_log' in checks['both']: launch.logging.launch_config.get_log_file_handler('some-proc.log').flush() own_log_path = launch.logging.launch_config.get_log_file_path('some-proc.log') assert os.path.exists(own_log_path) assert 0 != os.stat(own_log_path).st_size with open(own_log_path, 'r') as f: lines = f.read().splitlines() assert 2 == len(lines) assert 'foo' == lines[0] assert 'bar' == lines[1] else: own_log_path = launch.logging.launch_config.get_log_file_path('some-proc.log') assert (not os.path.exists(own_log_path) or 0 == os.stat(own_log_path).st_size) def test_screen_default_format_with_timestamps(capsys, log_dir): """Test screen logging when using the default logs format with timestamps.""" launch.logging.reset() launch.logging.launch_config.level = logging.DEBUG launch.logging.launch_config.log_dir = log_dir launch.logging.launch_config.set_screen_format('default_with_timestamp') logger = launch.logging.get_logger('some-proc') logger.addHandler(launch.logging.launch_config.get_screen_handler()) assert logger.getEffectiveLevel() == logging.DEBUG logger.debug('foo') capture = capsys.readouterr() lines = capture.out.splitlines() assert 1 == len(lines) assert re.match(r'[0-9]+\.[0-9]+ \[DEBUG\] \[some-proc\]: foo', lines[0]) is not None assert 0 == len(capture.err) def test_screen_default_format(capsys): """Test screen logging when using the default logs format.""" launch.logging.reset() logger = launch.logging.get_logger('some-proc') logger.addHandler(launch.logging.launch_config.get_screen_handler()) assert logger.getEffectiveLevel() == logging.INFO logger.info('bar') capture = capsys.readouterr() lines = capture.out.splitlines() assert 1 == len(lines) assert '[INFO] [some-proc]: bar' == lines[0] assert 0 == len(capture.err) def test_log_default_format(log_dir): """Test logging to the main log file when using the default logs format.""" launch.logging.reset() 
launch.logging.launch_config.level = logging.WARN launch.logging.launch_config.log_dir = log_dir logger = launch.logging.get_logger('some-proc') logger.addHandler(launch.logging.launch_config.get_log_file_handler()) assert logger.getEffectiveLevel() == logging.WARN logger.error('baz') launch.logging.launch_config.get_log_file_handler().flush() assert os.path.exists(launch.logging.launch_config.get_log_file_path()) assert 0 != os.stat(launch.logging.launch_config.get_log_file_path()).st_size with open(launch.logging.launch_config.get_log_file_path(), 'r') as f: lines = f.readlines() assert 1 == len(lines) assert re.match(r'[0-9]+\.[0-9]+ \[ERROR\] \[some-proc\]: baz', lines[0]) is not None def test_log_handler_factory(log_dir): """Test logging using a custom log handlers.""" class TestStreamHandler(launch.logging.handlers.Handler): def __init__(self, output): super().__init__() self._output = output def emit(self, record): self._output.append(self.format(record)) import collections outputs = collections.defaultdict(list) launch.logging.reset() launch.logging.launch_config.level = logging.WARN launch.logging.launch_config.log_dir = log_dir launch.logging.launch_config.log_handler_factory = ( lambda path, encoding=None: TestStreamHandler( output=outputs[path] ) ) logger = launch.logging.get_logger('some-proc') logger.addHandler(launch.logging.launch_config.get_log_file_handler()) logger.debug('foo') logger.error('baz') path = launch.logging.launch_config.get_log_file_path() assert path in outputs assert len(outputs[path]) == 1 assert outputs[path][0].endswith('baz') def fake_make_unique_log_dir(*, base_path): # Passthrough; do not create the directory return base_path @mock.patch('launch.logging._make_unique_log_dir', mock.MagicMock(wraps=fake_make_unique_log_dir)) def test_get_logging_directory(): launch.logging.launch_config.reset() os.environ.pop('ROS_LOG_DIR', None) os.environ.pop('ROS_HOME', None) home = pathlib.Path.home() assert str(home) # Default case without ROS_LOG_DIR or ROS_HOME being set (but with HOME) default_dir = str(home / '.ros/log') # This ensures that the launch config will check the environment again launch.logging.launch_config.log_dir = None assert launch.logging.launch_config.log_dir == default_dir # Use $ROS_LOG_DIR if it is set my_log_dir_raw = '/my/ros_log_dir' my_log_dir = str(pathlib.Path(my_log_dir_raw)) os.environ['ROS_LOG_DIR'] = my_log_dir launch.logging.launch_config.log_dir = None assert launch.logging.launch_config.log_dir == my_log_dir # Make sure it converts path separators when necessary os.environ['ROS_LOG_DIR'] = my_log_dir_raw launch.logging.launch_config.log_dir = None assert launch.logging.launch_config.log_dir == my_log_dir # Setting ROS_HOME won't change anything since ROS_LOG_DIR is used first os.environ['ROS_HOME'] = '/this/wont/be/used' launch.logging.launch_config.log_dir = None assert launch.logging.launch_config.log_dir == my_log_dir os.environ.pop('ROS_HOME', None) # Empty is considered unset os.environ['ROS_LOG_DIR'] = '' launch.logging.launch_config.log_dir = None assert launch.logging.launch_config.log_dir == default_dir # Make sure '~' is expanded to the home directory os.environ['ROS_LOG_DIR'] = '~/logdir' launch.logging.launch_config.log_dir = None assert launch.logging.launch_config.log_dir == str(home / 'logdir') os.environ.pop('ROS_LOG_DIR', None) # Without ROS_LOG_DIR, use $ROS_HOME/log fake_ros_home = home / '.fakeroshome' fake_ros_home_log_dir = str(fake_ros_home / 'log') os.environ['ROS_HOME'] = str(fake_ros_home) 
launch.logging.launch_config.log_dir = None assert launch.logging.launch_config.log_dir == fake_ros_home_log_dir # Make sure it converts path separators when necessary my_ros_home_raw = '/my/ros/home' my_ros_home_log_dir = str(pathlib.Path(my_ros_home_raw) / 'log') os.environ['ROS_HOME'] = my_ros_home_raw launch.logging.launch_config.log_dir = None assert launch.logging.launch_config.log_dir == my_ros_home_log_dir # Empty is considered unset os.environ['ROS_HOME'] = '' launch.logging.launch_config.log_dir = None assert launch.logging.launch_config.log_dir == default_dir # Make sure '~' is expanded to the home directory os.environ['ROS_HOME'] = '~/.fakeroshome' launch.logging.launch_config.log_dir = None assert launch.logging.launch_config.log_dir == fake_ros_home_log_dir os.environ.pop('ROS_HOME', None) launch.logging.launch_config.reset() def test_get_log_dir_frontend(log_dir): """Test log_dir frontend substitution.""" launch.logging.reset() launch.logging.launch_config.log_dir = log_dir subst = parse_substitution('$(log_dir)') assert len(subst) == 1 result = subst[0] assert isinstance(result, TextSubstitution) assert result.text == log_dir
[]
[]
[ "ROS_HOME", "ROS_LOG_DIR" ]
[]
["ROS_HOME", "ROS_LOG_DIR"]
python
2
0
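test_get_logging_directory above exercises a specific resolution order for the log directory: a non-empty ROS_LOG_DIR wins, otherwise ROS_HOME/log is used, otherwise ~/.ros/log, with '~' expanded in either variable. A standalone sketch of that order follows; it re-implements the behaviour the test expects and is not the launch.logging code itself:

# Sketch only: the log-directory fallback chain the test asserts.
import os
import pathlib

def resolve_log_dir() -> str:
    ros_log_dir = os.environ.get('ROS_LOG_DIR', '')
    if ros_log_dir:                                   # empty counts as unset
        return str(pathlib.Path(os.path.expanduser(ros_log_dir)))
    ros_home = os.environ.get('ROS_HOME', '')
    if ros_home:
        return str(pathlib.Path(os.path.expanduser(ros_home)) / 'log')
    return str(pathlib.Path.home() / '.ros' / 'log')

if __name__ == '__main__':
    os.environ['ROS_LOG_DIR'] = '~/logdir'
    print(resolve_log_dir())   # expands to <home>/logdir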
payments/initiator_test.go
package payments import ( "context" "errors" "fmt" "net/http" "os" "testing" "github.com/cpurta/go-raiden-client/config" "github.com/ethereum/go-ethereum/common" "github.com/jarcoal/httpmock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func ExampleInitiator() { var ( paymentClient *Client config = &config.Config{ Host: "http://localhost:5001", APIVersion: "v1", } tokenAddress = common.HexToAddress("0x89d24a6b4ccb1b6faa2625fe562bdd9a23260359") // DAI Stablecoin targetAddress = common.HexToAddress("") payment *Payment amount = int64(1000) err error ) paymentClient = NewClient(config, http.DefaultClient) if payment, err = paymentClient.Initiate(context.Background(), tokenAddress, targetAddress, amount); err != nil { panic(fmt.Sprintf("unable to initiate payment: %s", err.Error())) } fmt.Printf("successfully initiated payment: %+v\n", payment) } func TestInitiator(t *testing.T) { var ( localhostIP = "[::1]" config = &config.Config{ Host: "http://localhost:5001", APIVersion: "v1", } ) if os.Getenv("USE_IPV4") != "" { localhostIP = "127.0.0.1" } type testcase struct { name string prepHTTPMock func() expectedPayment *Payment expectedError error } testcases := []testcase{ testcase{ name: "successfully initiated a funds transfer", prepHTTPMock: func() { httpmock.RegisterResponder( "POST", "http://localhost:5001/api/v1/payments/0x2a65Aca4D5fC5B5C859090a6c34d164135398226/0x61C808D82A3Ac53231750daDc13c777b59310bD9", httpmock.NewStringResponder( http.StatusOK, `{"initiator_address":"0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8","target_address":"0x61C808D82A3Ac53231750daDc13c777b59310bD9","token_address":"0x2a65Aca4D5fC5B5C859090a6c34d164135398226","amount":200,"identifier":42}`, ), ) }, expectedError: nil, expectedPayment: &Payment{ InitiatorAddress: common.HexToAddress("0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8"), TargetAddress: common.HexToAddress("0x61C808D82A3Ac53231750daDc13c777b59310bD9"), TokenAddress: common.HexToAddress("0x2a65Aca4D5fC5B5C859090a6c34d164135398226"), Amount: int64(200), Identifier: int64(42), }, }, testcase{ name: "unexpected 500 response", prepHTTPMock: func() { httpmock.RegisterResponder( "POST", "http://localhost:5001/api/v1/payments/0x2a65Aca4D5fC5B5C859090a6c34d164135398226/0x61C808D82A3Ac53231750daDc13c777b59310bD9", httpmock.NewStringResponder( http.StatusInternalServerError, ``, ), ) }, expectedError: errors.New("EOF"), expectedPayment: nil, }, testcase{ name: "unable to make http request", prepHTTPMock: func() { httpmock.Deactivate() }, expectedError: fmt.Errorf("Post http://localhost:5001/api/v1/payments/0x2a65Aca4D5fC5B5C859090a6c34d164135398226/0x61C808D82A3Ac53231750daDc13c777b59310bD9: dial tcp %s:5001: connect: connection refused", localhostIP), expectedPayment: nil, }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { var ( err error payment *Payment initiator = NewInitiator(config, http.DefaultClient) ctx = context.Background() tokenAddress = common.HexToAddress("0x2a65Aca4D5fC5B5C859090a6c34d164135398226") partnerAddress = common.HexToAddress("0x61C808D82A3Ac53231750daDc13c777b59310bD9") ) httpmock.Activate() defer httpmock.Deactivate() tc.prepHTTPMock() // test list all payment, err = initiator.Initiate(ctx, tokenAddress, partnerAddress, int64(200)) if tc.expectedError != nil { assert.EqualError(t, err, tc.expectedError.Error()) return } require.NoError(t, err) assert.Equal(t, tc.expectedPayment, payment) }) } }
[ "\"USE_IPV4\"" ]
[]
[ "USE_IPV4" ]
[]
["USE_IPV4"]
go
1
0
examples/bc_image_real.py
import rlkit.torch.pytorch_util as ptu from rlkit.data_management.load_buffer import load_data_from_npy, load_data_from_npy_mult, load_data_from_npy_chaining from rlkit.samplers.data_collector import MdpPathCollector, \ CustomMDPPathCollector, CustomMDPPathCollector_EVAL from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic from rlkit.torch.sac.bc import BCTrainer, BCTrainerMSE from rlkit.torch.conv_networks import CNN, ConcatCNN, VQVAEEncoderCNN from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm from rlkit.util.video import VideoSaveFunction from rlkit.launchers.launcher_util import setup_logger from rlkit.envs.dummy_env import DummyEnv import pickle from rlkit.data_management.load_buffer_real import * import argparse, os import roboverse # '/media/avi/data/Work/github/avisingh599/minibullet/data/' # 'oct6_Widow250DrawerGraspNeutral-v0_20K_save_all_noise_0.1' # '_2020-10-06T19-37-26_100.npy' # DEFAULT_BUFFER = ('/nfs/kun1/users/albert/minibullet_datasets/11270225_10k_grasp_Widow250MultiObjectOneGraspRandomBowlPositionTrain-v0_10K_save_all_noise_0.1_2020-11-27T02-24-16_9750.npy') DEFAULT_BUFFER = ('/nfs/kun1/users/albert/minibullet_datasets/11270225_10k_grasp_Widow250MultiObjectOneGraspRandomBowlPositionTrain-v0_10K_save_all_noise_0.1_2020-11-27T02-24-16_9750.npy') CUSTOM_LOG_DIR = '/home/stian/doodad-output' def experiment(variant): variant['image_shape'] = (48,48,3) if variant['small_image'] else (64,64,3) if variant['kitchen']: eval_env = expl_env = DummyEnv(action_shape=(7,),image_shape=variant['image_shape']) else: eval_env = expl_env = DummyEnv(action_shape=(4,),image_shape=variant['image_shape']) action_dim = eval_env.action_space.low.size if variant['multi_bin']: eval_env.multi_tray = True expl_env.multi_tray = False cnn_params = variant['cnn_params'] if variant['deeper_net']: print('deeper conv net') cnn_params.update( kernel_sizes=[3, 3, 3, 3, 3], n_channels=[32, 32, 32, 32, 32], strides=[1, 1, 1, 1, 1], paddings=[1, 1, 1, 1, 1], pool_sizes=[2, 2, 1, 1, 1], pool_strides=[2, 2, 1, 1, 1], pool_paddings=[0, 0, 0, 0, 0] ) cnn_params.update( input_width=48 if variant['small_image'] else 64, input_height=48 if variant['small_image'] else 64, input_channels=3, output_size=1, added_fc_input_size= action_dim, ) # qf1 = ConcatCNN(**cnn_params) # qf2 = ConcatCNN(**cnn_params) # target_qf1 = ConcatCNN(**cnn_params) # target_qf2 = ConcatCNN(**cnn_params) cnn_params.update( output_size=256, added_fc_input_size=variant['state_dim'] if variant['imgstate'] else 0, hidden_sizes=[1024, 512], ) if variant['vqvae_enc']: policy_obs_processor = VQVAEEncoderCNN(**cnn_params) else: policy_obs_processor = CNN(**cnn_params) policy = TanhGaussianPolicy( obs_dim=cnn_params['output_size'], action_dim=action_dim, hidden_sizes=[256, 256, 256], obs_processor=policy_obs_processor, ) eval_policy = MakeDeterministic(policy) eval_path_collector = MdpPathCollector( eval_env, eval_policy, ) expl_path_collector = CustomMDPPathCollector_EVAL( eval_env, eval_policy, ) observation_key = ('image', 'state') if variant['imgstate'] else 'image' paths = [] if args.azure: data_path = '/home/asap7772/drawer_data' else: data_path = '/nfs/kun1/users/ashvin/data/val_data' if args.buffer == 0: print('lid on') paths.append((os.path.join(data_path,'fixed_pot_demos.npy'), os.path.join(data_path,'fixed_pot_demos_putlidon_rew.pkl'))) elif args.buffer == 1: print('lid off') paths.append((os.path.join(data_path,'fixed_pot_demos.npy'), os.path.join(data_path,'fixed_pot_demos_takeofflid_rew.pkl'))) elif 
args.buffer == 2: print('tray') paths.append((os.path.join(data_path,'fixed_tray_demos.npy'), os.path.join(data_path,'fixed_tray_demos_rew.pkl'))) elif args.buffer == 3: print('drawer') paths.append((os.path.join(data_path,'fixed_drawer_demos.npy'), os.path.join(data_path,'fixed_drawer_demos_rew.pkl'))) elif args.buffer == 4: print('Stephen Tool Use') path = '/nfs/kun1/users/stephentian/on_policy_longer_1_26_buffers/move_tool_obj_together_fixed_6_2_train.pkl' elif args.buffer == 5: print('Albert Pick Place') px = os.path.join(os.path.expanduser("~"),'val_data_relabeled', 'combined_2021-06-03_21_36_48_labeled.pkl') if args.azure else '/nfs/kun1/users/albert/realrobot_datasets/combined_2021-06-03_21_36_48_labeled.pkl' data_path = '/nfs/kun1/users/albert/realrobot_datasets/combined_2021-06-03_21_36_48_labeled.pkl' if args.azure: data_path = px paths.append((data_path, None)) elif args.buffer == 6: print('Pick Kitchen 1') prior = ['/home/asap7772/asap7772/real_data_kitchen/bridge_data_numpy/toykitchen2_room8052/put_potato_on_plate/out.npy'] num_viewpoints = 5 for p in prior: paths.append((p, None)) else: assert False if args.buffer in [4]: replay_buffer = pickle.load(open(path,'rb')) elif args.buffer in [6,7,8]: replay_buffer = get_buffer(observation_key=observation_key, color_jitter = variant['color_jitter'], num_viewpoints=num_viewpoints, action_shape=(7,)) for path, rew_path in paths: print(path) load_path_kitchen(path, rew_path, replay_buffer) else: replay_buffer = get_buffer(observation_key=observation_key, image_shape=variant['image_shape']) for path, rew_path in paths: load_path(path, rew_path, replay_buffer, small_img=variant['small_image'], bc=True, imgstate = variant['imgstate']) if variant['mse']: trainer = BCTrainerMSE( env=eval_env, policy=policy, #qf1=qf1, #qf2=qf2, #target_qf1=target_qf1, #target_qf2=target_qf2, dist_diff=variant['dist_diff'], log_dir=variant['log_dir'], imgstate=variant['imgstate'], variant_dict=variant, **variant['trainer_kwargs'] ) else: trainer = BCTrainer( env=eval_env, policy=policy, #qf1=qf1, #qf2=qf2, #target_qf1=target_qf1, #target_qf2=target_qf2, dist_diff=variant['dist_diff'], log_dir=variant['log_dir'], imgstate=variant['imgstate'], variant_dict=variant, **variant['trainer_kwargs'] ) algorithm = TorchBatchRLAlgorithm( trainer=trainer, exploration_env=expl_env, evaluation_env=eval_env, exploration_data_collector=expl_path_collector, evaluation_data_collector=eval_path_collector, replay_buffer=replay_buffer, eval_both=False, batch_rl=True, **variant['algorithm_kwargs'] ) video_func = VideoSaveFunction(variant) algorithm.post_epoch_funcs.append(video_func) algorithm.to(ptu.device) algorithm.train() def enable_gpus(gpu_str): if (gpu_str is not ""): os.environ["CUDA_VISIBLE_DEVICES"] = gpu_str return if __name__ == "__main__": # noinspection PyTypeChecker variant = dict( algorithm="BC", version="normal", algorithm_kwargs=dict( # num_epochs=100, # num_eval_steps_per_epoch=50, # num_trains_per_train_loop=100, # num_expl_steps_per_train_loop=100, # min_num_steps_before_training=100, # max_path_length=10, num_epochs=3000, num_eval_steps_per_epoch=5, num_trains_per_train_loop=1000, num_expl_steps_per_train_loop=5, min_num_steps_before_training=1000, max_path_length=30, batch_size=64, ), trainer_kwargs=dict( discount=0.99, soft_target_tau=5e-3, policy_lr=1E-4, qf_lr=3E-4, reward_scale=1, use_automatic_entropy_tuning=True, # Target nets/ policy vs Q-function update policy_eval_start=10000, num_qs=2, # min Q temp=1.0, min_q_version=3, min_q_weight=5.0, # lagrange 
with_lagrange=False, # Defaults to False lagrange_thresh=10.0, # extra params num_random=1, max_q_backup=False, deterministic_backup=False, ), dump_video_kwargs=dict( imsize=48, save_video_period=1, ), ) parser = argparse.ArgumentParser() parser.add_argument("--max-path-length", type=int, default=1) parser.add_argument("--buffer", type=str, default=DEFAULT_BUFFER) parser.add_argument("--gpu", default='0', type=str) parser.add_argument("--min-q-weight", default=1.0, type=float, help="Value of alpha in CQL") parser.add_argument("--use-lagrange", action="store_true", default=False) parser.add_argument("--lagrange-thresh", default=5.0, type=float, help="Value of tau, used with --use-lagrange") parser.add_argument("--use-positive-rew", action="store_true", default=False) parser.add_argument("--max-q-backup", action="store_true", default=False, help="For max_{a'} backups, set this to true") parser.add_argument("--no-deterministic-backup", action="store_true", default=False, help="By default, deterministic backup is used") parser.add_argument("--policy-eval-start", default=10000, type=int) parser.add_argument("--policy-lr", default=1e-4, type=float) parser.add_argument("--min-q-version", default=3, type=int, help=("min_q_version = 3 (CQL(H)), " "version = 2 (CQL(rho))")) parser.add_argument("--num-eval-per-epoch", type=int, default=5) parser.add_argument("--seed", default=10, type=int) parser.add_argument("--name", default='test', type=str) parser.add_argument("--bin_color", action="store_true", default=False) parser.add_argument("--multi_bin", action="store_true", default=False) parser.add_argument("--mixture", action="store_true", default=False) parser.add_argument("--transfer", action="store_true", default=False) parser.add_argument("--transfer_multiview", action="store_true", default=False) parser.add_argument("--chaining", action="store_true", default=False) parser.add_argument("--p", default=0.2, type=float) parser.add_argument("--prob", default=1.0, type=float) parser.add_argument('--segment_type', default='fixed_other', type = str) parser.add_argument('--eval_multiview', default='single', type = str) parser.add_argument('--dist_diff', action="store_true", default=False) parser.add_argument('--larger_net', action="store_true", default=False) parser.add_argument('--color_jitter', action='store_false', default=True) # Stephen added parser.add_argument('--deeper_net', action="store_true", default=False) parser.add_argument('--azure', action="store_true", default=False) parser.add_argument('--vqvae_enc', action="store_true", default=False) parser.add_argument('--duplicate', action="store_true", default=False) parser.add_argument('--num_traj', default=0, type=int) parser.add_argument('--smimg', default=False, action='store_true') parser.add_argument('--kitchen', default=False, action='store_true') parser.add_argument('--imgstate', default=False, action='store_true') # both image and state parser.add_argument('--state_dim', default=3, type=int) parser.add_argument('--mse', default=False, action='store_true') args = parser.parse_args() variant['mse'] = args.mse variant['color_jitter'] = args.color_jitter variant['kitchen'] = args.kitchen variant['state_dim'] = args.state_dim variant['imgstate'] = args.imgstate variant['transfer'] = args.transfer variant['mixture'] = args.mixture variant['chaining'] = args.chaining variant['p'] = args.p variant['bin'] = args.bin_color variant['segment_type'] = args.segment_type variant['small_image'] = args.smimg variant['transfer_multiview'] = 
args.transfer_multiview variant['eval_multiview'] = args.eval_multiview variant['dist_diff'] = args.dist_diff variant['deeper_net'] = args.deeper_net variant['vqvae_enc'] = args.vqvae_enc variant['duplicate'] = args.duplicate variant['num_traj'] = args.num_traj if args.buffer.isnumeric(): args.buffer = int(args.buffer) enable_gpus(args.gpu) variant['algorithm_kwargs']['max_path_length'] = args.max_path_length variant['algorithm_kwargs']['num_eval_steps_per_epoch'] = \ args.num_eval_per_epoch*args.max_path_length variant['buffer'] = args.buffer variant['trainer_kwargs']['max_q_backup'] = args.max_q_backup variant['trainer_kwargs']['deterministic_backup'] = \ not args.no_deterministic_backup variant['trainer_kwargs']['min_q_weight'] = args.min_q_weight variant['trainer_kwargs']['policy_lr'] = args.policy_lr variant['trainer_kwargs']['min_q_version'] = args.min_q_version variant['trainer_kwargs']['policy_eval_start'] = args.policy_eval_start variant['trainer_kwargs']['lagrange_thresh'] = args.lagrange_thresh variant['trainer_kwargs']['with_lagrange'] = args.use_lagrange variant['multi_bin'] = args.multi_bin if args.larger_net: variant['cnn_params'] = dict( kernel_sizes=[3, 3, 3, 3, 3, 3], n_channels=[16, 16, 16, 16,16,16], strides=[1, 1, 1, 1, 1, 1], hidden_sizes=[1024, 512, 512, 256, 256], paddings=[1, 1, 1,1,1,1], pool_type='max2d', pool_sizes=[2, 2, 2, 2, 2, 1], # the one at the end means no pool pool_strides=[2, 2, 2,2,2,1], pool_paddings=[0, 0, 0,0,0,0], image_augmentation=True, image_augmentation_padding=4, ) else: variant['cnn_params'] = dict( kernel_sizes=[3, 3, 3], n_channels=[16, 16, 16], strides=[1, 1, 1], hidden_sizes=[1024, 512, 256], paddings=[1, 1, 1], pool_type='max2d', pool_sizes=[2, 2, 1], # the one at the end means no pool pool_strides=[2, 2, 1], pool_paddings=[0, 0, 0], image_augmentation=True, image_augmentation_padding=4, ) variant['seed'] = args.seed ptu.set_gpu_mode(True) if os.path.isdir(CUSTOM_LOG_DIR): base_log_dir = CUSTOM_LOG_DIR else: base_log_dir = None log_dir = setup_logger(args.name, variant=variant, base_log_dir=base_log_dir, snapshot_mode='gap_and_last', snapshot_gap=10,) variant['log_dir'] = log_dir experiment(variant)
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
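One detail worth noting in the script above: enable_gpus compares strings with "is not", which tests object identity rather than equality and is flagged with a warning by newer Python versions. A corrected sketch with the intended behaviour:

# Sketch only: restrict CUDA to the requested device ids, e.g. "0" or "0,1".
import os

def enable_gpus(gpu_str: str) -> None:
    if gpu_str != "":
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_str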
acme.go
package main import ( "fmt" "io" "os" "os/user" "strconv" "strings" "9fans.net/go/acme" ) type acmeFile struct { name string body []byte offset int runeOffset int } func acmeCurrentFile() (*acmeFile, error) { win, err := acmeCurrentWin() if err != nil { return nil, err } defer win.CloseFiles() _, _, err = win.ReadAddr() // make sure address file is already open. if err != nil { return nil, fmt.Errorf("cannot read address: %v", err) } err = win.Ctl("addr=dot") if err != nil { return nil, fmt.Errorf("cannot set addr=dot: %v", err) } q0, _, err := win.ReadAddr() if err != nil { return nil, fmt.Errorf("cannot read address: %v", err) } body, err := readBody(win) if err != nil { return nil, fmt.Errorf("cannot read body: %v", err) } tagb, err := win.ReadAll("tag") if err != nil { return nil, fmt.Errorf("cannot read tag: %v", err) } tag := string(tagb) i := strings.Index(tag, " ") if i == -1 { return nil, fmt.Errorf("strange tag with no spaces") } w := &acmeFile{ name: tag[0:i], body: body, offset: runeOffset2ByteOffset(body, q0), runeOffset: q0, } return w, nil } // We would use win.ReadAll except for a bug in acme // where it crashes when reading trying to read more // than the negotiated 9P message size. func readBody(win *acme.Win) ([]byte, error) { var body []byte buf := make([]byte, 8000) for { n, err := win.Read("body", buf) if err == io.EOF { break } if err != nil { return nil, err } body = append(body, buf[0:n]...) } return body, nil } func acmeCurrentWin() (*acme.Win, error) { winid := os.Getenv("winid") if winid == "" { return nil, fmt.Errorf("$winid not set - not running inside acme?") } id, err := strconv.Atoi(winid) if err != nil { return nil, fmt.Errorf("invalid $winid %q", winid) } if err := setNameSpace(); err != nil { return nil, err } win, err := acme.Open(id, nil) if err != nil { return nil, fmt.Errorf("cannot open acme window: %v", err) } return win, nil } func runeOffset2ByteOffset(b []byte, off int) int { r := 0 for i, _ := range string(b) { if r == off { return i } r++ } return len(b) } func setNameSpace() error { if ns := os.Getenv("NAMESPACE"); ns != "" { return nil } ns, err := nsFromDisplay() if err != nil { return fmt.Errorf("cannot get name space: %v", err) } os.Setenv("NAMESPACE", ns) return nil } // taken from src/lib9/getns.c // This should go into goplan9/plan9/client. func nsFromDisplay() (string, error) { disp := os.Getenv("DISPLAY") if disp == "" { // original code had heuristic for OS X here; // we'll just assume that and fail anyway if it // doesn't work. disp = ":0.0" } // canonicalize: xxx:0.0 => xxx:0 if i := strings.LastIndex(disp, ":"); i >= 0 { if strings.HasSuffix(disp, ".0") { disp = disp[:len(disp)-2] } } // turn /tmp/launch/:0 into _tmp_launch_:0 (OS X 10.5) disp = strings.Replace(disp, "/", "_", -1) u, err := user.Current() if err != nil { return "", fmt.Errorf("cannot get current user name: %v", err) } ns := fmt.Sprintf("/tmp/ns.%s.%s", u.Username, disp) _, err = os.Stat(ns) if os.IsNotExist(err) { return "", fmt.Errorf("no name space directory found") } if err != nil { return "", fmt.Errorf("cannot stat name space directory: %v", err) } // heuristics for checking permissions and owner of name space // directory omitted. return ns, nil }
[ "\"winid\"", "\"NAMESPACE\"", "\"DISPLAY\"" ]
[]
[ "NAMESPACE", "winid", "DISPLAY" ]
[]
["NAMESPACE", "winid", "DISPLAY"]
go
3
0
bloodhound/__init__.py
#################### # # Copyright (c) 2018 Fox-IT # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # #################### import os, sys, logging, argparse, getpass from impacket.ldap import ldapasn1 from bloodhound.ad import AD, ADDC, ADAuthentication """ BloodHound.py is a Python port of BloodHound, designed to run on Linux. It may very well work on other platforms, this is currently untested. Knock yourself out. """ class BloodHound: def __init__(self, ad): self.ad = ad self.ldap = None self.dc = None self.sessions = [] def connect(self): if len(self.ad.dcs()) == 0: logging.error('I have no information about the domain') sys.exit(1) dc = self.ad.dcs()[0] logging.debug('Using LDAP server: %s' % dc) logging.debug('Using base DN: %s' % self.ad.baseDN) if len(self.ad.kdcs()) > 0: kdc = self.ad.kdcs()[0] logging.debug('Using kerberos KDC: %s' % kdc) logging.debug('Using kerberos realm: %s' % self.ad.realm()) self.dc = ADDC(dc, self.ad) # self.dc.ldap_connect(self.ad.auth.username, self.ad.auth.password, kdc) def run(self, skip_groups=False, skip_computers=False): if not skip_groups: self.dc.fetch_all() elif not skip_computers: # We need to know which computers to query regardless self.dc.get_computers() if not skip_computers: self.ad.fetch_sessions() self.ad.dump_admins() logging.info('Done') def kerberize(): # If the kerberos credential cache is known, use that. krb5cc = os.getenv('KRB5CCNAME') # Otherwise, guess it. 
if krb5cc is None: krb5cc = '/tmp/krb5cc_%u' % os.getuid() if os.path.isfile(krb5cc): logging.debug('Using kerberos credential cache: %s' % krb5cc) if os.getenv('KRB5CCNAME') is None: os.environ['KRB5CCNAME'] = krb5cc else: logging.error('Could not find kerberos credential cache file') sys.exit(1) def main(): # logging.basicConfig(stream=sys.stderr, level=logging.INFO) logger = logging.getLogger() logger.setLevel(logging.INFO) stream = logging.StreamHandler(sys.stderr) stream.setLevel(logging.DEBUG) formatter = logging.Formatter('%(levelname)s: %(message)s') # formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s') stream.setFormatter(formatter) logger.addHandler(stream) parser = argparse.ArgumentParser(add_help=True, description='Python based ingestor for BloodHound\nThis tool is in BETA!\nFor help or reporting issues, visit https://github.com/Fox-IT/BloodHound.py', formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('-u', '--username', action='store', help='Username') parser.add_argument('-p', '--password', action='store', help='Password') parser.add_argument('-k', '--kerberos', action='store_true', help='Use kerberos') parser.add_argument('--hashes', action='store', help='NLTM hash') parser.add_argument('-n', action='store_true', help='Do not resolve names') parser.add_argument('-ns', '--nameserver', action='store', help='Alternative name server to use for queries') # Todo: match sharphound profiles parser.add_argument('--skip-groups', action='store_true', help='Do not query Group memberships via LDAP') parser.add_argument('--skip-computers', action='store_true', help='Do not connect to individual computers') parser.add_argument('-d', '--domain', action='store', help='Domain') parser.add_argument('-v', action='store_true', help='Enable verbose output') args = parser.parse_args() if args.v is True: logger.setLevel(logging.DEBUG) if args.kerberos is True: logging.debug('Authentication: kerberos') kerberize() auth = ADAuthentication() elif args.username is not None and args.password is not None: logging.debug('Authentication: username/password') auth = ADAuthentication(username=args.username, password=args.password, domain=args.domain) elif args.username is not None and args.password is None and args.hashes is None: args.password = getpass.getpass() auth = ADAuthentication(username=args.username, password=args.password, domain=args.domain) elif args.username is None and (args.password is not None or args.hashes is not None): logging.error('Authentication: password or hashes provided without username') sys.exit(1) elif args.hashes is not None and args.username is not None: logging.debug('Authentication: NTLM hashes') lm, nt = args.hashes.split(":") auth = ADAuthentication(lm_hash=lm, nt_hash=nt, username=args.username, domain=args.domain) else: parser.print_help() sys.exit(1) ad = AD(auth=auth, domain=args.domain, nameserver=args.nameserver) if args.n is not True: logging.debug('Using DNS to retrieve domain information') ad.dns_resolve(kerberos=args.kerberos, domain=args.domain) bloodhound = BloodHound(ad) bloodhound.connect() bloodhound.run(skip_groups=args.skip_groups, skip_computers=args.skip_computers) if __name__ == '__main__': main()
[]
[]
[ "KRB5CCNAME" ]
[]
["KRB5CCNAME"]
python
1
0
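kerberize() above resolves the Kerberos credential cache by honouring KRB5CCNAME when it is set and otherwise guessing the conventional /tmp/krb5cc_<uid> path. A minimal sketch of that lookup, with the error handling reduced to a single exception; this is not the BloodHound.py code itself:

# Sketch only: locate the kerberos credential cache and export KRB5CCNAME.
import os

def find_krb5_ccache() -> str:
    ccache = os.getenv('KRB5CCNAME') or '/tmp/krb5cc_%u' % os.getuid()
    if not os.path.isfile(ccache):
        raise FileNotFoundError('no kerberos credential cache found at %s' % ccache)
    os.environ.setdefault('KRB5CCNAME', ccache)
    return ccache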
agent/arty_uploader.go
package agent import ( "encoding/json" "errors" "fmt" "io/ioutil" "net/http" "net/url" "os" "strings" "github.com/buildkite/agent/api" "github.com/buildkite/agent/logger" ) type ArtifactoryUploaderConfig struct { // The destination which includes the Artifactory bucket name and the path. // e.g artifactory://my-repo-name/foo/bar Destination string // Whether or not HTTP calls should be debugged DebugHTTP bool } type ArtifactoryUploader struct { // The artifactory bucket path set from the destination Path string // The artifactory bucket name set from the destination Repository string // URL of artifactory instance iURL *url.URL // The artifactory client to use client *http.Client // The configuration conf ArtifactoryUploaderConfig // The logger instance to use logger logger.Logger // Artifactory username user string // Artifactory password password string } func NewArtifactoryUploader(l logger.Logger, c ArtifactoryUploaderConfig) (*ArtifactoryUploader, error) { repo, path := ParseArtifactoryDestination(c.Destination) stringURL := os.Getenv("BUILDKITE_ARTIFACTORY_URL") username := os.Getenv("BUILDKITE_ARTIFACTORY_USER") password := os.Getenv("BUILDKITE_ARTIFACTORY_PASSWORD") // authentication is not set if stringURL == "" || username == "" || password == "" { return nil, errors.New("Must set BUILDKITE_ARTIFACTORY_URL, BUILDKITE_ARTIFACTORY_USER, BUILDKITE_ARTIFACTORY_PASSWORD when using rt:// path") } parsedURL, err := url.Parse(stringURL) if err != nil { return nil, err } return &ArtifactoryUploader{ logger: l, conf: c, client: &http.Client{}, iURL: parsedURL, Path: path, Repository: repo, user: username, password: password, }, nil } func ParseArtifactoryDestination(destination string) (repo string, path string) { parts := strings.Split(strings.TrimPrefix(string(destination), "rt://"), "/") path = strings.Join(parts[1:len(parts)], "/") repo = parts[0] return } func (u *ArtifactoryUploader) URL(artifact *api.Artifact) string { url := *u.iURL // ensure proper URL formatting for upload url.Path = strings.Join([]string{ strings.Trim(url.Path, "/"), u.artifactPath(artifact), }, "/") return url.String() } func (u *ArtifactoryUploader) Upload(artifact *api.Artifact) error { // Open file from filesystem u.logger.Debug("Reading file \"%s\"", artifact.AbsolutePath) f, err := os.Open(artifact.AbsolutePath) if err != nil { return fmt.Errorf("failed to open file %q (%v)", artifact.AbsolutePath, err) } // Upload the file to Artifactory. u.logger.Debug("Uploading \"%s\" to `%s`", artifact.Path, u.Repository) req, err := http.NewRequest("PUT", u.URL(artifact), f) req.SetBasicAuth(u.user, u.password) if err != nil { return err } res, err := u.client.Do(req) if err != nil { return err } if err := checkResponse(res); err != nil { return err } return nil } func (u *ArtifactoryUploader) artifactPath(artifact *api.Artifact) string { parts := []string{u.Repository, u.Path, artifact.Path} return strings.Join(parts, "/") } // An ErrorResponse reports one or more errors caused by an API request. type errorResponse struct { Response *http.Response // HTTP response that caused this error Errors []Error `json:"errors"` // more detail on individual errors } func (r *errorResponse) Error() string { return fmt.Sprintf("%v %v: %d %+v", r.Response.Request.Method, r.Response.Request.URL, r.Response.StatusCode, r.Errors) } // An Error reports more details on an individual error in an ErrorResponse. type Error struct { Status int `json:"status"` // Error code Message string `json:"message"` // Message describing the error. 
} // checkResponse checks the API response for errors, and returns them if // present. A response is considered an error if it has a status code outside // the 200 range. // API error responses are expected to have either no response // body, or a JSON response body that maps to ErrorResponse. Any other // response body will be silently ignored. func checkResponse(r *http.Response) error { if c := r.StatusCode; 200 <= c && c <= 299 { return nil } errorResponse := &errorResponse{Response: r} data, err := ioutil.ReadAll(r.Body) if err == nil && data != nil { err := json.Unmarshal(data, errorResponse) if err != nil { return err } } return errorResponse }
[ "\"BUILDKITE_ARTIFACTORY_URL\"", "\"BUILDKITE_ARTIFACTORY_USER\"", "\"BUILDKITE_ARTIFACTORY_PASSWORD\"" ]
[]
[ "BUILDKITE_ARTIFACTORY_USER", "BUILDKITE_ARTIFACTORY_URL", "BUILDKITE_ARTIFACTORY_PASSWORD" ]
[]
["BUILDKITE_ARTIFACTORY_USER", "BUILDKITE_ARTIFACTORY_URL", "BUILDKITE_ARTIFACTORY_PASSWORD"]
go
3
0
towerdef/asgi.py
""" ASGI config for towerdef project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'towerdef.settings') application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
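towerdef/asgi.py above is the stock Django 3.0 ASGI entry point: os.environ.setdefault means an externally exported DJANGO_SETTINGS_MODULE takes precedence over the hard-coded 'towerdef.settings', and an ASGI server is then pointed at the module-level application object (for example, uvicorn towerdef.asgi:application, an illustrative command rather than anything shipped with the project).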
stats/common_stats.go
// Package stats provides methods and functionality to register, track, log, // and StatsD-notify statistics that, for the most part, include "counter" and "latency" kinds. /* * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. */ package stats import ( "encoding/json" "fmt" "io/ioutil" "net/http" "os" "path/filepath" "runtime" "sort" "strings" "sync" "time" "github.com/NVIDIA/aistore/3rdparty/atomic" "github.com/NVIDIA/aistore/3rdparty/glog" "github.com/NVIDIA/aistore/cluster" "github.com/NVIDIA/aistore/cmn" "github.com/NVIDIA/aistore/cmn/cos" "github.com/NVIDIA/aistore/cmn/debug" "github.com/NVIDIA/aistore/cmn/mono" "github.com/NVIDIA/aistore/fs" "github.com/NVIDIA/aistore/hk" "github.com/NVIDIA/aistore/memsys" "github.com/NVIDIA/aistore/stats/statsd" "github.com/NVIDIA/aistore/xaction" jsoniter "github.com/json-iterator/go" "github.com/prometheus/client_golang/prometheus" ) const ( logsMaxSizeCheckTime = 48 * time.Minute // periodically check the logs for max accumulated size startupSleep = 300 * time.Millisecond // periodically poll ClusterStarted() numGorHighCheckTime = 2 * time.Minute // periodically log a warning if the number of goroutines remains high glogPeriodicFlushTime = 40 * time.Second // not to have `go glog.flushDaemon` // TODO -- FIXME startupDeadlineMultiplier = 1000 // deadline = startupDeadlineMultiplier * config.Timeout.Startup ) const ( KindCounter = "counter" KindGauge = "gauge" // + semantics KindLatency = "latency" KindThroughput = "bw" KindComputedThroughput = "compbw" KindSpecial = "special" ) // number-of-goroutines watermarks expressed as multipliers over the number of available logical CPUs (GOMAXPROCS) const ( numGorHigh = 100 numGorExtreme = 1000 ) // logging frequency const ( logIntervalMult = 6 // every (logIntervalMult * config.Periodic.StatsTime) logIntervalMax = int64(2 * time.Minute) // but not less frequently than logIntervalMax ) // NOTE: all supported metrics var kinds = []string{KindCounter, KindGauge, KindLatency, KindThroughput, KindComputedThroughput, KindSpecial} // CoreStats stats const ( // KindCounter GetCount = "get.n" PutCount = "put.n" AppendCount = "append.n" DeleteCount = "del.n" RenameCount = "ren.n" ListCount = "lst.n" ErrCount = "err.n" ErrGetCount = "err.get.n" ErrDeleteCount = "err.delete.n" ErrPostCount = "err.post.n" ErrPutCount = "err.put.n" ErrHeadCount = "err.head.n" ErrListCount = "err.list.n" ErrRangeCount = "err.range.n" ErrDownloadCount = "err.dl.n" // KindLatency GetLatency = "get.ns" ListLatency = "lst.ns" KeepAliveMinLatency = "kalive.ns.min" KeepAliveMaxLatency = "kalive.ns.max" KeepAliveLatency = "kalive.ns" // KindSpecial Uptime = "up.ns.time" ) // // public types // type ( Tracker interface { StartedUp() bool Add(name string, val int64) Get(name string) int64 AddErrorHTTP(method string, val int64) AddMany(namedVal64 ...NamedVal64) CoreStats() *CoreStats GetWhatStats() interface{} RegMetrics(node *cluster.Snode) IsPrometheus() bool } NamedVal64 struct { Name string NameSuffix string // forces immediate send when non-empty (see NOTE below) Value int64 } CoreStats struct { Tracker statsTracker promDesc promDesc statsdC *statsd.Client statsTime time.Duration sgl *memsys.SGL cmu sync.RWMutex // ctracker vs Prometheus Collect() } RebalanceTargetStats struct { xaction.BaseStats Ext ExtRebalanceStats `json:"ext"` } ExtRebalanceStats struct { RebTxCount int64 `json:"reb.tx.n,string"` RebTxSize int64 `json:"reb.tx.size,string"` RebRxCount int64 `json:"reb.rx.n,string"` RebRxSize int64 
`json:"reb.rx.size,string"` RebID int64 `json:"glob.id,string"` } TargetStatus struct { RebalanceStats *RebalanceTargetStats `json:"rebalance_stats,omitempty"` } DaemonStatus struct { Snode *cluster.Snode `json:"snode"` Stats *CoreStats `json:"daemon_stats"` Capacity fs.MPCap `json:"capacity"` SysInfo cos.SysInfo `json:"sys_info"` SmapVersion int64 `json:"smap_version,string"` TStatus *TargetStatus `json:"target_status,omitempty"` Status string `json:"status"` DeployedOn string `json:"deployment"` Version string `json:"ais_version"` // major.minor.build BuildTime string `json:"build_time"` // YYYY-MM-DD HH:MM:SS-TZ } ) // interface guard var ( _ Tracker = (*Prunner)(nil) _ Tracker = (*Trunner)(nil) _ cluster.XactStats = (*RebalanceTargetStats)(nil) ) // // private types // type ( metric = statsd.Metric // type alias // implemented by the stats runners statsLogger interface { log(now int64, uptime time.Duration) doAdd(nv NamedVal64) statsTime(newval time.Duration) standingBy() bool } runnerHost interface { ClusterStarted() bool } // implements Tracker, inherited by Prunner and Trunner statsRunner struct { name string stopCh chan struct{} workCh chan NamedVal64 ticker *time.Ticker Core *CoreStats `json:"core"` ctracker copyTracker // to avoid making it at runtime daemon runnerHost nextLogTime int64 // mono.NanoTime() startedUp atomic.Bool } // Stats are tracked via a map of stats names (key) to statsValue (values). // There are two main types of stats: counter and latency declared // using the the kind field. Only latency stats have numSamples used to compute latency. statsValue struct { sync.RWMutex Value int64 `json:"v,string"` kind string label struct { comm string // common part of the metric label (as in: <prefix> . comm . <suffix>) stsd string // StatsD label prom string // Prometheus label } numSamples int64 cumulative int64 isCommon bool // optional, common to the proxy and target } copyValue struct { Value int64 `json:"v,string"` } statsTracker map[string]*statsValue copyTracker map[string]copyValue // values aggregated and computed every statsTime promDesc map[string]*prometheus.Desc ) /////////////// // CoreStats // /////////////// // interface guard var ( _ json.Marshaler = (*CoreStats)(nil) _ json.Unmarshaler = (*CoreStats)(nil) ) // helper: convert bytes to megabytes with a fixed rounding precision = 2 digits (NOTE: MB not MiB) func roundMBs(val int64) (mbs float64) { mbs = float64(val) / 1000 / 10 num := int(mbs + 0.5) mbs = float64(num) / 100 return } // helper not to log idle: when the only updated vars are those that match "idle" prefixes func match(s string, prefs []string) bool { for _, p := range prefs { if strings.HasPrefix(s, p) { return true } } return false } func (s *CoreStats) init(node *cluster.Snode, size int) { s.Tracker = make(statsTracker, size) s.promDesc = make(promDesc, size) // NOTE: // accessible in debug mode via host:port/debug/vars // * all counters including errors // * latencies including keepalive // * mountpath capacities // * mountpath (disk) utilizations (see ios) // * total number of goroutines debug.NewExpvar(glog.SmoduleStats) s.Tracker.regCommonMetrics(node) // reusable sgl => (udp) => StatsD s.sgl = memsys.DefaultPageMM().NewSGL(memsys.PageSize) } // NOTE: nil StatsD client means that we provide metrics to Prometheus (see below) func (s *CoreStats) isPrometheus() bool { return s.statsdC == nil } // vs Collect() func (s *CoreStats) promRLock() { if s.isPrometheus() { s.cmu.RLock() } } func (s *CoreStats) promRUnlock() { if s.isPrometheus() { 
s.cmu.RUnlock() } } func (s *CoreStats) promLock() { if s.isPrometheus() { s.cmu.Lock() } } func (s *CoreStats) promUnlock() { if s.isPrometheus() { s.cmu.Unlock() } } // init MetricClient client: StatsD (default) or Prometheus func (s *CoreStats) initMetricClient(node *cluster.Snode, parent *statsRunner) { // Either Prometheus if prom := os.Getenv("AIS_PROMETHEUS"); prom != "" { glog.Infoln("Using Prometheus") prometheus.MustRegister(parent) // as prometheus.Collector return } // or StatsD var ( port = 8125 // StatsD default port, see https://github.com/etsy/stats probe = false // test-probe StatsD server at init time ) if portStr := os.Getenv("AIS_STATSD_PORT"); portStr != "" { if portNum, err := cmn.ParsePort(portStr); err != nil { debug.AssertNoErr(err) glog.Error(err) } else { port = portNum } } if probeStr := os.Getenv("AIS_STATSD_PROBE"); probeStr != "" { if probeBool, err := cos.ParseBool(probeStr); err != nil { glog.Error(err) } else { probe = probeBool } } id := strings.ReplaceAll(node.ID(), ":", "_") // ":" delineates name and value for StatsD statsD, err := statsd.New("localhost", port, "ais"+node.Type()+"."+id, probe) if err != nil { glog.Errorf("Starting up without StatsD: %v", err) } else { glog.Infoln("Using StatsD") } s.statsdC = statsD } // populate *prometheus.Desc and statsValue.label.prom // NOTE: naming; compare with statsTracker.register() func (s *CoreStats) initProm(node *cluster.Snode) { if !s.isPrometheus() { return } id := strings.ReplaceAll(node.ID(), ".", "_") for name, v := range s.Tracker { label := strings.ReplaceAll(name, ".", "_") v.label.prom = strings.ReplaceAll(label, ":", "_") help := v.kind if strings.HasSuffix(v.label.prom, "_n") { help = "total number of operations" } else if strings.HasSuffix(v.label.prom, "_size") { help = "total size (MB)" } else if strings.HasSuffix(v.label.prom, "avg_rsize") { help = "average read size (bytes)" } else if strings.HasSuffix(v.label.prom, "avg_wsize") { help = "average write size (bytes)" } else if strings.HasSuffix(v.label.prom, "_ns") { v.label.prom = strings.TrimSuffix(v.label.prom, "_ns") + "_ms" help = "latency (milliseconds)" } else if strings.Contains(v.label.prom, "_ns_") { v.label.prom = strings.ReplaceAll(v.label.prom, "_ns_", "_ms_") if name == Uptime { v.label.prom = strings.ReplaceAll(v.label.prom, "_ns_", "") help = "uptime (seconds)" } else { help = "latency (milliseconds)" } } else if strings.HasSuffix(v.label.prom, "_bps") { v.label.prom = strings.TrimSuffix(v.label.prom, "_bps") + "_mbps" help = "throughput (MB/s)" } fullqn := prometheus.BuildFQName("ais", node.Type(), id+"_"+v.label.prom) s.promDesc[name] = prometheus.NewDesc(fullqn, help, nil /*variableLabels*/, nil /*constLabels*/) } } func (s *CoreStats) updateUptime(d time.Duration) { v := s.Tracker[Uptime] v.Lock() v.Value = d.Nanoseconds() v.Unlock() } func (s *CoreStats) MarshalJSON() ([]byte, error) { return jsoniter.Marshal(s.Tracker) } func (s *CoreStats) UnmarshalJSON(b []byte) error { return jsoniter.Unmarshal(b, &s.Tracker) } func (s *CoreStats) get(name string) (val int64) { v := s.Tracker[name] v.RLock() val = v.Value v.RUnlock() return } // NOTE naming convention: ".n" for the count and ".ns" for duration (nanoseconds) func (s *CoreStats) doAdd(name, nameSuffix string, val int64) { v, ok := s.Tracker[name] debug.Assertf(ok, "invalid stats name %q", name) switch v.kind { case KindLatency: v.Lock() v.numSamples++ v.cumulative += val v.Value += val v.Unlock() case KindThroughput: v.Lock() v.cumulative += val v.Value += val 
v.Unlock() case KindCounter: v.Lock() v.Value += val v.Unlock() // NOTE: // - currently only counters; // - non-empty suffix forces an immediate Tx with no aggregation (see below); // - suffix is an arbitrary string that can be defined at runtime; // - e.g. usage: per-mountpath error counters. if !s.isPrometheus() && nameSuffix != "" { s.statsdC.Send(v.label.comm+"."+nameSuffix, 1, metric{Type: statsd.Counter, Name: "count", Value: val}) } default: debug.AssertMsg(false, v.kind) } } func (s *CoreStats) copyT(ctracker copyTracker, idlePrefs []string) (idle bool) { idle = true s.sgl.Reset() for name, v := range s.Tracker { switch v.kind { case KindLatency: var lat int64 v.Lock() if v.numSamples > 0 { lat = v.Value / v.numSamples ctracker[name] = copyValue{lat} if !match(name, idlePrefs) { idle = false } } v.Value = 0 v.numSamples = 0 v.Unlock() // NOTE: ns to ms and not reporting zeros millis := cos.DivRound(lat, int64(time.Millisecond)) if !s.isPrometheus() && millis > 0 && strings.HasSuffix(name, ".ns") { s.statsdC.AppMetric(metric{Type: statsd.Timer, Name: v.label.stsd, Value: float64(millis)}, s.sgl) } case KindThroughput, KindComputedThroughput: var throughput int64 v.Lock() if v.Value > 0 { throughput = v.Value if v.kind != KindComputedThroughput { throughput /= cos.MaxI64(int64(s.statsTime.Seconds()), 1) } ctracker[name] = copyValue{throughput} idle = false v.Value = 0 } v.Unlock() if !s.isPrometheus() && throughput > 0 { fv := roundMBs(throughput) s.statsdC.AppMetric(metric{Type: statsd.Gauge, Name: v.label.stsd, Value: fv}, s.sgl) } case KindCounter: var cnt int64 v.RLock() if v.Value > 0 { cnt = v.Value if prev, ok := ctracker[name]; !ok || prev.Value != cnt { ctracker[name] = copyValue{cnt} if !match(name, idlePrefs) { idle = false } } else { cnt = 0 } } v.RUnlock() if !s.isPrometheus() && cnt > 0 { if strings.HasSuffix(name, ".size") { // target only suffix metricType := statsd.Counter if v.label.comm == "dl" { metricType = statsd.PersistentCounter } fv := roundMBs(cnt) s.statsdC.AppMetric(metric{Type: metricType, Name: v.label.stsd, Value: fv}, s.sgl) } else { s.statsdC.AppMetric(metric{Type: statsd.Counter, Name: v.label.stsd, Value: cnt}, s.sgl) } } case KindGauge: ctracker[name] = copyValue{v.Value} if !s.isPrometheus() { s.statsdC.AppMetric(metric{Type: statsd.Gauge, Name: v.label.stsd, Value: float64(v.Value)}, s.sgl) } default: ctracker[name] = copyValue{v.Value} // KindSpecial/KindDelta as is and wo/ lock } } if !s.isPrometheus() { s.statsdC.SendSGL(s.sgl) } debug.SetExpvar(glog.SmoduleStats, "num-goroutines", int64(runtime.NumGoroutine())) return } // serves to satisfy REST API what=stats query func (s *CoreStats) copyCumulative(ctracker copyTracker) { for name, v := range s.Tracker { v.RLock() if v.kind == KindLatency || v.kind == KindThroughput { ctracker[name] = copyValue{v.cumulative} } else if v.kind == KindCounter { if v.Value != 0 { ctracker[name] = copyValue{v.Value} } } else { // KindSpecial, KindComputedThroughput, KindGauge ctracker[name] = copyValue{v.Value} } v.RUnlock() } } //////////////// // statsValue // //////////////// // interface guard var ( _ json.Marshaler = (*statsValue)(nil) _ json.Unmarshaler = (*statsValue)(nil) ) func (v *statsValue) MarshalJSON() (b []byte, err error) { v.RLock() b, err = jsoniter.Marshal(v.Value) v.RUnlock() return } func (v *statsValue) UnmarshalJSON(b []byte) error { return jsoniter.Unmarshal(b, &v.Value) } /////////////// // copyValue // /////////////// // interface guard var ( _ json.Marshaler = (*copyValue)(nil) _ 
json.Unmarshaler = (*copyValue)(nil) ) func (v copyValue) MarshalJSON() (b []byte, err error) { return jsoniter.Marshal(v.Value) } func (v *copyValue) UnmarshalJSON(b []byte) error { return jsoniter.Unmarshal(b, &v.Value) } ////////////////// // statsTracker // ////////////////// // NOTE: naming; compare with CoreStats.initProm() func (tracker statsTracker) register(node *cluster.Snode, name, kind string, isCommon ...bool) { v := &statsValue{kind: kind} if len(isCommon) > 0 { v.isCommon = isCommon[0] } debug.Assertf(cos.StringInSlice(kind, kinds), "invalid metric kind %q", kind) // in StatsD metrics ":" delineates the name and the value - replace with underscore switch kind { case KindCounter: if strings.HasSuffix(name, ".size") { v.label.comm = strings.TrimSuffix(name, ".size") v.label.comm = strings.ReplaceAll(v.label.comm, ":", "_") v.label.stsd = fmt.Sprintf("%s.%s.%s.%s", "ais"+node.Type(), node.ID(), v.label.comm, "mbytes") } else { debug.AssertMsg(strings.HasSuffix(name, ".n"), name) v.label.comm = strings.TrimSuffix(name, ".n") v.label.comm = strings.ReplaceAll(v.label.comm, ":", "_") v.label.stsd = fmt.Sprintf("%s.%s.%s.%s", "ais"+node.Type(), node.ID(), v.label.comm, "count") } case KindLatency: debug.AssertMsg(strings.Contains(name, ".ns"), name) v.label.comm = strings.TrimSuffix(name, ".ns") v.label.comm = strings.ReplaceAll(v.label.comm, ".ns.", ".") v.label.comm = strings.ReplaceAll(v.label.comm, ":", "_") v.label.stsd = fmt.Sprintf("%s.%s.%s.%s", "ais"+node.Type(), node.ID(), v.label.comm, "ms") case KindThroughput, KindComputedThroughput: debug.AssertMsg(strings.HasSuffix(name, ".bps"), name) v.label.comm = strings.TrimSuffix(name, ".bps") v.label.comm = strings.ReplaceAll(v.label.comm, ":", "_") v.label.stsd = fmt.Sprintf("%s.%s.%s.%s", "ais"+node.Type(), node.ID(), v.label.comm, "mbps") default: v.label.comm = name v.label.comm = strings.ReplaceAll(v.label.comm, ":", "_") if name == Uptime { v.label.comm = strings.ReplaceAll(v.label.comm, ".ns.", ".") v.label.stsd = fmt.Sprintf("%s.%s.%s.%s", "ais"+node.Type(), node.ID(), v.label.comm, "seconds") } else { v.label.stsd = fmt.Sprintf("%s.%s.%s", "ais"+node.Type(), node.ID(), v.label.comm) } } tracker[name] = v } // register common metrics; see RegMetrics() in target_stats.go func (tracker statsTracker) regCommonMetrics(node *cluster.Snode) { tracker.register(node, GetCount, KindCounter, true) tracker.register(node, PutCount, KindCounter, true) tracker.register(node, AppendCount, KindCounter, true) tracker.register(node, DeleteCount, KindCounter, true) tracker.register(node, RenameCount, KindCounter, true) tracker.register(node, ListCount, KindCounter, true) tracker.register(node, GetLatency, KindLatency, true) tracker.register(node, ListLatency, KindLatency, true) tracker.register(node, KeepAliveMinLatency, KindLatency, true) tracker.register(node, KeepAliveMaxLatency, KindLatency, true) tracker.register(node, KeepAliveLatency, KindLatency, true) tracker.register(node, ErrCount, KindCounter, true) tracker.register(node, ErrGetCount, KindCounter, true) tracker.register(node, ErrDeleteCount, KindCounter, true) tracker.register(node, ErrPostCount, KindCounter, true) tracker.register(node, ErrPutCount, KindCounter, true) tracker.register(node, ErrHeadCount, KindCounter, true) tracker.register(node, ErrListCount, KindCounter, true) tracker.register(node, ErrRangeCount, KindCounter, true) tracker.register(node, ErrDownloadCount, KindCounter, true) tracker.register(node, Uptime, KindSpecial, true) } ///////////////// // 
statsRunner // ///////////////// // interface guard var ( _ prometheus.Collector = (*statsRunner)(nil) ) func (r *statsRunner) IsPrometheus() bool { return r.Core.isPrometheus() } func (r *statsRunner) Describe(ch chan<- *prometheus.Desc) { for _, desc := range r.Core.promDesc { ch <- desc } } func (r *statsRunner) Collect(ch chan<- prometheus.Metric) { if !r.StartedUp() { return } r.Core.promRLock() for name, v := range r.Core.Tracker { var ( val int64 fv float64 ) copyV, okc := r.ctracker[name] if !okc { continue } val = copyV.Value fv = float64(val) // 1. convert units switch v.kind { case KindCounter: if strings.HasSuffix(name, ".size") { fv = roundMBs(val) } case KindLatency: millis := cos.DivRound(val, int64(time.Millisecond)) fv = float64(millis) case KindThroughput: fv = roundMBs(val) default: if name == Uptime { seconds := cos.DivRound(val, int64(time.Second)) fv = float64(seconds) } } // 2. convert kind promMetricType := prometheus.GaugeValue if v.kind == KindCounter { promMetricType = prometheus.CounterValue } // 3. publish desc, ok := r.Core.promDesc[name] debug.AssertMsg(ok, name) m, err := prometheus.NewConstMetric(desc, promMetricType, fv) debug.AssertNoErr(err) ch <- m } r.Core.promRUnlock() } func (r *statsRunner) Name() string { return r.name } func (r *statsRunner) CoreStats() *CoreStats { return r.Core } func (r *statsRunner) Get(name string) (val int64) { return r.Core.get(name) } func (r *statsRunner) runcommon(logger statsLogger) error { var ( i, j time.Duration sleep = startupSleep ticker = time.NewTicker(sleep) // NOTE: the maximum time we agree to wait for r.daemon.ClusterStarted() config = cmn.GCO.Get() deadline = startupDeadlineMultiplier * config.Timeout.Startup.D() ) waitStartup: for { select { case <-r.workCh: // Drain workCh until the daemon (proxy or target) starts up. 
case <-r.stopCh: ticker.Stop() return nil case <-ticker.C: if r.daemon.ClusterStarted() { break waitStartup } if logger.standingBy() && sleep == startupSleep { sleep = config.Periodic.StatsTime.D() ticker.Reset(sleep) deadline = time.Hour continue } j += sleep if j > deadline { ticker.Stop() return cmn.ErrStartupTimeout } i += sleep if i > config.Timeout.Startup.D() && !logger.standingBy() { glog.Errorln("startup is taking unusually long time...") i = 0 } } } ticker.Stop() config = cmn.GCO.Get() goMaxProcs := runtime.GOMAXPROCS(0) glog.Infof("Starting %s", r.Name()) hk.Reg(r.Name()+".gc.logs", recycleLogs, logsMaxSizeCheckTime) statsTime := config.Periodic.StatsTime.D() r.ticker = time.NewTicker(statsTime) r.startedUp.Store(true) var ( checkNumGorHigh int64 startTime = mono.NanoTime() lastGlogFlushTime = startTime ) for { select { case nv, ok := <-r.workCh: if ok { logger.doAdd(nv) } case <-r.ticker.C: now := mono.NanoTime() logger.log(now, time.Duration(now-startTime)) // uptime checkNumGorHigh = _whingeGoroutines(now, checkNumGorHigh, goMaxProcs) config = cmn.GCO.Get() if statsTime != config.Periodic.StatsTime.D() { statsTime = config.Periodic.StatsTime.D() r.ticker.Reset(statsTime) logger.statsTime(statsTime) } now = mono.NanoTime() if time.Duration(now-lastGlogFlushTime) > glogPeriodicFlushTime { glog.Flush() lastGlogFlushTime = mono.NanoTime() } case <-r.stopCh: r.ticker.Stop() return nil } } } func _whingeGoroutines(now, checkNumGorHigh int64, goMaxProcs int) int64 { var ( ngr = runtime.NumGoroutine() extreme bool ) if ngr < goMaxProcs*numGorHigh { return 0 } if ngr >= goMaxProcs*numGorExtreme { extreme = true glog.Errorf("Extremely high number of goroutines: %d", ngr) } if checkNumGorHigh == 0 { checkNumGorHigh = now } else if time.Duration(now-checkNumGorHigh) > numGorHighCheckTime { if !extreme { glog.Warningf("High number of goroutines: %d", ngr) } checkNumGorHigh = 0 } return checkNumGorHigh } func (r *statsRunner) StartedUp() bool { return r.startedUp.Load() } func (r *statsRunner) Stop(err error) { glog.Infof("Stopping %s, err: %v", r.Name(), err) r.stopCh <- struct{}{} if !r.IsPrometheus() { r.Core.statsdC.Close() } close(r.stopCh) } // common impl // NOTE: currently, proxy's stats == common and hardcoded func (r *statsRunner) Add(name string, val int64) { r.workCh <- NamedVal64{Name: name, Value: val} } func (r *statsRunner) AddMany(nvs ...NamedVal64) { for _, nv := range nvs { r.workCh <- nv } } func recycleLogs() time.Duration { // keep total log size below the configured max go removeLogs(cmn.GCO.Get()) return logsMaxSizeCheckTime } func removeLogs(config *cmn.Config) { maxtotal := int64(config.Log.MaxTotal) logfinfos, err := ioutil.ReadDir(config.LogDir) if err != nil { glog.Errorf("GC logs: cannot read log dir %s, err: %v", config.LogDir, err) _ = cos.CreateDir(config.LogDir) // FIXME: (local non-containerized + kill/restart under test) return } // sample name ais.ip-10-0-2-19.root.log.INFO.20180404-031540.2249 logtypes := []string{".INFO.", ".WARNING.", ".ERROR."} for _, logtype := range logtypes { var ( tot = int64(0) infos = make([]os.FileInfo, 0, len(logfinfos)) ) for _, logfi := range logfinfos { if logfi.IsDir() { continue } if !strings.Contains(logfi.Name(), ".log.") { continue } if strings.Contains(logfi.Name(), logtype) { tot += logfi.Size() infos = append(infos, logfi) } } if tot > maxtotal { removeOlderLogs(tot, maxtotal, config.LogDir, logtype, infos) } } } func removeOlderLogs(tot, maxtotal int64, logdir, logtype string, filteredInfos []os.FileInfo) { l := 
len(filteredInfos) if l <= 1 { glog.Warningf("GC logs: cannot cleanup %s, dir %s, tot %d, max %d", logtype, logdir, tot, maxtotal) return } fiLess := func(i, j int) bool { return filteredInfos[i].ModTime().Before(filteredInfos[j].ModTime()) } if glog.FastV(4, glog.SmoduleStats) { glog.Infof("GC logs: started") } sort.Slice(filteredInfos, fiLess) filteredInfos = filteredInfos[:l-1] // except the last = current for _, logfi := range filteredInfos { logfqn := filepath.Join(logdir, logfi.Name()) if err := cos.RemoveFile(logfqn); err == nil { tot -= logfi.Size() if glog.FastV(4, glog.SmoduleStats) { glog.Infof("GC logs: removed %s", logfqn) } if tot < maxtotal { break } } else { glog.Errorf("GC logs: failed to remove %s", logfqn) } } if glog.FastV(4, glog.SmoduleStats) { glog.Infof("GC logs: done") } } func (r *statsRunner) AddErrorHTTP(method string, val int64) { switch method { case http.MethodGet: r.workCh <- NamedVal64{Name: ErrGetCount, Value: val} case http.MethodDelete: r.workCh <- NamedVal64{Name: ErrDeleteCount, Value: val} case http.MethodPost: r.workCh <- NamedVal64{Name: ErrPostCount, Value: val} case http.MethodPut: r.workCh <- NamedVal64{Name: ErrPutCount, Value: val} case http.MethodHead: r.workCh <- NamedVal64{Name: ErrHeadCount, Value: val} default: r.workCh <- NamedVal64{Name: ErrCount, Value: val} } }
[ "\"AIS_PROMETHEUS\"", "\"AIS_STATSD_PORT\"", "\"AIS_STATSD_PROBE\"" ]
[]
[ "AIS_STATSD_PROBE", "AIS_STATSD_PORT", "AIS_PROMETHEUS" ]
[]
["AIS_STATSD_PROBE", "AIS_STATSD_PORT", "AIS_PROMETHEUS"]
go
3
0
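The stats record above selects its metrics backend from the three environment variables listed in its metadata: a non-empty AIS_PROMETHEUS switches the runner to Prometheus, otherwise AIS_STATSD_PORT and AIS_STATSD_PROBE tune the StatsD client. The following is a minimal, standalone Go sketch of that selection pattern using only the standard library; the variable names mirror the record, but the helper itself is illustrative and is not AIStore code.

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// metricsConfig mirrors the three environment variables used by the record
// above: one switch for Prometheus, and a port/probe pair for StatsD.
type metricsConfig struct {
	prometheus  bool
	statsdPort  int
	statsdProbe bool
}

// fromEnv applies the same precedence as the record: a non-empty
// AIS_PROMETHEUS short-circuits StatsD entirely; otherwise the StatsD
// defaults are overridden only when the variables parse cleanly.
func fromEnv() metricsConfig {
	cfg := metricsConfig{statsdPort: 8125} // StatsD default port
	if os.Getenv("AIS_PROMETHEUS") != "" {
		cfg.prometheus = true
		return cfg
	}
	if v := os.Getenv("AIS_STATSD_PORT"); v != "" {
		if port, err := strconv.Atoi(v); err == nil && port > 0 && port < 65536 {
			cfg.statsdPort = port
		}
	}
	if v := os.Getenv("AIS_STATSD_PROBE"); v != "" {
		if probe, err := strconv.ParseBool(v); err == nil {
			cfg.statsdProbe = probe
		}
	}
	return cfg
}

func main() {
	fmt.Printf("%+v\n", fromEnv())
}
```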
src/core/build_label.go
package core import ( "fmt" "os" "path" "strings" "time" "github.com/thought-machine/go-flags" "gopkg.in/op/go-logging.v1" "github.com/thought-machine/please/src/process" ) var log = logging.MustGetLogger("core") // A BuildLabel is a representation of an identifier of a build target, e.g. //spam/eggs:ham // corresponds to BuildLabel{PackageName: spam/eggs name: ham} // BuildLabels are always absolute, so relative identifiers // like :ham are always parsed into an absolute form. // There is also implicit expansion of the final element of a target (ala Blaze) // so //spam/eggs is equivalent to //spam/eggs:eggs // // It can also be in a subrepo, in which case the syntax is @subrepo//spam/eggs:ham. type BuildLabel struct { PackageName string Name string Subrepo string } // WholeGraph represents parsing the entire graph (i.e. //...). // We use this specially in one or two places. var WholeGraph = []BuildLabel{{PackageName: "", Name: "..."}} // BuildLabelStdin is used to indicate that we're going to consume build labels from stdin. var BuildLabelStdin = BuildLabel{PackageName: "", Name: "_STDIN"} // OriginalTarget is used to indicate one of the originally requested targets on the command line. var OriginalTarget = BuildLabel{PackageName: "", Name: "_ORIGINAL"} // String returns a string representation of this build label. func (label BuildLabel) String() string { zero := BuildLabel{} if label == zero { return "" } s := "//" + label.PackageName if label.Subrepo != "" { s = "///" + label.Subrepo + s } if label.IsAllSubpackages() { if label.PackageName == "" { return s + "..." } return s + "/..." } return s + ":" + label.Name } // ShortString returns a string representation of this build label, abbreviated if // possible, and relative to the given label. func (label BuildLabel) ShortString(context BuildLabel) string { if label.Subrepo != context.Subrepo { return label.String() } else if label.PackageName == context.PackageName { return ":" + label.Name } else if label.Name == path.Base(label.PackageName) { return "//" + label.PackageName } label.Subrepo = "" return label.String() } // NewBuildLabel constructs a new build label from the given components. Panics on failure. func NewBuildLabel(pkgName, name string) BuildLabel { label, err := TryNewBuildLabel(pkgName, name) if err != nil { panic(err) } return label } // TryNewBuildLabel constructs a new build label from the given components. func TryNewBuildLabel(pkgName, name string) (BuildLabel, error) { if err := validateNames(pkgName, name); err != nil { return BuildLabel{}, err } return BuildLabel{PackageName: pkgName, Name: name}, nil } // validateNames returns an error if the package name of target name isn't accepted. func validateNames(pkgName, name string) error { if !validatePackageName(pkgName) { return fmt.Errorf("Invalid package name: %s", pkgName) } else if !validateTargetName(name) { return fmt.Errorf("Invalid target name: %s", name) } else if err := validateSuffixes(pkgName, name); err != nil { return err } return nil } // validateSuffixes checks that there are no invalid suffixes on the target name. func validateSuffixes(pkgName, name string) error { if strings.HasSuffix(name, buildDirSuffix) || strings.HasSuffix(name, testDirSuffix) || strings.HasSuffix(pkgName, buildDirSuffix) || strings.HasSuffix(pkgName, testDirSuffix) { return fmt.Errorf("._build and ._test are reserved suffixes") } return nil } // validatePackageName checks whether this string is a valid package name and returns true if so. 
func validatePackageName(name string) bool { return name == "" || (name[0] != '/' && name[len(name)-1] != '/' && !strings.ContainsAny(name, `|$*?[]{}:()&\`) && !strings.Contains(name, "//")) } // validateTargetName checks whether this string is a valid target name and returns true if so. func validateTargetName(name string) bool { return name != "" && !strings.ContainsAny(name, `|$*?[]{}:()&/\`) && (name[0] != '.' || name == "...") && !strings.HasSuffix(name, buildDirSuffix) && !strings.HasSuffix(name, testDirSuffix) } // ParseBuildLabel parses a single build label from a string. Panics on failure. func ParseBuildLabel(target, currentPath string) BuildLabel { label, err := TryParseBuildLabel(target, currentPath, "") if err != nil { panic(err) } return label } // TryParseBuildLabel attempts to parse a single build label from a string. Returns an error if unsuccessful. func TryParseBuildLabel(target, currentPath, subrepo string) (BuildLabel, error) { if pkg, name, subrepo := parseBuildLabelParts(target, currentPath, subrepo); name != "" { return BuildLabel{PackageName: pkg, Name: name, Subrepo: subrepo}, nil } return BuildLabel{}, fmt.Errorf("Invalid build label: %s", target) } // ParseBuildLabelContext parses a build label in the context of a package. // It panics on error. func ParseBuildLabelContext(target string, pkg *Package) BuildLabel { if p, name, subrepo := parseBuildLabelParts(target, pkg.Name, pkg.SubrepoName); name != "" { if subrepo == "" && pkg.Subrepo != nil && (target[0] != '@' && !strings.HasPrefix(target, "///")) { subrepo = pkg.Subrepo.Name } else { subrepo = pkg.SubrepoArchName(subrepo) } return BuildLabel{PackageName: p, Name: name, Subrepo: subrepo} } // It's gonna fail, let this guy panic for us. return ParseBuildLabel(target, pkg.Name) } // parseBuildLabelParts parses a build label into the package & name parts. // If valid, the name string will always be populated; the package string might not be if it's a local form. func parseBuildLabelParts(target, currentPath, subrepo string) (string, string, string) { if len(target) < 2 { // Always must start with // or : and must have at least one char following. return "", "", "" } else if target[0] == ':' { if !validateTargetName(target[1:]) { return "", "", "" } return currentPath, target[1:], "" } else if target[0] == '@' { // @subrepo//pkg:target or @subrepo:target syntax return parseBuildLabelSubrepo(target[1:], currentPath) } else if strings.HasPrefix(target, "///") { // ///subrepo/pkg:target syntax. return parseBuildLabelSubrepo(target[3:], currentPath) } else if target[0] != '/' || target[1] != '/' { return "", "", "" } else if idx := strings.IndexRune(target, ':'); idx != -1 { pkg := target[2:idx] name := target[idx+1:] // Check ... explicitly to prevent :... which isn't allowed. if !validatePackageName(pkg) || !validateTargetName(name) || name == "..." { return "", "", "" } return pkg, name, subrepo } else if !validatePackageName(target[2:]) { return "", "", "" } // Must be the abbreviated form (//pkg) or subtargets (//pkg/...), there's no : in it. if strings.HasSuffix(target, "/...") { return strings.TrimRight(target[2:len(target)-3], "/"), "...", "" } else if idx := strings.LastIndexByte(target, '/'); idx != -1 { return target[2:], target[idx+1:], subrepo } return target[2:], target[2:], subrepo } // parseBuildLabelSubrepo parses a build label that began with a subrepo symbol (either @ or ///). 
func parseBuildLabelSubrepo(target, currentPath string) (string, string, string) { idx := strings.Index(target, "//") if idx == -1 { // if subrepo and target are the same name, then @subrepo syntax will also suffice if idx = strings.IndexByte(target, ':'); idx == -1 { if idx := strings.LastIndexByte(target, '/'); idx != -1 { return "", target[idx+1:], target } return "", target, target } } pkg, name, _ := parseBuildLabelParts(target[idx:], currentPath, "") return pkg, name, target[:idx] } // As above, but allows parsing of relative labels (eg. rules:python_rules) // which is convenient at the shell prompt func parseMaybeRelativeBuildLabel(target, subdir string) (BuildLabel, error) { // Try the ones that don't need locating the repo root first. startsWithColon := strings.HasPrefix(target, ":") if !startsWithColon { if label, err := TryParseBuildLabel(target, "", ""); err == nil || strings.HasPrefix(target, "//") { return label, err } } // Now we need to locate the repo root and initial package. // Deliberately leave this till after the above to facilitate the --repo_root flag. if subdir == "" { MustFindRepoRoot() subdir = initialPackage } if startsWithColon { return TryParseBuildLabel(target, subdir, "") } // Presumably it's just underneath this directory (note that if it was absolute we returned above) return TryParseBuildLabel("//"+path.Join(subdir, target), "", "") } // ParseBuildLabels parses a bunch of build labels from strings. It dies on failure. // Relative labels are allowed since this is generally used at initialisation. func ParseBuildLabels(targets []string) []BuildLabel { ret := make([]BuildLabel, len(targets)) for i, target := range targets { if label, err := parseMaybeRelativeBuildLabel(target, ""); err != nil { log.Fatalf("%s", err) } else { ret[i] = label } } return ret } // IsAllSubpackages returns true if the label ends in ..., ie. it includes all subpackages. func (label BuildLabel) IsAllSubpackages() bool { return label.Name == "..." } // IsAllTargets returns true if the label is the pseudo-label referring to all targets in this package. func (label BuildLabel) IsAllTargets() bool { return label.Name == "all" } // Includes returns true if label includes the other label (//pkg:target1 is covered by //pkg:all etc). func (label BuildLabel) Includes(that BuildLabel) bool { if (label.PackageName == "" && label.IsAllSubpackages()) || that.PackageName == label.PackageName || strings.HasPrefix(that.PackageName, label.PackageName+"/") { // We're in the same package or a subpackage of this visibility spec if label.IsAllSubpackages() { return true } else if label.PackageName == that.PackageName { if label.Name == that.Name || label.IsAllTargets() { return true } } } return false } // Less returns true if this build label would sort less than another one. func (label BuildLabel) Less(other BuildLabel) bool { if label.PackageName == other.PackageName { return label.Name < other.Name } return label.PackageName < other.PackageName } // Paths is an implementation of BuildInput interface; we use build labels directly as inputs. func (label BuildLabel) Paths(graph *BuildGraph) []string { return addPathPrefix(graph.TargetOrDie(label).Outputs(), label.PackageName) } // FullPaths is an implementation of BuildInput interface. func (label BuildLabel) FullPaths(graph *BuildGraph) []string { target := graph.TargetOrDie(label) return addPathPrefix(target.Outputs(), target.OutDir()) } // addPathPrefix adds a prefix to all the entries in a slice. 
func addPathPrefix(paths []string, prefix string) []string { ret := make([]string, len(paths)) for i, output := range paths { ret[i] = path.Join(prefix, output) } return ret } // LocalPaths is an implementation of BuildInput interface. func (label BuildLabel) LocalPaths(graph *BuildGraph) []string { return graph.TargetOrDie(label).Outputs() } // Label is an implementation of BuildInput interface. It always returns this label. func (label BuildLabel) Label() *BuildLabel { return &label } func (label BuildLabel) nonOutputLabel() *BuildLabel { return &label } // UnmarshalFlag unmarshals a build label from a command line flag. Implementation of flags.Unmarshaler interface. func (label *BuildLabel) UnmarshalFlag(value string) error { // This is only allowable here, not in any other usage of build labels. if value == "-" { *label = BuildLabelStdin return nil } else if l, err := parseMaybeRelativeBuildLabel(value, ""); err != nil { // This has to be fatal because of the way we're using the flags package; // we lose incoming flags if we return errors. // But don't die in completion mode. if os.Getenv("PLZ_COMPLETE") == "" { log.Fatalf("%s", err) } } else { *label = l } return nil } // UnmarshalText implements the encoding.TextUnmarshaler interface. // This is used by gcfg to unmarshal the config files. func (label *BuildLabel) UnmarshalText(text []byte) error { l, err := TryParseBuildLabel(string(text), "", "") *label = l return err } // Parent returns what would be the parent of a build label, or the label itself if it's parentless. // Note that there is not a concrete guarantee that the returned label exists in the build graph, // and that the label returned is the ultimate ancestor (ie. not necessarily immediate parent). func (label BuildLabel) Parent() BuildLabel { index := strings.IndexRune(label.Name, '#') if index == -1 || !strings.HasPrefix(label.Name, "_") { return label } label.Name = strings.TrimLeft(label.Name[:index], "_") return label } // HasParent returns true if the build label has a parent that's not itself. func (label BuildLabel) HasParent() bool { return label.Parent() != label } // IsEmpty returns true if this is an empty build label, i.e. nothing's populated it yet. func (label BuildLabel) IsEmpty() bool { return label.PackageName == "" && label.Name == "" } // PackageDir returns a path to the directory this target is in. // This is equivalent to PackageName in all cases except when at the repo root, when this // will return . instead. This is often easier to use in build rules. func (label BuildLabel) PackageDir() string { if label.PackageName == "" { return "." } return label.PackageName } // SubrepoLabel returns a build label corresponding to the subrepo part of this build label. func (label BuildLabel) SubrepoLabel() BuildLabel { if idx := strings.LastIndexByte(label.Subrepo, '/'); idx != -1 { return BuildLabel{PackageName: label.Subrepo[:idx], Name: label.Subrepo[idx+1:]} } // This is legit, the subrepo is defined at the root. return BuildLabel{Name: label.Subrepo} } // CanSee returns true if label can see the given dependency, or false if not. func (label BuildLabel) CanSee(state *BuildState, dep *BuildTarget) bool { // Targets are always visible to other targets in the same directory. 
if label.PackageName == dep.Label.PackageName { return true } else if dep.Label.isExperimental(state) && !label.isExperimental(state) { log.Error("Target %s cannot depend on experimental target %s", label, dep.Label) return false } parent := label.Parent() for _, vis := range dep.Visibility { if vis.Includes(parent) { return true } } if dep.Label.PackageName == parent.PackageName { return true } if label.isExperimental(state) { log.Warning("Visibility restrictions suppressed for %s since %s is in the experimental tree", dep.Label, label) return true } return false } // isExperimental returns true if this label is in the "experimental" tree func (label BuildLabel) isExperimental(state *BuildState) bool { for _, exp := range state.experimentalLabels { if exp.Includes(label) { return true } } return false } // Complete implements the flags.Completer interface, which is used for shell completion. // Unfortunately it's rather awkward to handle here; we need to do a proper parse in order // to find out what the possible build labels are, and we're not ready for that yet. // Returning to main is also awkward since the flags haven't parsed properly; all in all // it seems an easier (albeit inelegant) solution to start things over by re-execing ourselves. func (label BuildLabel) Complete(match string) []flags.Completion { if match == "" { os.Exit(0) } os.Setenv("PLZ_COMPLETE", match) os.Unsetenv("GO_FLAGS_COMPLETION") exec, _ := os.Executable() out, _, err := process.New("").ExecWithTimeout(nil, "", os.Environ(), 10*time.Second, false, false, false, append([]string{exec}, os.Args[1:]...)) if err != nil { return nil } ret := []flags.Completion{} for _, line := range strings.Split(string(out), "\n") { if line != "" { ret = append(ret, flags.Completion{Item: line}) } } return ret } // MarshalText implements the encoding.TextMarshaler interface, which makes BuildLabels // usable as map keys in JSON. // This implementation never returns an error. func (label BuildLabel) MarshalText() ([]byte, error) { return []byte(label.String()), nil } // A packageKey is a cut-down version of BuildLabel that only contains the package part. // It's used to key maps and so forth that don't care about the target name. type packageKey struct { Name, Subrepo string } // String implements the traditional fmt.Stringer interface. func (key packageKey) String() string { if key.Subrepo != "" { return "@" + key.Subrepo + "//" + key.Name } return key.Name } // LooksLikeABuildLabel returns true if the string appears to be a build label, false if not. // Useful for cases like rule sources where sources can be a filename or a label. func LooksLikeABuildLabel(str string) bool { return strings.HasPrefix(str, "//") || strings.HasPrefix(str, ":") || (strings.HasPrefix(str, "@") && (strings.ContainsRune(str, ':') || strings.Contains(str, "//"))) } // BuildLabels makes slices of build labels sortable. type BuildLabels []BuildLabel func (slice BuildLabels) Len() int { return len(slice) } func (slice BuildLabels) Less(i, j int) bool { return slice[i].Less(slice[j]) } func (slice BuildLabels) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] }
[ "\"PLZ_COMPLETE\"" ]
[]
[ "PLZ_COMPLETE" ]
[]
["PLZ_COMPLETE"]
go
1
0
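build_label.go reads PLZ_COMPLETE so that an unparseable label is only fatal outside of shell-completion mode. Below is a small standard-library-only sketch of that guard; parseLabel and unmarshalFlag are hypothetical stand-ins, not the Please API.

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"strings"
)

// parseLabel is a stand-in for the real build-label parser; it only checks
// the //pkg:name shape so the example stays self-contained.
func parseLabel(s string) (string, error) {
	if strings.HasPrefix(s, "//") && strings.Contains(s, ":") {
		return s, nil
	}
	return "", errors.New("invalid build label: " + s)
}

// unmarshalFlag mirrors the pattern in the record: a bad label is normally
// fatal, but while the binary is re-executed for completion (signalled by
// PLZ_COMPLETE) the error is swallowed so completions can still be printed.
func unmarshalFlag(value string) (string, error) {
	label, err := parseLabel(value)
	if err != nil {
		if os.Getenv("PLZ_COMPLETE") == "" {
			return "", err // would be log.Fatalf in the real code
		}
		return "", nil // completion mode: stay quiet
	}
	return label, nil
}

func main() {
	os.Setenv("PLZ_COMPLETE", "ta")
	label, err := unmarshalFlag("not-a-label")
	fmt.Println(label, err) // error suppressed while completing
}
```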
pwnlib/gdb.py
# -*- coding: utf-8 -*- """ During exploit development, it is frequently useful to debug the target binary under GDB. Pwntools makes this easy to do with a handful of helper routines, designed to make your exploit-debug-update cycles much faster. Useful Functions ---------------- - :func:`attach` - Attach to an existing process - :func:`debug` - Start a new process under a debugger, stopped at the first instruction - :func:`debug_shellcode` - Build a binary with the provided shellcode, and start it under a debugger Debugging Tips -------------- The :func:`attach` and :func:`debug` functions will likely be your bread and butter for debugging. Both allow you to provide a script to pass to GDB when it is started, so that it can automatically set your breakpoints. Attaching to Processes ~~~~~~~~~~~~~~~~~~~~~~ To attach to an existing process, just use :func:`attach`. It is surprisingly versatile, and can attach to a :class:`.process` for simple binaries, or will automatically find the correct process to attach to for a forking server, if given a :class:`.remote` object. Spawning New Processes ~~~~~~~~~~~~~~~~~~~~~~ Attaching to processes with :func:`attach` is useful, but the state the process is in may vary. If you need to attach to a process very early, and debug it from the very first instruction (or even the start of ``main``), you instead should use :func:`debug`. When you use :func:`debug`, the return value is a :class:`.tube` object that you interact with exactly like normal. Tips and Troubleshooting ------------------------ ``NOPTRACE`` magic argument ~~~~~~~~~~~~~~~~~~~~~~~~~~~ It's quite cumbersome to comment and un-comment lines containing `attach`. You can cause these lines to be a no-op by running your script with the ``NOPTRACE`` argument appended, or with ``PWNLIB_NOPTRACE=1`` in the environment. :: $ python exploit.py NOPTRACE [+] Starting local process '/bin/bash': Done [!] Skipping debug attach since context.noptrace==True ... Kernel Yama ptrace_scope ~~~~~~~~~~~~~~~~~~~~~~~~ The Linux kernel v3.4 introduced a security mechanism called ``ptrace_scope``, which is intended to prevent processes from debugging each other unless there is a direct parent-child relationship. This causes some issues with the normal Pwntools workflow, since the process hierarchy looks like this: :: python ---> target `--> gdb Note that ``python`` is the parent of ``target``, not ``gdb``. In order to avoid this being a problem, Pwntools uses the function ``prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY)``. This disables Yama for any processes launched by Pwntools via :class:`.process` or via :meth:`.ssh.process`. Older versions of Pwntools did not perform the ``prctl`` step, and required that the Yama security feature was disabled systemwide, which requires ``root`` access. 
Member Documentation =============================== """ from __future__ import absolute_import from __future__ import division import os import random import re import shlex import tempfile import time from pwnlib import adb from pwnlib import atexit from pwnlib import elf from pwnlib import qemu from pwnlib import tubes from pwnlib.asm import _bfdname from pwnlib.asm import make_elf from pwnlib.asm import make_elf_from_assembly from pwnlib.context import LocalContext from pwnlib.context import context from pwnlib.log import getLogger from pwnlib.util import misc from pwnlib.util import proc log = getLogger(__name__) @LocalContext def debug_assembly(asm, gdbscript=None, vma=None): """debug_assembly(asm, gdbscript=None, vma=None) -> tube Creates an ELF file, and launches it under a debugger. This is identical to debug_shellcode, except that any defined symbols are available in GDB, and it saves you the explicit call to asm(). Arguments: asm(str): Assembly code to debug gdbscript(str): Script to run in GDB vma(int): Base address to load the shellcode at **kwargs: Override any :obj:`pwnlib.context.context` values. Returns: :class:`.process` Example: .. code-block:: python assembly = shellcraft.echo("Hello world!\n") io = gdb.debug_assembly(assembly) io.recvline() # 'Hello world!' """ tmp_elf = make_elf_from_assembly(asm, vma=vma, extract=False) os.chmod(tmp_elf, 0777) atexit.register(lambda: os.unlink(tmp_elf)) if context.os == 'android': android_path = '/data/data/%s' % os.path.basename(tmp_elf) adb.push(tmp_elf, android_path) tmp_elf = android_path return debug(tmp_elf, gdbscript=gdbscript, arch=context.arch) @LocalContext def debug_shellcode(data, gdbscript=None, vma=None): """ Creates an ELF file, and launches it under a debugger. Arguments: data(str): Assembled shellcode bytes gdbscript(str): Script to run in GDB vma(int): Base address to load the shellcode at **kwargs: Override any :obj:`pwnlib.context.context` values. Returns: :class:`.process` Example: .. code-block:: python assembly = shellcraft.echo("Hello world!\n") shellcode = asm(assembly) io = gdb.debug_shellcode(shellcode) io.recvline() # 'Hello world!' """ if isinstance(data, unicode): log.error("Shellcode is cannot be unicode. Did you mean debug_assembly?") tmp_elf = make_elf(data, extract=False, vma=vma) os.chmod(tmp_elf, 0777) atexit.register(lambda: os.unlink(tmp_elf)) if context.os == 'android': android_path = '/data/data/%s' % os.path.basename(tmp_elf) adb.push(tmp_elf, android_path) tmp_elf = android_path return debug(tmp_elf, gdbscript=gdbscript, arch=context.arch) def _gdbserver_args(pid=None, path=None, args=None, which=None): """_gdbserver_args(pid=None, path=None) -> list Sets up a listening gdbserver, to either connect to the specified PID, or launch the specified binary by its full path. Arguments: pid(int): Process ID to attach to path(str): Process to launch args(list): List of arguments to provide on the debugger command line which(callaable): Function to find the path of a binary. Returns: A list of arguments to invoke gdbserver. 
""" if [pid, path, args].count(None) != 2: log.error("Must specify exactly one of pid, path, or args") if not which: log.error("Must specify which.") gdbserver = '' if not args: args = [str(path or pid)] # Android targets have a distinct gdbserver if context.bits == 64: gdbserver = which('gdbserver64') if not gdbserver: gdbserver = which('gdbserver') if not gdbserver: log.error("gdbserver is not installed") orig_args = args gdbserver_args = [gdbserver, '--multi'] if context.aslr: gdbserver_args += ['--no-disable-randomization'] else: log.warn_once("Debugging process with ASLR disabled") if pid: gdbserver_args += ['--once', '--attach'] gdbserver_args += ['localhost:0'] gdbserver_args += args return gdbserver_args def _gdbserver_port(gdbserver, ssh): which = _get_which(ssh) # Process /bin/bash created; pid = 14366 # Listening on port 34816 process_created = gdbserver.recvline() if process_created.startswith('ERROR:'): raise ValueError( 'Failed to spawn process under gdbserver. gdbserver error message: %s' % process_created ) gdbserver.pid = int(process_created.split()[-1], 0) listening_on = '' while 'Listening' not in listening_on: listening_on = gdbserver.recvline() port = int(listening_on.split()[-1]) # Set up port forarding for SSH if ssh: remote = ssh.connect_remote('127.0.0.1', port) listener = tubes.listen.listen(0) port = listener.lport # Disable showing GDB traffic when debugging verbosity is increased remote.level = 'error' listener.level = 'error' # Hook them up remote <> listener # Set up port forwarding for ADB elif context.os == 'android': adb.forward(port) return port def _get_which(ssh=None): if ssh: return ssh.which elif context.os == 'android': return adb.which else: return misc.which def _get_runner(ssh=None): if ssh: return ssh.process elif context.os == 'android': return adb.process else: return tubes.process.process @LocalContext def debug(args, gdbscript=None, exe=None, ssh=None, env=None, sysroot=None, **kwargs): """debug(args) -> tube Launch a GDB server with the specified command line, and launches GDB to attach to it. Arguments: args(list): Arguments to the process, similar to :class:`.process`. gdbscript(str): GDB script to run. exe(str): Path to the executable on disk env(dict): Environment to start the binary in ssh(:class:`.ssh`): Remote ssh session to use to launch the process. sysroot(str): Foreign-architecture sysroot, used for QEMU-emulated binaries and Android targets. Returns: :class:`.process` or :class:`.ssh_channel`: A tube connected to the target process Notes: The debugger is attached automatically, and you can debug everything from the very beginning. This requires that both ``gdb`` and ``gdbserver`` are installed on your machine. When GDB opens via :func:`debug`, it will initially be stopped on the very first instruction of the dynamic linker (``ld.so``) for dynamically-linked binaries. Only the target binary and the linker will be loaded in memory, so you cannot set breakpoints on shared library routines like ``malloc`` since ``libc.so`` has not even been loaded yet. There are several ways to handle this: 1. Set a breakpoint on the executable's entry point (generally, ``_start``) - This is only invoked after all of the required shared libraries are loaded. - You can generally get the address via the GDB command ``info file``. 2. Use pending breakpoints via ``set breakpoint pending on`` - This has the side-effect of setting breakpoints for **every** function which matches the name. 
For ``malloc``, this will generally set a breakpoint in the executable's PLT, in the linker's internal ``malloc``, and eventaully in ``libc``'s malloc. 3. Wait for libraries to be loaded with ``set stop-on-solib-event 1`` - There is no way to stop on any specific library being loaded, and sometimes multiple libraries are loaded and only a single breakpoint is issued. - Generally, you just add a few ``continue`` commands until things are set up the way you want it to be. Examples: .. code-block:: python # Create a new process, and stop it at 'main' io = gdb.debug('bash', ''' break main continue ''') # Send a command to Bash io.sendline("echo hello") # Interact with the process io.interactive() .. code-block:: python # Create a new process, and stop it at 'main' io = gdb.debug('bash', ''' # Wait until we hit the main executable's entry point break _start continue # Now set breakpoint on shared library routines break malloc break free continue ''') # Send a command to Bash io.sendline("echo hello") # Interact with the process io.interactive() You can use :func:`debug` to spawn new processes on remote machines as well, by using the ``ssh=`` keyword to pass in your :class:`.ssh` instance. .. code-block:: python # Connect to the SSH server shell = ssh('passcode', 'pwnable.kr', 2222, password='guest') # Start a process on the server io = gdb.debug(['bash'], ssh=shell, gdbscript=''' break main continue ''') # Send a command to Bash io.sendline("echo hello") # Interact with the process io.interactive() """ if isinstance(args, (int, tubes.process.process, tubes.ssh.ssh_channel)): log.error("Use gdb.attach() to debug a running process") if env is None: env = os.environ if isinstance(args, (str, unicode)): args = [args] orig_args = args runner = _get_runner(ssh) which = _get_which(ssh) gdbscript = gdbscript or '' if context.noptrace: log.warn_once("Skipping debugger since context.noptrace==True") return runner(args, executable=exe, env=env) if ssh or context.native or (context.os == 'android'): args = _gdbserver_args(args=args, which=which) else: qemu_port = random.randint(1024, 65535) qemu_user = qemu.user_path() sysroot = sysroot or qemu.ld_prefix(env=env) if not qemu_user: log.error("Cannot debug %s binaries without appropriate QEMU binaries" % context.arch) args = [qemu_user, '-g', str(qemu_port)] + args # Use a sane default sysroot for Android if not sysroot and context.os == 'android': sysroot = 'remote:/' # Make sure gdbserver/qemu is installed if not which(args[0]): log.error("%s is not installed" % args[0]) exe = exe or which(orig_args[0]) if not exe: log.error("%s does not exist" % orig_args[0]) else: gdbscript = 'file "%s"\n%s' % (exe, gdbscript) # Start gdbserver/qemu # (Note: We override ASLR here for the gdbserver process itself.) gdbserver = runner(args, env=env, aslr=1, **kwargs) # Set the .executable on the process object. 
gdbserver.executable = which(orig_args[0]) # Find what port we need to connect to if context.native or (context.os == 'android'): port = _gdbserver_port(gdbserver, ssh) else: port = qemu_port host = '127.0.0.1' if not ssh and context.os == 'android': host = context.adb_host attach((host, port), exe=exe, gdbscript=gdbscript, need_ptrace_scope = False, ssh=ssh, sysroot=sysroot) # gdbserver outputs a message when a client connects garbage = gdbserver.recvline(timeout=1) # Some versions of gdbserver output an additional message garbage2 = gdbserver.recvline_startswith("Remote debugging from host ", timeout=1) return gdbserver def get_gdb_arch(): return { 'amd64': 'i386:x86-64', 'powerpc': 'powerpc:common', 'powerpc64': 'powerpc:common64', 'mips64': 'mips:isa64', 'thumb': 'arm' }.get(context.arch, context.arch) def binary(): """binary() -> str Returns: str: Path to the appropriate ``gdb`` binary to use. Example: >>> gdb.binary() # doctest: +SKIP '/usr/bin/gdb' """ gdb = misc.which('pwntools-gdb') or misc.which('gdb') if not context.native: multiarch = misc.which('gdb-multiarch') if multiarch: return multiarch log.warn_once('Cross-architecture debugging usually requires gdb-multiarch\n' \ '$ apt-get install gdb-multiarch') if not gdb: log.error('GDB is not installed\n' '$ apt-get install gdb') return gdb @LocalContext def attach(target, gdbscript = None, exe = None, need_ptrace_scope = True, gdb_args = None, ssh = None, sysroot = None): """attach(target, gdbscript = None, exe = None, arch = None, ssh = None) -> None Start GDB in a new terminal and attach to `target`. Arguments: target: The target to attach to. gdbscript(:obj:`str` or :obj:`file`): GDB script to run after attaching. exe(str): The path of the target binary. arch(str): Architechture of the target binary. If `exe` known GDB will detect the architechture automatically (if it is supported). gdb_args(list): List of additional arguments to pass to GDB. sysroot(str): Foreign-architecture sysroot, used for QEMU-emulated binaries and Android targets. Returns: PID of the GDB process (or the window which it is running in). Notes: The ``target`` argument is very robust, and can be any of the following: :obj:`int` PID of a process :obj:`str` Process name. The youngest process is selected. :obj:`tuple` Host, port pair of a listening ``gdbserver`` :class:`.process` Process to connect to :class:`.sock` Connected socket. The executable on the other end of the connection is attached to. Can be any socket type, including :class:`.listen` or :class:`.remote`. :class:`.ssh_channel` Remote process spawned via :meth:`.ssh.process`. This will use the GDB installed on the remote machine. If a password is required to connect, the ``sshpass`` program must be installed. Examples: .. code-block:: python # Attach directly to pid 1234 gdb.attach(1234) .. code-block:: python # Attach to the youngest "bash" process gdb.attach('bash') .. code-block:: python # Start a process bash = process('bash') # Attach the debugger gdb.attach(bash, ''' set follow-fork-mode child break execve continue ''') # Interact with the process bash.sendline('whoami') .. code-block:: python # Start a forking server server = process(['socat', 'tcp-listen:1234,fork,reuseaddr', 'exec:/bin/sh']) # Connect to the server io = remote('localhost', 1234) # Connect the debugger to the server-spawned process gdb.attach(io, ''' break exit continue ''') # Talk to the spawned 'sh' io.sendline('exit') .. 
code-block:: python # Connect to the SSH server shell = ssh('bandit0', 'bandit.labs.overthewire.org', password='bandit0', port=2220) # Start a process on the server cat = shell.process(['cat']) # Attach a debugger to it gdb.attach(cat, ''' break exit continue ''') # Cause `cat` to exit cat.close() """ if context.noptrace: log.warn_once("Skipping debug attach since context.noptrace==True") return # if gdbscript is a file object, then read it; we probably need to run some # more gdb script anyway if isinstance(gdbscript, file): with gdbscript: gdbscript = gdbscript.read() # enable gdb.attach(p, 'continue') if gdbscript and not gdbscript.endswith('\n'): gdbscript += '\n' # Use a sane default sysroot for Android if not sysroot and context.os == 'android': sysroot = 'remote:/' # gdb script to run before `gdbscript` pre = '' if not context.native: pre += 'set endian %s\n' % context.endian pre += 'set architecture %s\n' % get_gdb_arch() if sysroot: pre += 'set sysroot %s\n' % sysroot if context.os == 'android': pre += 'set gnutarget ' + _bfdname() + '\n' # let's see if we can find a pid to attach to pid = None if isinstance(target, (int, long)): # target is a pid, easy peasy pid = target elif isinstance(target, str): # pidof picks the youngest process pidof = proc.pidof if context.os == 'android': pidof = adb.pidof pids = pidof(target) if not pids: log.error('No such process: %s' % target) pid = pids[0] log.info('Attaching to youngest process "%s" (PID = %d)' % (target, pid)) elif isinstance(target, tubes.ssh.ssh_channel): if not target.pid: log.error("PID unknown for channel") shell = target.parent tmpfile = shell.mktemp() gdbscript = 'shell rm %s\n%s' % (tmpfile, gdbscript) shell.upload_data(gdbscript or '', tmpfile) cmd = ['ssh', '-C', '-t', '-p', str(shell.port), '-l', shell.user, shell.host] if shell.password: if not misc.which('sshpass'): log.error("sshpass must be installed to debug ssh processes") cmd = ['sshpass', '-p', shell.password] + cmd if shell.keyfile: cmd += ['-i', shell.keyfile] cmd += ['gdb -q %r %s -x "%s"' % (target.executable, target.pid, tmpfile)] misc.run_in_new_terminal(' '.join(cmd)) return elif isinstance(target, tubes.sock.sock): pids = proc.pidof(target) if not pids: log.error('could not find remote process (%s:%d) on this machine' % target.sock.getpeername()) pid = pids[0] elif isinstance(target, tubes.process.process): pid = proc.pidof(target)[0] exe = exe or target.executable elif isinstance(target, tuple) and len(target) == 2: host, port = target if context.os != 'android': pre += 'target remote %s:%d\n' % (host, port) else: # Android debugging is done over gdbserver, which can't follow # new inferiors (tldr; follow-fork-mode child) unless it is run # in extended-remote mode. 
pre += 'target extended-remote %s:%d\n' % (host, port) pre += 'set detach-on-fork off\n' def findexe(): for spid in proc.pidof(target): sexe = proc.exe(spid) name = os.path.basename(sexe) # XXX: parse cmdline if name.startswith('qemu-') or name.startswith('gdbserver'): exe = proc.cmdline(spid)[-1] return os.path.join(proc.cwd(spid), exe) exe = exe or findexe() elif isinstance(target, elf.corefile.Corefile): pre += 'target core %s\n' % target.path else: log.error("don't know how to attach to target: %r" % target) # if we have a pid but no exe, just look it up in /proc/ if pid and not exe: exe_fn = proc.exe if context.os == 'android': exe_fn = adb.proc_exe exe = exe_fn(pid) if not pid and not exe: log.error('could not find target process') if exe: # The 'file' statement should go first pre = 'file "%s"\n%s' % (exe, pre) cmd = binary() if gdb_args: cmd += ' ' cmd += ' '.join(gdb_args) if context.gdbinit: cmd += ' -nh ' # ignore ~/.gdbinit cmd += ' -x %s ' % context.gdbinit # load custom gdbinit cmd += ' -q ' if exe and context.native: if not ssh and not os.path.isfile(exe): log.error('No such file: %s' % exe) cmd += ' "%s"' % exe if pid and not context.os == 'android': cmd += ' %d' % pid if context.os == 'android' and pid: runner = _get_runner() which = _get_which() gdb_cmd = _gdbserver_args(pid=pid, which=which) gdbserver = runner(gdb_cmd) port = _gdbserver_port(gdbserver, None) host = context.adb_host pre += 'target extended-remote %s:%i\n' % (context.adb_host, port) # gdbserver on Android sets 'detach-on-fork on' which breaks things # when you're trying to debug anything that forks. pre += 'set detach-on-fork off\n' gdbscript = pre + (gdbscript or '') if gdbscript: tmp = tempfile.NamedTemporaryFile(prefix = 'pwn', suffix = '.gdb', delete = False) log.debug('Wrote gdb script to %r\n%s' % (tmp.name, gdbscript)) gdbscript = 'shell rm %s\n%s' % (tmp.name, gdbscript) tmp.write(gdbscript) tmp.close() cmd += ' -x "%s"' % (tmp.name) log.info('running in new terminal: %s' % cmd) gdb_pid = misc.run_in_new_terminal(cmd) if pid and context.native: proc.wait_for_debugger(pid) return gdb_pid def ssh_gdb(ssh, argv, gdbscript = None, arch = None, **kwargs): if not isinstance(argv, (list, tuple)): argv = [argv] exe = argv[0] argv = ["gdbserver", "--multi", "127.0.0.1:0"] + argv # Download the executable local_exe = os.path.basename(exe) ssh.download_file(ssh.which(exe), local_exe) # Run the process c = ssh.process(argv, **kwargs) # Find the port for the gdb server c.recvuntil('port ') line = c.recvline().strip() gdbport = re.match('[0-9]+', line) if gdbport: gdbport = int(gdbport.group(0)) l = tubes.listen.listen(0) forwardport = l.lport attach(('127.0.0.1', forwardport), gdbscript, local_exe, arch, ssh=ssh) l.wait_for_connection() <> ssh.connect_remote('127.0.0.1', gdbport) return c def find_module_addresses(binary, ssh=None, ulimit=False): """ Cheat to find modules by using GDB. We can't use ``/proc/$pid/map`` since some servers forbid it. This breaks ``info proc`` in GDB, but ``info sharedlibrary`` still works. Additionally, ``info sharedlibrary`` works on FreeBSD, which may not have procfs enabled or accessible. The output looks like this: :: info proc mapping process 13961 warning: unable to open /proc file '/proc/13961/maps' info sharedlibrary From To Syms Read Shared Object Library 0xf7fdc820 0xf7ff505f Yes (*) /lib/ld-linux.so.2 0xf7fbb650 0xf7fc79f8 Yes /lib32/libpthread.so.0 0xf7e26f10 0xf7f5b51c Yes (*) /lib32/libc.so.6 (*): Shared library is missing debugging information. 
Note that the raw addresses provided by ``info sharedlibrary`` are actually the address of the ``.text`` segment, not the image base address. This routine automates the entire process of: 1. Downloading the binaries from the remote server 2. Scraping GDB for the information 3. Loading each library into an ELF 4. Fixing up the base address vs. the ``.text`` segment address Arguments: binary(str): Path to the binary on the remote server ssh(pwnlib.tubes.tube): SSH connection through which to load the libraries. If left as :const:`None`, will use a :class:`pwnlib.tubes.process.process`. ulimit(bool): Set to :const:`True` to run "ulimit -s unlimited" before GDB. Returns: A list of pwnlib.elf.ELF objects, with correct base addresses. Example: >>> with context.local(log_level=9999): # doctest: +SKIP ... shell = ssh(host='bandit.labs.overthewire.org',user='bandit0',password='bandit0', port=2220) ... bash_libs = gdb.find_module_addresses('/bin/bash', shell) >>> os.path.basename(bash_libs[0].path) # doctest: +SKIP 'libc.so.6' >>> hex(bash_libs[0].symbols['system']) # doctest: +SKIP '0x7ffff7634660' """ # # Download all of the remote libraries # if ssh: runner = ssh.run local_bin = ssh.download_file(binary) local_elf = elf.ELF(os.path.basename(binary)) local_libs = ssh.libs(binary) else: runner = tubes.process.process local_elf = elf.ELF(binary) local_libs = local_elf.libs entry = local_elf.header.e_entry # # Get the addresses from GDB # libs = {} cmd = "gdb -q --args %s" % (binary) expr = re.compile(r'(0x\S+)[^/]+(.*)') if ulimit: cmd = 'sh -c "(ulimit -s unlimited; %s)"' % cmd cmd = shlex.split(cmd) with runner(cmd) as gdb: if context.aslr: gdb.sendline('set disable-randomization off') gdb.send(""" set prompt break *%#x run """ % entry) gdb.clean(2) gdb.sendline('info sharedlibrary') lines = gdb.recvrepeat(2) for line in lines.splitlines(): m = expr.match(line) if m: libs[m.group(2)] = int(m.group(1),16) gdb.sendline('kill') gdb.sendline('y') gdb.sendline('quit') # # Fix up all of the addresses against the .text address # rv = [] for remote_path,text_address in sorted(libs.items()): # Match up the local copy to the remote path try: path = next(p for p in local_libs.keys() if remote_path in p) except StopIteration: print "Skipping %r" % remote_path continue # Load it lib = elf.ELF(path) # Find its text segment text = lib.get_section_by_name('.text') # Fix the address lib.address = text_address - text.header.sh_addr rv.append(lib) return rv def corefile(process): r"""Drops a core file for the process. Arguments: process: Process to dump Returns: :class:`.Core`: The generated core file """ if context.noptrace: log.warn_once("Skipping corefile since context.noptrace==True") return corefile_path = './core.%s.%i' % (os.path.basename(process.executable), process.pid) # Due to https://sourceware.org/bugzilla/show_bug.cgi?id=16092 # will disregard coredump_filter, and will not dump private mappings. if version() < (7,11): log.warn_once('The installed GDB (%s) does not emit core-dumps which ' 'contain all of the data in the process.\n' 'Upgrade to GDB >= 7.11 for better core-dumps.' 
% binary()) # This is effectively the same as what the 'gcore' binary does gdb_args = ['-batch', '-q', '--nx', '-ex', '"set pagination off"', '-ex', '"set height 0"', '-ex', '"set width 0"', '-ex', '"set use-coredump-filter on"', '-ex', '"generate-core-file %s"' % corefile_path, '-ex', 'detach'] with context.local(terminal = ['sh', '-c']): with context.quiet: pid = attach(process, gdb_args=gdb_args) os.waitpid(pid, 0) return elf.corefile.Core(corefile_path) def version(program='gdb'): """Gets the current GDB version. Note: Requires that GDB version meets the following format: ``GNU gdb (GDB) 7.12`` Returns: tuple: A tuple containing the version numbers Example: >>> (7,0) <= gdb.version() <= (8,0) True """ program = misc.which(program) expr = r'([0-9]+\.?)+' with tubes.process.process([program, '--version'], level='error') as gdb: version = gdb.recvline() versions = re.search(expr, version).group() return tuple(map(int, versions.split('.')))
[]
[]
[]
[]
[]
python
0
0
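Editor's note (not part of the dataset record above): the pwnlib `gdb` excerpt ends with a `version()` helper that scrapes the first line of `gdb --version` (e.g. `GNU gdb (GDB) 7.12`) and compares it as an integer tuple, which is how the `corefile()` path decides whether to warn about pre-7.11 core dumps. The sketch below re-implements just that parse-and-compare step in Go, assuming the version line has the documented format; the helper names (`parseVersion`, `less`) are invented for illustration and are not part of pwntools.

```go
// A minimal sketch of the version-tuple parse/compare used by gdb.version()
// and the (7,11) core-dump check in the record above.
package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

// parseVersion extracts the dotted version number from a line such as
// "GNU gdb (GDB) 7.12" and returns it as a slice of integers, e.g. [7 12].
func parseVersion(line string) ([]int, error) {
	m := regexp.MustCompile(`(\d+\.)*\d+`).FindString(line)
	if m == "" {
		return nil, fmt.Errorf("no version number in %q", line)
	}
	var out []int
	for _, part := range strings.Split(m, ".") {
		n, err := strconv.Atoi(part)
		if err != nil {
			return nil, err
		}
		out = append(out, n)
	}
	return out, nil
}

// less compares two version slices element by element, the same way Python
// compares tuples in `version() < (7, 11)`.
func less(a, b []int) bool {
	for i := 0; i < len(a) && i < len(b); i++ {
		if a[i] != b[i] {
			return a[i] < b[i]
		}
	}
	return len(a) < len(b)
}

func main() {
	v, err := parseVersion("GNU gdb (GDB) 7.12")
	if err != nil {
		panic(err)
	}
	fmt.Println(v, "older than 7.11?", less(v, []int{7, 11})) // [7 12] older than 7.11? false
}
```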
conn_test.go
package pgx_test import ( "context" "os" "strings" "sync" "testing" "time" "github.com/jackc/pgconn" "github.com/jackc/pgconn/stmtcache" "github.com/jackc/pgtype" "github.com/nappspt/schemapgx/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestCrateDBConnect(t *testing.T) { t.Parallel() connString := os.Getenv("PGX_TEST_CRATEDB_CONN_STRING") if connString == "" { t.Skipf("Skipping due to missing environment variable %v", "PGX_TEST_CRATEDB_CONN_STRING") } conn, err := pgx.Connect(context.Background(), connString) require.Nil(t, err) defer closeConn(t, conn) assert.Equal(t, connString, conn.Config().ConnString()) var result int err = conn.QueryRow(context.Background(), "select 1 +1").Scan(&result) if err != nil { t.Fatalf("QueryRow Scan unexpectedly failed: %v", err) } if result != 2 { t.Errorf("bad result: %d", result) } } func TestConnect(t *testing.T) { t.Parallel() connString := os.Getenv("PGX_TEST_DATABASE") config := mustParseConfig(t, connString) conn, err := pgx.ConnectConfig(context.Background(), config) if err != nil { t.Fatalf("Unable to establish connection: %v", err) } assertConfigsEqual(t, config, conn.Config(), "Conn.Config() returns original config") var currentDB string err = conn.QueryRow(context.Background(), "select current_database()").Scan(&currentDB) if err != nil { t.Fatalf("QueryRow Scan unexpectedly failed: %v", err) } if currentDB != config.Config.Database { t.Errorf("Did not connect to specified database (%v)", config.Config.Database) } var user string err = conn.QueryRow(context.Background(), "select current_user").Scan(&user) if err != nil { t.Fatalf("QueryRow Scan unexpectedly failed: %v", err) } if user != config.Config.User { t.Errorf("Did not connect as specified user (%v)", config.Config.User) } err = conn.Close(context.Background()) if err != nil { t.Fatal("Unable to close connection") } } func TestConnectWithPreferSimpleProtocol(t *testing.T) { t.Parallel() connConfig := mustParseConfig(t, os.Getenv("PGX_TEST_DATABASE")) connConfig.PreferSimpleProtocol = true conn := mustConnect(t, connConfig) defer closeConn(t, conn) // If simple protocol is used we should be able to correctly scan the result // into a pgtype.Text as the integer will have been encoded in text. 
var s pgtype.Text err := conn.QueryRow(context.Background(), "select $1::int4", 42).Scan(&s) if err != nil { t.Fatal(err) } if s.Get() != "42" { t.Fatalf(`expected "42", got %v`, s) } ensureConnValid(t, conn) } func TestConnectConfigRequiresConnConfigFromParseConfig(t *testing.T) { config := &pgx.ConnConfig{} require.PanicsWithValue(t, "config must be created by ParseConfig", func() { pgx.ConnectConfig(context.Background(), config) }) } func TestConfigContainsConnStr(t *testing.T) { connStr := os.Getenv("PGX_TEST_DATABASE") config, err := pgx.ParseConfig(connStr) require.NoError(t, err) assert.Equal(t, connStr, config.ConnString()) } func TestConfigCopyReturnsEqualConfig(t *testing.T) { connString := "postgres://jack:secret@localhost:5432/mydb?application_name=pgxtest&search_path=myschema&connect_timeout=5" original, err := pgx.ParseConfig(connString) require.NoError(t, err) copied := original.Copy() assertConfigsEqual(t, original, copied, t.Name()) } func TestConfigCopyCanBeUsedToConnect(t *testing.T) { connString := os.Getenv("PGX_TEST_DATABASE") original, err := pgx.ParseConfig(connString) require.NoError(t, err) copied := original.Copy() assert.NotPanics(t, func() { _, err = pgx.ConnectConfig(context.Background(), copied) }) assert.NoError(t, err) } func TestParseConfigExtractsStatementCacheOptions(t *testing.T) { t.Parallel() config, err := pgx.ParseConfig("statement_cache_capacity=0") require.NoError(t, err) require.Nil(t, config.BuildStatementCache) config, err = pgx.ParseConfig("statement_cache_capacity=42") require.NoError(t, err) require.NotNil(t, config.BuildStatementCache) c := config.BuildStatementCache(nil) require.NotNil(t, c) require.Equal(t, 42, c.Cap()) require.Equal(t, stmtcache.ModePrepare, c.Mode()) config, err = pgx.ParseConfig("statement_cache_capacity=42 statement_cache_mode=prepare") require.NoError(t, err) require.NotNil(t, config.BuildStatementCache) c = config.BuildStatementCache(nil) require.NotNil(t, c) require.Equal(t, 42, c.Cap()) require.Equal(t, stmtcache.ModePrepare, c.Mode()) config, err = pgx.ParseConfig("statement_cache_capacity=42 statement_cache_mode=describe") require.NoError(t, err) require.NotNil(t, config.BuildStatementCache) c = config.BuildStatementCache(nil) require.NotNil(t, c) require.Equal(t, 42, c.Cap()) require.Equal(t, stmtcache.ModeDescribe, c.Mode()) } func TestParseConfigExtractsPreferSimpleProtocol(t *testing.T) { t.Parallel() for _, tt := range []struct { connString string preferSimpleProtocol bool }{ {"", false}, {"prefer_simple_protocol=false", false}, {"prefer_simple_protocol=0", false}, {"prefer_simple_protocol=true", true}, {"prefer_simple_protocol=1", true}, } { config, err := pgx.ParseConfig(tt.connString) require.NoError(t, err) require.Equalf(t, tt.preferSimpleProtocol, config.PreferSimpleProtocol, "connString: `%s`", tt.connString) require.Empty(t, config.RuntimeParams["prefer_simple_protocol"]) } } func TestExec(t *testing.T) { t.Parallel() testWithAndWithoutPreferSimpleProtocol(t, func(t *testing.T, conn *pgx.Conn) { if results := mustExec(t, conn, "create temporary table foo(id integer primary key);"); string(results) != "CREATE TABLE" { t.Error("Unexpected results from Exec") } // Accept parameters if results := mustExec(t, conn, "insert into foo(id) values($1)", 1); string(results) != "INSERT 0 1" { t.Errorf("Unexpected results from Exec: %v", results) } if results := mustExec(t, conn, "drop table foo;"); string(results) != "DROP TABLE" { t.Error("Unexpected results from Exec") } // Multiple statements can be executed 
-- last command tag is returned if results := mustExec(t, conn, "create temporary table foo(id serial primary key); drop table foo;"); string(results) != "DROP TABLE" { t.Error("Unexpected results from Exec") } // Can execute longer SQL strings than sharedBufferSize if results := mustExec(t, conn, strings.Repeat("select 42; ", 1000)); string(results) != "SELECT 1" { t.Errorf("Unexpected results from Exec: %v", results) } // Exec no-op which does not return a command tag if results := mustExec(t, conn, "--;"); string(results) != "" { t.Errorf("Unexpected results from Exec: %v", results) } }) } func TestExecFailure(t *testing.T) { t.Parallel() testWithAndWithoutPreferSimpleProtocol(t, func(t *testing.T, conn *pgx.Conn) { if _, err := conn.Exec(context.Background(), "selct;"); err == nil { t.Fatal("Expected SQL syntax error") } rows, _ := conn.Query(context.Background(), "select 1") rows.Close() if rows.Err() != nil { t.Fatalf("Exec failure appears to have broken connection: %v", rows.Err()) } }) } func TestExecFailureWithArguments(t *testing.T) { t.Parallel() testWithAndWithoutPreferSimpleProtocol(t, func(t *testing.T, conn *pgx.Conn) { _, err := conn.Exec(context.Background(), "selct $1;", 1) if err == nil { t.Fatal("Expected SQL syntax error") } assert.False(t, pgconn.SafeToRetry(err)) _, err = conn.Exec(context.Background(), "select $1::varchar(1);", "1", "2") require.Error(t, err) }) } func TestExecContextWithoutCancelation(t *testing.T) { t.Parallel() testWithAndWithoutPreferSimpleProtocol(t, func(t *testing.T, conn *pgx.Conn) { ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() commandTag, err := conn.Exec(ctx, "create temporary table foo(id integer primary key);") if err != nil { t.Fatal(err) } if string(commandTag) != "CREATE TABLE" { t.Fatalf("Unexpected results from Exec: %v", commandTag) } assert.False(t, pgconn.SafeToRetry(err)) }) } func TestExecContextFailureWithoutCancelation(t *testing.T) { t.Parallel() testWithAndWithoutPreferSimpleProtocol(t, func(t *testing.T, conn *pgx.Conn) { ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() _, err := conn.Exec(ctx, "selct;") if err == nil { t.Fatal("Expected SQL syntax error") } assert.False(t, pgconn.SafeToRetry(err)) rows, _ := conn.Query(context.Background(), "select 1") rows.Close() if rows.Err() != nil { t.Fatalf("ExecEx failure appears to have broken connection: %v", rows.Err()) } assert.False(t, pgconn.SafeToRetry(err)) }) } func TestExecContextFailureWithoutCancelationWithArguments(t *testing.T) { t.Parallel() testWithAndWithoutPreferSimpleProtocol(t, func(t *testing.T, conn *pgx.Conn) { ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() _, err := conn.Exec(ctx, "selct $1;", 1) if err == nil { t.Fatal("Expected SQL syntax error") } assert.False(t, pgconn.SafeToRetry(err)) }) } func TestExecFailureCloseBefore(t *testing.T) { t.Parallel() conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) closeConn(t, conn) _, err := conn.Exec(context.Background(), "select 1") require.Error(t, err) assert.True(t, pgconn.SafeToRetry(err)) } func TestExecStatementCacheModes(t *testing.T) { t.Parallel() config := mustParseConfig(t, os.Getenv("PGX_TEST_DATABASE")) tests := []struct { name string buildStatementCache pgx.BuildStatementCacheFunc }{ { name: "disabled", buildStatementCache: nil, }, { name: "prepare", buildStatementCache: func(conn *pgconn.PgConn) stmtcache.Cache { return stmtcache.New(conn, stmtcache.ModePrepare, 32) }, }, { name: "describe", 
buildStatementCache: func(conn *pgconn.PgConn) stmtcache.Cache { return stmtcache.New(conn, stmtcache.ModeDescribe, 32) }, }, } for _, tt := range tests { func() { config.BuildStatementCache = tt.buildStatementCache conn := mustConnect(t, config) defer closeConn(t, conn) commandTag, err := conn.Exec(context.Background(), "select 1") assert.NoError(t, err, tt.name) assert.Equal(t, "SELECT 1", string(commandTag), tt.name) commandTag, err = conn.Exec(context.Background(), "select 1 union all select 1") assert.NoError(t, err, tt.name) assert.Equal(t, "SELECT 2", string(commandTag), tt.name) commandTag, err = conn.Exec(context.Background(), "select 1") assert.NoError(t, err, tt.name) assert.Equal(t, "SELECT 1", string(commandTag), tt.name) ensureConnValid(t, conn) }() } } func TestExecPerQuerySimpleProtocol(t *testing.T) { t.Parallel() conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) defer closeConn(t, conn) ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() commandTag, err := conn.Exec(ctx, "create temporary table foo(name varchar primary key);") if err != nil { t.Fatal(err) } if string(commandTag) != "CREATE TABLE" { t.Fatalf("Unexpected results from Exec: %v", commandTag) } commandTag, err = conn.Exec(ctx, "insert into foo(name) values($1);", pgx.QuerySimpleProtocol(true), "bar'; drop table foo;--", ) if err != nil { t.Fatal(err) } if string(commandTag) != "INSERT 0 1" { t.Fatalf("Unexpected results from Exec: %v", commandTag) } } func TestPrepare(t *testing.T) { t.Parallel() conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) defer closeConn(t, conn) _, err := conn.Prepare(context.Background(), "test", "select $1::varchar") if err != nil { t.Errorf("Unable to prepare statement: %v", err) return } var s string err = conn.QueryRow(context.Background(), "test", "hello").Scan(&s) if err != nil { t.Errorf("Executing prepared statement failed: %v", err) } if s != "hello" { t.Errorf("Prepared statement did not return expected value: %v", s) } err = conn.Deallocate(context.Background(), "test") if err != nil { t.Errorf("conn.Deallocate failed: %v", err) } // Create another prepared statement to ensure Deallocate left the connection // in a working state and that we can reuse the prepared statement name. _, err = conn.Prepare(context.Background(), "test", "select $1::integer") if err != nil { t.Errorf("Unable to prepare statement: %v", err) return } var n int32 err = conn.QueryRow(context.Background(), "test", int32(1)).Scan(&n) if err != nil { t.Errorf("Executing prepared statement failed: %v", err) } if n != 1 { t.Errorf("Prepared statement did not return expected value: %v", s) } err = conn.Deallocate(context.Background(), "test") if err != nil { t.Errorf("conn.Deallocate failed: %v", err) } } func TestPrepareBadSQLFailure(t *testing.T) { t.Parallel() conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) defer closeConn(t, conn) if _, err := conn.Prepare(context.Background(), "badSQL", "select foo"); err == nil { t.Fatal("Prepare should have failed with syntax error") } ensureConnValid(t, conn) } func TestPrepareIdempotency(t *testing.T) { t.Parallel() conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) defer closeConn(t, conn) for i := 0; i < 2; i++ { _, err := conn.Prepare(context.Background(), "test", "select 42::integer") if err != nil { t.Fatalf("%d. Unable to prepare statement: %v", i, err) } var n int32 err = conn.QueryRow(context.Background(), "test").Scan(&n) if err != nil { t.Errorf("%d. 
Executing prepared statement failed: %v", i, err) } if n != int32(42) { t.Errorf("%d. Prepared statement did not return expected value: %v", i, n) } } _, err := conn.Prepare(context.Background(), "test", "select 'fail'::varchar") if err == nil { t.Fatalf("Prepare statement with same name but different SQL should have failed but it didn't") return } } func TestListenNotify(t *testing.T) { t.Parallel() listener := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) defer closeConn(t, listener) if listener.PgConn().ParameterStatus("crdb_version") != "" { t.Skip("Server does not support LISTEN / NOTIFY (https://github.com/cockroachdb/cockroach/issues/41522)") } mustExec(t, listener, "listen chat") notifier := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) defer closeConn(t, notifier) mustExec(t, notifier, "notify chat") // when notification is waiting on the socket to be read notification, err := listener.WaitForNotification(context.Background()) require.NoError(t, err) assert.Equal(t, "chat", notification.Channel) // when notification has already been read during previous query mustExec(t, notifier, "notify chat") rows, _ := listener.Query(context.Background(), "select 1") rows.Close() require.NoError(t, rows.Err()) ctx, cancelFn := context.WithCancel(context.Background()) cancelFn() notification, err = listener.WaitForNotification(ctx) require.NoError(t, err) assert.Equal(t, "chat", notification.Channel) // when timeout occurs ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() notification, err = listener.WaitForNotification(ctx) assert.True(t, pgconn.Timeout(err)) // listener can listen again after a timeout mustExec(t, notifier, "notify chat") notification, err = listener.WaitForNotification(context.Background()) require.NoError(t, err) assert.Equal(t, "chat", notification.Channel) } func TestListenNotifyWhileBusyIsSafe(t *testing.T) { t.Parallel() func() { conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) defer closeConn(t, conn) skipCockroachDB(t, conn, "Server does not support LISTEN / NOTIFY (https://github.com/cockroachdb/cockroach/issues/41522)") }() listenerDone := make(chan bool) notifierDone := make(chan bool) go func() { conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) defer closeConn(t, conn) defer func() { listenerDone <- true }() mustExec(t, conn, "listen busysafe") for i := 0; i < 5000; i++ { var sum int32 var rowCount int32 rows, err := conn.Query(context.Background(), "select generate_series(1,$1)", 100) if err != nil { t.Errorf("conn.Query failed: %v", err) return } for rows.Next() { var n int32 if err := rows.Scan(&n); err != nil { t.Errorf("Row scan failed: %v", err) return } sum += n rowCount++ } if rows.Err() != nil { t.Errorf("conn.Query failed: %v", err) return } if sum != 5050 { t.Errorf("Wrong rows sum: %v", sum) return } if rowCount != 100 { t.Errorf("Wrong number of rows: %v", rowCount) return } time.Sleep(1 * time.Microsecond) } }() go func() { conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) defer closeConn(t, conn) defer func() { notifierDone <- true }() for i := 0; i < 100000; i++ { mustExec(t, conn, "notify busysafe, 'hello'") time.Sleep(1 * time.Microsecond) } }() <-listenerDone <-notifierDone } func TestListenNotifySelfNotification(t *testing.T) { t.Parallel() conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) defer closeConn(t, conn) skipCockroachDB(t, conn, "Server does not support LISTEN / NOTIFY (https://github.com/cockroachdb/cockroach/issues/41522)") 
mustExec(t, conn, "listen self") // Notify self and WaitForNotification immediately mustExec(t, conn, "notify self") ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() notification, err := conn.WaitForNotification(ctx) require.NoError(t, err) assert.Equal(t, "self", notification.Channel) // Notify self and do something else before WaitForNotification mustExec(t, conn, "notify self") rows, _ := conn.Query(context.Background(), "select 1") rows.Close() if rows.Err() != nil { t.Fatalf("Unexpected error on Query: %v", rows.Err()) } ctx, cncl := context.WithTimeout(context.Background(), time.Second) defer cncl() notification, err = conn.WaitForNotification(ctx) require.NoError(t, err) assert.Equal(t, "self", notification.Channel) } func TestFatalRxError(t *testing.T) { t.Parallel() conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) defer closeConn(t, conn) skipCockroachDB(t, conn, "Server does not support pg_terminate_backend() (https://github.com/cockroachdb/cockroach/issues/35897)") var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() var n int32 var s string err := conn.QueryRow(context.Background(), "select 1::int4, pg_sleep(10)::varchar").Scan(&n, &s) if pgErr, ok := err.(*pgconn.PgError); ok && pgErr.Severity == "FATAL" { } else { t.Errorf("Expected QueryRow Scan to return fatal PgError, but instead received %v", err) return } }() otherConn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) defer otherConn.Close(context.Background()) if _, err := otherConn.Exec(context.Background(), "select pg_terminate_backend($1)", conn.PgConn().PID()); err != nil { t.Fatalf("Unable to kill backend PostgreSQL process: %v", err) } wg.Wait() if !conn.IsClosed() { t.Fatal("Connection should be closed") } } func TestFatalTxError(t *testing.T) { t.Parallel() // Run timing sensitive test many times for i := 0; i < 50; i++ { func() { conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) defer closeConn(t, conn) skipCockroachDB(t, conn, "Server does not support pg_terminate_backend() (https://github.com/cockroachdb/cockroach/issues/35897)") otherConn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) defer otherConn.Close(context.Background()) _, err := otherConn.Exec(context.Background(), "select pg_terminate_backend($1)", conn.PgConn().PID()) if err != nil { t.Fatalf("Unable to kill backend PostgreSQL process: %v", err) } err = conn.QueryRow(context.Background(), "select 1").Scan(nil) if err == nil { t.Fatal("Expected error but none occurred") } if !conn.IsClosed() { t.Fatalf("Connection should be closed but isn't. 
Previous Query err: %v", err) } }() } } func TestInsertBoolArray(t *testing.T) { t.Parallel() testWithAndWithoutPreferSimpleProtocol(t, func(t *testing.T, conn *pgx.Conn) { if results := mustExec(t, conn, "create temporary table foo(spice bool[]);"); string(results) != "CREATE TABLE" { t.Error("Unexpected results from Exec") } // Accept parameters if results := mustExec(t, conn, "insert into foo(spice) values($1)", []bool{true, false, true}); string(results) != "INSERT 0 1" { t.Errorf("Unexpected results from Exec: %v", results) } }) } func TestInsertTimestampArray(t *testing.T) { t.Parallel() testWithAndWithoutPreferSimpleProtocol(t, func(t *testing.T, conn *pgx.Conn) { if results := mustExec(t, conn, "create temporary table foo(spice timestamp[]);"); string(results) != "CREATE TABLE" { t.Error("Unexpected results from Exec") } // Accept parameters if results := mustExec(t, conn, "insert into foo(spice) values($1)", []time.Time{time.Unix(1419143667, 0), time.Unix(1419143672, 0)}); string(results) != "INSERT 0 1" { t.Errorf("Unexpected results from Exec: %v", results) } }) } type testLog struct { lvl pgx.LogLevel msg string data map[string]interface{} } type testLogger struct { logs []testLog } func (l *testLogger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) { data["ctxdata"] = ctx.Value("ctxdata") l.logs = append(l.logs, testLog{lvl: level, msg: msg, data: data}) } func TestLogPassesContext(t *testing.T) { t.Parallel() l1 := &testLogger{} config := mustParseConfig(t, os.Getenv("PGX_TEST_DATABASE")) config.Logger = l1 conn := mustConnect(t, config) defer closeConn(t, conn) l1.logs = l1.logs[0:0] // Clear logs written when establishing connection ctx := context.WithValue(context.Background(), "ctxdata", "foo") if _, err := conn.Exec(ctx, ";"); err != nil { t.Fatal(err) } if len(l1.logs) != 1 { t.Fatal("Expected logger to be called once, but it wasn't") } if l1.logs[0].data["ctxdata"] != "foo" { t.Fatal("Expected context data to be passed to logger, but it wasn't") } } func TestIdentifierSanitize(t *testing.T) { t.Parallel() tests := []struct { ident pgx.Identifier expected string }{ { ident: pgx.Identifier{`foo`}, expected: `"foo"`, }, { ident: pgx.Identifier{`select`}, expected: `"select"`, }, { ident: pgx.Identifier{`foo`, `bar`}, expected: `"foo"."bar"`, }, { ident: pgx.Identifier{`you should " not do this`}, expected: `"you should "" not do this"`, }, { ident: pgx.Identifier{`you should " not do this`, `please don't`}, expected: `"you should "" not do this"."please don't"`, }, { ident: pgx.Identifier{`you should ` + string([]byte{0}) + `not do this`}, expected: `"you should not do this"`, }, } for i, tt := range tests { qval := tt.ident.Sanitize() if qval != tt.expected { t.Errorf("%d. 
Expected Sanitize %v to return %v but it was %v", i, tt.ident, tt.expected, qval) } } } func TestConnInitConnInfo(t *testing.T) { conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) defer closeConn(t, conn) // spot check that the standard postgres type names aren't qualified nameOIDs := map[string]uint32{ "_int8": pgtype.Int8ArrayOID, "int8": pgtype.Int8OID, "json": pgtype.JSONOID, "text": pgtype.TextOID, } for name, oid := range nameOIDs { dtByName, ok := conn.ConnInfo().DataTypeForName(name) if !ok { t.Fatalf("Expected type named %v to be present", name) } dtByOID, ok := conn.ConnInfo().DataTypeForOID(oid) if !ok { t.Fatalf("Expected type OID %v to be present", oid) } if dtByName != dtByOID { t.Fatalf("Expected type named %v to be the same as type OID %v", name, oid) } } ensureConnValid(t, conn) } func TestUnregisteredTypeUsableAsStringArgumentAndBaseResult(t *testing.T) { testWithAndWithoutPreferSimpleProtocol(t, func(t *testing.T, conn *pgx.Conn) { skipCockroachDB(t, conn, "Server does support domain types (https://github.com/cockroachdb/cockroach/issues/27796)") var n uint64 err := conn.QueryRow(context.Background(), "select $1::uint64", "42").Scan(&n) if err != nil { t.Fatal(err) } if n != 42 { t.Fatalf("Expected n to be 42, but was %v", n) } }) } func TestDomainType(t *testing.T) { testWithAndWithoutPreferSimpleProtocol(t, func(t *testing.T, conn *pgx.Conn) { skipCockroachDB(t, conn, "Server does support domain types (https://github.com/cockroachdb/cockroach/issues/27796)") var n uint64 // Domain type uint64 is a PostgreSQL domain of underlying type numeric. err := conn.QueryRow(context.Background(), "select $1::uint64", uint64(24)).Scan(&n) require.NoError(t, err) // A string can be used. But a string cannot be the result because the describe result from the PostgreSQL server gives // the underlying type of numeric. err = conn.QueryRow(context.Background(), "select $1::uint64", "42").Scan(&n) if err != nil { t.Fatal(err) } if n != 42 { t.Fatalf("Expected n to be 42, but was %v", n) } var uint64OID uint32 err = conn.QueryRow(context.Background(), "select t.oid from pg_type t where t.typname='uint64';").Scan(&uint64OID) if err != nil { t.Fatalf("did not find uint64 OID, %v", err) } conn.ConnInfo().RegisterDataType(pgtype.DataType{Value: &pgtype.Numeric{}, Name: "uint64", OID: uint64OID}) // String is still an acceptable argument after registration err = conn.QueryRow(context.Background(), "select $1::uint64", "7").Scan(&n) if err != nil { t.Fatal(err) } if n != 7 { t.Fatalf("Expected n to be 7, but was %v", n) } // But a uint64 is acceptable err = conn.QueryRow(context.Background(), "select $1::uint64", uint64(24)).Scan(&n) if err != nil { t.Fatal(err) } if n != 24 { t.Fatalf("Expected n to be 24, but was %v", n) } }) } func TestStmtCacheInvalidationConn(t *testing.T) { ctx := context.Background() conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) defer closeConn(t, conn) // create a table and fill it with some data _, err := conn.Exec(ctx, ` DROP TABLE IF EXISTS drop_cols; CREATE TABLE drop_cols ( id SERIAL PRIMARY KEY NOT NULL, f1 int NOT NULL, f2 int NOT NULL ); `) require.NoError(t, err) _, err = conn.Exec(ctx, "INSERT INTO drop_cols (f1, f2) VALUES (1, 2)") require.NoError(t, err) getSQL := "SELECT * FROM drop_cols WHERE id = $1" // This query will populate the statement cache. We don't care about the result. 
rows, err := conn.Query(ctx, getSQL, 1) require.NoError(t, err) rows.Close() // Now, change the schema of the table out from under the statement, making it invalid. _, err = conn.Exec(ctx, "ALTER TABLE drop_cols DROP COLUMN f1") require.NoError(t, err) // We must get an error the first time we try to re-execute a bad statement. // It is up to the application to determine if it wants to try again. We punt to // the application because there is no clear recovery path in the case of failed transactions // or batch operations and because automatic retry is tricky and we don't want to get // it wrong at such an importaint layer of the stack. rows, err = conn.Query(ctx, getSQL, 1) require.NoError(t, err) rows.Next() nextErr := rows.Err() rows.Close() for _, err := range []error{nextErr, rows.Err()} { if err == nil { t.Fatal("expected InvalidCachedStatementPlanError: no error") } if !strings.Contains(err.Error(), "cached plan must not change result type") { t.Fatalf("expected InvalidCachedStatementPlanError, got: %s", err.Error()) } } // On retry, the statement should have been flushed from the cache. rows, err = conn.Query(ctx, getSQL, 1) require.NoError(t, err) rows.Next() err = rows.Err() require.NoError(t, err) rows.Close() require.NoError(t, rows.Err()) ensureConnValid(t, conn) } func TestStmtCacheInvalidationTx(t *testing.T) { ctx := context.Background() conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE")) defer closeConn(t, conn) // create a table and fill it with some data _, err := conn.Exec(ctx, ` DROP TABLE IF EXISTS drop_cols; CREATE TABLE drop_cols ( id SERIAL PRIMARY KEY NOT NULL, f1 int NOT NULL, f2 int NOT NULL ); `) require.NoError(t, err) _, err = conn.Exec(ctx, "INSERT INTO drop_cols (f1, f2) VALUES (1, 2)") require.NoError(t, err) tx, err := conn.Begin(ctx) require.NoError(t, err) getSQL := "SELECT * FROM drop_cols WHERE id = $1" // This query will populate the statement cache. We don't care about the result. rows, err := tx.Query(ctx, getSQL, 1) require.NoError(t, err) rows.Close() // Now, change the schema of the table out from under the statement, making it invalid. _, err = tx.Exec(ctx, "ALTER TABLE drop_cols DROP COLUMN f1") require.NoError(t, err) // We must get an error the first time we try to re-execute a bad statement. // It is up to the application to determine if it wants to try again. We punt to // the application because there is no clear recovery path in the case of failed transactions // or batch operations and because automatic retry is tricky and we don't want to get // it wrong at such an importaint layer of the stack. rows, err = tx.Query(ctx, getSQL, 1) require.NoError(t, err) rows.Next() nextErr := rows.Err() rows.Close() for _, err := range []error{nextErr, rows.Err()} { if err == nil { t.Fatal("expected InvalidCachedStatementPlanError: no error") } if !strings.Contains(err.Error(), "cached plan must not change result type") { t.Fatalf("expected InvalidCachedStatementPlanError, got: %s", err.Error()) } } rows, err = tx.Query(ctx, getSQL, 1) require.NoError(t, err) // error does not pop up immediately rows.Next() err = rows.Err() // Retries within the same transaction are errors (really anything except a rollbakc // will be an error in this transaction). 
require.Error(t, err) rows.Close() err = tx.Rollback(ctx) require.NoError(t, err) // once we've rolled back, retries will work rows, err = conn.Query(ctx, getSQL, 1) require.NoError(t, err) rows.Next() err = rows.Err() require.NoError(t, err) rows.Close() ensureConnValid(t, conn) } func TestInsertDurationInterval(t *testing.T) { testWithAndWithoutPreferSimpleProtocol(t, func(t *testing.T, conn *pgx.Conn) { _, err := conn.Exec(context.Background(), "create temporary table t(duration INTERVAL(0) NOT NULL)") require.NoError(t, err) result, err := conn.Exec(context.Background(), "insert into t(duration) values($1)", time.Minute) require.NoError(t, err) n := result.RowsAffected() require.EqualValues(t, 1, n) }) }
[ "\"PGX_TEST_CRATEDB_CONN_STRING\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"", "\"PGX_TEST_DATABASE\"" ]
[]
[ "PGX_TEST_CRATEDB_CONN_STRING", "PGX_TEST_DATABASE" ]
[]
["PGX_TEST_CRATEDB_CONN_STRING", "PGX_TEST_DATABASE"]
go
2
0
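Editor's note (not part of the dataset record above): `TestStmtCacheInvalidationConn` and `TestStmtCacheInvalidationTx` encode a contract as much as a behaviour: after a schema change invalidates a cached statement, the first re-execution must surface `cached plan must not change result type`, and retrying is left to the application (and only succeeds outside the failed transaction). The sketch below shows one hedged way an application could wrap that retry. It assumes the upstream `github.com/jackc/pgx/v4` import path rather than the fork path used in the record, and `RunWithOneRetry` is an invented helper, not part of pgx.

```go
// A minimal sketch, assuming pgx v4: run a query, and if it fails only because
// the cached statement plan was invalidated, run it exactly once more.
package pgxretry

import (
	"context"
	"strings"

	pgx "github.com/jackc/pgx/v4"
)

// runOnce executes the query and drains the rows so that errors which only
// surface during iteration (as in the tests above) are observed via Rows.Err.
func runOnce(ctx context.Context, conn *pgx.Conn, sql string, args ...interface{}) error {
	rows, err := conn.Query(ctx, sql, args...)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		// A real caller would Scan each row here.
	}
	return rows.Err()
}

// RunWithOneRetry retries exactly once when the cached plan was invalidated by
// a schema change; any other error is returned unchanged.
func RunWithOneRetry(ctx context.Context, conn *pgx.Conn, sql string, args ...interface{}) error {
	err := runOnce(ctx, conn, sql, args...)
	if err != nil && strings.Contains(err.Error(), "cached plan must not change result type") {
		return runOnce(ctx, conn, sql, args...)
	}
	return err
}
```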
service/runner_docker.go
// // DISCLAIMER // // Copyright 2017 ArangoDB GmbH, Cologne, Germany // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Copyright holder is ArangoDB GmbH, Cologne, Germany // // Author Ewout Prangsma // package service import ( "context" "fmt" "io" "io/ioutil" "net" "os" "path/filepath" "strconv" "strings" "sync" "time" "github.com/arangodb-helper/arangodb/pkg/definitions" docker "github.com/fsouza/go-dockerclient" "github.com/pkg/errors" "github.com/rs/zerolog" ) const ( stopContainerTimeout = 60 // Seconds before a container is killed (after graceful stop) containerFileName = "CONTAINER" createdByKey = "created-by" createdByValue = "arangodb-starter" dockerDataDir = "/data" ) // NewDockerRunner creates a runner that starts processes in a docker container. func NewDockerRunner(log zerolog.Logger, endpoint, arangodImage, arangoSyncImage string, imagePullPolicy ImagePullPolicy, user, volumesFrom string, gcDelay time.Duration, networkMode string, privileged, tty bool) (Runner, error) { os.Setenv("DOCKER_HOST", endpoint) client, err := docker.NewClientFromEnv() if err != nil { return nil, maskAny(err) } return &dockerRunner{ log: log, client: client, arangodImage: arangodImage, arangoSyncImage: arangoSyncImage, imagePullPolicy: imagePullPolicy, user: user, volumesFrom: volumesFrom, containerIDs: make(map[string]time.Time), gcDelay: gcDelay, networkMode: networkMode, privileged: privileged, tty: tty, }, nil } // dockerRunner implements a Runner that starts processes in a docker container. type dockerRunner struct { log zerolog.Logger client *docker.Client arangodImage string arangoSyncImage string imagePullPolicy ImagePullPolicy user string volumesFrom string mutex sync.Mutex containerIDs map[string]time.Time gcOnce sync.Once gcDelay time.Duration networkMode string privileged bool tty bool } type dockerContainer struct { log zerolog.Logger client *docker.Client container *docker.Container waiter docker.CloseWaiter } func (r *dockerRunner) GetContainerDir(hostDir, defaultContainerDir string) string { if r.volumesFrom != "" { return hostDir } return defaultContainerDir } func (p *dockerContainer) WaitCh() <-chan struct{} { c := make(chan struct{}) go func() { defer close(c) p.Wait() }() return c } // GetRunningServer checks if there is already a server process running in the given server directory. // If that is the case, its process is returned. // Otherwise nil is returned. 
func (r *dockerRunner) GetRunningServer(serverDir string) (Process, error) { containerContent, err := ioutil.ReadFile(filepath.Join(serverDir, containerFileName)) if os.IsNotExist(err) { return nil, nil } else if err != nil { return nil, maskAny(err) } id := string(containerContent) // We found a CONTAINER file, see if this container is still running c, err := r.client.InspectContainer(id) if err != nil { // Container cannot be inspected, assume it no longer exists return nil, nil } // Container can be inspected, check its state if !c.State.Running { // Container is not running return nil, nil } r.recordContainerID(c.ID) // Start gc (once) r.startGC() // Return container return &dockerContainer{ client: r.client, container: c, }, nil } func (r *dockerRunner) Start(ctx context.Context, processType definitions.ProcessType, command string, args []string, volumes []Volume, ports []int, containerName, serverDir string, output io.Writer) (Process, error) { // Start gc (once) r.startGC() // Select image var image string switch processType { case definitions.ProcessTypeArangod: image = r.arangodImage case definitions.ProcessTypeArangoSync: image = r.arangoSyncImage default: return nil, maskAny(fmt.Errorf("Unknown process type: %s", processType)) } // Pull docker image switch r.imagePullPolicy { case ImagePullPolicyAlways: if err := r.pullImage(ctx, image); err != nil { return nil, maskAny(err) } case ImagePullPolicyIfNotPresent: if found, err := r.imageExists(ctx, image); err != nil { return nil, maskAny(err) } else if !found { if err := r.pullImage(ctx, image); err != nil { return nil, maskAny(err) } } case ImagePullPolicyNever: if found, err := r.imageExists(ctx, image); err != nil { return nil, maskAny(err) } else if !found { return nil, maskAny(fmt.Errorf("Image '%s' not found", image)) } } // Ensure container name is valid containerName = strings.Replace(containerName, ":", "", -1) var result Process op := func() error { // Make sure the container is really gone r.log.Debug().Msgf("Removing container '%s' (if it exists)", containerName) if err := r.client.RemoveContainer(docker.RemoveContainerOptions{ ID: containerName, Force: true, }); err != nil && !isNoSuchContainer(err) { r.log.Error().Err(err).Msgf("Failed to remove container '%s'", containerName) } // Try starting it now p, err := r.start(image, command, args, volumes, ports, containerName, serverDir, output) if err != nil { return maskAny(err) } result = p return nil } if err := retry(ctx, op, time.Minute*2); err != nil { return nil, maskAny(err) } return result, nil } // startGC ensures GC is started (only once) func (r *dockerRunner) startGC() { // Start gc (once) r.gcOnce.Do(func() { go r.gc() }) } // Try to start a command with given arguments func (r *dockerRunner) start(image string, command string, args []string, volumes []Volume, ports []int, containerName, serverDir string, output io.Writer) (Process, error) { env := make([]string, 0, 1) licenseKey := os.Getenv("ARANGO_LICENSE_KEY") if licenseKey != "" { env = append(env, "ARANGO_LICENSE_KEY="+licenseKey) } opts := docker.CreateContainerOptions{ Name: containerName, Config: &docker.Config{ Image: image, Entrypoint: []string{command}, Cmd: args, Env: env, Tty: r.tty, AttachStdout: output != nil, AttachStderr: output != nil, User: r.user, ExposedPorts: make(map[docker.Port]struct{}), Labels: map[string]string{ createdByKey: createdByValue, }, }, HostConfig: &docker.HostConfig{ PortBindings: make(map[docker.Port][]docker.PortBinding), PublishAllPorts: false, AutoRemove: false, 
Privileged: r.privileged, }, } if r.volumesFrom != "" { opts.HostConfig.VolumesFrom = []string{r.volumesFrom} } else { for _, v := range volumes { bind := fmt.Sprintf("%s:%s", v.HostPath, v.ContainerPath) if v.ReadOnly { bind = bind + ":ro" } opts.HostConfig.Binds = append(opts.HostConfig.Binds, bind) } } if r.networkMode != "" && r.networkMode != "default" { opts.HostConfig.NetworkMode = r.networkMode } else { for _, p := range ports { dockerPort := docker.Port(fmt.Sprintf("%d/tcp", p)) opts.Config.ExposedPorts[dockerPort] = struct{}{} opts.HostConfig.PortBindings[dockerPort] = []docker.PortBinding{ docker.PortBinding{ HostIP: "0.0.0.0", HostPort: strconv.Itoa(p), }, } } } r.log.Debug().Msgf("Creating container %s", containerName) c, err := r.client.CreateContainer(opts) if err != nil { r.log.Error().Err(err).Interface("options", opts).Msg("Creating container failed") return nil, maskAny(err) } r.recordContainerID(c.ID) // Record ID so we can clean it up later var waiter docker.CloseWaiter if output != nil { // Attach output to container r.log.Debug().Msgf("Attaching to output of container %s", containerName) success := make(chan struct{}) defer close(success) waiter, err = r.client.AttachToContainerNonBlocking(docker.AttachToContainerOptions{ Container: c.ID, OutputStream: output, Logs: true, Stdout: true, Stderr: true, Success: success, Stream: true, RawTerminal: true, }) if err != nil { r.log.Error().Err(err).Msgf("Failed to attach to output of container %s", c.ID) return nil, maskAny(err) } <-success } r.log.Debug().Msgf("Starting container %s", containerName) if err := r.client.StartContainer(c.ID, opts.HostConfig); err != nil { return nil, maskAny(err) } r.log.Debug().Msgf("Started container %s", containerName) // Write container ID to disk containerFilePath := filepath.Join(serverDir, containerFileName) if err := ioutil.WriteFile(containerFilePath, []byte(c.ID), 0755); err != nil { r.log.Error().Err(err).Msgf("Failed to store container ID in '%s'", containerFilePath) } // Inspect container to make sure we have the latest info c, err = r.client.InspectContainer(c.ID) if err != nil { return nil, maskAny(err) } return &dockerContainer{ log: r.log.With().Str("container", c.ID).Logger(), client: r.client, container: c, waiter: waiter, }, nil } // imageExists looks for a local image and returns true it it exists, false otherwise. func (r *dockerRunner) imageExists(ctx context.Context, image string) (bool, error) { found := false op := func() error { if _, err := r.client.InspectImage(image); isNoSuchImage(err) { found = false return nil } else if err != nil { return maskAny(err) } else { found = true return nil } } if err := retry(ctx, op, time.Minute*2); err != nil { return false, maskAny(err) } return found, nil } // pullImage tries to pull the given image. // It retries several times upon failure. 
func (r *dockerRunner) pullImage(ctx context.Context, image string) error { // Pull docker image repo, tag := docker.ParseRepositoryTag(image) op := func() error { r.log.Debug().Msgf("Pulling image %s:%s", repo, tag) if err := r.client.PullImage(docker.PullImageOptions{ Repository: repo, Tag: tag, }, docker.AuthConfiguration{}); err != nil { if isNotFound(err) { return maskAny(&PermanentError{err}) } return maskAny(err) } return nil } if err := retry(ctx, op, time.Minute*2); err != nil { return maskAny(err) } return nil } func (r *dockerRunner) CreateStartArangodbCommand(myDataDir string, index int, masterIP, masterPort, starterImageName string, clusterConfig ClusterConfig) string { addr := masterIP portOffsetIncrement := clusterConfig.NextPortOffset(0) hostPort := DefaultMasterPort + (portOffsetIncrement * (index - 1)) if masterPort != "" { addr = net.JoinHostPort(addr, masterPort) masterPortI, _ := strconv.Atoi(masterPort) hostPort = masterPortI + (portOffsetIncrement * (index - 1)) } var netArgs string if r.networkMode == "" || r.networkMode == "default" { netArgs = fmt.Sprintf("-p %d:%d", hostPort, DefaultMasterPort) } else { netArgs = fmt.Sprintf("--net=%s", r.networkMode) } lines := []string{ fmt.Sprintf("docker volume create arangodb%d &&", index), fmt.Sprintf("docker run -it --name=adb%d --rm %s -v arangodb%d:%s", index, netArgs, index, dockerDataDir), fmt.Sprintf("-v /var/run/docker.sock:/var/run/docker.sock %s", starterImageName), fmt.Sprintf("--starter.address=%s --starter.join=%s", masterIP, addr), } return strings.Join(lines, " \\\n ") } // Cleanup after all processes are dead and have been cleaned themselves func (r *dockerRunner) Cleanup() error { r.mutex.Lock() defer r.mutex.Unlock() for id := range r.containerIDs { r.log.Info().Msgf("Removing container %s", id) if err := r.client.RemoveContainer(docker.RemoveContainerOptions{ ID: id, Force: true, RemoveVolumes: true, }); err != nil && !isNoSuchContainer(err) { r.log.Warn().Err(err).Msgf("Failed to remove container %s: %#v", id) } } r.containerIDs = make(map[string]time.Time) return nil } // recordContainerID records an ID of a created container func (r *dockerRunner) recordContainerID(id string) { r.mutex.Lock() defer r.mutex.Unlock() if r.containerIDs != nil { r.containerIDs[id] = time.Now() } } // unrecordContainerID removes an ID from the list of created containers func (r *dockerRunner) unrecordContainerID(id string) { r.mutex.Lock() defer r.mutex.Unlock() if r.containerIDs != nil { delete(r.containerIDs, id) } } // gc performs continues garbage collection of stopped old containers func (r *dockerRunner) gc() { canGC := func(c *docker.Container) bool { gcBoundary := time.Now().UTC().Add(-r.gcDelay) switch c.State.StateString() { case "dead", "exited": if c.State.FinishedAt.Before(gcBoundary) { // Dead or exited long enough return true } case "created": if c.Created.Before(gcBoundary) { // Created but not running long enough return true } } return false } for { ids := r.gatherCollectableContainerIDs() for _, id := range ids { c, err := r.client.InspectContainer(id) if err != nil { if isNoSuchContainer(err) { // container no longer exists r.unrecordContainerID(id) } else { r.log.Warn().Err(err).Msgf("Failed to inspect container %s", id) } } else if canGC(c) { // Container is dead for more than 10 minutes, gc it. 
r.log.Info().Msgf("Removing old container %s", id) if err := r.client.RemoveContainer(docker.RemoveContainerOptions{ ID: id, RemoveVolumes: true, }); err != nil { r.log.Warn().Err(err).Msgf("Failed to remove container %s", id) } else { // Remove succeeded r.unrecordContainerID(id) } } } time.Sleep(time.Minute) } } // gatherCollectableContainerIDs returns all container ID's that are old enough to be consider for garbage collection. func (r *dockerRunner) gatherCollectableContainerIDs() []string { r.mutex.Lock() defer r.mutex.Unlock() var result []string gcBoundary := time.Now().Add(-r.gcDelay) for id, ts := range r.containerIDs { if ts.Before(gcBoundary) { result = append(result, id) } } return result } // ProcessID returns the pid of the process (if not running in docker) func (p *dockerContainer) ProcessID() int { return 0 } // ContainerID returns the ID of the docker container that runs the process. func (p *dockerContainer) ContainerID() string { return p.container.ID } // ContainerIP returns the IP address of the docker container that runs the process. func (p *dockerContainer) ContainerIP() string { if ns := p.container.NetworkSettings; ns != nil { return ns.IPAddress } return "" } // HostPort returns the port on the host that is used to access the given port of the process. func (p *dockerContainer) HostPort(containerPort int) (int, error) { if hostConfig := p.container.HostConfig; hostConfig != nil { if hostConfig.NetworkMode == "host" { return containerPort, nil } dockerPort := docker.Port(fmt.Sprintf("%d/tcp", containerPort)) if binding, ok := hostConfig.PortBindings[dockerPort]; ok && len(binding) > 0 { return strconv.Atoi(binding[0].HostPort) } } return 0, fmt.Errorf("Cannot find port mapping.") } func (p *dockerContainer) Wait() int { if p.waiter != nil { p.waiter.Wait() } exitCode, err := p.client.WaitContainer(p.container.ID) if err != nil { p.log.Error().Err(err).Msg("WaitContainer failed") } else if exitCode != 0 { p.log.Info().Int("exitcode", exitCode).Msg("Container terminated with non-zero exit code") } return exitCode } func (p *dockerContainer) Terminate() error { if err := p.client.StopContainer(p.container.ID, stopContainerTimeout); err != nil { return maskAny(err) } return nil } func (p *dockerContainer) Kill() error { if err := p.client.KillContainer(docker.KillContainerOptions{ ID: p.container.ID, }); err != nil { return maskAny(err) } return nil } // Hup sends a SIGHUP to the process func (p *dockerContainer) Hup() error { if err := p.client.KillContainer(docker.KillContainerOptions{ ID: p.container.ID, Signal: docker.SIGHUP, }); err != nil { return maskAny(err) } return nil } func (p *dockerContainer) Cleanup() error { opts := docker.RemoveContainerOptions{ ID: p.container.ID, Force: true, RemoveVolumes: true, } if err := p.client.RemoveContainer(opts); err != nil { return maskAny(err) } return nil } // GetLogger creates a new logger for the process. func (p *dockerContainer) GetLogger(logger zerolog.Logger) zerolog.Logger { cid := p.ContainerID() if len(cid) > 8 { // in logs it is better to see the abbreviation of the long container ID. cid = cid[:8] } return logger.With().Str("cid", cid).Logger() } // isNoSuchContainer returns true if the given error is (or is caused by) a NoSuchContainer error. 
func isNoSuchContainer(err error) bool { if _, ok := err.(*docker.NoSuchContainer); ok { return true } if _, ok := errors.Cause(err).(*docker.NoSuchContainer); ok { return true } return false } // isNoSuchImage returns true if the given error is (or is caused by) a NoSuchImage error. func isNoSuchImage(err error) bool { return err == docker.ErrNoSuchImage || errors.Cause(err) == docker.ErrNoSuchImage } // isNotFound returns true if the given error is (or is caused by) a 404 response error. func isNotFound(err error) bool { if err, ok := errors.Cause(err).(*docker.Error); ok { return err.Status == 404 } return false }
[ "\"ARANGO_LICENSE_KEY\"" ]
[]
[ "ARANGO_LICENSE_KEY" ]
[]
["ARANGO_LICENSE_KEY"]
go
1
0
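Editor's note (not part of the dataset record above): the `dockerRunner.gc()` loop in the file removes a container once it has been `dead`/`exited` since before `now - gcDelay`, or was merely `created` (but never started) before that boundary, and otherwise leaves it alone. The sketch below isolates that predicate on a simplified struct; `containerInfo` is a stand-in for the few `docker.Container` fields the rule reads, not the real go-dockerclient type.

```go
// A minimal sketch of the garbage-collection rule applied by dockerRunner.gc().
package main

import (
	"fmt"
	"time"
)

// containerInfo is a tiny stand-in for the fields the collection rule inspects.
type containerInfo struct {
	State      string    // "running", "created", "exited", "dead", ...
	FinishedAt time.Time // when the container stopped (zero if still running)
	Created    time.Time // when the container was created
}

// canGC reports whether the container is old enough to be garbage collected,
// given how long stopped containers should be kept around.
func canGC(c containerInfo, gcDelay time.Duration, now time.Time) bool {
	boundary := now.Add(-gcDelay)
	switch c.State {
	case "dead", "exited":
		return c.FinishedAt.Before(boundary)
	case "created":
		return c.Created.Before(boundary)
	}
	return false
}

func main() {
	now := time.Now()
	old := containerInfo{State: "exited", FinishedAt: now.Add(-2 * time.Hour)}
	fresh := containerInfo{State: "exited", FinishedAt: now.Add(-time.Minute)}
	fmt.Println(canGC(old, time.Hour, now), canGC(fresh, time.Hour, now)) // true false
}
```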
compressed_communication/aggregators/group_test.py
# Copyright 2022, Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections from absl.testing import parameterized import tensorflow as tf import tensorflow_federated as tff from compressed_communication.aggregators import group _measurement = 1.0 _measurement_fn = lambda _: tff.federated_value(_measurement, tff.SERVER) _measurement_aggregator = tff.aggregators.add_measurements( tff.aggregators.SumFactory(), client_measurement_fn=_measurement_fn) class GroupComputationTest(tff.test.TestCase, parameterized.TestCase): @parameterized.named_parameters( ("two_groups", dict( kernel=[0, 2], bias=[1]), dict( kernel=tff.aggregators.SumFactory(), bias=_measurement_aggregator), [(tf.float32, (2,)), (tf.float32, (3,)), (tf.float32, (2,))], tff.StructType([("kernel", ()), ("bias", ())]), tff.StructType([("kernel", ()), ("bias", tf.float32)]))) def test_group_properties(self, grouped_indices, inner_agg_factories, value_type, states_type, measurements_type): factory = group.GroupFactory(grouped_indices, inner_agg_factories) value_type = tff.to_type(value_type) process = factory.create(value_type) self.assertIsInstance(process, tff.templates.AggregationProcess) server_state_type = tff.type_at_server(states_type) expected_initialize_type = tff.FunctionType( parameter=None, result=server_state_type) self.assert_types_equivalent(process.initialize.type_signature, expected_initialize_type) expected_measurements_type = tff.type_at_server(measurements_type) expected_next_type = tff.FunctionType( parameter=collections.OrderedDict( state=server_state_type, value=tff.type_at_clients(value_type)), result=tff.templates.MeasuredProcessOutput( state=server_state_type, result=tff.type_at_server(value_type), measurements=expected_measurements_type)) self.assert_types_equivalent(process.next.type_signature, expected_next_type) @parameterized.named_parameters( ("group_name_mismatch", dict( x=[0, 2], y=[1]), dict( kernel=tff.aggregators.SumFactory(), bias=_measurement_aggregator), [(tf.float32, (2,)), (tf.float32, (3,)), (tf.float32, (2,))]), ) def test_group_init_raises(self, grouped_indices, inner_agg_factories, value_type): self.assertRaises(ValueError, group.GroupFactory, grouped_indices, inner_agg_factories) @parameterized.named_parameters( ("integer_tensors", dict( kernel=[0, 2], bias=[1]), dict( kernel=tff.aggregators.SumFactory(), bias=_measurement_aggregator), [(tf.int32, (2,)), (tf.int32, (3,)), (tf.int32, (2,))]), ("single_float", dict( kernel=[0]), dict( kernel=tff.aggregators.SumFactory()), tf.float32), ) def test_group_create_raises(self, grouped_indices, inner_agg_factories, value_type): factory = group.GroupFactory(grouped_indices, inner_agg_factories) value_type = tff.to_type(value_type) self.assertRaises(ValueError, factory.create, value_type) class GroupExecutionTest(tff.test.TestCase, parameterized.TestCase): @parameterized.named_parameters( ("two_groups", dict( kernel=[1, 2], bias=[0]), dict( kernel=tff.aggregators.SumFactory(), bias=_measurement_aggregator), [(tf.float32, (2,)), (tf.float32, 
(3,)), (tf.float32, (2,))])) def test_group_impl_first_permutation(self, grouped_indices, inner_agg_factories, value_type): factory = group.GroupFactory(grouped_indices, inner_agg_factories) value_type = tff.to_type(value_type) process = factory.create(value_type) state = process.initialize() client_values = [[tf.ones(t.shape) for t in value_type] for _ in range(2)] expected_result = [tf.ones(t.shape) * 2 for t in value_type] measurements = process.next(state, client_values).measurements self.assertAllEqual(measurements, collections.OrderedDict(kernel=(), bias=_measurement)) result = process.next(state, client_values).result self.assertAllClose(result, expected_result) @parameterized.named_parameters( ("two_groups", dict( kernel=[2, 0], bias=[1]), dict( kernel=tff.aggregators.SumFactory(), bias=_measurement_aggregator), [(tf.float32, (2,)), (tf.float32, (3,)), (tf.float32, (2,))])) def test_group_impl_second_permutation(self, grouped_indices, inner_agg_factories, value_type): factory = group.GroupFactory(grouped_indices, inner_agg_factories) value_type = tff.to_type(value_type) process = factory.create(value_type) state = process.initialize() client_values = [[tf.ones(t.shape) for t in value_type] for _ in range(2)] expected_result = [tf.ones(t.shape) * 2 for t in value_type] measurements = process.next(state, client_values).measurements self.assertAllEqual(measurements, collections.OrderedDict(kernel=(), bias=_measurement)) result = process.next(state, client_values).result self.assertAllClose(result, expected_result) @parameterized.named_parameters( ("two_groups", dict( kernel=[0, 1], bias=[2]), dict( kernel=tff.aggregators.SumFactory(), bias=_measurement_aggregator), [(tf.float32, (2,)), (tf.float32, (3,)), (tf.float32, (2,))])) def test_group_impl_third_permutation(self, grouped_indices, inner_agg_factories, value_type): factory = group.GroupFactory(grouped_indices, inner_agg_factories) value_type = tff.to_type(value_type) process = factory.create(value_type) state = process.initialize() client_values = [[tf.ones(t.shape) for t in value_type] for _ in range(2)] expected_result = [tf.ones(t.shape) * 2 for t in value_type] measurements = process.next(state, client_values).measurements self.assertAllEqual(measurements, collections.OrderedDict(kernel=(), bias=_measurement)) result = process.next(state, client_values).result self.assertAllClose(result, expected_result) if __name__ == "__main__": tff.test.main()
[]
[]
[]
[]
[]
python
null
null
null
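Editor's note (not part of the dataset record above): the `GroupFactory` tests pass a mapping from group name to tensor positions (for example `kernel=[0, 2], bias=[1]`) and expect each inner aggregation factory to see only its own slice of the client value structure, with results reassembled in the original order. The sketch below shows only that index bookkeeping on plain floats; the TFF types and the inner aggregators themselves are out of scope, and `splitByGroup` is an invented name.

```go
// A minimal sketch of the name-to-indices grouping exercised by the tests above.
package main

import "fmt"

// splitByGroup partitions values according to groupedIndices, e.g.
// {"kernel": [0, 2], "bias": [1]} sends values[0] and values[2] to "kernel".
func splitByGroup(values []float64, groupedIndices map[string][]int) map[string][]float64 {
	out := make(map[string][]float64)
	for name, idxs := range groupedIndices {
		for _, i := range idxs {
			out[name] = append(out[name], values[i])
		}
	}
	return out
}

func main() {
	values := []float64{1.0, 2.0, 3.0}
	groups := map[string][]int{"kernel": {0, 2}, "bias": {1}}
	fmt.Println(splitByGroup(values, groups)) // map[bias:[2] kernel:[1 3]]
}
```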
src/options.go
package fzf import ( "fmt" "os" "regexp" "strconv" "strings" "unicode" "unicode/utf8" "github.com/junegunn/fzf/src/algo" "github.com/junegunn/fzf/src/tui" "github.com/mattn/go-runewidth" "github.com/mattn/go-shellwords" ) const usage = `usage: fzf [options] Search -x, --extended Extended-search mode (enabled by default; +x or --no-extended to disable) -e, --exact Enable Exact-match --algo=TYPE Fuzzy matching algorithm: [v1|v2] (default: v2) -i Case-insensitive match (default: smart-case match) +i Case-sensitive match --literal Do not normalize latin script letters before matching -n, --nth=N[,..] Comma-separated list of field index expressions for limiting search scope. Each can be a non-zero integer or a range expression ([BEGIN]..[END]). --with-nth=N[,..] Transform the presentation of each line using field index expressions -d, --delimiter=STR Field delimiter regex (default: AWK-style) +s, --no-sort Do not sort the result --tac Reverse the order of the input --phony Do not perform search --tiebreak=CRI[,..] Comma-separated list of sort criteria to apply when the scores are tied [length|begin|end|index] (default: length) Interface -m, --multi[=MAX] Enable multi-select with tab/shift-tab --no-mouse Disable mouse --bind=KEYBINDS Custom key bindings. Refer to the man page. --cycle Enable cyclic scroll --keep-right Keep the right end of the line visible on overflow --no-hscroll Disable horizontal scroll --hscroll-off=COL Number of screen columns to keep to the right of the highlighted substring (default: 10) --filepath-word Make word-wise movements respect path separators --jump-labels=CHARS Label characters for jump and jump-accept Layout --height=HEIGHT[%] Display fzf window below the cursor with the given height instead of using fullscreen --min-height=HEIGHT Minimum height when --height is given in percent (default: 10) --layout=LAYOUT Choose layout: [default|reverse|reverse-list] --border[=STYLE] Draw border around the finder [rounded|sharp|horizontal|vertical| top|bottom|left|right] (default: rounded) --margin=MARGIN Screen margin (TRBL / TB,RL / T,RL,B / T,R,B,L) --info=STYLE Finder info style [default|inline|hidden] --prompt=STR Input prompt (default: '> ') --pointer=STR Pointer to the current line (default: '>') --marker=STR Multi-select marker (default: '>') --header=STR String to print as header --header-lines=N The first N lines of the input are treated as header Display --ansi Enable processing of ANSI color codes --tabstop=SPACES Number of spaces for a tab character (default: 8) --color=COLSPEC Base scheme (dark|light|16|bw) and/or custom colors --no-bold Do not use bold text History --history=FILE History file --history-size=N Maximum number of history entries (default: 1000) Preview --preview=COMMAND Command to preview highlighted line ({}) --preview-window=OPT Preview window layout (default: right:50%) [up|down|left|right][:SIZE[%]] [:[no]wrap][:[no]cycle][:[no]hidden] [:rounded|sharp|noborder] [:+SCROLL[-OFFSET]] [:default] Scripting -q, --query=STR Start the finder with the given query -1, --select-1 Automatically select the only match -0, --exit-0 Exit immediately when there's no match -f, --filter=STR Filter mode. Do not start interactive finder. 
--print-query Print query as the first line --expect=KEYS Comma-separated list of keys to complete fzf --read0 Read input delimited by ASCII NUL characters --print0 Print output delimited by ASCII NUL characters --sync Synchronous search for multi-staged filtering --version Display version information and exit Environment variables FZF_DEFAULT_COMMAND Default command to use when input is tty FZF_DEFAULT_OPTS Default options (e.g. '--layout=reverse --inline-info') ` // Case denotes case-sensitivity of search type Case int // Case-sensitivities const ( CaseSmart Case = iota CaseIgnore CaseRespect ) // Sort criteria type criterion int const ( byScore criterion = iota byLength byBegin byEnd ) type sizeSpec struct { size float64 percent bool } func defaultMargin() [4]sizeSpec { return [4]sizeSpec{} } type windowPosition int const ( posUp windowPosition = iota posDown posLeft posRight ) type layoutType int const ( layoutDefault layoutType = iota layoutReverse layoutReverseList ) type infoStyle int const ( infoDefault infoStyle = iota infoInline infoHidden ) type previewOpts struct { command string position windowPosition size sizeSpec scroll string hidden bool wrap bool cycle bool border tui.BorderShape } // Options stores the values of command-line options type Options struct { Fuzzy bool FuzzyAlgo algo.Algo Extended bool Phony bool Case Case Normalize bool Nth []Range WithNth []Range Delimiter Delimiter Sort int Tac bool Criteria []criterion Multi int Ansi bool Mouse bool Theme *tui.ColorTheme Black bool Bold bool Height sizeSpec MinHeight int Layout layoutType Cycle bool KeepRight bool Hscroll bool HscrollOff int FileWord bool InfoStyle infoStyle JumpLabels string Prompt string Pointer string Marker string Query string Select1 bool Exit0 bool Filter *string ToggleSort bool Expect map[int]string Keymap map[int][]action Preview previewOpts PrintQuery bool ReadZero bool Printer func(string) PrintSep string Sync bool History *History Header []string HeaderLines int Margin [4]sizeSpec BorderShape tui.BorderShape Unicode bool Tabstop int ClearOnExit bool Version bool } func defaultPreviewOpts(command string) previewOpts { return previewOpts{command, posRight, sizeSpec{50, true}, "", false, false, false, tui.BorderRounded} } func defaultOptions() *Options { return &Options{ Fuzzy: true, FuzzyAlgo: algo.FuzzyMatchV2, Extended: true, Phony: false, Case: CaseSmart, Normalize: true, Nth: make([]Range, 0), WithNth: make([]Range, 0), Delimiter: Delimiter{}, Sort: 1000, Tac: false, Criteria: []criterion{byScore, byLength}, Multi: 0, Ansi: false, Mouse: true, Theme: tui.EmptyTheme(), Black: false, Bold: true, MinHeight: 10, Layout: layoutDefault, Cycle: false, KeepRight: false, Hscroll: true, HscrollOff: 10, FileWord: false, InfoStyle: infoDefault, JumpLabels: defaultJumpLabels, Prompt: "> ", Pointer: ">", Marker: ">", Query: "", Select1: false, Exit0: false, Filter: nil, ToggleSort: false, Expect: make(map[int]string), Keymap: make(map[int][]action), Preview: defaultPreviewOpts(""), PrintQuery: false, ReadZero: false, Printer: func(str string) { fmt.Println(str) }, PrintSep: "\n", Sync: false, History: nil, Header: make([]string, 0), HeaderLines: 0, Margin: defaultMargin(), Unicode: true, Tabstop: 8, ClearOnExit: true, Version: false} } func help(code int) { os.Stdout.WriteString(usage) os.Exit(code) } func errorExit(msg string) { os.Stderr.WriteString(msg + "\n") os.Exit(exitError) } func optString(arg string, prefixes ...string) (bool, string) { for _, prefix := range prefixes { if strings.HasPrefix(arg, 
prefix) { return true, arg[len(prefix):] } } return false, "" } func nextString(args []string, i *int, message string) string { if len(args) > *i+1 { *i++ } else { errorExit(message) } return args[*i] } func optionalNextString(args []string, i *int) (bool, string) { if len(args) > *i+1 && !strings.HasPrefix(args[*i+1], "-") && !strings.HasPrefix(args[*i+1], "+") { *i++ return true, args[*i] } return false, "" } func atoi(str string) int { num, err := strconv.Atoi(str) if err != nil { errorExit("not a valid integer: " + str) } return num } func atof(str string) float64 { num, err := strconv.ParseFloat(str, 64) if err != nil { errorExit("not a valid number: " + str) } return num } func nextInt(args []string, i *int, message string) int { if len(args) > *i+1 { *i++ } else { errorExit(message) } return atoi(args[*i]) } func optionalNumeric(args []string, i *int, defaultValue int) int { if len(args) > *i+1 { if strings.IndexAny(args[*i+1], "0123456789") == 0 { *i++ return atoi(args[*i]) } } return defaultValue } func splitNth(str string) []Range { if match, _ := regexp.MatchString("^[0-9,-.]+$", str); !match { errorExit("invalid format: " + str) } tokens := strings.Split(str, ",") ranges := make([]Range, len(tokens)) for idx, s := range tokens { r, ok := ParseRange(&s) if !ok { errorExit("invalid format: " + str) } ranges[idx] = r } return ranges } func delimiterRegexp(str string) Delimiter { // Special handling of \t str = strings.Replace(str, "\\t", "\t", -1) // 1. Pattern does not contain any special character if regexp.QuoteMeta(str) == str { return Delimiter{str: &str} } rx, e := regexp.Compile(str) // 2. Pattern is not a valid regular expression if e != nil { return Delimiter{str: &str} } // 3. Pattern as regular expression. Slow. return Delimiter{regex: rx} } func isAlphabet(char uint8) bool { return char >= 'a' && char <= 'z' } func isNumeric(char uint8) bool { return char >= '0' && char <= '9' } func parseAlgo(str string) algo.Algo { switch str { case "v1": return algo.FuzzyMatchV1 case "v2": return algo.FuzzyMatchV2 default: errorExit("invalid algorithm (expected: v1 or v2)") } return algo.FuzzyMatchV2 } func parseBorder(str string, optional bool) tui.BorderShape { switch str { case "rounded": return tui.BorderRounded case "sharp": return tui.BorderSharp case "horizontal": return tui.BorderHorizontal case "vertical": return tui.BorderVertical case "top": return tui.BorderTop case "bottom": return tui.BorderBottom case "left": return tui.BorderLeft case "right": return tui.BorderRight default: if optional && str == "" { return tui.BorderRounded } errorExit("invalid border style (expected: rounded|sharp|horizontal|vertical|top|bottom|left|right)") } return tui.BorderNone } func parseKeyChords(str string, message string) map[int]string { if len(str) == 0 { errorExit(message) } tokens := strings.Split(str, ",") if str == "," || strings.HasPrefix(str, ",,") || strings.HasSuffix(str, ",,") || strings.Contains(str, ",,,") { tokens = append(tokens, ",") } chords := make(map[int]string) for _, key := range tokens { if len(key) == 0 { continue // ignore } lkey := strings.ToLower(key) chord := 0 switch lkey { case "up": chord = tui.Up case "down": chord = tui.Down case "left": chord = tui.Left case "right": chord = tui.Right case "enter", "return": chord = tui.CtrlM case "space": chord = tui.AltZ + int(' ') case "bspace", "bs": chord = tui.BSpace case "ctrl-space": chord = tui.CtrlSpace case "ctrl-^", "ctrl-6": chord = tui.CtrlCaret case "ctrl-/", "ctrl-_": chord = tui.CtrlSlash case "ctrl-\\": 
chord = tui.CtrlBackSlash case "ctrl-]": chord = tui.CtrlRightBracket case "change": chord = tui.Change case "backward-eof": chord = tui.BackwardEOF case "alt-enter", "alt-return": chord = tui.CtrlAltM case "alt-space": chord = tui.AltSpace case "alt-/": chord = tui.AltSlash case "alt-bs", "alt-bspace": chord = tui.AltBS case "alt-up": chord = tui.AltUp case "alt-down": chord = tui.AltDown case "alt-left": chord = tui.AltLeft case "alt-right": chord = tui.AltRight case "tab": chord = tui.Tab case "btab", "shift-tab": chord = tui.BTab case "esc": chord = tui.ESC case "del": chord = tui.Del case "home": chord = tui.Home case "end": chord = tui.End case "insert": chord = tui.Insert case "pgup", "page-up": chord = tui.PgUp case "pgdn", "page-down": chord = tui.PgDn case "shift-up": chord = tui.SUp case "shift-down": chord = tui.SDown case "shift-left": chord = tui.SLeft case "shift-right": chord = tui.SRight case "left-click": chord = tui.LeftClick case "right-click": chord = tui.RightClick case "double-click": chord = tui.DoubleClick case "f10": chord = tui.F10 case "f11": chord = tui.F11 case "f12": chord = tui.F12 default: if len(key) == 10 && strings.HasPrefix(lkey, "ctrl-alt-") && isAlphabet(lkey[9]) { chord = tui.CtrlAltA + int(lkey[9]) - 'a' } else if len(key) == 6 && strings.HasPrefix(lkey, "ctrl-") && isAlphabet(lkey[5]) { chord = tui.CtrlA + int(lkey[5]) - 'a' } else if len(key) == 5 && strings.HasPrefix(lkey, "alt-") && isAlphabet(lkey[4]) { chord = tui.AltA + int(lkey[4]) - 'a' } else if len(key) == 5 && strings.HasPrefix(lkey, "alt-") && isNumeric(lkey[4]) { chord = tui.Alt0 + int(lkey[4]) - '0' } else if len(key) == 2 && strings.HasPrefix(lkey, "f") && key[1] >= '1' && key[1] <= '9' { chord = tui.F1 + int(key[1]) - '1' } else if utf8.RuneCountInString(key) == 1 { chord = tui.AltZ + int([]rune(key)[0]) } else { errorExit("unsupported key: " + key) } } if chord > 0 { chords[chord] = key } } return chords } func parseTiebreak(str string) []criterion { criteria := []criterion{byScore} hasIndex := false hasLength := false hasBegin := false hasEnd := false check := func(notExpected *bool, name string) { if *notExpected { errorExit("duplicate sort criteria: " + name) } if hasIndex { errorExit("index should be the last criterion") } *notExpected = true } for _, str := range strings.Split(strings.ToLower(str), ",") { switch str { case "index": check(&hasIndex, "index") case "length": check(&hasLength, "length") criteria = append(criteria, byLength) case "begin": check(&hasBegin, "begin") criteria = append(criteria, byBegin) case "end": check(&hasEnd, "end") criteria = append(criteria, byEnd) default: errorExit("invalid sort criterion: " + str) } } return criteria } func dupeTheme(theme *tui.ColorTheme) *tui.ColorTheme { dupe := *theme return &dupe } func parseTheme(defaultTheme *tui.ColorTheme, str string) *tui.ColorTheme { theme := dupeTheme(defaultTheme) rrggbb := regexp.MustCompile("^#[0-9a-fA-F]{6}$") for _, str := range strings.Split(strings.ToLower(str), ",") { switch str { case "dark": theme = dupeTheme(tui.Dark256) case "light": theme = dupeTheme(tui.Light256) case "16": theme = dupeTheme(tui.Default16) case "bw", "no": theme = tui.NoColorTheme() default: fail := func() { errorExit("invalid color specification: " + str) } // Color is disabled if theme == nil { continue } components := strings.Split(str, ":") if len(components) < 2 { fail() } cattr := tui.NewColorAttr() for _, component := range components[1:] { switch component { case "regular": cattr.Attr = tui.AttrRegular case 
"bold", "strong": cattr.Attr |= tui.Bold case "dim": cattr.Attr |= tui.Dim case "italic": cattr.Attr |= tui.Italic case "underline": cattr.Attr |= tui.Underline case "blink": cattr.Attr |= tui.Blink case "reverse": cattr.Attr |= tui.Reverse case "": default: if rrggbb.MatchString(component) { cattr.Color = tui.HexToColor(component) } else { ansi32, err := strconv.Atoi(component) if err != nil || ansi32 < -1 || ansi32 > 255 { fail() } cattr.Color = tui.Color(ansi32) } } } switch components[0] { case "input": theme.Input = cattr case "fg": theme.Fg = cattr case "bg": theme.Bg = cattr case "preview-fg": theme.PreviewFg = cattr case "preview-bg": theme.PreviewBg = cattr case "fg+": theme.Current = cattr case "bg+": theme.DarkBg = cattr case "gutter": theme.Gutter = cattr case "hl": theme.Match = cattr case "hl+": theme.CurrentMatch = cattr case "border": theme.Border = cattr case "prompt": theme.Prompt = cattr case "spinner": theme.Spinner = cattr case "info": theme.Info = cattr case "pointer": theme.Cursor = cattr case "marker": theme.Selected = cattr case "header": theme.Header = cattr default: fail() } } } return theme } var executeRegexp *regexp.Regexp func firstKey(keymap map[int]string) int { for k := range keymap { return k } return 0 } const ( escapedColon = 0 escapedComma = 1 escapedPlus = 2 ) func init() { // Backreferences are not supported. // "~!@#$%^&*;/|".each_char.map { |c| Regexp.escape(c) }.map { |c| "#{c}[^#{c}]*#{c}" }.join('|') executeRegexp = regexp.MustCompile( `(?si)[:+](execute(?:-multi|-silent)?|reload|preview):.+|[:+](execute(?:-multi|-silent)?|reload|preview)(\([^)]*\)|\[[^\]]*\]|~[^~]*~|![^!]*!|@[^@]*@|\#[^\#]*\#|\$[^\$]*\$|%[^%]*%|\^[^\^]*\^|&[^&]*&|\*[^\*]*\*|;[^;]*;|/[^/]*/|\|[^\|]*\|)`) } func parseKeymap(keymap map[int][]action, str string) { masked := executeRegexp.ReplaceAllStringFunc(str, func(src string) string { symbol := ":" if strings.HasPrefix(src, "+") { symbol = "+" } prefix := symbol + "execute" if strings.HasPrefix(src[1:], "reload") { prefix = symbol + "reload" } else if strings.HasPrefix(src[1:], "preview") { prefix = symbol + "preview" } else if src[len(prefix)] == '-' { c := src[len(prefix)+1] if c == 's' || c == 'S' { prefix += "-silent" } else { prefix += "-multi" } } return prefix + "(" + strings.Repeat(" ", len(src)-len(prefix)-2) + ")" }) masked = strings.Replace(masked, "::", string([]rune{escapedColon, ':'}), -1) masked = strings.Replace(masked, ",:", string([]rune{escapedComma, ':'}), -1) masked = strings.Replace(masked, "+:", string([]rune{escapedPlus, ':'}), -1) idx := 0 for _, pairStr := range strings.Split(masked, ",") { origPairStr := str[idx : idx+len(pairStr)] idx += len(pairStr) + 1 pair := strings.SplitN(pairStr, ":", 2) if len(pair) < 2 { errorExit("bind action not specified: " + origPairStr) } var key int if len(pair[0]) == 1 && pair[0][0] == escapedColon { key = ':' + tui.AltZ } else if len(pair[0]) == 1 && pair[0][0] == escapedComma { key = ',' + tui.AltZ } else if len(pair[0]) == 1 && pair[0][0] == escapedPlus { key = '+' + tui.AltZ } else { keys := parseKeyChords(pair[0], "key name required") key = firstKey(keys) } idx2 := len(pair[0]) + 1 specs := strings.Split(pair[1], "+") actions := make([]action, 0, len(specs)) appendAction := func(types ...actionType) { actions = append(actions, toActions(types...)...) 
} prevSpec := "" for specIndex, maskedSpec := range specs { spec := origPairStr[idx2 : idx2+len(maskedSpec)] idx2 += len(maskedSpec) + 1 spec = prevSpec + spec specLower := strings.ToLower(spec) switch specLower { case "ignore": appendAction(actIgnore) case "beginning-of-line": appendAction(actBeginningOfLine) case "abort": appendAction(actAbort) case "accept": appendAction(actAccept) case "accept-non-empty": appendAction(actAcceptNonEmpty) case "print-query": appendAction(actPrintQuery) case "refresh-preview": appendAction(actRefreshPreview) case "replace-query": appendAction(actReplaceQuery) case "backward-char": appendAction(actBackwardChar) case "backward-delete-char": appendAction(actBackwardDeleteChar) case "backward-delete-char/eof": appendAction(actBackwardDeleteCharEOF) case "backward-word": appendAction(actBackwardWord) case "clear-screen": appendAction(actClearScreen) case "delete-char": appendAction(actDeleteChar) case "delete-char/eof": appendAction(actDeleteCharEOF) case "end-of-line": appendAction(actEndOfLine) case "cancel": appendAction(actCancel) case "clear-query": appendAction(actClearQuery) case "clear-selection": appendAction(actClearSelection) case "forward-char": appendAction(actForwardChar) case "forward-word": appendAction(actForwardWord) case "jump": appendAction(actJump) case "jump-accept": appendAction(actJumpAccept) case "kill-line": appendAction(actKillLine) case "kill-word": appendAction(actKillWord) case "unix-line-discard", "line-discard": appendAction(actUnixLineDiscard) case "unix-word-rubout", "word-rubout": appendAction(actUnixWordRubout) case "yank": appendAction(actYank) case "backward-kill-word": appendAction(actBackwardKillWord) case "toggle-down": appendAction(actToggle, actDown) case "toggle-up": appendAction(actToggle, actUp) case "toggle-in": appendAction(actToggleIn) case "toggle-out": appendAction(actToggleOut) case "toggle-all": appendAction(actToggleAll) case "select-all": appendAction(actSelectAll) case "deselect-all": appendAction(actDeselectAll) case "toggle": appendAction(actToggle) case "down": appendAction(actDown) case "up": appendAction(actUp) case "top": appendAction(actTop) case "page-up": appendAction(actPageUp) case "page-down": appendAction(actPageDown) case "half-page-up": appendAction(actHalfPageUp) case "half-page-down": appendAction(actHalfPageDown) case "previous-history": appendAction(actPreviousHistory) case "next-history": appendAction(actNextHistory) case "toggle-preview": appendAction(actTogglePreview) case "toggle-preview-wrap": appendAction(actTogglePreviewWrap) case "toggle-sort": appendAction(actToggleSort) case "preview-up": appendAction(actPreviewUp) case "preview-down": appendAction(actPreviewDown) case "preview-page-up": appendAction(actPreviewPageUp) case "preview-page-down": appendAction(actPreviewPageDown) case "preview-half-page-up": appendAction(actPreviewHalfPageUp) case "preview-half-page-down": appendAction(actPreviewHalfPageDown) default: t := isExecuteAction(specLower) if t == actIgnore { if specIndex == 0 && specLower == "" { actions = append(keymap[key], actions...) 
} else { errorExit("unknown action: " + spec) } } else { var offset int switch t { case actReload: offset = len("reload") case actPreview: offset = len("preview") case actExecuteSilent: offset = len("execute-silent") case actExecuteMulti: offset = len("execute-multi") default: offset = len("execute") } if spec[offset] == ':' { if specIndex == len(specs)-1 { actions = append(actions, action{t: t, a: spec[offset+1:]}) } else { prevSpec = spec + "+" continue } } else { actions = append(actions, action{t: t, a: spec[offset+1 : len(spec)-1]}) } } } prevSpec = "" } keymap[key] = actions } } func isExecuteAction(str string) actionType { matches := executeRegexp.FindAllStringSubmatch(":"+str, -1) if matches == nil || len(matches) != 1 { return actIgnore } prefix := matches[0][1] if len(prefix) == 0 { prefix = matches[0][2] } switch prefix { case "reload": return actReload case "preview": return actPreview case "execute": return actExecute case "execute-silent": return actExecuteSilent case "execute-multi": return actExecuteMulti } return actIgnore } func parseToggleSort(keymap map[int][]action, str string) { keys := parseKeyChords(str, "key name required") if len(keys) != 1 { errorExit("multiple keys specified") } keymap[firstKey(keys)] = toActions(actToggleSort) } func strLines(str string) []string { return strings.Split(strings.TrimSuffix(str, "\n"), "\n") } func parseSize(str string, maxPercent float64, label string) sizeSpec { var val float64 percent := strings.HasSuffix(str, "%") if percent { val = atof(str[:len(str)-1]) if val < 0 { errorExit(label + " must be non-negative") } if val > maxPercent { errorExit(fmt.Sprintf("%s too large (max: %d%%)", label, int(maxPercent))) } } else { if strings.Contains(str, ".") { errorExit(label + " (without %) must be a non-negative integer") } val = float64(atoi(str)) if val < 0 { errorExit(label + " must be non-negative") } } return sizeSpec{val, percent} } func parseHeight(str string) sizeSpec { size := parseSize(str, 100, "height") return size } func parseLayout(str string) layoutType { switch str { case "default": return layoutDefault case "reverse": return layoutReverse case "reverse-list": return layoutReverseList default: errorExit("invalid layout (expected: default / reverse / reverse-list)") } return layoutDefault } func parseInfoStyle(str string) infoStyle { switch str { case "default": return infoDefault case "inline": return infoInline case "hidden": return infoHidden default: errorExit("invalid info style (expected: default / inline / hidden)") } return infoDefault } func parsePreviewWindow(opts *previewOpts, input string) { tokens := strings.Split(input, ":") sizeRegex := regexp.MustCompile("^[0-9]+%?$") offsetRegex := regexp.MustCompile("^\\+([0-9]+|{-?[0-9]+})(-[0-9]+|-/[1-9][0-9]*)?$") for _, token := range tokens { switch token { case "": case "default": *opts = defaultPreviewOpts(opts.command) case "hidden": opts.hidden = true case "nohidden": opts.hidden = false case "wrap": opts.wrap = true case "nowrap": opts.wrap = false case "cycle": opts.cycle = true case "nocycle": opts.cycle = false case "up", "top": opts.position = posUp case "down", "bottom": opts.position = posDown case "left": opts.position = posLeft case "right": opts.position = posRight case "rounded", "border": opts.border = tui.BorderRounded case "sharp": opts.border = tui.BorderSharp case "noborder": opts.border = tui.BorderNone default: if sizeRegex.MatchString(token) { opts.size = parseSize(token, 99, "window size") } else if offsetRegex.MatchString(token) { 
opts.scroll = token[1:] } else { errorExit("invalid preview window option: " + token) } } } } func parseMargin(margin string) [4]sizeSpec { margins := strings.Split(margin, ",") checked := func(str string) sizeSpec { return parseSize(str, 49, "margin") } switch len(margins) { case 1: m := checked(margins[0]) return [4]sizeSpec{m, m, m, m} case 2: tb := checked(margins[0]) rl := checked(margins[1]) return [4]sizeSpec{tb, rl, tb, rl} case 3: t := checked(margins[0]) rl := checked(margins[1]) b := checked(margins[2]) return [4]sizeSpec{t, rl, b, rl} case 4: return [4]sizeSpec{ checked(margins[0]), checked(margins[1]), checked(margins[2]), checked(margins[3])} default: errorExit("invalid margin: " + margin) } return defaultMargin() } func parseOptions(opts *Options, allArgs []string) { var historyMax int if opts.History == nil { historyMax = defaultHistoryMax } else { historyMax = opts.History.maxSize } setHistory := func(path string) { h, e := NewHistory(path, historyMax) if e != nil { errorExit(e.Error()) } opts.History = h } setHistoryMax := func(max int) { historyMax = max if historyMax < 1 { errorExit("history max must be a positive integer") } if opts.History != nil { opts.History.maxSize = historyMax } } validateJumpLabels := false validatePointer := false validateMarker := false for i := 0; i < len(allArgs); i++ { arg := allArgs[i] switch arg { case "-h", "--help": help(exitOk) case "-x", "--extended": opts.Extended = true case "-e", "--exact": opts.Fuzzy = false case "--extended-exact": // Note that we now don't have --no-extended-exact opts.Fuzzy = false opts.Extended = true case "+x", "--no-extended": opts.Extended = false case "+e", "--no-exact": opts.Fuzzy = true case "-q", "--query": opts.Query = nextString(allArgs, &i, "query string required") case "-f", "--filter": filter := nextString(allArgs, &i, "query string required") opts.Filter = &filter case "--literal": opts.Normalize = false case "--no-literal": opts.Normalize = true case "--algo": opts.FuzzyAlgo = parseAlgo(nextString(allArgs, &i, "algorithm required (v1|v2)")) case "--expect": for k, v := range parseKeyChords(nextString(allArgs, &i, "key names required"), "key names required") { opts.Expect[k] = v } case "--no-expect": opts.Expect = make(map[int]string) case "--no-phony": opts.Phony = false case "--phony": opts.Phony = true case "--tiebreak": opts.Criteria = parseTiebreak(nextString(allArgs, &i, "sort criterion required")) case "--bind": parseKeymap(opts.Keymap, nextString(allArgs, &i, "bind expression required")) case "--color": _, spec := optionalNextString(allArgs, &i) if len(spec) == 0 { opts.Theme = tui.EmptyTheme() } else { opts.Theme = parseTheme(opts.Theme, spec) } case "--toggle-sort": parseToggleSort(opts.Keymap, nextString(allArgs, &i, "key name required")) case "-d", "--delimiter": opts.Delimiter = delimiterRegexp(nextString(allArgs, &i, "delimiter required")) case "-n", "--nth": opts.Nth = splitNth(nextString(allArgs, &i, "nth expression required")) case "--with-nth": opts.WithNth = splitNth(nextString(allArgs, &i, "nth expression required")) case "-s", "--sort": opts.Sort = optionalNumeric(allArgs, &i, 1) case "+s", "--no-sort": opts.Sort = 0 case "--tac": opts.Tac = true case "--no-tac": opts.Tac = false case "-i": opts.Case = CaseIgnore case "+i": opts.Case = CaseRespect case "-m", "--multi": opts.Multi = optionalNumeric(allArgs, &i, maxMulti) case "+m", "--no-multi": opts.Multi = 0 case "--ansi": opts.Ansi = true case "--no-ansi": opts.Ansi = false case "--no-mouse": opts.Mouse = false case "+c", 
"--no-color": opts.Theme = tui.NoColorTheme() case "+2", "--no-256": opts.Theme = tui.Default16 case "--black": opts.Black = true case "--no-black": opts.Black = false case "--bold": opts.Bold = true case "--no-bold": opts.Bold = false case "--layout": opts.Layout = parseLayout( nextString(allArgs, &i, "layout required (default / reverse / reverse-list)")) case "--reverse": opts.Layout = layoutReverse case "--no-reverse": opts.Layout = layoutDefault case "--cycle": opts.Cycle = true case "--no-cycle": opts.Cycle = false case "--keep-right": opts.KeepRight = true case "--no-keep-right": opts.KeepRight = false case "--hscroll": opts.Hscroll = true case "--no-hscroll": opts.Hscroll = false case "--hscroll-off": opts.HscrollOff = nextInt(allArgs, &i, "hscroll offset required") case "--filepath-word": opts.FileWord = true case "--no-filepath-word": opts.FileWord = false case "--info": opts.InfoStyle = parseInfoStyle( nextString(allArgs, &i, "info style required")) case "--no-info": opts.InfoStyle = infoHidden case "--inline-info": opts.InfoStyle = infoInline case "--no-inline-info": opts.InfoStyle = infoDefault case "--jump-labels": opts.JumpLabels = nextString(allArgs, &i, "label characters required") validateJumpLabels = true case "-1", "--select-1": opts.Select1 = true case "+1", "--no-select-1": opts.Select1 = false case "-0", "--exit-0": opts.Exit0 = true case "+0", "--no-exit-0": opts.Exit0 = false case "--read0": opts.ReadZero = true case "--no-read0": opts.ReadZero = false case "--print0": opts.Printer = func(str string) { fmt.Print(str, "\x00") } opts.PrintSep = "\x00" case "--no-print0": opts.Printer = func(str string) { fmt.Println(str) } opts.PrintSep = "\n" case "--print-query": opts.PrintQuery = true case "--no-print-query": opts.PrintQuery = false case "--prompt": opts.Prompt = nextString(allArgs, &i, "prompt string required") case "--pointer": opts.Pointer = nextString(allArgs, &i, "pointer sign string required") validatePointer = true case "--marker": opts.Marker = nextString(allArgs, &i, "selected sign string required") validateMarker = true case "--sync": opts.Sync = true case "--no-sync": opts.Sync = false case "--async": opts.Sync = false case "--no-history": opts.History = nil case "--history": setHistory(nextString(allArgs, &i, "history file path required")) case "--history-size": setHistoryMax(nextInt(allArgs, &i, "history max size required")) case "--no-header": opts.Header = []string{} case "--no-header-lines": opts.HeaderLines = 0 case "--header": opts.Header = strLines(nextString(allArgs, &i, "header string required")) case "--header-lines": opts.HeaderLines = atoi( nextString(allArgs, &i, "number of header lines required")) case "--preview": opts.Preview.command = nextString(allArgs, &i, "preview command required") case "--no-preview": opts.Preview.command = "" case "--preview-window": parsePreviewWindow(&opts.Preview, nextString(allArgs, &i, "preview window layout required: [up|down|left|right][:SIZE[%]][:rounded|sharp|noborder][:wrap][:cycle][:hidden][:+SCROLL[-OFFSET]][:default]")) case "--height": opts.Height = parseHeight(nextString(allArgs, &i, "height required: HEIGHT[%]")) case "--min-height": opts.MinHeight = nextInt(allArgs, &i, "height required: HEIGHT") case "--no-height": opts.Height = sizeSpec{} case "--no-margin": opts.Margin = defaultMargin() case "--no-border": opts.BorderShape = tui.BorderNone case "--border": hasArg, arg := optionalNextString(allArgs, &i) opts.BorderShape = parseBorder(arg, !hasArg) case "--no-unicode": opts.Unicode = false case 
"--unicode": opts.Unicode = true case "--margin": opts.Margin = parseMargin( nextString(allArgs, &i, "margin required (TRBL / TB,RL / T,RL,B / T,R,B,L)")) case "--tabstop": opts.Tabstop = nextInt(allArgs, &i, "tab stop required") case "--clear": opts.ClearOnExit = true case "--no-clear": opts.ClearOnExit = false case "--version": opts.Version = true default: if match, value := optString(arg, "--algo="); match { opts.FuzzyAlgo = parseAlgo(value) } else if match, value := optString(arg, "-q", "--query="); match { opts.Query = value } else if match, value := optString(arg, "-f", "--filter="); match { opts.Filter = &value } else if match, value := optString(arg, "-d", "--delimiter="); match { opts.Delimiter = delimiterRegexp(value) } else if match, value := optString(arg, "--border="); match { opts.BorderShape = parseBorder(value, false) } else if match, value := optString(arg, "--prompt="); match { opts.Prompt = value } else if match, value := optString(arg, "--pointer="); match { opts.Pointer = value validatePointer = true } else if match, value := optString(arg, "--marker="); match { opts.Marker = value validateMarker = true } else if match, value := optString(arg, "-n", "--nth="); match { opts.Nth = splitNth(value) } else if match, value := optString(arg, "--with-nth="); match { opts.WithNth = splitNth(value) } else if match, _ := optString(arg, "-s", "--sort="); match { opts.Sort = 1 // Don't care } else if match, value := optString(arg, "-m", "--multi="); match { opts.Multi = atoi(value) } else if match, value := optString(arg, "--height="); match { opts.Height = parseHeight(value) } else if match, value := optString(arg, "--min-height="); match { opts.MinHeight = atoi(value) } else if match, value := optString(arg, "--layout="); match { opts.Layout = parseLayout(value) } else if match, value := optString(arg, "--info="); match { opts.InfoStyle = parseInfoStyle(value) } else if match, value := optString(arg, "--toggle-sort="); match { parseToggleSort(opts.Keymap, value) } else if match, value := optString(arg, "--expect="); match { for k, v := range parseKeyChords(value, "key names required") { opts.Expect[k] = v } } else if match, value := optString(arg, "--tiebreak="); match { opts.Criteria = parseTiebreak(value) } else if match, value := optString(arg, "--color="); match { opts.Theme = parseTheme(opts.Theme, value) } else if match, value := optString(arg, "--bind="); match { parseKeymap(opts.Keymap, value) } else if match, value := optString(arg, "--history="); match { setHistory(value) } else if match, value := optString(arg, "--history-size="); match { setHistoryMax(atoi(value)) } else if match, value := optString(arg, "--header="); match { opts.Header = strLines(value) } else if match, value := optString(arg, "--header-lines="); match { opts.HeaderLines = atoi(value) } else if match, value := optString(arg, "--preview="); match { opts.Preview.command = value } else if match, value := optString(arg, "--preview-window="); match { parsePreviewWindow(&opts.Preview, value) } else if match, value := optString(arg, "--margin="); match { opts.Margin = parseMargin(value) } else if match, value := optString(arg, "--tabstop="); match { opts.Tabstop = atoi(value) } else if match, value := optString(arg, "--hscroll-off="); match { opts.HscrollOff = atoi(value) } else if match, value := optString(arg, "--jump-labels="); match { opts.JumpLabels = value validateJumpLabels = true } else { errorExit("unknown option: " + arg) } } } if opts.HeaderLines < 0 { errorExit("header lines must be a 
non-negative integer") } if opts.HscrollOff < 0 { errorExit("hscroll offset must be a non-negative integer") } if opts.Tabstop < 1 { errorExit("tab stop must be a positive integer") } if len(opts.JumpLabels) == 0 { errorExit("empty jump labels") } if validateJumpLabels { for _, r := range opts.JumpLabels { if r < 32 || r > 126 { errorExit("non-ascii jump labels are not allowed") } } } if validatePointer { if err := validateSign(opts.Pointer, "pointer"); err != nil { errorExit(err.Error()) } } if validateMarker { if err := validateSign(opts.Marker, "marker"); err != nil { errorExit(err.Error()) } } } func validateSign(sign string, signOptName string) error { if sign == "" { return fmt.Errorf("%v cannot be empty", signOptName) } widthSum := 0 for _, r := range sign { if !unicode.IsGraphic(r) { return fmt.Errorf("invalid character in %v", signOptName) } widthSum += runewidth.RuneWidth(r) if widthSum > 2 { return fmt.Errorf("%v display width should be up to 2", signOptName) } } return nil } func postProcessOptions(opts *Options) { if !tui.IsLightRendererSupported() && opts.Height.size > 0 { errorExit("--height option is currently not supported on this platform") } // Default actions for CTRL-N / CTRL-P when --history is set if opts.History != nil { if _, prs := opts.Keymap[tui.CtrlP]; !prs { opts.Keymap[tui.CtrlP] = toActions(actPreviousHistory) } if _, prs := opts.Keymap[tui.CtrlN]; !prs { opts.Keymap[tui.CtrlN] = toActions(actNextHistory) } } // Extend the default key map keymap := defaultKeymap() for key, actions := range opts.Keymap { for _, act := range actions { if act.t == actToggleSort { opts.ToggleSort = true } } keymap[key] = actions } opts.Keymap = keymap // If we're not using extended search mode, --nth option becomes irrelevant // if it contains the whole range if !opts.Extended || len(opts.Nth) == 1 { for _, r := range opts.Nth { if r.begin == rangeEllipsis && r.end == rangeEllipsis { opts.Nth = make([]Range, 0) return } } } if opts.Bold { theme := opts.Theme boldify := func(c tui.ColorAttr) tui.ColorAttr { dup := c if !theme.Colored { dup.Attr |= tui.Bold } else if (c.Attr & tui.AttrRegular) == 0 { dup.Attr |= tui.Bold } return dup } theme.Current = boldify(theme.Current) theme.CurrentMatch = boldify(theme.CurrentMatch) theme.Prompt = boldify(theme.Prompt) theme.Input = boldify(theme.Input) theme.Cursor = boldify(theme.Cursor) theme.Spinner = boldify(theme.Spinner) } } // ParseOptions parses command-line options func ParseOptions() *Options { opts := defaultOptions() // Options from Env var words, _ := shellwords.Parse(os.Getenv("FZF_DEFAULT_OPTS")) if len(words) > 0 { parseOptions(opts, words) } // Options from command-line arguments parseOptions(opts, os.Args[1:]) postProcessOptions(opts) return opts }
[ "\"FZF_DEFAULT_OPTS\"" ]
[]
[ "FZF_DEFAULT_OPTS" ]
[]
["FZF_DEFAULT_OPTS"]
go
1
0
tools/train.py
# Copyright (c) SenseTime. All Rights Reserved. from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import argparse import logging import os import time import math import json import random import numpy as np import torch import torch.nn as nn from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from torch.nn.utils import clip_grad_norm_ from torch.utils.data.distributed import DistributedSampler from siamban.utils.lr_scheduler import build_lr_scheduler from siamban.utils.log_helper import init_log, print_speed, add_file_handler from siamban.utils.distributed import dist_init, DistModule, reduce_gradients,\ average_reduce, get_rank, get_world_size from siamban.utils.model_load import load_pretrain, restore_from from siamban.utils.average_meter import AverageMeter from siamban.utils.misc import describe, commit from siamban.models.model_builder import ModelBuilder from siamban.datasets.dataset import BANDataset from siamban.core.config import cfg logger = logging.getLogger('global') parser = argparse.ArgumentParser(description='siamese tracking') parser.add_argument('--cfg', type=str, default='config.yaml', help='configuration of tracking') parser.add_argument('--seed', type=int, default=123456, help='random seed') parser.add_argument('--local_rank', type=int, default=0, help='compulsory for pytorch launcer') args = parser.parse_args() def seed_torch(seed=0): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True def build_data_loader(): logger.info("build train dataset") # train_dataset if cfg.BAN.BAN: train_dataset = BANDataset() logger.info("build dataset done") train_sampler = None if get_world_size() > 1: train_sampler = DistributedSampler(train_dataset) train_loader = DataLoader(train_dataset, batch_size=cfg.TRAIN.BATCH_SIZE, num_workers=cfg.TRAIN.NUM_WORKERS, pin_memory=True, sampler=train_sampler) return train_loader def build_opt_lr(model, current_epoch=0): for param in model.backbone.parameters(): param.requires_grad = False for m in model.backbone.modules(): if isinstance(m, nn.BatchNorm2d): m.eval() if current_epoch >= cfg.BACKBONE.TRAIN_EPOCH: for layer in cfg.BACKBONE.TRAIN_LAYERS: for param in getattr(model.backbone, layer).parameters(): param.requires_grad = True for m in getattr(model.backbone, layer).modules(): if isinstance(m, nn.BatchNorm2d): m.train() trainable_params = [] trainable_params += [{'params': filter(lambda x: x.requires_grad, model.backbone.parameters()), 'lr': cfg.BACKBONE.LAYERS_LR * cfg.TRAIN.BASE_LR}] if cfg.ADJUST.ADJUST: trainable_params += [{'params': model.neck.parameters(), 'lr': cfg.TRAIN.BASE_LR}] trainable_params += [{'params': model.head.parameters(), 'lr': cfg.TRAIN.BASE_LR}] optimizer = torch.optim.SGD(trainable_params, momentum=cfg.TRAIN.MOMENTUM, weight_decay=cfg.TRAIN.WEIGHT_DECAY) lr_scheduler = build_lr_scheduler(optimizer, epochs=cfg.TRAIN.EPOCH) lr_scheduler.step(cfg.TRAIN.START_EPOCH) return optimizer, lr_scheduler def log_grads(model, tb_writer, tb_index): def weights_grads(model): grad = {} weights = {} for name, param in model.named_parameters(): if param.grad is not None: grad[name] = param.grad weights[name] = param.data return grad, weights grad, weights = weights_grads(model) feature_norm, head_norm = 0, 0 for k, g in grad.items(): _norm = 
g.data.norm(2) weight = weights[k] w_norm = weight.norm(2) if 'feature' in k: feature_norm += _norm ** 2 else: head_norm += _norm ** 2 tb_writer.add_scalar('grad_all/'+k.replace('.', '/'), _norm, tb_index) tb_writer.add_scalar('weight_all/'+k.replace('.', '/'), w_norm, tb_index) tb_writer.add_scalar('w-g/'+k.replace('.', '/'), w_norm/(1e-20 + _norm), tb_index) tot_norm = feature_norm + head_norm tot_norm = tot_norm ** 0.5 feature_norm = feature_norm ** 0.5 head_norm = head_norm ** 0.5 tb_writer.add_scalar('grad/tot', tot_norm, tb_index) tb_writer.add_scalar('grad/feature', feature_norm, tb_index) tb_writer.add_scalar('grad/head', head_norm, tb_index) def train(train_loader, model, optimizer, lr_scheduler, tb_writer): cur_lr = lr_scheduler.get_cur_lr() rank = get_rank() average_meter = AverageMeter() def is_valid_number(x): return not(math.isnan(x) or math.isinf(x) or x > 1e4) world_size = get_world_size() num_per_epoch = len(train_loader.dataset) // \ cfg.TRAIN.EPOCH // (cfg.TRAIN.BATCH_SIZE * world_size) start_epoch = cfg.TRAIN.START_EPOCH epoch = start_epoch if not os.path.exists(cfg.TRAIN.SNAPSHOT_DIR) and \ get_rank() == 0: os.makedirs(cfg.TRAIN.SNAPSHOT_DIR) logger.info("model\n{}".format(describe(model.module))) end = time.time() for idx, data in enumerate(train_loader): if epoch != idx // num_per_epoch + start_epoch: epoch = idx // num_per_epoch + start_epoch if get_rank() == 0: torch.save( {'epoch': epoch, 'state_dict': model.module.state_dict(), 'optimizer': optimizer.state_dict()}, cfg.TRAIN.SNAPSHOT_DIR+'/checkpoint_e%d.pth' % (epoch)) if epoch == cfg.TRAIN.EPOCH: return if cfg.BACKBONE.TRAIN_EPOCH == epoch: logger.info('start training backbone.') optimizer, lr_scheduler = build_opt_lr(model.module, epoch) logger.info("model\n{}".format(describe(model.module))) lr_scheduler.step(epoch) cur_lr = lr_scheduler.get_cur_lr() logger.info('epoch: {}'.format(epoch+1)) tb_idx = idx + start_epoch * num_per_epoch if idx % num_per_epoch == 0 and idx != 0: for idx, pg in enumerate(optimizer.param_groups): logger.info('epoch {} lr {}'.format(epoch+1, pg['lr'])) if rank == 0: tb_writer.add_scalar('lr/group{}'.format(idx+1), pg['lr'], tb_idx) data_time = average_reduce(time.time() - end) if rank == 0: tb_writer.add_scalar('time/data', data_time, tb_idx) outputs = model(data) loss = outputs['total_loss'] if is_valid_number(loss.data.item()): optimizer.zero_grad() loss.backward() reduce_gradients(model) if rank == 0 and cfg.TRAIN.LOG_GRADS: log_grads(model.module, tb_writer, tb_idx) # clip gradient clip_grad_norm_(model.parameters(), cfg.TRAIN.GRAD_CLIP) optimizer.step() batch_time = time.time() - end batch_info = {} batch_info['batch_time'] = average_reduce(batch_time) batch_info['data_time'] = average_reduce(data_time) for k, v in sorted(outputs.items()): batch_info[k] = average_reduce(v.data.item()) average_meter.update(**batch_info) if rank == 0: for k, v in batch_info.items(): tb_writer.add_scalar(k, v, tb_idx) if (idx+1) % cfg.TRAIN.PRINT_FREQ == 0: info = "Epoch: [{}][{}/{}] lr: {:.6f}\n".format( epoch+1, (idx+1) % num_per_epoch, num_per_epoch, cur_lr) for cc, (k, v) in enumerate(batch_info.items()): if cc % 2 == 0: info += ("\t{:s}\t").format( getattr(average_meter, k)) else: info += ("{:s}\n").format( getattr(average_meter, k)) logger.info(info) print_speed(idx+1+start_epoch*num_per_epoch, average_meter.batch_time.avg, cfg.TRAIN.EPOCH * num_per_epoch) end = time.time() def main(): rank, world_size = dist_init() logger.info("init done") # load cfg cfg.merge_from_file(args.cfg) if rank == 
0: if not os.path.exists(cfg.TRAIN.LOG_DIR): os.makedirs(cfg.TRAIN.LOG_DIR) init_log('global', logging.INFO) if cfg.TRAIN.LOG_DIR: add_file_handler('global', os.path.join(cfg.TRAIN.LOG_DIR, 'logs.txt'), logging.INFO) logger.info("Version Information: \n{}\n".format(commit())) logger.info("config \n{}".format(json.dumps(cfg, indent=4))) # create model model = ModelBuilder().cuda().train() # dist_model = DistModule(model) # load pretrained backbone weights if cfg.BACKBONE.PRETRAINED: cur_path = os.path.dirname(os.path.realpath(__file__)) backbone_path = os.path.join(cur_path, '../', cfg.BACKBONE.PRETRAINED) load_pretrain(model.backbone, backbone_path) # create tensorboard writer if rank == 0 and cfg.TRAIN.LOG_DIR: tb_writer = SummaryWriter(cfg.TRAIN.LOG_DIR) else: tb_writer = None # build dataset loader train_loader = build_data_loader() # build optimizer and lr_scheduler optimizer, lr_scheduler = build_opt_lr(model, cfg.TRAIN.START_EPOCH) # resume training if cfg.TRAIN.RESUME: logger.info("resume from {}".format(cfg.TRAIN.RESUME)) assert os.path.isfile(cfg.TRAIN.RESUME), \ '{} is not a valid file.'.format(cfg.TRAIN.RESUME) model, optimizer, cfg.TRAIN.START_EPOCH = \ restore_from(model, optimizer, cfg.TRAIN.RESUME) # load pretrain elif cfg.TRAIN.PRETRAINED: load_pretrain(model, cfg.TRAIN.PRETRAINED) dist_model = DistModule(model) logger.info(lr_scheduler) logger.info("model prepare done") # start training train(train_loader, dist_model, optimizer, lr_scheduler, tb_writer) if __name__ == '__main__': seed_torch(args.seed) main()
[]
[]
[ "PYTHONHASHSEED" ]
[]
["PYTHONHASHSEED"]
python
1
0
django_travis_setup/django_travis_setup/wsgi.py
""" WSGI config for django_travis_setup project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_travis_setup.settings") application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
ansible/roles/db/molecule/default/tests/test_default.py
import os import testinfra.utils.ansible_runner testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') # check if MongoDB is enabled and running def test_mongo_running_and_enabled(host): mongo = host.service("mongod") assert mongo.is_running assert mongo.is_enabled # check if configuration file contains the required line def test_config_file(host): config_file = host.file('/etc/mongod.conf') assert config_file.contains('bindIp: 0.0.0.0') assert config_file.is_file def test_mongo_socket(host): assert host.socket("tcp://0.0.0.0:27017").is_listening
[]
[]
[ "MOLECULE_INVENTORY_FILE" ]
[]
["MOLECULE_INVENTORY_FILE"]
python
1
0
ss_baselines/savi/ppo/slurm_utils.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import os import shlex import signal import subprocess import threading from os import path as osp from typing import Any, Optional, Tuple import ifcfg import torch from habitat import logger EXIT = threading.Event() EXIT.clear() REQUEUE = threading.Event() REQUEUE.clear() MAIN_PID = os.getpid() SLURM_JOBID = os.environ.get("SLURM_JOB_ID", None) INTERRUPTED_STATE_FILE = osp.join( os.environ["HOME"], ".interrupted_states", f"{SLURM_JOBID}.pth" ) def _clean_exit_handler(signum, frame): EXIT.set() print("Exiting cleanly", flush=True) def _requeue_handler(signal, frame): print("Got signal to requeue", flush=True) EXIT.set() REQUEUE.set() def add_signal_handlers(): signal.signal(signal.SIGINT, _clean_exit_handler) signal.signal(signal.SIGTERM, _clean_exit_handler) # SIGUSR2 can be sent to all processes to have them cleanup # and exit nicely. This is nice to use with SLURM as scancel <job_id> # sets a 30 second timer for the job to exit, and it can take more than # 30 seconds for the job to cleanup and exit nicely. When using NCCL, # forcing the job to exit without cleaning up can be bad. # scancel --signal SIGUSR2 <job_id> will set no such timer and will give # the job ample time to cleanup and exit. signal.signal(signal.SIGUSR2, _clean_exit_handler) signal.signal(signal.SIGUSR1, _requeue_handler) def save_interrupted_state(state: Any, filename: str = None, model_dir: str = None): r"""Saves the interrupted job state to the specified filename. This is useful when working with preemptable job partitions. This method will do nothing if SLURM is not currently being used and the filename is the default :param state: The state to save :param filename: The filename. Defaults to "${HOME}/.interrupted_states/${SLURM_JOBID}.pth" """ if SLURM_JOBID is None and filename is None: logger.warn("SLURM_JOBID is none, not saving interrupted state") return if filename is None: if model_dir is not None: filename = os.path.join(model_dir, 'interrupted_state.pth') else: filename = INTERRUPTED_STATE_FILE torch.save(state, filename) def load_interrupted_state(filename: str = None, model_dir: str = None) -> Optional[Any]: r"""Loads the saved interrupted state :param filename: The filename of the saved state. Defaults to "${HOME}/.interrupted_states/${SLURM_JOBID}.pth" :return: The saved state if the file exists, else none """ if SLURM_JOBID is None and filename is None: return None if filename is None: if model_dir is not None: filename = os.path.join(model_dir, 'interrupted_state.pth') else: filename = INTERRUPTED_STATE_FILE if not osp.exists(filename): return None return torch.load(filename, map_location="cpu") def requeue_job(): r"""Requeue the job by calling ``scontrol requeue ${SLURM_JOBID}``""" if SLURM_JOBID is None: return if os.environ['SLURM_PROCID'] == '0' and os.getpid() == MAIN_PID: logger.info(f"Requeueing job {SLURM_JOBID}") subprocess.check_call(shlex.split(f"scontrol requeue {SLURM_JOBID}")) def get_ifname(): return ifcfg.default_interface()["device"]
[]
[]
[ "SLURM_JOB_ID", "HOME", "SLURM_PROCID" ]
[]
["SLURM_JOB_ID", "HOME", "SLURM_PROCID"]
python
3
0
util/progress/progressui/printer.go
package progressui import ( "container/ring" "context" "fmt" "io" "os" "sort" "strings" "time" digest "github.com/opencontainers/go-digest" "github.com/tonistiigi/units" ) const antiFlicker = 5 * time.Second const maxDelay = 10 * time.Second const minTimeDelta = 5 * time.Second const minProgressDelta = 0.05 // % const logsBufferSize = 10 type lastStatus struct { Current int64 Timestamp time.Time } type textMux struct { w io.Writer current digest.Digest last map[string]lastStatus notFirst bool nextIndex int } func (p *textMux) printVtx(t *trace, dgst digest.Digest) { if p.last == nil { p.last = make(map[string]lastStatus) } v, ok := t.byDigest[dgst] if !ok { return } if v.index == 0 { p.nextIndex++ v.index = p.nextIndex } if dgst != p.current { if p.current != "" { old := t.byDigest[p.current] if old.logsPartial { fmt.Fprintln(p.w, "") } old.logsOffset = 0 old.count = 0 fmt.Fprintf(p.w, "#%d ...\n", old.index) } if p.notFirst { fmt.Fprintln(p.w, "") } else { p.notFirst = true } if os.Getenv("PROGRESS_NO_TRUNC") == "0" { fmt.Fprintf(p.w, "#%d %s\n", v.index, limitString(v.Name, 72)) } else { fmt.Fprintf(p.w, "#%d %s\n", v.index, v.Name) } } if len(v.events) != 0 { v.logsOffset = 0 } for _, ev := range v.events { fmt.Fprintf(p.w, "#%d %s\n", v.index, ev) } v.events = v.events[:0] isOpenStatus := false // remote cache loading can currently produce status updates without active vertex for _, s := range v.statuses { if _, ok := v.statusUpdates[s.ID]; ok { doPrint := true if last, ok := p.last[s.ID]; ok && s.Completed == nil { var progressDelta float64 if s.Total > 0 { progressDelta = float64(s.Current-last.Current) / float64(s.Total) } timeDelta := s.Timestamp.Sub(last.Timestamp) if progressDelta < minProgressDelta && timeDelta < minTimeDelta { doPrint = false } } if !doPrint { continue } p.last[s.ID] = lastStatus{ Timestamp: s.Timestamp, Current: s.Current, } var bytes string if s.Total != 0 { bytes = fmt.Sprintf(" %.2f / %.2f", units.Bytes(s.Current), units.Bytes(s.Total)) } else if s.Current != 0 { bytes = fmt.Sprintf(" %.2f", units.Bytes(s.Current)) } var tm string endTime := s.Timestamp if s.Completed != nil { endTime = *s.Completed } if s.Started != nil { diff := endTime.Sub(*s.Started).Seconds() if diff > 0.01 { tm = fmt.Sprintf(" %.1fs", diff) } } if s.Completed != nil { tm += " done" } else { isOpenStatus = true } fmt.Fprintf(p.w, "#%d %s%s%s\n", v.index, s.ID, bytes, tm) } } v.statusUpdates = map[string]struct{}{} for _, w := range v.warnings[v.warningIdx:] { fmt.Fprintf(p.w, "#%d WARN: %s\n", v.index, w.Short) v.warningIdx++ } for i, l := range v.logs { if i == 0 { l = l[v.logsOffset:] } fmt.Fprintf(p.w, "%s", []byte(l)) if i != len(v.logs)-1 || !v.logsPartial { fmt.Fprintln(p.w, "") } if v.logsBuffer == nil { v.logsBuffer = ring.New(logsBufferSize) } v.logsBuffer.Value = l if !v.logsPartial { v.logsBuffer = v.logsBuffer.Next() } } if len(v.logs) > 0 { if v.logsPartial { v.logs = v.logs[len(v.logs)-1:] v.logsOffset = len(v.logs[0]) } else { v.logs = nil v.logsOffset = 0 } } p.current = dgst if v.isCompleted() && !isOpenStatus { p.current = "" v.count = 0 if v.Error != "" { if v.logsPartial { fmt.Fprintln(p.w, "") } if strings.HasSuffix(v.Error, context.Canceled.Error()) { fmt.Fprintf(p.w, "#%d CANCELED\n", v.index) } else { fmt.Fprintf(p.w, "#%d ERROR: %s\n", v.index, v.Error) } } else if v.Cached { fmt.Fprintf(p.w, "#%d CACHED\n", v.index) } else { tm := "" var ivals []interval for _, ival := range v.intervals { ivals = append(ivals, ival) } ivals = mergeIntervals(ivals) if 
len(ivals) > 0 { var dt float64 for _, ival := range ivals { dt += ival.duration().Seconds() } tm = fmt.Sprintf(" %.1fs", dt) } fmt.Fprintf(p.w, "#%d DONE%s\n", v.index, tm) } } delete(t.updates, dgst) } func sortCompleted(t *trace, m map[digest.Digest]struct{}) []digest.Digest { out := make([]digest.Digest, 0, len(m)) for k := range m { out = append(out, k) } sort.Slice(out, func(i, j int) bool { vtxi := t.byDigest[out[i]] vtxj := t.byDigest[out[j]] return vtxi.mostRecentInterval().stop.Before(*vtxj.mostRecentInterval().stop) }) return out } func (p *textMux) print(t *trace) { completed := map[digest.Digest]struct{}{} rest := map[digest.Digest]struct{}{} for dgst := range t.updates { v, ok := t.byDigest[dgst] if !ok { continue } if v.ProgressGroup != nil || v.hidden { // skip vtxs in a group (they are merged into a single vtx) and hidden ones continue } if v.isCompleted() { completed[dgst] = struct{}{} } else { rest[dgst] = struct{}{} } } current := p.current // items that have completed need to be printed first if _, ok := completed[current]; ok { p.printVtx(t, current) } for _, dgst := range sortCompleted(t, completed) { if dgst != current { p.printVtx(t, dgst) } } if len(rest) == 0 { if current != "" { if v := t.byDigest[current]; v.isStarted() && !v.isCompleted() { return } } // make any open vertex active for dgst, v := range t.byDigest { if v.isStarted() && !v.isCompleted() { p.printVtx(t, dgst) return } } return } // now print the active one if _, ok := rest[current]; ok { p.printVtx(t, current) } stats := map[digest.Digest]*vtxStat{} now := time.Now() sum := 0.0 var max digest.Digest if current != "" { rest[current] = struct{}{} } for dgst := range rest { v, ok := t.byDigest[dgst] if !ok { continue } tm := now.Sub(*v.lastBlockTime) speed := float64(v.count) / tm.Seconds() overLimit := tm > maxDelay && dgst != current stats[dgst] = &vtxStat{blockTime: tm, speed: speed, overLimit: overLimit} sum += speed if overLimit || max == "" || stats[max].speed < speed { max = dgst } } for dgst := range stats { stats[dgst].share = stats[dgst].speed / sum } if _, ok := completed[current]; ok || current == "" { p.printVtx(t, max) return } // show items that were hidden for dgst := range rest { if stats[dgst].overLimit { p.printVtx(t, dgst) return } } // fair split between vertexes if 1.0/(1.0-stats[current].share)*antiFlicker.Seconds() < stats[current].blockTime.Seconds() { p.printVtx(t, max) return } } type vtxStat struct { blockTime time.Duration speed float64 share float64 overLimit bool } func limitString(s string, l int) string { if len(s) > l { return s[:l] + "..." } return s }
[ "\"PROGRESS_NO_TRUNC\"" ]
[]
[ "PROGRESS_NO_TRUNC" ]
[]
["PROGRESS_NO_TRUNC"]
go
1
0
tools/invalid.py
import sys import os sys.path.insert(0, os.path.dirname(__file__) + '/..') os.environ.setdefault("DJANGO_SETTINGS_MODULE", "monkeys.settings") import django django.setup() from typer.models import Die, DieImage, TypedDie ''' http://cs.sipr0n.org/typer/Sega_315-5571_xpol/adminSummary/467/ xwins 17-07-24 1:11 says its length 0 weird ''' USER = None # assumed placeholder: USER was undefined in the original script; set to a submitter name to filter print('Querying...') for td in TypedDie.objects.all(): if USER and str(td.submitter) != USER: continue k = str(td.dieImage.image) if k.find('sega_315-5571_xpol_14_18.png') < 0: continue print('got it') tf = str(td.typedField) print(len(tf)) print(tf)
[]
[]
[]
[]
[]
python
0
0
cmd/gardener-extension-provider-vsphere/app/app.go
/* * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * */ package app import ( "context" "fmt" "os" druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1" "github.com/gardener/gardener/extensions/pkg/controller" controllercmd "github.com/gardener/gardener/extensions/pkg/controller/cmd" "github.com/gardener/gardener/extensions/pkg/controller/worker" "github.com/gardener/gardener/extensions/pkg/util" webhookcmd "github.com/gardener/gardener/extensions/pkg/webhook/cmd" "github.com/pkg/errors" vsphereinstall "github.com/gardener/gardener-extension-provider-vsphere/pkg/apis/vsphere/install" vspherecmd "github.com/gardener/gardener-extension-provider-vsphere/pkg/cmd" vspherecontrolplane "github.com/gardener/gardener-extension-provider-vsphere/pkg/controller/controlplane" "github.com/gardener/gardener-extension-provider-vsphere/pkg/controller/healthcheck" vsphereinfrastructure "github.com/gardener/gardener-extension-provider-vsphere/pkg/controller/infrastructure" vsphereworker "github.com/gardener/gardener-extension-provider-vsphere/pkg/controller/worker" "github.com/gardener/gardener-extension-provider-vsphere/pkg/vsphere" vspherecontrolplaneexposure "github.com/gardener/gardener-extension-provider-vsphere/pkg/webhook/controlplaneexposure" machinev1alpha1 "github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" autoscalingv1beta2 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" "k8s.io/component-base/version/verflag" "sigs.k8s.io/controller-runtime/pkg/manager" ) // NewControllerManagerCommand creates a new command for running a vSphere provider controller. 
func NewControllerManagerCommand(ctx context.Context) *cobra.Command { var ( restOpts = &controllercmd.RESTOptions{} mgrOpts = &controllercmd.ManagerOptions{ LeaderElection: true, LeaderElectionID: controllercmd.LeaderElectionNameID(vsphere.Name), LeaderElectionNamespace: os.Getenv("LEADER_ELECTION_NAMESPACE"), WebhookServerPort: 443, } configFileOpts = &vspherecmd.ConfigOptions{} // options for the infrastructure controller infraCtrlOpts = &controllercmd.ControllerOptions{ MaxConcurrentReconciles: 5, } reconcileOpts = &controllercmd.ReconcilerOptions{} // options for the health care controller healthCareCtrlOpts = &controllercmd.ControllerOptions{ MaxConcurrentReconciles: 5, } // options for the control plane controller controlPlaneCtrlOpts = &controllercmd.ControllerOptions{ MaxConcurrentReconciles: 5, } // options for the worker controller workerCtrlOpts = &controllercmd.ControllerOptions{ MaxConcurrentReconciles: 5, } workerReconcileOpts = &worker.Options{ DeployCRDs: true, } workerCtrlOptsUnprefixed = controllercmd.NewOptionAggregator(workerCtrlOpts, workerReconcileOpts) // options for the webhook server webhookServerOptions = &webhookcmd.ServerOptions{ Namespace: os.Getenv("WEBHOOK_CONFIG_NAMESPACE"), } controllerSwitches = vspherecmd.ControllerSwitchOptions() webhookSwitches = vspherecmd.WebhookSwitchOptions() webhookOptions = webhookcmd.NewAddToManagerOptions(vsphere.Name, webhookServerOptions, webhookSwitches) aggOption = controllercmd.NewOptionAggregator( restOpts, mgrOpts, controllercmd.PrefixOption("controlplane-", controlPlaneCtrlOpts), controllercmd.PrefixOption("infrastructure-", infraCtrlOpts), controllercmd.PrefixOption("worker-", &workerCtrlOptsUnprefixed), controllercmd.PrefixOption("healthcheck-", healthCareCtrlOpts), controllerSwitches, configFileOpts, reconcileOpts, webhookOptions, ) ) cmd := &cobra.Command{ Use: fmt.Sprintf("%s-controller-manager", vsphere.Name), RunE: func(cmd *cobra.Command, args []string) error { verflag.PrintAndExitIfRequested() if err := aggOption.Complete(); err != nil { return errors.Wrap(err, "Error completing options") } util.ApplyClientConnectionConfigurationToRESTConfig(configFileOpts.Completed().Config.ClientConnection, restOpts.Completed().Config) if workerReconcileOpts.Completed().DeployCRDs { if err := worker.ApplyMachineResourcesForConfig(ctx, restOpts.Completed().Config); err != nil { return errors.Wrap(err, "Error ensuring the machine CRDs") } } mgr, err := manager.New(restOpts.Completed().Config, mgrOpts.Completed().Options()) if err != nil { return errors.Wrap(err, "Could not instantiate manager") } scheme := mgr.GetScheme() if err := controller.AddToScheme(scheme); err != nil { return errors.Wrap(err, "Could not update manager scheme") } if err := vsphereinstall.AddToScheme(scheme); err != nil { return errors.Wrap(err, "Could not update manager scheme") } if err := druidv1alpha1.AddToScheme(scheme); err != nil { return errors.Wrap(err, "Could not update manager scheme") } if err := machinev1alpha1.AddToScheme(scheme); err != nil { return errors.Wrap(err, "Could not update manager scheme") } if err := autoscalingv1beta2.AddToScheme(scheme); err != nil { return errors.Wrap(err, "Could not update manager scheme") } // add common meta types to schema for controller-runtime to use v1.ListOptions metav1.AddToGroupVersion(scheme, machinev1alpha1.SchemeGroupVersion) configFileOpts.Completed().ApplyETCDStorage(&vspherecontrolplaneexposure.DefaultAddOptions.ETCDStorage) 
configFileOpts.Completed().ApplyGardenId(&vspherecontrolplane.DefaultAddOptions.GardenId) configFileOpts.Completed().ApplyGardenId(&vsphereinfrastructure.DefaultAddOptions.GardenId) configFileOpts.Completed().ApplyHealthCheckConfig(&healthcheck.DefaultAddOptions.HealthCheckConfig) healthCareCtrlOpts.Completed().Apply(&healthcheck.DefaultAddOptions.Controller) controlPlaneCtrlOpts.Completed().Apply(&vspherecontrolplane.DefaultAddOptions.Controller) infraCtrlOpts.Completed().Apply(&vsphereinfrastructure.DefaultAddOptions.Controller) reconcileOpts.Completed().Apply(&vsphereinfrastructure.DefaultAddOptions.IgnoreOperationAnnotation) reconcileOpts.Completed().Apply(&vspherecontrolplane.DefaultAddOptions.IgnoreOperationAnnotation) reconcileOpts.Completed().Apply(&vsphereworker.DefaultAddOptions.IgnoreOperationAnnotation) workerCtrlOpts.Completed().Apply(&vsphereworker.DefaultAddOptions.Controller) if _, _, err := webhookOptions.Completed().AddToManager(mgr); err != nil { return errors.Wrap(err, "Could not add webhooks to manager") } if err := controllerSwitches.Completed().AddToManager(mgr); err != nil { return errors.Wrap(err, "Could not add controllers to manager") } if err := mgr.Start(ctx); err != nil { return errors.Wrap(err, "Error running manager") } return nil }, } flags := cmd.Flags() aggOption.AddFlags(flags) verflag.AddFlags(flags) return cmd }
[ "\"LEADER_ELECTION_NAMESPACE\"", "\"WEBHOOK_CONFIG_NAMESPACE\"" ]
[]
[ "LEADER_ELECTION_NAMESPACE", "WEBHOOK_CONFIG_NAMESPACE" ]
[]
["LEADER_ELECTION_NAMESPACE", "WEBHOOK_CONFIG_NAMESPACE"]
go
2
0
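A minimal sketch (Python; namespace values and binary name are hypothetical) of supplying the two variables recorded above, which the Go code reads via os.Getenv when building the leader-election and webhook options:

import os
import subprocess

env = dict(os.environ)
env["LEADER_ELECTION_NAMESPACE"] = "garden"                      # hypothetical namespace
env["WEBHOOK_CONFIG_NAMESPACE"] = "extension-provider-vsphere"   # hypothetical namespace
# Hypothetical name for the controller-manager binary built from the package above.
subprocess.run(["./provider-vsphere-controller-manager"], env=env, check=True)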
storage_test.go
/* Copyright The Helm Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package storage import ( "fmt" "os" "testing" "time" "github.com/stretchr/testify/suite" ) type StorageTestSuite struct { suite.Suite StorageBackends map[string]Backend TempDirectory string } func (suite *StorageTestSuite) setupStorageBackends() { timestamp := time.Now().Format("20060102150405") suite.TempDirectory = fmt.Sprintf("../../.test/storage-storage/%s", timestamp) suite.StorageBackends = make(map[string]Backend) suite.StorageBackends["LocalFilesystem"] = Backend(NewLocalFilesystemBackend(suite.TempDirectory)) // create empty dir in local storage to make sure it doesnt end up in ListObjects err := os.MkdirAll(fmt.Sprintf("%s/%s", suite.TempDirectory, "ignoreme"), 0777) suite.Nil(err, "No error creating ignored dir in local storage") if os.Getenv("TEST_CLOUD_STORAGE") == "1" { prefix := fmt.Sprintf("unittest/%s", timestamp) s3Bucket := os.Getenv("TEST_STORAGE_AMAZON_BUCKET") s3Region := os.Getenv("TEST_STORAGE_AMAZON_REGION") gcsBucket := os.Getenv("TEST_STORAGE_GOOGLE_BUCKET") blobContainer := os.Getenv("TEST_STORAGE_MICROSOFT_CONTAINER") ossBucket := os.Getenv("TEST_STORAGE_ALIBABA_BUCKET") ossEndpoint := os.Getenv("TEST_STORAGE_ALIBABA_ENDPOINT") osContainer := os.Getenv("TEST_STORAGE_OPENSTACK_CONTAINER") osRegion := os.Getenv("TEST_STORAGE_OPENSTACK_REGION") ocsBucket := os.Getenv("TEST_STORAGE_ORACLE_BUCKET") ocsRegion := os.Getenv("TEST_STORAGE_ORACLE_REGION") ocsCompartmentId := os.Getenv("TEST_STORAGE_ORACLE_COMPARTMENTID") bosBucket := os.Getenv("TEST_STORAGE_BAIDU_BUCKET") bosEndpoint := os.Getenv("TEST_STORAGE_BAIDU_ENDPOINT") if s3Bucket != "" && s3Region != "" { suite.StorageBackends["AmazonS3"] = Backend(NewAmazonS3Backend(s3Bucket, prefix, s3Region, "", "")) } if gcsBucket != "" { suite.StorageBackends["GoogleCS"] = Backend(NewGoogleCSBackend(gcsBucket, prefix)) } if blobContainer != "" { suite.StorageBackends["MicrosoftBlob"] = Backend(NewMicrosoftBlobBackend(blobContainer, prefix)) } if ossBucket != "" { suite.StorageBackends["AlibabaCloudOSS"] = Backend(NewAlibabaCloudOSSBackend(ossBucket, prefix, ossEndpoint, "")) } if osContainer != "" { suite.StorageBackends["OpenStackOS"] = Backend(NewOpenstackOSBackend(osContainer, prefix, osRegion, "")) } if ocsBucket != "" { suite.StorageBackends["OracleCS"] = Backend(NewOracleCSBackend(ocsBucket, prefix, ocsRegion, ocsCompartmentId)) } if bosBucket != "" { suite.StorageBackends["BaiduCloudBOS"] = Backend(NewBaiDuBOSBackend(bosBucket, prefix, bosEndpoint)) } } } func (suite *StorageTestSuite) SetupSuite() { suite.setupStorageBackends() for i := 1; i <= 9; i++ { data := []byte(fmt.Sprintf("test content %d", i)) path := fmt.Sprintf("test%d.txt", i) for key, backend := range suite.StorageBackends { err := backend.PutObject(path, data) message := fmt.Sprintf("no error putting object %s using %s backend", path, key) suite.Nil(err, message) } } for key, backend := range suite.StorageBackends { if key == "LocalFilesystem" { continue } data := []byte("skipped object") path := 
"this/is/a/skipped/object.txt" err := backend.PutObject(path, data) message := fmt.Sprintf("no error putting skipped object %s using %s backend", path, key) suite.Nil(err, message) } } func (suite *StorageTestSuite) TearDownSuite() { defer os.RemoveAll(suite.TempDirectory) for i := 1; i <= 9; i++ { path := fmt.Sprintf("test%d.txt", i) for key, backend := range suite.StorageBackends { err := backend.DeleteObject(path) message := fmt.Sprintf("no error deleting object %s using %s backend", path, key) suite.Nil(err, message) } } for key, backend := range suite.StorageBackends { if key == "LocalFilesystem" { continue } path := "this/is/a/skipped/object.txt" err := backend.DeleteObject(path) message := fmt.Sprintf("no error deleting skipped object %s using %s backend", path, key) suite.Nil(err, message) } } func (suite *StorageTestSuite) TestListObjects() { for key, backend := range suite.StorageBackends { objects, err := backend.ListObjects("") message := fmt.Sprintf("no error listing objects using %s backend", key) suite.Nil(err, message) expectedNumObjects := 9 message = fmt.Sprintf("%d objects listed using %s backend", expectedNumObjects, key) suite.Equal(expectedNumObjects, len(objects), message) for i, object := range objects { path := fmt.Sprintf("test%d.txt", (i + 1)) message = fmt.Sprintf("object %s found in list objects using %s backend", path, key) suite.Equal(path, object.Path, message) } } } func (suite *StorageTestSuite) TestGetObject() { for key, backend := range suite.StorageBackends { for i := 1; i <= 9; i++ { path := fmt.Sprintf("test%d.txt", i) object, err := backend.GetObject(path) message := fmt.Sprintf("no error getting object %s using %s backend", path, key) suite.Nil(err, message) message = fmt.Sprintf("object %s content as expected using %s backend", path, key) suite.Equal(object.Content, []byte(fmt.Sprintf("test content %d", i)), message) } } } func (suite *StorageTestSuite) TestHasSuffix() { now := time.Now() o1 := Object{ Path: "mychart-0.1.0.tgz", Content: []byte{}, LastModified: now, } suite.True(o1.HasExtension("tgz"), "object has tgz suffix") o2 := Object{ Path: "mychart-0.1.0.txt", Content: []byte{}, LastModified: now, } suite.False(o2.HasExtension("tgz"), "object does not have tgz suffix") } func (suite *StorageTestSuite) TestGetObjectSliceDiff() { now := time.Now() os1 := []Object{ { Path: "test1.txt", Content: []byte{}, LastModified: now, }, } os2 := []Object{} diff := GetObjectSliceDiff(os1, os2) suite.True(diff.Change, "change detected") suite.Equal(diff.Removed, os1, "removed slice populated") suite.Empty(diff.Added, "added slice empty") suite.Empty(diff.Updated, "updated slice empty") os2 = append(os2, os1[0]) diff = GetObjectSliceDiff(os1, os2) suite.False(diff.Change, "no change detected") suite.Empty(diff.Removed, "removed slice empty") suite.Empty(diff.Added, "added slice empty") suite.Empty(diff.Updated, "updated slice empty") os2[0].LastModified = now.Add(1) diff = GetObjectSliceDiff(os1, os2) suite.True(diff.Change, "change detected") suite.Empty(diff.Removed, "removed slice empty") suite.Empty(diff.Added, "added slice empty") suite.Equal(diff.Updated, os2, "updated slice populated") os2[0].LastModified = now os2 = append(os2, Object{ Path: "test2.txt", Content: []byte{}, LastModified: now, }) diff = GetObjectSliceDiff(os1, os2) suite.True(diff.Change, "change detected") suite.Empty(diff.Removed, "removed slice empty") suite.Equal(diff.Added, []Object{os2[1]}, "added slice empty") suite.Empty(diff.Updated, "updated slice empty") } func 
TestStorageTestSuite(t *testing.T) { suite.Run(t, new(StorageTestSuite)) }
[ "\"TEST_CLOUD_STORAGE\"", "\"TEST_STORAGE_AMAZON_BUCKET\"", "\"TEST_STORAGE_AMAZON_REGION\"", "\"TEST_STORAGE_GOOGLE_BUCKET\"", "\"TEST_STORAGE_MICROSOFT_CONTAINER\"", "\"TEST_STORAGE_ALIBABA_BUCKET\"", "\"TEST_STORAGE_ALIBABA_ENDPOINT\"", "\"TEST_STORAGE_OPENSTACK_CONTAINER\"", "\"TEST_STORAGE_OPENSTACK_REGION\"", "\"TEST_STORAGE_ORACLE_BUCKET\"", "\"TEST_STORAGE_ORACLE_REGION\"", "\"TEST_STORAGE_ORACLE_COMPARTMENTID\"", "\"TEST_STORAGE_BAIDU_BUCKET\"", "\"TEST_STORAGE_BAIDU_ENDPOINT\"" ]
[]
[ "TEST_STORAGE_ORACLE_COMPARTMENTID", "TEST_STORAGE_BAIDU_BUCKET", "TEST_STORAGE_OPENSTACK_REGION", "TEST_STORAGE_AMAZON_BUCKET", "TEST_STORAGE_AMAZON_REGION", "TEST_STORAGE_ALIBABA_ENDPOINT", "TEST_STORAGE_OPENSTACK_CONTAINER", "TEST_STORAGE_GOOGLE_BUCKET", "TEST_STORAGE_ORACLE_BUCKET", "TEST_STORAGE_ORACLE_REGION", "TEST_STORAGE_BAIDU_ENDPOINT", "TEST_STORAGE_ALIBABA_BUCKET", "TEST_STORAGE_MICROSOFT_CONTAINER", "TEST_CLOUD_STORAGE" ]
[]
["TEST_STORAGE_ORACLE_COMPARTMENTID", "TEST_STORAGE_BAIDU_BUCKET", "TEST_STORAGE_OPENSTACK_REGION", "TEST_STORAGE_AMAZON_BUCKET", "TEST_STORAGE_AMAZON_REGION", "TEST_STORAGE_ALIBABA_ENDPOINT", "TEST_STORAGE_OPENSTACK_CONTAINER", "TEST_STORAGE_GOOGLE_BUCKET", "TEST_STORAGE_ORACLE_BUCKET", "TEST_STORAGE_ORACLE_REGION", "TEST_STORAGE_BAIDU_ENDPOINT", "TEST_STORAGE_ALIBABA_BUCKET", "TEST_STORAGE_MICROSOFT_CONTAINER", "TEST_CLOUD_STORAGE"]
go
14
0
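A minimal sketch (Python; bucket name and package path are hypothetical) of how the variables above gate the suite: TEST_CLOUD_STORAGE must equal "1", and a cloud backend is only registered when its bucket (and, for S3, region) variable is non-empty, so setting just the Amazon pair exercises S3 in addition to the local filesystem:

import os
import subprocess

env = dict(os.environ)
env["TEST_CLOUD_STORAGE"] = "1"
env["TEST_STORAGE_AMAZON_BUCKET"] = "chartmuseum-unittest-bucket"  # hypothetical bucket
env["TEST_STORAGE_AMAZON_REGION"] = "us-east-1"
subprocess.run(["go", "test", "./pkg/storage/..."], env=env, check=True)  # hypothetical package path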
molecule/default/tests/test_default.py
import os import pytest import testinfra.utils.ansible_runner testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') @pytest.mark.parametrize("server, redhat, debian", [ ("zabbix-server-pgsql", "zabbix-web-pgsql", "zabbix-frontend-php"), ("zabbix-server-mysql", "zabbix-web-mysql", "zabbix-frontend-php"), ]) def test_zabbix_package(host, server, redhat, debian): hostname = host.backend.get_hostname() hostname = hostname.replace("-centos", "") hostname = hostname.replace("-debian", "") hostname = hostname.replace("-ubuntu", "") if hostname == server: if host.system_info.distribution in ['debian', 'ubuntu']: zabbix_web = host.package(debian) assert zabbix_web.version.startswith("1:4.4") elif host.system_info.distribution == 'centos': zabbix_web = host.package(redhat) assert zabbix_web.version.startswith("4.4") assert zabbix_web.is_installed def test_zabbix_web(host): zabbix_web = host.file("/etc/zabbix/web/zabbix.conf.php") if host.system_info.distribution in ['debian', 'ubuntu']: assert zabbix_web.user == "www-data" assert zabbix_web.group == "www-data" elif host.system_info.distribution == 'centos': assert zabbix_web.user == "apache" assert zabbix_web.group == "apache" assert zabbix_web.mode == 0o640 def test_zabbix_api(host): my_host = host.ansible.get_variables() zabbix_url = str(my_host['zabbix_url']) hostname = 'http://' + zabbix_url + '/api_jsonrpc.php' post_data = '{"jsonrpc": "2.0", "method": "user.login", "params": { "user": "Admin", "password": "zabbix" }, "id": 1, "auth": null}' headers = 'Content-Type: application/json-rpc' command = "curl -XPOST -H '" + str(headers) + "' -d '" + str(post_data) + "' '" + hostname + "'" cmd = host.run(command) assert '"jsonrpc":"2.0","result":"' in cmd.stdout
[]
[]
[ "MOLECULE_INVENTORY_FILE" ]
[]
["MOLECULE_INVENTORY_FILE"]
python
1
0
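MOLECULE_INVENTORY_FILE is normally exported by Molecule itself before it runs the verifier; a minimal sketch (Python; the inventory path is hypothetical) of invoking the test module above directly against an already converged instance:

import os
import subprocess

env = dict(os.environ)
env["MOLECULE_INVENTORY_FILE"] = "/tmp/molecule/default/inventory"  # hypothetical Ansible inventory path
subprocess.run(["pytest", "molecule/default/tests/test_default.py"], env=env, check=True)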
src/main/java/com/aslan/kafka/Application.java
package com.aslan.kafka; import org.springframework.boot.ApplicationRunner; import org.springframework.boot.CommandLineRunner; import org.springframework.boot.SpringApplication; import org.springframework.boot.WebApplicationType; import org.springframework.boot.autoconfigure.SpringBootApplication; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.ComponentScan; import org.springframework.context.annotation.Profile; import org.springframework.kafka.core.KafkaTemplate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Hello world! * */ @SpringBootApplication public class Application implements CommandLineRunner{ private static final Logger logger = LoggerFactory.getLogger(Application.class); public static void main(String[] args) { System.out.printf("%s\n", "sb-aslan-kafka-template::main(): STARTING THE APPLICATION"); //SpringApplication.run(Application.class, args); SpringApplication app = new SpringApplication(Application.class); app.setWebApplicationType(WebApplicationType.REACTIVE); String APP_CURRENT_ENV = "local"; if(System.getenv("work.environment")!=null) { String env_value = System.getenv("work.environment").toLowerCase(); if(env_value.trim().equals("dev")) { APP_CURRENT_ENV = "dev"; } System.out.printf("%s\n", "sb-aslan-kafka-template::main(): work.environment: " + env_value); } // set active profile System.setProperty("spring.profiles.active", APP_CURRENT_ENV); // current directory System.out.printf("%s\n", "sb-aslan-kafka-template::main(): Current Directory= " + System.getProperty("user.dir")); // Disabling restart: Make this false when you push to cloud (means on cloud), otherwise comment it for local use //System.setProperty("spring.devtools.restart.enabled", "false"); // run app app.run(args); System.out.printf("%s\n", "sb-aslan-kafka-template::main(): APPLICATION FINISHED"); System.out.printf("%s\n", "sb-aslan-kafka-template::main(): env(): " + APP_CURRENT_ENV); logger.error("Error level is On"); logger.warn("Warn level is On"); logger.info("Info level is On"); logger.debug("Debug level is On"); logger.trace("Trace level is On"); } @Override public void run(String... args) { System.out.printf("%s\n", "sb-aslan-kafka-template::run(): EXECUTING => command line runner"); try { } catch (Exception ex) { System.out.printf("%s\n", "sb-aslan-kafka-template::run(): Exception: " + ex.getMessage()); ex.printStackTrace(); } } /* @Bean CommandLineRunner start(){ return args -> { System.out.println("Hello World!"); }; } */ /* @Bean //@Profile("default") // Don't run from test(s) public ApplicationRunner runner(/*KafkaTemplate<String, String> template) { return args -> { System.out.printf("%s\n", "sb-aslan-kafka-template::runner(): EXECUTING => command line runner"); System.in.read(); try { } catch (Exception ex) { System.out.printf("%s\n", "sb-aslan-kafka-template::runner(): Exception: " + ex.getMessage()); ex.printStackTrace(); } }; }*/ } //end class
[ "\"work.environment\"", "\"work.environment\"" ]
[]
[ "work.environment" ]
[]
["work.environment"]
java
1
0
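The variable above, work.environment, contains a dot, so it usually cannot be set with a plain export in POSIX shells; a minimal sketch (Python; the jar name is hypothetical) of passing it through the process environment so the code above activates the dev profile:

import os
import subprocess

env = dict(os.environ)
env["work.environment"] = "dev"  # dotted names are valid in a process environment even if a shell cannot export them
subprocess.run(["java", "-jar", "sb-aslan-kafka-template.jar"], env=env, check=True)  # hypothetical jar name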
web/openerp/tools/misc.py
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## """ Miscellaneous tools used by OpenERP. """ from functools import wraps import cProfile from contextlib import contextmanager import subprocess import logging import os import socket import sys import threading import time import werkzeug.utils import zipfile from collections import defaultdict, Mapping from datetime import datetime from itertools import islice, izip, groupby from lxml import etree from which import which from threading import local import traceback try: from html2text import html2text except ImportError: html2text = None from config import config from cache import * from .parse_version import parse_version import openerp # get_encodings, ustr and exception_to_unicode were originally from tools.misc. # There are moved to loglevels until we refactor tools. from openerp.loglevels import get_encodings, ustr, exception_to_unicode # noqa _logger = logging.getLogger(__name__) # List of etree._Element subclasses that we choose to ignore when parsing XML. # We include the *Base ones just in case, currently they seem to be subclasses of the _* ones. 
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase) def find_in_path(name): try: return which(name) except IOError: return None def find_pg_tool(name): path = None if config['pg_path'] and config['pg_path'] != 'None': path = config['pg_path'] try: return which(name, path=path) except IOError: return None def exec_pg_command(name, *args): prog = find_pg_tool(name) if not prog: raise Exception('Couldn\'t find %s' % name) args2 = (prog,) + args with open(os.devnull) as dn: return subprocess.call(args2, stdout=dn, stderr=subprocess.STDOUT) def exec_pg_command_pipe(name, *args): prog = find_pg_tool(name) if not prog: raise Exception('Couldn\'t find %s' % name) # on win32, passing close_fds=True is not compatible # with redirecting std[in/err/out] pop = subprocess.Popen((prog,) + args, bufsize= -1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=(os.name=="posix")) return pop.stdin, pop.stdout def exec_command_pipe(name, *args): prog = find_in_path(name) if not prog: raise Exception('Couldn\'t find %s' % name) # on win32, passing close_fds=True is not compatible # with redirecting std[in/err/out] pop = subprocess.Popen((prog,) + args, bufsize= -1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=(os.name=="posix")) return pop.stdin, pop.stdout #---------------------------------------------------------- # File paths #---------------------------------------------------------- #file_path_root = os.getcwd() #file_path_addons = os.path.join(file_path_root, 'addons') def file_open(name, mode="r", subdir='addons', pathinfo=False): """Open a file from the OpenERP root, using a subdir folder. Example:: >>> file_open('hr/report/timesheer.xsl') >>> file_open('addons/hr/report/timesheet.xsl') >>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True) @param name name of the file @param mode file open mode @param subdir subdirectory @param pathinfo if True returns tuple (fileobject, filepath) @return fileobject if pathinfo is False else (fileobject, filepath) """ import openerp.modules as addons adps = addons.module.ad_paths rtp = os.path.normcase(os.path.abspath(config['root_path'])) basename = name if os.path.isabs(name): # It is an absolute path # Is it below 'addons_path' or 'root_path'? name = os.path.normcase(os.path.normpath(name)) for root in adps + [rtp]: root = os.path.normcase(os.path.normpath(root)) + os.sep if name.startswith(root): base = root.rstrip(os.sep) name = name[len(base) + 1:] break else: # It is outside the OpenERP root: skip zipfile lookup. base, name = os.path.split(name) return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename) if name.replace(os.sep, '/').startswith('addons/'): subdir = 'addons' name2 = name[7:] elif subdir: name = os.path.join(subdir, name) if name.replace(os.sep, '/').startswith('addons/'): subdir = 'addons' name2 = name[7:] else: name2 = name # First, try to locate in addons_path if subdir: for adp in adps: try: return _fileopen(name2, mode=mode, basedir=adp, pathinfo=pathinfo, basename=basename) except IOError: pass # Second, try to locate in root_path return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename) def _fileopen(path, mode, basedir, pathinfo, basename=None): name = os.path.normpath(os.path.join(basedir, path)) if basename is None: basename = name # Give higher priority to module directories, which is # a more common case than zipped modules. 
if os.path.isfile(name): fo = open(name, mode) if pathinfo: return fo, name return fo # Support for loading modules in zipped form. # This will not work for zipped modules that are sitting # outside of known addons paths. head = os.path.normpath(path) zipname = False while os.sep in head: head, tail = os.path.split(head) if not tail: break if zipname: zipname = os.path.join(tail, zipname) else: zipname = tail zpath = os.path.join(basedir, head + '.zip') if zipfile.is_zipfile(zpath): from cStringIO import StringIO zfile = zipfile.ZipFile(zpath) try: fo = StringIO() fo.write(zfile.read(os.path.join( os.path.basename(head), zipname).replace( os.sep, '/'))) fo.seek(0) if pathinfo: return fo, name return fo except Exception: pass # Not found if name.endswith('.rml'): raise IOError('Report %r doesn\'t exist or deleted' % basename) raise IOError('File not found: %s' % basename) #---------------------------------------------------------- # iterables #---------------------------------------------------------- def flatten(list): """Flatten a list of elements into a uniqu list Author: Christophe Simonis ([email protected]) Examples:: >>> flatten(['a']) ['a'] >>> flatten('b') ['b'] >>> flatten( [] ) [] >>> flatten( [[], [[]]] ) [] >>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] ) ['a', 'b', 'c', 'd', 'e', 'f'] >>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]]) >>> flatten(t) [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] """ def isiterable(x): return hasattr(x, "__iter__") r = [] for e in list: if isiterable(e): map(r.append, flatten(e)) else: r.append(e) return r def reverse_enumerate(l): """Like enumerate but in the other sens Usage:: >>> a = ['a', 'b', 'c'] >>> it = reverse_enumerate(a) >>> it.next() (2, 'c') >>> it.next() (1, 'b') >>> it.next() (0, 'a') >>> it.next() Traceback (most recent call last): File "<stdin>", line 1, in <module> StopIteration """ return izip(xrange(len(l)-1, -1, -1), reversed(l)) def topological_sort(elems): """ Return a list of elements sorted so that their dependencies are listed before them in the result. :param elems: specifies the elements to sort with their dependencies; it is a dictionary like `{element: dependencies}` where `dependencies` is a collection of elements that must appear before `element`. The elements of `dependencies` are not required to appear in `elems`; they will simply not appear in the result. :returns: a list with the keys of `elems` sorted according to their specification. 
""" # the algorithm is inspired by [Tarjan 1976], # http://en.wikipedia.org/wiki/Topological_sorting#Algorithms result = [] visited = set() def visit(n): if n not in visited: visited.add(n) if n in elems: # first visit all dependencies of n, then append n to result map(visit, elems[n]) result.append(n) map(visit, elems) return result class UpdateableStr(local): """ Class that stores an updateable string (used in wizards) """ def __init__(self, string=''): self.string = string def __str__(self): return str(self.string) def __repr__(self): return str(self.string) def __nonzero__(self): return bool(self.string) class UpdateableDict(local): """Stores an updateable dict to use in wizards """ def __init__(self, dict=None): if dict is None: dict = {} self.dict = dict def __str__(self): return str(self.dict) def __repr__(self): return str(self.dict) def clear(self): return self.dict.clear() def keys(self): return self.dict.keys() def __setitem__(self, i, y): self.dict.__setitem__(i, y) def __getitem__(self, i): return self.dict.__getitem__(i) def copy(self): return self.dict.copy() def iteritems(self): return self.dict.iteritems() def iterkeys(self): return self.dict.iterkeys() def itervalues(self): return self.dict.itervalues() def pop(self, k, d=None): return self.dict.pop(k, d) def popitem(self): return self.dict.popitem() def setdefault(self, k, d=None): return self.dict.setdefault(k, d) def update(self, E, **F): return self.dict.update(E, F) def values(self): return self.dict.values() def get(self, k, d=None): return self.dict.get(k, d) def has_key(self, k): return self.dict.has_key(k) def items(self): return self.dict.items() def __cmp__(self, y): return self.dict.__cmp__(y) def __contains__(self, k): return self.dict.__contains__(k) def __delitem__(self, y): return self.dict.__delitem__(y) def __eq__(self, y): return self.dict.__eq__(y) def __ge__(self, y): return self.dict.__ge__(y) def __gt__(self, y): return self.dict.__gt__(y) def __hash__(self): return self.dict.__hash__() def __iter__(self): return self.dict.__iter__() def __le__(self, y): return self.dict.__le__(y) def __len__(self): return self.dict.__len__() def __lt__(self, y): return self.dict.__lt__(y) def __ne__(self, y): return self.dict.__ne__(y) class currency(float): """ Deprecate .. warning:: Don't use ! 
Use res.currency.round() """ def __init__(self, value, accuracy=2, rounding=None): if rounding is None: rounding=10**-accuracy self.rounding=rounding self.accuracy=accuracy def __new__(cls, value, accuracy=2, rounding=None): return float.__new__(cls, round(value, accuracy)) #def __str__(self): # display_value = int(self*(10**(-self.accuracy))/self.rounding)*self.rounding/(10**(-self.accuracy)) # return str(display_value) def to_xml(s): return s.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;') def get_iso_codes(lang): if lang.find('_') != -1: if lang.split('_')[0] == lang.split('_')[1].lower(): lang = lang.split('_')[0] return lang ALL_LANGUAGES = { 'ab_RU': u'Abkhazian / аҧсуа', 'am_ET': u'Amharic / አምሃርኛ', 'ar_SY': u'Arabic / الْعَرَبيّة', 'bg_BG': u'Bulgarian / български език', 'bs_BS': u'Bosnian / bosanski jezik', 'ca_ES': u'Catalan / Català', 'cs_CZ': u'Czech / Čeština', 'da_DK': u'Danish / Dansk', 'de_DE': u'German / Deutsch', 'el_GR': u'Greek / Ελληνικά', 'en_CA': u'English (CA)', 'en_GB': u'English (UK)', 'en_US': u'English (US)', 'es_AR': u'Spanish (AR) / Español (AR)', 'es_BO': u'Spanish (BO) / Español (BO)', 'es_CL': u'Spanish (CL) / Español (CL)', 'es_CO': u'Spanish (CO) / Español (CO)', 'es_CR': u'Spanish (CR) / Español (CR)', 'es_DO': u'Spanish (DO) / Español (DO)', 'es_EC': u'Spanish (EC) / Español (EC)', 'es_ES': u'Spanish / Español', 'es_GT': u'Spanish (GT) / Español (GT)', 'es_HN': u'Spanish (HN) / Español (HN)', 'es_MX': u'Spanish (MX) / Español (MX)', 'es_NI': u'Spanish (NI) / Español (NI)', 'es_PA': u'Spanish (PA) / Español (PA)', 'es_PE': u'Spanish (PE) / Español (PE)', 'es_PR': u'Spanish (PR) / Español (PR)', 'es_PY': u'Spanish (PY) / Español (PY)', 'es_SV': u'Spanish (SV) / Español (SV)', 'es_UY': u'Spanish (UY) / Español (UY)', 'es_VE': u'Spanish (VE) / Español (VE)', 'et_EE': u'Estonian / Eesti keel', 'fa_IR': u'Persian / فارس', 'fi_FI': u'Finnish / Suomi', 'fr_BE': u'French (BE) / Français (BE)', 'fr_CA': u'French (CA) / Français (CA)', 'fr_CH': u'French (CH) / Français (CH)', 'fr_FR': u'French / Français', 'gl_ES': u'Galician / Galego', 'gu_IN': u'Gujarati / ગુજરાતી', 'he_IL': u'Hebrew / עִבְרִי', 'hi_IN': u'Hindi / हिंदी', 'hr_HR': u'Croatian / hrvatski jezik', 'hu_HU': u'Hungarian / Magyar', 'id_ID': u'Indonesian / Bahasa Indonesia', 'it_IT': u'Italian / Italiano', 'iu_CA': u'Inuktitut / ᐃᓄᒃᑎᑐᑦ', 'ja_JP': u'Japanese / 日本語', 'ko_KP': u'Korean (KP) / 한국어 (KP)', 'ko_KR': u'Korean (KR) / 한국어 (KR)', 'lo_LA': u'Lao / ພາສາລາວ', 'lt_LT': u'Lithuanian / Lietuvių kalba', 'lv_LV': u'Latvian / latviešu valoda', 'mk_MK': u'Macedonian / македонски јазик', 'ml_IN': u'Malayalam / മലയാളം', 'mn_MN': u'Mongolian / монгол', 'nb_NO': u'Norwegian Bokmål / Norsk bokmål', 'nl_NL': u'Dutch / Nederlands', 'nl_BE': u'Flemish (BE) / Vlaams (BE)', 'oc_FR': u'Occitan (FR, post 1500) / Occitan', 'pl_PL': u'Polish / Język polski', 'pt_BR': u'Portuguese (BR) / Português (BR)', 'pt_PT': u'Portuguese / Português', 'ro_RO': u'Romanian / română', 'ru_RU': u'Russian / русский язык', 'si_LK': u'Sinhalese / සිංහල', 'sl_SI': u'Slovenian / slovenščina', 'sk_SK': u'Slovak / Slovenský jazyk', 'sq_AL': u'Albanian / Shqip', 'sr_RS': u'Serbian (Cyrillic) / српски', 'sr@latin': u'Serbian (Latin) / srpski', 'sv_SE': u'Swedish / svenska', 'te_IN': u'Telugu / తెలుగు', 'tr_TR': u'Turkish / Türkçe', 'vi_VN': u'Vietnamese / Tiếng Việt', 'uk_UA': u'Ukrainian / українська', 'ur_PK': u'Urdu / اردو', 'zh_CN': u'Chinese (CN) / 简体中文', 'zh_HK': u'Chinese (HK)', 'zh_TW': u'Chinese (TW) / 正體字', 'th_TH': u'Thai 
/ ภาษาไทย', 'tlh_TLH': u'Klingon', } def scan_languages(): """ Returns all languages supported by OpenERP for translation :returns: a list of (lang_code, lang_name) pairs :rtype: [(str, unicode)] """ return sorted(ALL_LANGUAGES.iteritems(), key=lambda k: k[1]) def get_user_companies(cr, user): def _get_company_children(cr, ids): if not ids: return [] cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),)) res = [x[0] for x in cr.fetchall()] res.extend(_get_company_children(cr, res)) return res cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,)) user_comp = cr.fetchone()[0] if not user_comp: return [] return [user_comp] + _get_company_children(cr, [user_comp]) def mod10r(number): """ Input number : account or invoice number Output return: the same number completed with the recursive mod10 key """ codec=[0,9,4,6,8,2,7,1,3,5] report = 0 result="" for digit in number: result += digit if digit.isdigit(): report = codec[ (int(digit) + report) % 10 ] return result + str((10 - report) % 10) def human_size(sz): """ Return the size in a human readable format """ if not sz: return False units = ('bytes', 'Kb', 'Mb', 'Gb') if isinstance(sz,basestring): sz=len(sz) s, i = float(sz), 0 while s >= 1024 and i < len(units)-1: s /= 1024 i += 1 return "%0.2f %s" % (s, units[i]) def logged(f): @wraps(f) def wrapper(*args, **kwargs): from pprint import pformat vector = ['Call -> function: %r' % f] for i, arg in enumerate(args): vector.append(' arg %02d: %s' % (i, pformat(arg))) for key, value in kwargs.items(): vector.append(' kwarg %10s: %s' % (key, pformat(value))) timeb4 = time.time() res = f(*args, **kwargs) vector.append(' result: %s' % pformat(res)) vector.append(' time delta: %s' % (time.time() - timeb4)) _logger.debug('\n'.join(vector)) return res return wrapper class profile(object): def __init__(self, fname=None): self.fname = fname def __call__(self, f): @wraps(f) def wrapper(*args, **kwargs): profile = cProfile.Profile() result = profile.runcall(f, *args, **kwargs) profile.dump_stats(self.fname or ("%s.cprof" % (f.func_name,))) return result return wrapper __icons_list = ['STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_BOLD', 'STOCK_CANCEL', 'STOCK_CDROM', 'STOCK_CLEAR', 'STOCK_CLOSE', 'STOCK_COLOR_PICKER', 'STOCK_CONNECT', 'STOCK_CONVERT', 'STOCK_COPY', 'STOCK_CUT', 'STOCK_DELETE', 'STOCK_DIALOG_AUTHENTICATION', 'STOCK_DIALOG_ERROR', 'STOCK_DIALOG_INFO', 'STOCK_DIALOG_QUESTION', 'STOCK_DIALOG_WARNING', 'STOCK_DIRECTORY', 'STOCK_DISCONNECT', 'STOCK_DND', 'STOCK_DND_MULTIPLE', 'STOCK_EDIT', 'STOCK_EXECUTE', 'STOCK_FILE', 'STOCK_FIND', 'STOCK_FIND_AND_REPLACE', 'STOCK_FLOPPY', 'STOCK_GOTO_BOTTOM', 'STOCK_GOTO_FIRST', 'STOCK_GOTO_LAST', 'STOCK_GOTO_TOP', 'STOCK_GO_BACK', 'STOCK_GO_DOWN', 'STOCK_GO_FORWARD', 'STOCK_GO_UP', 'STOCK_HARDDISK', 'STOCK_HELP', 'STOCK_HOME', 'STOCK_INDENT', 'STOCK_INDEX', 'STOCK_ITALIC', 'STOCK_JUMP_TO', 'STOCK_JUSTIFY_CENTER', 'STOCK_JUSTIFY_FILL', 'STOCK_JUSTIFY_LEFT', 'STOCK_JUSTIFY_RIGHT', 'STOCK_MEDIA_FORWARD', 'STOCK_MEDIA_NEXT', 'STOCK_MEDIA_PAUSE', 'STOCK_MEDIA_PLAY', 'STOCK_MEDIA_PREVIOUS', 'STOCK_MEDIA_RECORD', 'STOCK_MEDIA_REWIND', 'STOCK_MEDIA_STOP', 'STOCK_MISSING_IMAGE', 'STOCK_NETWORK', 'STOCK_NEW', 'STOCK_NO', 'STOCK_OK', 'STOCK_OPEN', 'STOCK_PASTE', 'STOCK_PREFERENCES', 'STOCK_PRINT', 'STOCK_PRINT_PREVIEW', 'STOCK_PROPERTIES', 'STOCK_QUIT', 'STOCK_REDO', 'STOCK_REFRESH', 'STOCK_REMOVE', 'STOCK_REVERT_TO_SAVED', 'STOCK_SAVE', 'STOCK_SAVE_AS', 'STOCK_SELECT_COLOR', 'STOCK_SELECT_FONT', 'STOCK_SORT_ASCENDING', 
'STOCK_SORT_DESCENDING', 'STOCK_SPELL_CHECK', 'STOCK_STOP', 'STOCK_STRIKETHROUGH', 'STOCK_UNDELETE', 'STOCK_UNDERLINE', 'STOCK_UNDO', 'STOCK_UNINDENT', 'STOCK_YES', 'STOCK_ZOOM_100', 'STOCK_ZOOM_FIT', 'STOCK_ZOOM_IN', 'STOCK_ZOOM_OUT', 'terp-account', 'terp-crm', 'terp-mrp', 'terp-product', 'terp-purchase', 'terp-sale', 'terp-tools', 'terp-administration', 'terp-hr', 'terp-partner', 'terp-project', 'terp-report', 'terp-stock', 'terp-calendar', 'terp-graph', 'terp-check','terp-go-month','terp-go-year','terp-go-today','terp-document-new','terp-camera_test', 'terp-emblem-important','terp-gtk-media-pause','terp-gtk-stop','terp-gnome-cpu-frequency-applet+', 'terp-dialog-close','terp-gtk-jump-to-rtl','terp-gtk-jump-to-ltr','terp-accessories-archiver', 'terp-stock_align_left_24','terp-stock_effects-object-colorize','terp-go-home','terp-gtk-go-back-rtl', 'terp-gtk-go-back-ltr','terp-personal','terp-personal-','terp-personal+','terp-accessories-archiver-minus', 'terp-accessories-archiver+','terp-stock_symbol-selection','terp-call-start','terp-dolar', 'terp-face-plain','terp-folder-blue','terp-folder-green','terp-folder-orange','terp-folder-yellow', 'terp-gdu-smart-failing','terp-go-week','terp-gtk-select-all','terp-locked','terp-mail-forward', 'terp-mail-message-new','terp-mail-replied','terp-rating-rated','terp-stage','terp-stock_format-scientific', 'terp-dolar_ok!','terp-idea','terp-stock_format-default','terp-mail-','terp-mail_delete' ] def icons(*a, **kw): global __icons_list return [(x, x) for x in __icons_list ] def detect_ip_addr(): """Try a very crude method to figure out a valid external IP or hostname for the current machine. Don't rely on this for binding to an interface, but it could be used as basis for constructing a remote URL to the server. """ def _detect_ip_addr(): from array import array from struct import pack, unpack try: import fcntl except ImportError: fcntl = None ip_addr = None if not fcntl: # not UNIX: host = socket.gethostname() ip_addr = socket.gethostbyname(host) else: # UNIX: # get all interfaces: nbytes = 128 * 32 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) names = array('B', '\0' * nbytes) #print 'names: ', names outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0] namestr = names.tostring() # try 64 bit kernel: for i in range(0, outbytes, 40): name = namestr[i:i+16].split('\0', 1)[0] if name != 'lo': ip_addr = socket.inet_ntoa(namestr[i+20:i+24]) break # try 32 bit kernel: if ip_addr is None: ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)]) for ifname in [iface for iface in ifaces if iface != 'lo']: ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24]) break return ip_addr or 'localhost' try: ip_addr = _detect_ip_addr() except Exception: ip_addr = 'localhost' return ip_addr # RATIONALE BEHIND TIMESTAMP CALCULATIONS AND TIMEZONE MANAGEMENT: # The server side never does any timestamp calculation, always # sends them in a naive (timezone agnostic) format supposed to be # expressed within the server timezone, and expects the clients to # provide timestamps in the server timezone as well. # It stores all timestamps in the database in naive format as well, # which also expresses the time in the server timezone. # For this reason the server makes its timezone name available via the # common/timezone_get() rpc method, which clients need to read # to know the appropriate time offset to use when reading/writing # times. 
def get_win32_timezone(): """Attempt to return the "standard name" of the current timezone on a win32 system. @return the standard name of the current win32 timezone, or False if it cannot be found. """ res = False if sys.platform == "win32": try: import _winreg hklm = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE) current_tz_key = _winreg.OpenKey(hklm, r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation", 0,_winreg.KEY_ALL_ACCESS) res = str(_winreg.QueryValueEx(current_tz_key,"StandardName")[0]) # [0] is value, [1] is type code _winreg.CloseKey(current_tz_key) _winreg.CloseKey(hklm) except Exception: pass return res def detect_server_timezone(): """Attempt to detect the timezone to use on the server side. Defaults to UTC if no working timezone can be found. @return the timezone identifier as expected by pytz.timezone. """ try: import pytz except Exception: _logger.warning("Python pytz module is not available. " "Timezone will be set to UTC by default.") return 'UTC' # Option 1: the configuration option (did not exist before, so no backwards compatibility issue) # Option 2: to be backwards compatible with 5.0 or earlier, the value from time.tzname[0], but only if it is known to pytz # Option 3: the environment variable TZ sources = [ (config['timezone'], 'OpenERP configuration'), (time.tzname[0], 'time.tzname'), (os.environ.get('TZ',False),'TZ environment variable'), ] # Option 4: OS-specific: /etc/timezone on Unix if os.path.exists("/etc/timezone"): tz_value = False try: f = open("/etc/timezone") tz_value = f.read(128).strip() except Exception: pass finally: f.close() sources.append((tz_value,"/etc/timezone file")) # Option 5: timezone info from registry on Win32 if sys.platform == "win32": # Timezone info is stored in windows registry. # However this is not likely to work very well as the standard name # of timezones in windows is rarely something that is known to pytz. # But that's ok, it is always possible to use a config option to set # it explicitly. sources.append((get_win32_timezone(),"Windows Registry")) for (value,source) in sources: if value: try: tz = pytz.timezone(value) _logger.info("Using timezone %s obtained from %s.", tz.zone, source) return value except pytz.UnknownTimeZoneError: _logger.warning("The timezone specified in %s (%s) is invalid, ignoring it.", source, value) _logger.warning("No valid timezone could be detected, using default UTC " "timezone. You can specify it explicitly with option 'timezone' in " "the server configuration.") return 'UTC' def get_server_timezone(): return "UTC" DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d" DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S" DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % ( DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_TIME_FORMAT) # Python's strftime supports only the format directives # that are available on the platform's libc, so in order to # be cross-platform we map to the directives required by # the C standard (1989 version), always available on platforms # with a C standard implementation. 
DATETIME_FORMATS_MAP = { '%C': '', # century '%D': '%m/%d/%Y', # modified %y->%Y '%e': '%d', '%E': '', # special modifier '%F': '%Y-%m-%d', '%g': '%Y', # modified %y->%Y '%G': '%Y', '%h': '%b', '%k': '%H', '%l': '%I', '%n': '\n', '%O': '', # special modifier '%P': '%p', '%R': '%H:%M', '%r': '%I:%M:%S %p', '%s': '', #num of seconds since epoch '%T': '%H:%M:%S', '%t': ' ', # tab '%u': ' %w', '%V': '%W', '%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y '%+': '%Y-%m-%d %H:%M:%S', # %Z is a special case that causes 2 problems at least: # - the timezone names we use (in res_user.context_tz) come # from pytz, but not all these names are recognized by # strptime(), so we cannot convert in both directions # when such a timezone is selected and %Z is in the format # - %Z is replaced by an empty string in strftime() when # there is not tzinfo in a datetime value (e.g when the user # did not pick a context_tz). The resulting string does not # parse back if the format requires %Z. # As a consequence, we strip it completely from format strings. # The user can always have a look at the context_tz in # preferences to check the timezone. '%z': '', '%Z': '', } POSIX_TO_LDML = { 'a': 'E', 'A': 'EEEE', 'b': 'MMM', 'B': 'MMMM', #'c': '', 'd': 'dd', 'H': 'HH', 'I': 'hh', 'j': 'DDD', 'm': 'MM', 'M': 'mm', 'p': 'a', 'S': 'ss', 'U': 'w', 'w': 'e', 'W': 'w', 'y': 'yy', 'Y': 'yyyy', # see comments above, and babel's format_datetime assumes an UTC timezone # for naive datetime objects #'z': 'Z', #'Z': 'z', } def posix_to_ldml(fmt, locale): """ Converts a posix/strftime pattern into an LDML date format pattern. :param fmt: non-extended C89/C90 strftime pattern :param locale: babel locale used for locale-specific conversions (e.g. %x and %X) :return: unicode """ buf = [] pc = False quoted = [] for c in fmt: # LDML date format patterns uses letters, so letters must be quoted if not pc and c.isalpha(): quoted.append(c if c != "'" else "''") continue if quoted: buf.append("'") buf.append(''.join(quoted)) buf.append("'") quoted = [] if pc: if c == '%': # escaped percent buf.append('%') elif c == 'x': # date format, short seems to match buf.append(locale.date_formats['short'].pattern) elif c == 'X': # time format, seems to include seconds. short does not buf.append(locale.time_formats['medium'].pattern) else: # look up format char in static mapping buf.append(POSIX_TO_LDML[c]) pc = False elif c == '%': pc = True else: buf.append(c) # flush anything remaining in quoted buffer if quoted: buf.append("'") buf.append(''.join(quoted)) buf.append("'") return ''.join(buf) def server_to_local_timestamp(src_tstamp_str, src_format, dst_format, dst_tz_name, tz_offset=True, ignore_unparsable_time=True): """ Convert a source timestamp string into a destination timestamp string, attempting to apply the correct offset if both the server and local timezone are recognized, or no offset at all if they aren't or if tz_offset is false (i.e. assuming they are both in the same TZ). WARNING: This method is here to allow formatting dates correctly for inclusion in strings where the client would not be able to format/offset it correctly. DO NOT use it for returning date fields directly, these are supposed to be handled by the client!! @param src_tstamp_str: the str value containing the timestamp in the server timezone. @param src_format: the format to use when parsing the server timestamp. @param dst_format: the format to use when formatting the resulting timestamp for the local/client timezone. 
@param dst_tz_name: name of the destination timezone (such as the 'tz' value of the client context) @param ignore_unparsable_time: if True, return False if src_tstamp_str cannot be parsed using src_format or formatted using dst_format. @return local/client formatted timestamp, expressed in the local/client timezone if possible and if tz_offset is true, or src_tstamp_str if timezone offset could not be determined. """ if not src_tstamp_str: return False res = src_tstamp_str if src_format and dst_format: # find out server timezone server_tz = get_server_timezone() try: # dt_value needs to be a datetime.datetime object (so no time.struct_time or mx.DateTime.DateTime here!) dt_value = datetime.strptime(src_tstamp_str, src_format) if tz_offset and dst_tz_name: try: import pytz src_tz = pytz.timezone(server_tz) dst_tz = pytz.timezone(dst_tz_name) src_dt = src_tz.localize(dt_value, is_dst=True) dt_value = src_dt.astimezone(dst_tz) except Exception: pass res = dt_value.strftime(dst_format) except Exception: # Normal ways to end up here are if strptime or strftime failed if not ignore_unparsable_time: return False return res def split_every(n, iterable, piece_maker=tuple): """Splits an iterable into length-n pieces. The last piece will be shorter if ``n`` does not evenly divide the iterable length. @param ``piece_maker``: function to build the pieces from the slices (tuple,list,...) """ iterator = iter(iterable) piece = piece_maker(islice(iterator, n)) while piece: yield piece piece = piece_maker(islice(iterator, n)) if __name__ == '__main__': import doctest doctest.testmod() class upload_data_thread(threading.Thread): def __init__(self, email, data, type): self.args = [('email',email),('type',type),('data',data)] super(upload_data_thread,self).__init__() def run(self): try: import urllib args = urllib.urlencode(self.args) fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args) fp.read() fp.close() except Exception: pass def upload_data(email, data, type='SURVEY'): a = upload_data_thread(email, data, type) a.start() return True def get_and_group_by_field(cr, uid, obj, ids, field, context=None): """ Read the values of ``field´´ for the given ``ids´´ and group ids by value. :param string field: name of the field we want to read and group by :return: mapping of field values to the list of ids that have it :rtype: dict """ res = {} for record in obj.read(cr, uid, ids, [field], context=context): key = record[field] res.setdefault(key[0] if isinstance(key, tuple) else key, []).append(record['id']) return res def get_and_group_by_company(cr, uid, obj, ids, context=None): return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context) # port of python 2.6's attrgetter with support for dotted notation def resolve_attr(obj, attr): for name in attr.split("."): obj = getattr(obj, name) return obj def attrgetter(*items): if len(items) == 1: attr = items[0] def g(obj): return resolve_attr(obj, attr) else: def g(obj): return tuple(resolve_attr(obj, attr) for attr in items) return g class unquote(str): """A subclass of str that implements repr() without enclosing quotation marks or escaping, keeping the original string untouched. The name come from Lisp's unquote. One of the uses for this is to preserve or insert bare variable names within dicts during eval() of a dict's repr(). Use with care. 
Some examples (notice that there are never quotes surrounding the ``active_id`` name: >>> unquote('active_id') active_id >>> d = {'test': unquote('active_id')} >>> d {'test': active_id} >>> print d {'test': active_id} """ def __repr__(self): return self class UnquoteEvalContext(defaultdict): """Defaultdict-based evaluation context that returns an ``unquote`` string for any missing name used during the evaluation. Mostly useful for evaluating OpenERP domains/contexts that may refer to names that are unknown at the time of eval, so that when the context/domain is converted back to a string, the original names are preserved. **Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or ``safe_eval()`` will shadow the builtins, which may cause other failures, depending on what is evaluated. Example (notice that ``section_id`` is preserved in the final result) : >>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}" >>> eval(context_str, UnquoteEvalContext(uid=1)) {'default_user_id': 1, 'default_section_id': section_id} """ def __init__(self, *args, **kwargs): super(UnquoteEvalContext, self).__init__(None, *args, **kwargs) def __missing__(self, key): return unquote(key) class mute_logger(object): """Temporary suppress the logging. Can be used as context manager or decorator. @mute_logger('openerp.plic.ploc') def do_stuff(): blahblah() with mute_logger('openerp.foo.bar'): do_suff() """ def __init__(self, *loggers): self.loggers = loggers def filter(self, record): return 0 def __enter__(self): for logger in self.loggers: assert isinstance(logger, basestring),\ "A logger name must be a string, got %s" % type(logger) logging.getLogger(logger).addFilter(self) def __exit__(self, exc_type=None, exc_val=None, exc_tb=None): for logger in self.loggers: logging.getLogger(logger).removeFilter(self) def __call__(self, func): @wraps(func) def deco(*args, **kwargs): with self: return func(*args, **kwargs) return deco _ph = object() class CountingStream(object): """ Stream wrapper counting the number of element it has yielded. Similar role to ``enumerate``, but for use when the iteration process of the stream isn't fully under caller control (the stream can be iterated from multiple points including within a library) ``start`` allows overriding the starting index (the index before the first item is returned). On each iteration (call to :meth:`~.next`), increases its :attr:`~.index` by one. .. attribute:: index ``int``, index of the last yielded element in the stream. 
If the stream has ended, will give an index 1-past the stream """ def __init__(self, stream, start=-1): self.stream = iter(stream) self.index = start self.stopped = False def __iter__(self): return self def next(self): if self.stopped: raise StopIteration() self.index += 1 val = next(self.stream, _ph) if val is _ph: self.stopped = True raise StopIteration() return val def stripped_sys_argv(*strip_args): """Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses""" strip_args = sorted(set(strip_args) | set(['-s', '--save', '-d', '--database', '-u', '--update', '-i', '--init'])) assert all(config.parser.has_option(s) for s in strip_args) takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args) longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--'))) longs_eq = tuple(l + '=' for l in longs if takes_value[l]) args = sys.argv[:] def strip(args, i): return args[i].startswith(shorts) \ or args[i].startswith(longs_eq) or (args[i] in longs) \ or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]]) return [x for i, x in enumerate(args) if not strip(args, i)] class ConstantMapping(Mapping): """ An immutable mapping returning the provided value for every single key. Useful for default value to methods """ __slots__ = ['_value'] def __init__(self, val): self._value = val def __len__(self): """ defaultdict updates its length for each individually requested key, is that really useful? """ return 0 def __iter__(self): """ same as len, defaultdict udpates its iterable keyset with each key requested, is there a point for this? """ return iter([]) def __getitem__(self, item): return self._value def dumpstacks(sig=None, frame=None): """ Signal handler: dump a stack trace for each existing thread.""" code = [] def extract_stack(stack): for filename, lineno, name, line in traceback.extract_stack(stack): yield 'File: "%s", line %d, in %s' % (filename, lineno, name) if line: yield " %s" % (line.strip(),) # code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696 # modified for python 2.5 compatibility threads_info = dict([(th.ident, {'name': th.name, 'uid': getattr(th, 'uid', 'n/a')}) for th in threading.enumerate()]) for threadId, stack in sys._current_frames().items(): thread_info = threads_info.get(threadId) code.append("\n# Thread: %s (id:%s) (uid:%s)" % (thread_info and thread_info['name'] or 'n/a', threadId, thread_info and thread_info['uid'] or 'n/a')) for line in extract_stack(stack): code.append(line) if openerp.evented: # code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets import gc from greenlet import greenlet for ob in gc.get_objects(): if not isinstance(ob, greenlet) or not ob: continue code.append("\n# Greenlet: %r" % (ob,)) for line in extract_stack(ob.gr_frame): code.append(line) _logger.info("\n".join(code)) class frozendict(dict): """ An implementation of an immutable dictionary. 
""" def __delitem__(self, key): raise NotImplementedError("'__delitem__' not supported on frozendict") def __setitem__(self, key, val): raise NotImplementedError("'__setitem__' not supported on frozendict") def clear(self): raise NotImplementedError("'clear' not supported on frozendict") def pop(self, key, default=None): raise NotImplementedError("'pop' not supported on frozendict") def popitem(self): raise NotImplementedError("'popitem' not supported on frozendict") def setdefault(self, key, default=None): raise NotImplementedError("'setdefault' not supported on frozendict") def update(self, *args, **kwargs): raise NotImplementedError("'update' not supported on frozendict") @contextmanager def ignore(*exc): try: yield except exc: pass # Avoid DeprecationWarning while still remaining compatible with werkzeug pre-0.9 if parse_version(getattr(werkzeug, '__version__', '0.0')) < parse_version('0.9.0'): def html_escape(text): return werkzeug.utils.escape(text, quote=True) else: def html_escape(text): return werkzeug.utils.escape(text) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
[]
[]
[ "TZ" ]
[]
["TZ"]
python
1
0
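TZ is only the third source consulted by detect_server_timezone above (after the 'timezone' config option and time.tzname), and its value has to be a zone name that pytz recognises; a minimal sketch (Python; the entry-point script is hypothetical):

import os
import subprocess

env = dict(os.environ)
env["TZ"] = "Europe/Brussels"  # must be a pytz-recognised zone name, otherwise it is ignored with a warning
subprocess.run(["python", "openerp-server"], env=env, check=True)  # hypothetical entry-point script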
connectorx-python/connectorx/tests/test_polars.py
import os import pandas as pd import pytest import polars as pl from .. import read_sql @pytest.fixture(scope="module") # type: ignore def postgres_url() -> str: conn = os.environ["POSTGRES_URL"] return conn def test_polars(postgres_url: str) -> None: query = "SELECT * FROM test_table" df = read_sql( postgres_url, query, partition_on="test_int", partition_range=(0, 2000), partition_num=3, return_type="polars", ) expected = pl.DataFrame( { "test_int": [0, 1, 2, 3, 4, 1314], "test_nullint": [5, 3, None, 7, 9, 2], "test_str": ["a", "str1", "str2", "b", "c", None], "test_float": [3.1, None, 2.2, 3, 7.8, -10], "test_bool": [None, True, False, False, None, True], }, ) df = df.sort('test_int') assert df.frame_equal(expected, null_equal=True)
[]
[]
[ "POSTGRES_URL" ]
[]
["POSTGRES_URL"]
python
1
0
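The postgres_url fixture above raises KeyError unless POSTGRES_URL is set, and the assertions expect a pre-populated test_table; a minimal sketch (Python; the connection string is hypothetical):

import os
import subprocess

env = dict(os.environ)
env["POSTGRES_URL"] = "postgresql://user:password@localhost:5432/test"  # hypothetical DSN
subprocess.run(["pytest", "connectorx-python/connectorx/tests/test_polars.py"], env=env, check=True)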
classify_nsfw.py
#!/usr/bin/env python """ Copyright 2016 Yahoo Inc. Licensed under the terms of the 2 clause BSD license. Please see LICENSE file in the project root for terms. """ import numpy as np import pandas as pd import os from tqdm import tqdm import sys import argparse import glob import time from PIL import Image import matplotlib matplotlib.use('agg') import matplotlib.pyplot as plt try: from StringIO import StringIO except ImportError: from io import BytesIO as StringIO import caffe import cv2 from class_activation_map import save_CAM_caffe try: caffe_root = os.environ['CAFFE_ROOT'] + '/' except KeyError: raise KeyError("Define CAFFE_ROOT in ~/.bashrc") import visualize_result from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_curve, precision_recall_curve class_dict = { 'notsexy': 0, 'sexy': 1 } def resize_image(data, sz=(256, 256)): """ Resize image. Please use this resize logic for best results instead of the caffe, since it was used to generate training dataset :param str data: The image data :param sz tuple: The resized image dimensions :returns bytearray: A byte array with the resized image """ img_data = data im = Image.open(StringIO(img_data)) if im.mode != "RGB": im = im.convert('RGB') imr = im.resize(sz, resample=Image.BILINEAR) fh_im = StringIO() imr.save(fh_im, format='JPEG') fh_im.seek(0) return bytearray(fh_im.read()) def caffe_preprocess(caffe_net, image_data, caffe_transformer=None): img_data_rs = resize_image(image_data, sz=(256, 256)) image = caffe.io.load_image(StringIO(img_data_rs)) H, W, _ = image.shape _, _, h, w = caffe_net.blobs['data'].data.shape h_off = int(max((H - h) / 2, 0)) w_off = int(max((W - w) / 2, 0)) crop = image[h_off:h_off + h, w_off:w_off + w, :] transformed_image = caffe_transformer.preprocess('data', crop) transformed_image.shape = (1,) + transformed_image.shape return image, transformed_image def caffe_compute(transformed_image, caffe_net=None, output_layers=None): """ Run a Caffe network on an input image after preprocessing it to prepare it for Caffe. :param PIL.Image pimg: PIL image to be input into Caffe. :param caffe.Net caffe_net: A Caffe network with which to process pimg afrer preprocessing. :param list output_layers: A list of the names of the layers from caffe_net whose outputs are to to be returned. If this is None, the default outputs for the network are returned. :return: Returns the requested outputs from the Caffe net. """ if caffe_net is not None: # Grab the default output names if none were requested specifically. if output_layers is None: output_layers = caffe_net.outputs input_name = caffe_net.inputs[0] all_outputs = caffe_net.forward_all(blobs=output_layers, **{input_name: transformed_image}) outputs = all_outputs[output_layers[0]][0].astype(float) return outputs else: return [] def main(argv): pycaffe_dir = os.path.dirname(__file__) parser = argparse.ArgumentParser() # Required arguments: input file. parser.add_argument( "--input_file", help="Path to the input image file" ) parser.add_argument( "--input_label_file", help="Path to the input label file" ) # Optional arguments. parser.add_argument( "--model_def", help="Model definition file." ) parser.add_argument( "--pretrained_model", help="Trained model weights file." 
) parser.add_argument( "--threshold", default=0.5, type=float, help="Path to the input image file" ) parser.add_argument( "--save_cam_path", help="Save class activation map flag" ) parser.add_argument( "--save_to_folder_path", help="Classify images and store them to scores folder" ) parser.add_argument( "--save_result_path", default='result', help="Directory where to save ROC curve, confusion matrix" ) args = parser.parse_args() # Pre-load caffe model. nsfw_net = caffe.Net(args.model_def, # pylint: disable=invalid-name args.pretrained_model, caffe.TEST) # Load transformer # Note that the parameters are hard-coded for best results caffe_transformer = caffe.io.Transformer({'data': nsfw_net.blobs['data'].data.shape}) # move image channels to outermost caffe_transformer.set_transpose('data', (2, 0, 1)) # subtract the dataset-mean value in each channel caffe_transformer.set_mean('data', np.array([104, 117, 123])) # rescale from [0, 1] to [0, 255] caffe_transformer.set_raw_scale('data', 255) # swap channels from RGB to BGR caffe_transformer.set_channel_swap('data', (2, 1, 0)) # Preprocess and compute image # One image only if args.input_file is not None: with open(args.input_file, 'rb') as f: image_data = f.read() # Preprocessing original_image, transformed_image = caffe_preprocess( caffe_net=nsfw_net, image_data=image_data, caffe_transformer=caffe_transformer ) # Calculating scores scores = caffe_compute( transformed_image=transformed_image, caffe_net=nsfw_net, output_layers=['prob'] ) # Calculating class activation map if args.save_cam_path is not None: if not os.path.isdir(args.save_cam_path): os.mkdir(args.save_cam_path) out_layer = 'fc_nsfw' last_conv = 'conv_stage3_block2_branch2c' weights_LR = nsfw_net.params[out_layer][0].data activation_lastconv = nsfw_net.blobs[last_conv].data save_CAM_caffe(image_name=args.input_file, image=original_image, fc_weights=weights_LR, activation_lastconv=activation_lastconv, class_dict=class_dict, class_name='sexy', dest_folder='/home/daivuong/Desktop', image_size=224 ) print("NSFW score: {}".format(scores[1])) # Input is a file of many images elif args.input_label_file is not None: scores = [] df = pd.read_csv( args.input_label_file, header=None, delimiter=' ', names=['file_name', 'label'] ) for i in tqdm(range(len(df))): with open(df.iloc[i, 0], 'rb') as f: image_data = f.read() # Preprocessing try: original_image, transformed_image = caffe_preprocess( caffe_net=nsfw_net, image_data=image_data, caffe_transformer=caffe_transformer ) except: print("Cannot load images") continue # Calculating scores sexy_score = caffe_compute( transformed_image=transformed_image, caffe_net=nsfw_net, output_layers=['prob'] )[1] scores.append(sexy_score) # Caclulating class activation map # It will store predicted images into seperated # folders based on rounded scores (from 0.0 to 1.0) # and these two folders will be stored into ground # truth folder if args.save_cam_path is not None: if not os.path.isdir(args.save_cam_path): os.mkdir(args.save_cam_path) # Ground truth folder label_path = os.path.join( args.save_cam_path, str(df.iloc[i, 1]) ) if not os.path.isdir(label_path): os.mkdir(label_path) # Rounded scores folders dest = os.path.join( label_path, str(round(sexy_score, 1)) ) if not os.path.isdir(dest): os.mkdir(dest) # Calculate CAM out_layer = 'fc_nsfw' last_conv = 'conv_stage3_block2_branch2c' weights_LR = nsfw_net.params[out_layer][0].data activation_lastconv = nsfw_net.blobs[last_conv].data save_CAM_caffe(image_name=df.iloc[i, 0], image=original_image, 
fc_weights=weights_LR, activation_lastconv=activation_lastconv, class_dict=class_dict, class_name='sexy', dest_folder=dest, image_size=256 ) if args.save_to_folder_path is not None: if not os.path.isdir(args.save_to_folder_path): os.mkdir(args.save_to_folder_path) # Ground truth folder label_path = os.path.join( args.save_to_folder_path, str(df.iloc[i, 1]) ) if not os.path.isdir(label_path): os.mkdir(label_path) # Rounded scores folders dest = os.path.join( label_path, str(round(sexy_score, 1)) ) if not os.path.isdir(dest): os.mkdir(dest) src = df.iloc[i, 0] dst = os.path.join(dest, src.split('/')[-1]) os.rename(src, dst) df['scores'] = scores df['NSFW'] = (df['scores'] >= args.threshold) # From boolean to int df['NSFW'] = df['NSFW'] + 0 y = df['label'] y_pred = df['NSFW'] # confusion matrix and classification report visualization target_names = ['nosexy', 'sexy'] cnf_matrix = confusion_matrix(df['label'], df['NSFW']) report = classification_report(y, y_pred, target_names=target_names) file_name = args.pretrained_model.split('/')[-1].split('.')[0] + '_cnf_matrix.png' visualize_result.save_confusion_matrix_classification_report(cnf_matrix=cnf_matrix, classification_report=report, class_names=target_names, file_name=file_name) # Accuracy accuracy = accuracy_score(y, y_pred) print("Accuracy: {}".format(accuracy)) # Plot ROC curve file_name=args.pretrained_model.split('/')[-1].split('.')[0] + '_roc_curve.png' fpr, tpr, thresholds = roc_curve(y, df['scores'], pos_label=1) visualize_result.plot_roc_curve(fpr, tpr, file_name=file_name) # Precision/recall curve file_name = args.pretrained_model.split('/')[-1].split('.')[0] + '_precision_recall.png' precisions, recalls, thresholds = precision_recall_curve(y, df['scores']) visualize_result.plot_precision_recall_vs_threshold(precisions, recalls, thresholds, file_name=file_name) # Score result file_name = args.pretrained_model.split('/')[-1].split('.')[0] + '_result.txt' df[['file_name', 'label', 'scores', 'NSFW']].to_csv( file_name, sep=' ', header=None, index=None) if __name__ == '__main__': main(sys.argv)
[]
[]
[ "CAFFE_ROOT" ]
[]
["CAFFE_ROOT"]
python
1
0
src/client/pkg/discovery/discovery_test.go
package discovery

import (
	"errors"
	"fmt"
	"os"
	"testing"

	"github.com/pachyderm/pachyderm/src/client/pkg/require"
)

func TestEtcdClient(t *testing.T) {
	if os.Getenv("ETCD_PORT_2379_TCP_ADDR") == "" {
		t.Skip("skipping test; $ETCD_PORT_2379_TCP_ADDR not set")
	}
	t.Parallel()
	client, err := getEtcdClient()
	require.NoError(t, err)
	runTest(t, client)
}

func TestEtcdWatch(t *testing.T) {
	if os.Getenv("ETCD_PORT_2379_TCP_ADDR") == "" {
		t.Skip("skipping test; $ETCD_PORT_2379_TCP_ADDR not set")
	}
	t.Parallel()
	client, err := getEtcdClient()
	require.NoError(t, err)
	runWatchTest(t, client)
}

func runTest(t *testing.T, client Client) {
	err := client.Set("foo", "one", 0)
	require.NoError(t, err)
	value, err := client.Get("foo")
	require.NoError(t, err)
	require.Equal(t, "one", value)

	err = client.Set("a/b/foo", "one", 0)
	require.NoError(t, err)
	err = client.Set("a/b/bar", "two", 0)
	require.NoError(t, err)
	values, err := client.GetAll("a/b")
	require.NoError(t, err)
	require.Equal(t, map[string]string{"a/b/foo": "one", "a/b/bar": "two"}, values)

	require.NoError(t, client.Close())
}

func runWatchTest(t *testing.T, client Client) {
	cancel := make(chan bool)
	err := client.WatchAll(
		"watchAll/foo",
		cancel,
		func(value map[string]string) error {
			if value == nil {
				return client.Set("watchAll/foo/bar", "quux", 0)
			}
			require.Equal(t, map[string]string{"watchAll/foo/bar": "quux"}, value)
			close(cancel)
			return nil
		},
	)
	require.Equal(t, ErrCancelled, err)
}

func getEtcdClient() (Client, error) {
	etcdAddress, err := getEtcdAddress()
	if err != nil {
		return nil, err
	}
	return NewEtcdClient(etcdAddress), nil
}

func getEtcdAddress() (string, error) {
	etcdAddr := os.Getenv("ETCD_PORT_2379_TCP_ADDR")
	if etcdAddr == "" {
		return "", errors.New("ETCD_PORT_2379_TCP_ADDR not set")
	}
	return fmt.Sprintf("http://%s:2379", etcdAddr), nil
}
[ "\"ETCD_PORT_2379_TCP_ADDR\"", "\"ETCD_PORT_2379_TCP_ADDR\"", "\"ETCD_PORT_2379_TCP_ADDR\"" ]
[]
[ "ETCD_PORT_2379_TCP_ADDR" ]
[]
["ETCD_PORT_2379_TCP_ADDR"]
go
1
0
official/benchmark/bert_benchmark.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Executes BERT benchmarks and accuracy tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import json import math import os import time # pylint: disable=g-bad-import-order from absl import flags from absl.testing import flagsaver import tensorflow as tf # pylint: enable=g-bad-import-order from official.benchmark import bert_benchmark_utils as benchmark_utils from official.nlp.bert import configs from official.nlp.bert import run_classifier from official.utils.misc import distribution_utils from official.benchmark import benchmark_wrappers # pylint: disable=line-too-long PRETRAINED_CHECKPOINT_PATH = 'gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16/bert_model.ckpt' CLASSIFIER_TRAIN_DATA_PATH = 'gs://tf-perfzero-data/bert/classification/mrpc_train.tf_record' CLASSIFIER_EVAL_DATA_PATH = 'gs://tf-perfzero-data/bert/classification/mrpc_eval.tf_record' CLASSIFIER_INPUT_META_DATA_PATH = 'gs://tf-perfzero-data/bert/classification/mrpc_meta_data' MODEL_CONFIG_FILE_PATH = 'gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16/bert_config.json' # pylint: enable=line-too-long TMP_DIR = os.getenv('TMPDIR') FLAGS = flags.FLAGS class BertClassifyBenchmarkBase(benchmark_utils.BertBenchmarkBase): """Base class to hold methods common to test classes in the module.""" def __init__(self, output_dir=None, tpu=None): super(BertClassifyBenchmarkBase, self).__init__(output_dir) self.num_epochs = None self.num_steps_per_epoch = None self.tpu = tpu FLAGS.steps_per_loop = 50 @flagsaver.flagsaver def _run_bert_classifier(self, callbacks=None, use_ds=True): """Starts BERT classification task.""" with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader: input_meta_data = json.loads(reader.read().decode('utf-8')) bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file) epochs = self.num_epochs if self.num_epochs else FLAGS.num_train_epochs if self.num_steps_per_epoch: steps_per_epoch = self.num_steps_per_epoch else: train_data_size = input_meta_data['train_data_size'] steps_per_epoch = int(train_data_size / FLAGS.train_batch_size) warmup_steps = int(epochs * steps_per_epoch * 0.1) eval_steps = int( math.ceil(input_meta_data['eval_data_size'] / FLAGS.eval_batch_size)) if self.tpu: strategy = distribution_utils.get_distribution_strategy( distribution_strategy='tpu', tpu_address=self.tpu) else: strategy = distribution_utils.get_distribution_strategy( distribution_strategy='mirrored' if use_ds else 'off', num_gpus=self.num_gpus) max_seq_length = input_meta_data['max_seq_length'] train_input_fn = run_classifier.get_dataset_fn( FLAGS.train_data_path, max_seq_length, FLAGS.train_batch_size, is_training=True) eval_input_fn = run_classifier.get_dataset_fn( FLAGS.eval_data_path, max_seq_length, 
FLAGS.eval_batch_size, is_training=False) run_classifier.run_bert_classifier( strategy, bert_config, input_meta_data, FLAGS.model_dir, epochs, steps_per_epoch, FLAGS.steps_per_loop, eval_steps, warmup_steps, FLAGS.learning_rate, FLAGS.init_checkpoint, train_input_fn, eval_input_fn, custom_callbacks=callbacks) class BertClassifyBenchmarkReal(BertClassifyBenchmarkBase): """Short benchmark performance tests for BERT model. Tests BERT classification performance in different GPU, TPU configurations. The naming convention of below test cases follow `benchmark_(number of gpus)_gpu_(dataset type)` for GPUs and `benchmark_(topology)_tpu_(dataset type)` for TPUs. """ def __init__(self, output_dir=TMP_DIR, tpu=None, **kwargs): super(BertClassifyBenchmarkReal, self).__init__( output_dir=output_dir, tpu=tpu) self.train_data_path = CLASSIFIER_TRAIN_DATA_PATH self.eval_data_path = CLASSIFIER_EVAL_DATA_PATH self.bert_config_file = MODEL_CONFIG_FILE_PATH self.input_meta_data_path = CLASSIFIER_INPUT_META_DATA_PATH # Since we only care about performance metrics, we limit # the number of training steps and epochs to prevent unnecessarily # long tests. self.num_steps_per_epoch = 100 self.num_epochs = 1 @benchmark_wrappers.enable_runtime_flags def _run_and_report_benchmark(self, training_summary_path, min_accuracy=0, max_accuracy=1, use_ds=True): """Starts BERT performance benchmark test.""" start_time_sec = time.time() self._run_bert_classifier(callbacks=[self.timer_callback], use_ds=use_ds) wall_time_sec = time.time() - start_time_sec with tf.io.gfile.GFile(training_summary_path, 'rb') as reader: summary = json.loads(reader.read().decode('utf-8')) # Since we do not load from any pretrained checkpoints, we ignore all # accuracy metrics. summary.pop('eval_metrics', None) summary['start_time_sec'] = start_time_sec super(BertClassifyBenchmarkReal, self)._report_benchmark( stats=summary, wall_time_sec=wall_time_sec, min_accuracy=min_accuracy, max_accuracy=max_accuracy) def benchmark_1_gpu_mrpc(self): """Test BERT model performance with 1 GPU.""" self._setup() self.num_gpus = 1 FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_mrpc') FLAGS.train_data_path = self.train_data_path FLAGS.eval_data_path = self.eval_data_path FLAGS.input_meta_data_path = self.input_meta_data_path FLAGS.bert_config_file = self.bert_config_file FLAGS.train_batch_size = 4 FLAGS.eval_batch_size = 4 summary_path = os.path.join(FLAGS.model_dir, 'summaries/training_summary.txt') self._run_and_report_benchmark(summary_path) def benchmark_1_gpu_mrpc_xla(self): """Test BERT model performance with 1 GPU.""" self._setup() self.num_gpus = 1 FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_mrpc_xla') FLAGS.train_data_path = self.train_data_path FLAGS.eval_data_path = self.eval_data_path FLAGS.input_meta_data_path = self.input_meta_data_path FLAGS.bert_config_file = self.bert_config_file FLAGS.train_batch_size = 4 FLAGS.eval_batch_size = 4 FLAGS.enable_xla = True summary_path = os.path.join(FLAGS.model_dir, 'summaries/training_summary.txt') self._run_and_report_benchmark(summary_path) def benchmark_1_gpu_mrpc_no_dist_strat(self): """Test BERT model performance with 1 GPU, no distribution strategy.""" self._setup() self.num_gpus = 1 FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_mrpc_no_dist_strat') FLAGS.train_data_path = self.train_data_path FLAGS.eval_data_path = self.eval_data_path FLAGS.input_meta_data_path = self.input_meta_data_path FLAGS.bert_config_file = self.bert_config_file FLAGS.train_batch_size = 4 FLAGS.eval_batch_size 
= 4 summary_path = os.path.join(FLAGS.model_dir, 'summaries/training_summary.txt') self._run_and_report_benchmark(summary_path, use_ds=False) def benchmark_8_gpu_mrpc(self): """Test BERT model performance with 8 GPUs.""" self._setup() FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mrpc') FLAGS.train_data_path = self.train_data_path FLAGS.eval_data_path = self.eval_data_path FLAGS.input_meta_data_path = self.input_meta_data_path FLAGS.bert_config_file = self.bert_config_file summary_path = os.path.join(FLAGS.model_dir, 'summaries/training_summary.txt') self._run_and_report_benchmark(summary_path) def benchmark_1_gpu_amp_mrpc_no_dist_strat(self): """Performance for 1 GPU no DS with automatic mixed precision.""" self._setup() self.num_gpus = 1 FLAGS.model_dir = self._get_model_dir( 'benchmark_1_gpu_amp_mrpc_no_dist_strat') FLAGS.train_data_path = self.train_data_path FLAGS.eval_data_path = self.eval_data_path FLAGS.input_meta_data_path = self.input_meta_data_path FLAGS.bert_config_file = self.bert_config_file FLAGS.train_batch_size = 4 FLAGS.eval_batch_size = 4 FLAGS.dtype = 'fp16' FLAGS.fp16_implementation = 'graph_rewrite' summary_path = os.path.join(FLAGS.model_dir, 'summaries/training_summary.txt') self._run_and_report_benchmark(summary_path, use_ds=False) def benchmark_8_gpu_amp_mrpc(self): """Test BERT model performance with 8 GPUs with automatic mixed precision. """ self._setup() self.num_gpus = 8 FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_amp_mrpc') FLAGS.train_data_path = self.train_data_path FLAGS.eval_data_path = self.eval_data_path FLAGS.input_meta_data_path = self.input_meta_data_path FLAGS.bert_config_file = self.bert_config_file FLAGS.train_batch_size = 32 FLAGS.eval_batch_size = 32 FLAGS.dtype = 'fp16' FLAGS.fp16_implementation = 'graph_rewrite' summary_path = os.path.join(FLAGS.model_dir, 'summaries/training_summary.txt') self._run_and_report_benchmark(summary_path, use_ds=False) def benchmark_2x2_tpu_mrpc(self): """Test BERT model performance with 2x2 TPU.""" self._setup() FLAGS.model_dir = self._get_model_dir('benchmark_2x2_tpu_mrpc') FLAGS.train_data_path = self.train_data_path FLAGS.eval_data_path = self.eval_data_path FLAGS.input_meta_data_path = self.input_meta_data_path FLAGS.bert_config_file = self.bert_config_file FLAGS.train_batch_size = 32 FLAGS.eval_batch_size = 32 summary_path = os.path.join(FLAGS.model_dir, 'summaries/training_summary.txt') self._run_and_report_benchmark(summary_path, use_ds=False) class BertClassifyAccuracy(BertClassifyBenchmarkBase): """Short accuracy test for BERT model. Tests BERT classification task model accuracy. The naming convention of below test cases follow `benchmark_(number of gpus)_gpu_(dataset type)` format. 
""" def __init__(self, output_dir=TMP_DIR, **kwargs): self.train_data_path = CLASSIFIER_TRAIN_DATA_PATH self.eval_data_path = CLASSIFIER_EVAL_DATA_PATH self.bert_config_file = MODEL_CONFIG_FILE_PATH self.input_meta_data_path = CLASSIFIER_INPUT_META_DATA_PATH self.pretrained_checkpoint_path = PRETRAINED_CHECKPOINT_PATH super(BertClassifyAccuracy, self).__init__(output_dir=output_dir) @benchmark_wrappers.enable_runtime_flags def _run_and_report_benchmark(self, training_summary_path, min_accuracy=0.84, max_accuracy=0.88): """Starts BERT accuracy benchmark test.""" start_time_sec = time.time() self._run_bert_classifier(callbacks=[self.timer_callback]) wall_time_sec = time.time() - start_time_sec with tf.io.gfile.GFile(training_summary_path, 'rb') as reader: summary = json.loads(reader.read().decode('utf-8')) super(BertClassifyAccuracy, self)._report_benchmark( stats=summary, wall_time_sec=wall_time_sec, min_accuracy=min_accuracy, max_accuracy=max_accuracy) def _setup(self): super(BertClassifyAccuracy, self)._setup() FLAGS.train_data_path = self.train_data_path FLAGS.eval_data_path = self.eval_data_path FLAGS.input_meta_data_path = self.input_meta_data_path FLAGS.bert_config_file = self.bert_config_file FLAGS.init_checkpoint = self.pretrained_checkpoint_path def benchmark_8_gpu_mrpc(self): """Run BERT model accuracy test with 8 GPUs. Due to comparatively small cardinality of MRPC dataset, training accuracy metric has high variance between trainings. As so, we set the wide range of allowed accuracy (84% to 88%). """ self._setup() FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mrpc') summary_path = os.path.join(FLAGS.model_dir, 'summaries/training_summary.txt') self._run_and_report_benchmark(summary_path) def benchmark_8_gpu_mrpc_xla(self): """Run BERT model accuracy test with 8 GPUs with XLA.""" self._setup() FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mrpc_xla') FLAGS.enable_xla = True summary_path = os.path.join(FLAGS.model_dir, 'summaries/training_summary.txt') self._run_and_report_benchmark(summary_path) if __name__ == '__main__': tf.test.main()
[]
[]
[ "TMPDIR" ]
[]
["TMPDIR"]
python
1
0
launch/docker_test.go
package launch import ( "bytes" "fmt" "os" "os/exec" "strings" "sync" "syscall" "testing" "time" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" ) const ( fakeProcessLifeTime = 100 * time.Second waitForKillTime = 100 * time.Millisecond ) type fakeExecCommand struct { id string execCmd func(command string, args ...string) *exec.Cmd commands []string } type mockInteract struct { Interacter } func newFakeExecCommand(id string) *fakeExecCommand { c := &fakeExecCommand{} c.id = id c.commands = make([]string, 0, 5) c.execCmd = func(name string, args ...string) *exec.Cmd { c.commands = append(c.commands, fmt.Sprintf("%s %s", name, strings.Join(args, " "))) cs := []string{"-test.run=TestHelperProcess", "--", name} cs = append(cs, args...) cmd := exec.Command(os.Args[0], cs...) cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1", fmt.Sprintf("GO_TEST_MODE=%s", id)} return cmd } return c } func (d *mockInteract) Run(c *exec.Cmd, commands [][]string) error { return c.Run() } func TestNewDocker(t *testing.T) { t.Run("success", func(t *testing.T) { expected := &docker{ volume: "SD_LAUNCH_BIN", habVolume: "SD_LAUNCH_HAB", setupImage: "launcher", setupImageVersion: "latest", useSudo: false, interactiveMode: false, commands: make([]*exec.Cmd, 0, 10), mutex: &sync.Mutex{}, flagVerbose: false, interact: &Interact{}, socketPath: "/auth.sock", localVolumes: []string{"path:path"}, } d := newDocker("launcher", "latest", false, false, "/auth.sock", false, []string{"path:path"}, false) assert.Equal(t, expected, d) }) } func TestSetupBin(t *testing.T) { defer func() { execCommand = exec.Command }() d := &docker{ volume: "SD_LAUNCH_BIN", setupImage: "launcher", setupImageVersion: "latest", } testCase := []struct { name string id string expectError error }{ {"success", "SUCCESS_SETUP_BIN", nil}, {"failure volume create", "FAIL_CREATING_VOLUME", fmt.Errorf("failed to create docker volume: exit status 1")}, {"failure container run", "FAIL_CONTAINER_RUN", fmt.Errorf("failed to prepare build scripts: exit status 1")}, {"failure launcher image pull", "FAIL_LAUNCHER_PULL", fmt.Errorf("failed to pull launcher image: exit status 1")}, } for _, tt := range testCase { t.Run(tt.name, func(t *testing.T) { c := newFakeExecCommand(tt.id) execCommand = c.execCmd err := d.setupBin() assert.Equal(t, tt.expectError, err) }) } } func TestSetupBinWithSudo(t *testing.T) { defer func() { execCommand = exec.Command }() d := &docker{ volume: "SD_LAUNCH_BIN", setupImage: "launcher", setupImageVersion: "latest", useSudo: true, } testCase := []struct { name string id string expectError error }{ {"success", "SUCCESS_SETUP_BIN_SUDO", nil}, {"failure volume create", "FAIL_CREATING_VOLUME_SUDO", fmt.Errorf("failed to create docker volume: exit status 1")}, {"failure container run", "FAIL_CONTAINER_RUN_SUDO", fmt.Errorf("failed to prepare build scripts: exit status 1")}, {"failure launcher image pull", "FAIL_LAUNCHER_PULL_SUDO", fmt.Errorf("failed to pull launcher image: exit status 1")}, } for _, tt := range testCase { t.Run(tt.name, func(t *testing.T) { c := newFakeExecCommand(tt.id) execCommand = c.execCmd err := d.setupBin() assert.Equal(t, tt.expectError, err) }) } } func TestRunBuild(t *testing.T) { defer func() { execCommand = exec.Command }() d := &docker{ volume: "SD_LAUNCH_BIN", setupImage: "launcher", setupImageVersion: "latest", } testCase := []struct { name string id string expectError error expectedCommands []string buildEntry buildEntry }{ {"success", "SUCCESS_RUN_BUILD", nil, []string{ "docker pull node:12", 
fmt.Sprintf("docker container run --rm -v /:/sd/workspace/src/screwdriver.cd/sd-local/local-build -v sd-artifacts/:/test/artifacts -v %s:/opt/sd -v %s:/opt/sd/hab -v %s:/tmp/auth.sock -e SSH_AUTH_SOCK=/tmp/auth.sock node:12 /opt/sd/local_run.sh ", d.volume, d.habVolume, os.Getenv("SSH_AUTH_SOCK"))}, newBuildEntry()}, {"success with memory limit", "SUCCESS_RUN_BUILD", nil, []string{ "docker pull node:12", fmt.Sprintf("docker container run -m2GB --rm -v /:/sd/workspace/src/screwdriver.cd/sd-local/local-build -v sd-artifacts/:/test/artifacts -v %s:/opt/sd -v %s:/opt/sd/hab -v %s:/tmp/auth.sock -e SSH_AUTH_SOCK=/tmp/auth.sock node:12 /opt/sd/local_run.sh ", d.volume, d.habVolume, os.Getenv("SSH_AUTH_SOCK"))}, newBuildEntry(func(b *buildEntry) { b.MemoryLimit = "2GB" })}, {"failure build run", "FAIL_BUILD_CONTAINER_RUN", fmt.Errorf("failed to run build container: exit status 1"), []string{}, newBuildEntry()}, {"failure build image pull", "FAIL_BUILD_IMAGE_PULL", fmt.Errorf("failed to pull user image exit status 1"), []string{}, newBuildEntry()}, } for _, tt := range testCase { t.Run(tt.name, func(t *testing.T) { c := newFakeExecCommand(tt.id) execCommand = c.execCmd err := d.runBuild(tt.buildEntry) for i, expectedCommand := range tt.expectedCommands { assert.True(t, strings.Contains(c.commands[i], expectedCommand), "expect %q \nbut got \n%q", expectedCommand, c.commands[i]) } if tt.expectError != nil { assert.Equal(t, tt.expectError.Error(), err.Error()) } else { assert.Nil(t, err) } }) } } func TestRunBuildWithSudo(t *testing.T) { defer func() { execCommand = exec.Command }() d := &docker{ volume: "SD_LAUNCH_BIN", setupImage: "launcher", setupImageVersion: "latest", useSudo: true, } testCase := []struct { name string id string expectError error expectedCommands []string buildEntry buildEntry }{ {"success", "SUCCESS_RUN_BUILD_SUDO", nil, []string{ "sudo docker pull node:12", fmt.Sprintf("sudo docker container run --rm -v /:/sd/workspace/src/screwdriver.cd/sd-local/local-build -v sd-artifacts/:/test/artifacts -v %s:/opt/sd -v %s:/opt/sd/hab -v %s:/tmp/auth.sock -e SSH_AUTH_SOCK=/tmp/auth.sock node:12 /opt/sd/local_run.sh ", d.volume, d.habVolume, os.Getenv("SSH_AUTH_SOCK"))}, newBuildEntry()}, {"success with memory limit", "SUCCESS_RUN_BUILD_SUDO", nil, []string{ "sudo docker pull node:12", fmt.Sprintf("sudo docker container run -m2GB --rm -v /:/sd/workspace/src/screwdriver.cd/sd-local/local-build -v sd-artifacts/:/test/artifacts -v %s:/opt/sd -v %s:/opt/sd/hab -v %s:/tmp/auth.sock -e SSH_AUTH_SOCK=/tmp/auth.sock node:12 /opt/sd/local_run.sh ", d.volume, d.habVolume, os.Getenv("SSH_AUTH_SOCK"))}, newBuildEntry(func(b *buildEntry) { b.MemoryLimit = "2GB" })}, {"failure build run", "FAIL_BUILD_CONTAINER_RUN_SUDO", fmt.Errorf("failed to run build container: exit status 1"), []string{}, newBuildEntry()}, {"failure build image pull", "FAIL_BUILD_IMAGE_PULL_SUDO", fmt.Errorf("failed to pull user image exit status 1"), []string{}, newBuildEntry()}, } for _, tt := range testCase { t.Run(tt.name, func(t *testing.T) { c := newFakeExecCommand(tt.id) execCommand = c.execCmd err := d.runBuild(tt.buildEntry) for i, expectedCommand := range tt.expectedCommands { assert.True(t, strings.Contains(c.commands[i], expectedCommand), "expect %q \nbut got \n%q", expectedCommand, c.commands[i]) } if tt.expectError != nil { assert.Equal(t, tt.expectError.Error(), err.Error()) } else { assert.Nil(t, err) } }) } } func TestRunBuildWithInteractiveMode(t *testing.T) { defer func() { execCommand = exec.Command }() d := 
&docker{ volume: "SD_LAUNCH_BIN", setupImage: "launcher", setupImageVersion: "latest", useSudo: true, interactiveMode: true, interact: &mockInteract{}, } testCase := []struct { name string id string expectError error expectedCommands []string buildEntry buildEntry }{ {"success", "SUCCESS_RUN_BUILD_INTERACT", nil, []string{ "sudo docker pull node:12", fmt.Sprintf("sudo docker container run -itd --rm -v /:/sd/workspace/src/screwdriver.cd/sd-local/local-build -v sd-artifacts/:/test/artifacts -v %s:/opt/sd -v %s:/opt/sd/hab -v %s:/tmp/auth.sock -e SSH_AUTH_SOCK=/tmp/auth.sock node:12 /bin/sh", d.volume, d.habVolume, os.Getenv("SSH_AUTH_SOCK")), "sudo docker attach "}, newBuildEntry()}, {"success with memory limit", "SUCCESS_RUN_BUILD_INTERACT", nil, []string{ "sudo docker pull node:12", fmt.Sprintf("sudo docker container run -m2GB -itd --rm -v /:/sd/workspace/src/screwdriver.cd/sd-local/local-build -v sd-artifacts/:/test/artifacts -v %s:/opt/sd -v %s:/opt/sd/hab -v %s:/tmp/auth.sock -e SSH_AUTH_SOCK=/tmp/auth.sock node:12 /bin/sh", d.volume, d.habVolume, os.Getenv("SSH_AUTH_SOCK")), "sudo docker attach SUCCESS_RUN_BUILD_INTERACT"}, newBuildEntry(func(b *buildEntry) { b.MemoryLimit = "2GB" })}, {"failure build run", "FAIL_BUILD_CONTAINER_RUN_INTERACT", fmt.Errorf("failed to run build container: exit status 1"), []string{}, newBuildEntry()}, {"failure attach build container", "FAIL_BUILD_CONTAINER_ATTACH_INTERACT", fmt.Errorf("failed to attach build container: exit status 1"), []string{}, newBuildEntry()}, {"failure build image pull", "FAIL_BUILD_IMAGE_PULL_INTERACT", fmt.Errorf("failed to pull user image exit status 1"), []string{}, newBuildEntry()}, } for _, tt := range testCase { t.Run(tt.name, func(t *testing.T) { c := newFakeExecCommand(tt.id) execCommand = c.execCmd err := d.runBuild(tt.buildEntry) for i, expectedCommand := range tt.expectedCommands { assert.True(t, strings.Contains(c.commands[i], expectedCommand), "expect %q \nbut got \n%q", expectedCommand, c.commands[i]) } if tt.expectError != nil { assert.Equal(t, tt.expectError.Error(), err.Error()) } else { assert.Nil(t, err) } }) } } func TestDockerKill(t *testing.T) { t.Run("success with no commands", func(t *testing.T) { defer func() { execCommand = exec.Command logrus.SetOutput(os.Stderr) }() d := &docker{ volume: "SD_LAUNCH_BIN", setupImage: "launcher", setupImageVersion: "latest", useSudo: false, mutex: &sync.Mutex{}, } c := newFakeExecCommand("SUCCESS_TO_KILL") execCommand = c.execCmd buf := bytes.NewBuffer(nil) logrus.SetOutput(buf) d.kill(syscall.SIGINT) assert.Equal(t, "", buf.String()) }) t.Run("success", func(t *testing.T) { defer func() { execCommand = exec.Command logrus.SetOutput(os.Stderr) }() c := newFakeExecCommand("SUCCESS_TO_KILL") execCommand = c.execCmd d := &docker{ volume: "SD_LAUNCH_BIN", setupImage: "launcher", setupImageVersion: "latest", useSudo: false, commands: []*exec.Cmd{execCommand("sleep")}, mutex: &sync.Mutex{}, } d.commands[0].Start() go func() { time.Sleep(waitForKillTime) d.mutex.Lock() // For some reason, "ProcessState" is not changed in "Process.Signal" or "syscall.kill", so change "ProcessState" directly. 
d.commands[0].ProcessState = &os.ProcessState{} d.mutex.Unlock() }() buf := bytes.NewBuffer(nil) logrus.SetOutput(buf) d.kill(syscall.SIGINT) actual := buf.String() assert.Equal(t, "", actual) }) t.Run("failure", func(t *testing.T) { defer func() { execCommand = exec.Command logrus.SetOutput(os.Stderr) }() c := newFakeExecCommand("FAIL_TO_KILL") execCommand = c.execCmd command := execCommand("sleep") d := &docker{ volume: "SD_LAUNCH_BIN", setupImage: "launcher", setupImageVersion: "latest", useSudo: false, commands: []*exec.Cmd{command}, mutex: &sync.Mutex{}, } d.commands[0].Start() PidTmp := d.commands[0].Process.Pid defer func() { syscall.Kill(PidTmp, syscall.SIGINT) }() d.commands[0].Process.Pid = 0 buf := bytes.NewBuffer(nil) logrus.SetOutput(buf) d.kill(syscall.SIGINT) actual := buf.String() expected := "failed to stop process:" assert.True(t, strings.Contains(actual, expected), fmt.Sprintf("\nexpected: %s \nactual: %s\n", expected, actual)) }) t.Run("success with sudo", func(t *testing.T) { defer func() { execCommand = exec.Command }() c := newFakeExecCommand("SUCCESS_TO_KILL") execCommand = c.execCmd d := &docker{ volume: "SD_LAUNCH_BIN", setupImage: "launcher", setupImageVersion: "latest", useSudo: true, commands: []*exec.Cmd{execCommand("sleep")}, mutex: &sync.Mutex{}, } d.commands[0].Start() go func() { time.Sleep(waitForKillTime) d.mutex.Lock() d.commands[0].ProcessState = &os.ProcessState{} d.mutex.Unlock() }() d.kill(syscall.SIGINT) assert.Equal(t, fmt.Sprintf("sudo kill -2 %v", d.commands[0].Process.Pid), c.commands[1]) }) } func TestDockerClean(t *testing.T) { t.Run("success", func(t *testing.T) { defer func() { execCommand = exec.Command }() c := newFakeExecCommand("SUCCESS_TO_CLEAN") execCommand = c.execCmd d := &docker{ volume: "SD_LAUNCH_BIN", setupImage: "launcher", setupImageVersion: "latest", commands: []*exec.Cmd{}, useSudo: false, } d.clean() assert.Equal(t, fmt.Sprintf("docker volume rm --force %v", d.volume), c.commands[0]) }) t.Run("success with sudo", func(t *testing.T) { defer func() { execCommand = exec.Command }() c := newFakeExecCommand("SUCCESS_TO_CLEAN") execCommand = c.execCmd d := &docker{ volume: "SD_LAUNCH_BIN", setupImage: "launcher", setupImageVersion: "latest", commands: []*exec.Cmd{}, useSudo: true, } d.clean() assert.Equal(t, fmt.Sprintf("sudo docker volume rm --force %v", d.volume), c.commands[0]) }) t.Run("failure", func(t *testing.T) { defer func() { execCommand = exec.Command logrus.SetOutput(os.Stderr) }() c := newFakeExecCommand("FAIL_TO_CLEAN") execCommand = c.execCmd d := &docker{ volume: "SD_LAUNCH_BIN", setupImage: "launcher", setupImageVersion: "latest", commands: []*exec.Cmd{}, useSudo: false, } buf := bytes.NewBuffer(nil) logrus.SetOutput(buf) d.clean() expected := "failed to remove volume:" assert.True(t, strings.Contains(buf.String(), expected), fmt.Sprintf("\nexpected: %s \nactual: %s\n", expected, buf.String())) }) } func TestHelperProcess(t *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } defer os.Exit(0) args := os.Args for len(args) > 0 { if args[0] == "--" { args = args[1:] break } args = args[1:] } if len(args) == 0 { fmt.Fprintf(os.Stderr, "no command\n") os.Exit(2) } cmd, subcmd, args := args[0], args[1], args[2:] _, _ = cmd, args testCase := os.Getenv("GO_TEST_MODE") if strings.Index(testCase, "SUDO") > 0 || strings.Index(testCase, "INTERACT") > 0 { subcmd = args[0] } fmt.Print(testCase) switch testCase { case "": os.Exit(1) case "SUCCESS_SETUP_BIN": os.Exit(0) case "SUCCESS_SETUP_BIN_SUDO": 
os.Exit(0) case "SUCCESS_SETUP_BIN_INTERACT": os.Exit(0) case "FAIL_CREATING_VOLUME": os.Exit(1) case "FAIL_CREATING_VOLUME_SUDO": os.Exit(1) case "FAIL_CONTAINER_RUN": if subcmd == "volume" { os.Exit(0) } if subcmd == "pull" { os.Exit(0) } os.Exit(1) case "FAIL_CONTAINER_RUN_SUDO": if subcmd == "volume" { os.Exit(0) } if subcmd == "pull" { os.Exit(0) } os.Exit(1) case "SUCCESS_RUN_BUILD": os.Exit(0) case "SUCCESS_RUN_BUILD_SUDO": os.Exit(0) case "SUCCESS_RUN_BUILD_INTERACT": os.Exit(0) case "FAIL_BUILD_CONTAINER_RUN": if subcmd == "pull" { os.Exit(0) } os.Exit(1) case "FAIL_BUILD_CONTAINER_RUN_SUDO": if subcmd == "pull" { os.Exit(0) } os.Exit(1) case "FAIL_BUILD_CONTAINER_RUN_INTERACT": if subcmd == "pull" { os.Exit(0) } os.Exit(1) case "FAIL_BUILD_CONTAINER_ATTACH_INTERACT": if subcmd == "attach" { os.Exit(1) } os.Exit(0) case "FAIL_LAUNCHER_PULL": if subcmd == "pull" { os.Exit(1) } os.Exit(0) case "FAIL_LAUNCHER_PULL_SUDO": if subcmd == "pull" { os.Exit(1) } os.Exit(0) case "FAIL_BUILD_IMAGE_PULL": if subcmd == "pull" { os.Exit(1) } os.Exit(0) case "FAIL_BUILD_IMAGE_PULL_SUDO": if subcmd == "pull" { os.Exit(1) } os.Exit(0) case "FAIL_BUILD_IMAGE_PULL_INTERACT": if subcmd == "pull" { os.Exit(1) } os.Exit(0) case "SUCCESS_TO_KILL": if subcmd == "sleep" { time.Sleep(fakeProcessLifeTime) os.Exit(0) } os.Exit(0) case "FAIL_TO_KILL": if subcmd == "sleep" { time.Sleep(fakeProcessLifeTime) os.Exit(0) } os.Exit(1) case "SUCCESS_TO_CLEAN": os.Exit(0) case "FAIL_TO_CLEAN": os.Exit(1) } }
[ "\"SSH_AUTH_SOCK\"", "\"SSH_AUTH_SOCK\"", "\"SSH_AUTH_SOCK\"", "\"SSH_AUTH_SOCK\"", "\"SSH_AUTH_SOCK\"", "\"SSH_AUTH_SOCK\"", "\"GO_WANT_HELPER_PROCESS\"", "\"GO_TEST_MODE\"" ]
[]
[ "GO_WANT_HELPER_PROCESS", "GO_TEST_MODE", "SSH_AUTH_SOCK" ]
[]
["GO_WANT_HELPER_PROCESS", "GO_TEST_MODE", "SSH_AUTH_SOCK"]
go
3
0
pkg/driver/postgres/postgres_test.go
package postgres import ( "database/sql" "net/url" "os" "runtime" "testing" "github.com/amacneil/dbmate/pkg/dbmate" "github.com/amacneil/dbmate/pkg/dbutil" "github.com/stretchr/testify/require" ) func testPostgresDriver(t *testing.T) *Driver { u := dbutil.MustParseURL(os.Getenv("POSTGRES_TEST_URL")) drv, err := dbmate.New(u).GetDriver() require.NoError(t, err) return drv.(*Driver) } func prepTestPostgresDB(t *testing.T) *sql.DB { drv := testPostgresDriver(t) // drop any existing database err := drv.DropDatabase() require.NoError(t, err) // create database err = drv.CreateDatabase() require.NoError(t, err) // connect database db, err := sql.Open("postgres", drv.databaseURL.String()) require.NoError(t, err) return db } func TestGetDriver(t *testing.T) { db := dbmate.New(dbutil.MustParseURL("postgres://")) drvInterface, err := db.GetDriver() require.NoError(t, err) // driver should have URL and default migrations table set drv, ok := drvInterface.(*Driver) require.True(t, ok) require.Equal(t, db.DatabaseURL.String(), drv.databaseURL.String()) require.Equal(t, "schema_migrations", drv.migrationsTableName) } func defaultConnString() string { switch runtime.GOOS { case "linux": return "postgres://:5432/foo?host=%2Fvar%2Frun%2Fpostgresql" case "darwin", "freebsd", "dragonfly", "openbsd", "netbsd": return "postgres://:5432/foo?host=%2Ftmp" default: return "postgres://localhost:5432/foo" } } func TestConnectionString(t *testing.T) { cases := []struct { input string expected string }{ // defaults {"postgres:///foo", defaultConnString()}, // support custom url params {"postgres://bob:secret@myhost:1234/foo?bar=baz", "postgres://bob:secret@myhost:1234/foo?bar=baz"}, // support `host` and `port` via url params {"postgres://bob:secret@myhost:1234/foo?host=new&port=9999", "postgres://bob:secret@:9999/foo?host=new"}, {"postgres://bob:secret@myhost:1234/foo?port=9999&bar=baz", "postgres://bob:secret@myhost:9999/foo?bar=baz"}, // support unix sockets via `host` or `socket` param {"postgres://bob:secret@myhost:1234/foo?host=/var/run/postgresql", "postgres://bob:secret@:1234/foo?host=%2Fvar%2Frun%2Fpostgresql"}, {"postgres://bob:secret@localhost/foo?socket=/var/run/postgresql", "postgres://bob:secret@:5432/foo?host=%2Fvar%2Frun%2Fpostgresql"}, {"postgres:///foo?socket=/var/run/postgresql", "postgres://:5432/foo?host=%2Fvar%2Frun%2Fpostgresql"}, } for _, c := range cases { t.Run(c.input, func(t *testing.T) { u, err := url.Parse(c.input) require.NoError(t, err) actual := connectionString(u) require.Equal(t, c.expected, actual) }) } } func TestConnectionArgsForDump(t *testing.T) { cases := []struct { input string expected []string }{ // defaults {"postgres:///foo", []string{defaultConnString()}}, // support single schema {"postgres:///foo?search_path=foo", []string{"--schema", "foo", defaultConnString()}}, // support multiple schemas {"postgres:///foo?search_path=foo,public", []string{"--schema", "foo", "--schema", "public", defaultConnString()}}, } for _, c := range cases { t.Run(c.input, func(t *testing.T) { u, err := url.Parse(c.input) require.NoError(t, err) actual := connectionArgsForDump(u) require.Equal(t, c.expected, actual) }) } } func TestPostgresCreateDropDatabase(t *testing.T) { drv := testPostgresDriver(t) // drop any existing database err := drv.DropDatabase() require.NoError(t, err) // create database err = drv.CreateDatabase() require.NoError(t, err) // check that database exists and we can connect to it func() { db, err := sql.Open("postgres", drv.databaseURL.String()) require.NoError(t, err) 
defer dbutil.MustClose(db) err = db.Ping() require.NoError(t, err) }() // drop the database err = drv.DropDatabase() require.NoError(t, err) // check that database no longer exists func() { db, err := sql.Open("postgres", drv.databaseURL.String()) require.NoError(t, err) defer dbutil.MustClose(db) err = db.Ping() require.Error(t, err) require.Equal(t, "pq: database \"dbmate_test\" does not exist", err.Error()) }() } func TestPostgresDumpSchema(t *testing.T) { t.Run("default migrations table", func(t *testing.T) { drv := testPostgresDriver(t) // prepare database db := prepTestPostgresDB(t) defer dbutil.MustClose(db) err := drv.CreateMigrationsTable(db) require.NoError(t, err) // insert migration err = drv.InsertMigration(db, "abc1") require.NoError(t, err) err = drv.InsertMigration(db, "abc2") require.NoError(t, err) // DumpSchema should return schema schema, err := drv.DumpSchema(db) require.NoError(t, err) require.Contains(t, string(schema), "CREATE TABLE public.schema_migrations") require.Contains(t, string(schema), "\n--\n"+ "-- PostgreSQL database dump complete\n"+ "--\n\n\n"+ "--\n"+ "-- Dbmate schema migrations\n"+ "--\n\n"+ "INSERT INTO public.schema_migrations (version) VALUES\n"+ " ('abc1'),\n"+ " ('abc2');\n") // DumpSchema should return error if command fails drv.databaseURL.Path = "/fakedb" schema, err = drv.DumpSchema(db) require.Nil(t, schema) require.EqualError(t, err, "pg_dump: [archiver (db)] connection to database "+ "\"fakedb\" failed: FATAL: database \"fakedb\" does not exist") }) t.Run("custom migrations table with schema", func(t *testing.T) { drv := testPostgresDriver(t) drv.migrationsTableName = "camelSchema.testMigrations" // prepare database db := prepTestPostgresDB(t) defer dbutil.MustClose(db) err := drv.CreateMigrationsTable(db) require.NoError(t, err) // insert migration err = drv.InsertMigration(db, "abc1") require.NoError(t, err) err = drv.InsertMigration(db, "abc2") require.NoError(t, err) // DumpSchema should return schema schema, err := drv.DumpSchema(db) require.NoError(t, err) require.Contains(t, string(schema), "CREATE TABLE \"camelSchema\".\"testMigrations\"") require.Contains(t, string(schema), "\n--\n"+ "-- PostgreSQL database dump complete\n"+ "--\n\n\n"+ "--\n"+ "-- Dbmate schema migrations\n"+ "--\n\n"+ "INSERT INTO \"camelSchema\".\"testMigrations\" (version) VALUES\n"+ " ('abc1'),\n"+ " ('abc2');\n") }) } func TestPostgresDatabaseExists(t *testing.T) { drv := testPostgresDriver(t) // drop any existing database err := drv.DropDatabase() require.NoError(t, err) // DatabaseExists should return false exists, err := drv.DatabaseExists() require.NoError(t, err) require.Equal(t, false, exists) // create database err = drv.CreateDatabase() require.NoError(t, err) // DatabaseExists should return true exists, err = drv.DatabaseExists() require.NoError(t, err) require.Equal(t, true, exists) } func TestPostgresDatabaseExists_Error(t *testing.T) { drv := testPostgresDriver(t) drv.databaseURL.User = url.User("invalid") exists, err := drv.DatabaseExists() require.Error(t, err) require.Equal(t, "pq: password authentication failed for user \"invalid\"", err.Error()) require.Equal(t, false, exists) } func TestPostgresCreateMigrationsTable(t *testing.T) { t.Run("default schema", func(t *testing.T) { drv := testPostgresDriver(t) db := prepTestPostgresDB(t) defer dbutil.MustClose(db) // migrations table should not exist count := 0 err := db.QueryRow("select count(*) from public.schema_migrations").Scan(&count) require.Error(t, err) require.Equal(t, "pq: relation 
\"public.schema_migrations\" does not exist", err.Error()) // create table err = drv.CreateMigrationsTable(db) require.NoError(t, err) // migrations table should exist err = db.QueryRow("select count(*) from public.schema_migrations").Scan(&count) require.NoError(t, err) // create table should be idempotent err = drv.CreateMigrationsTable(db) require.NoError(t, err) }) t.Run("custom search path", func(t *testing.T) { drv := testPostgresDriver(t) drv.migrationsTableName = "testMigrations" u, err := url.Parse(drv.databaseURL.String() + "&search_path=camelFoo") require.NoError(t, err) drv.databaseURL = u db := prepTestPostgresDB(t) defer dbutil.MustClose(db) // delete schema _, err = db.Exec("drop schema if exists \"camelFoo\"") require.NoError(t, err) // drop any testMigrations table in public schema _, err = db.Exec("drop table if exists public.\"testMigrations\"") require.NoError(t, err) // migrations table should not exist in either schema count := 0 err = db.QueryRow("select count(*) from \"camelFoo\".\"testMigrations\"").Scan(&count) require.Error(t, err) require.Equal(t, "pq: relation \"camelFoo.testMigrations\" does not exist", err.Error()) err = db.QueryRow("select count(*) from public.\"testMigrations\"").Scan(&count) require.Error(t, err) require.Equal(t, "pq: relation \"public.testMigrations\" does not exist", err.Error()) // create table err = drv.CreateMigrationsTable(db) require.NoError(t, err) // camelFoo schema should be created, and migrations table should exist only in camelFoo schema err = db.QueryRow("select count(*) from \"camelFoo\".\"testMigrations\"").Scan(&count) require.NoError(t, err) err = db.QueryRow("select count(*) from public.\"testMigrations\"").Scan(&count) require.Error(t, err) require.Equal(t, "pq: relation \"public.testMigrations\" does not exist", err.Error()) // create table should be idempotent err = drv.CreateMigrationsTable(db) require.NoError(t, err) }) t.Run("custom schema", func(t *testing.T) { drv := testPostgresDriver(t) drv.migrationsTableName = "camelSchema.testMigrations" u, err := url.Parse(drv.databaseURL.String() + "&search_path=foo") require.NoError(t, err) drv.databaseURL = u db := prepTestPostgresDB(t) defer dbutil.MustClose(db) // delete schemas _, err = db.Exec("drop schema if exists foo") require.NoError(t, err) _, err = db.Exec("drop schema if exists \"camelSchema\"") require.NoError(t, err) // migrations table should not exist count := 0 err = db.QueryRow("select count(*) from \"camelSchema\".\"testMigrations\"").Scan(&count) require.Error(t, err) require.Equal(t, "pq: relation \"camelSchema.testMigrations\" does not exist", err.Error()) // create table err = drv.CreateMigrationsTable(db) require.NoError(t, err) // camelSchema should be created, and testMigrations table should exist err = db.QueryRow("select count(*) from \"camelSchema\".\"testMigrations\"").Scan(&count) require.NoError(t, err) // testMigrations table should not exist in foo schema because // schema specified with migrations table name takes priority over search path err = db.QueryRow("select count(*) from foo.\"testMigrations\"").Scan(&count) require.Error(t, err) require.Equal(t, "pq: relation \"foo.testMigrations\" does not exist", err.Error()) // create table should be idempotent err = drv.CreateMigrationsTable(db) require.NoError(t, err) }) } func TestPostgresSelectMigrations(t *testing.T) { drv := testPostgresDriver(t) drv.migrationsTableName = "test_migrations" db := prepTestPostgresDB(t) defer dbutil.MustClose(db) err := drv.CreateMigrationsTable(db) 
require.NoError(t, err) _, err = db.Exec(`insert into public.test_migrations (version) values ('abc2'), ('abc1'), ('abc3')`) require.NoError(t, err) migrations, err := drv.SelectMigrations(db, -1) require.NoError(t, err) require.Equal(t, true, migrations["abc1"]) require.Equal(t, true, migrations["abc2"]) require.Equal(t, true, migrations["abc2"]) // test limit param migrations, err = drv.SelectMigrations(db, 1) require.NoError(t, err) require.Equal(t, true, migrations["abc3"]) require.Equal(t, false, migrations["abc1"]) require.Equal(t, false, migrations["abc2"]) } func TestPostgresInsertMigration(t *testing.T) { drv := testPostgresDriver(t) drv.migrationsTableName = "test_migrations" db := prepTestPostgresDB(t) defer dbutil.MustClose(db) err := drv.CreateMigrationsTable(db) require.NoError(t, err) count := 0 err = db.QueryRow("select count(*) from public.test_migrations").Scan(&count) require.NoError(t, err) require.Equal(t, 0, count) // insert migration err = drv.InsertMigration(db, "abc1") require.NoError(t, err) err = db.QueryRow("select count(*) from public.test_migrations where version = 'abc1'"). Scan(&count) require.NoError(t, err) require.Equal(t, 1, count) } func TestPostgresDeleteMigration(t *testing.T) { drv := testPostgresDriver(t) drv.migrationsTableName = "test_migrations" db := prepTestPostgresDB(t) defer dbutil.MustClose(db) err := drv.CreateMigrationsTable(db) require.NoError(t, err) _, err = db.Exec(`insert into public.test_migrations (version) values ('abc1'), ('abc2')`) require.NoError(t, err) err = drv.DeleteMigration(db, "abc2") require.NoError(t, err) count := 0 err = db.QueryRow("select count(*) from public.test_migrations").Scan(&count) require.NoError(t, err) require.Equal(t, 1, count) } func TestPostgresPing(t *testing.T) { drv := testPostgresDriver(t) // drop any existing database err := drv.DropDatabase() require.NoError(t, err) // ping database err = drv.Ping() require.NoError(t, err) // ping invalid host should return error drv.databaseURL.Host = "postgres:404" err = drv.Ping() require.Error(t, err) require.Contains(t, err.Error(), "connect: connection refused") } func TestPostgresQuotedMigrationsTableName(t *testing.T) { t.Run("default schema", func(t *testing.T) { drv := testPostgresDriver(t) db := prepTestPostgresDB(t) defer dbutil.MustClose(db) name, err := drv.quotedMigrationsTableName(db) require.NoError(t, err) require.Equal(t, "public.schema_migrations", name) }) t.Run("custom schema", func(t *testing.T) { drv := testPostgresDriver(t) u, err := url.Parse(drv.databaseURL.String() + "&search_path=foo,bar,public") require.NoError(t, err) drv.databaseURL = u db := prepTestPostgresDB(t) defer dbutil.MustClose(db) _, err = db.Exec("drop schema if exists foo") require.NoError(t, err) _, err = db.Exec("drop schema if exists bar") require.NoError(t, err) // should use first schema from search path name, err := drv.quotedMigrationsTableName(db) require.NoError(t, err) require.Equal(t, "foo.schema_migrations", name) }) t.Run("no schema", func(t *testing.T) { drv := testPostgresDriver(t) db := prepTestPostgresDB(t) defer dbutil.MustClose(db) // this is an unlikely edge case, but if for some reason there is // no current schema then we should default to "public" _, err := db.Exec("select pg_catalog.set_config('search_path', '', false)") require.NoError(t, err) name, err := drv.quotedMigrationsTableName(db) require.NoError(t, err) require.Equal(t, "public.schema_migrations", name) }) t.Run("custom table name", func(t *testing.T) { drv := testPostgresDriver(t) db 
:= prepTestPostgresDB(t) defer dbutil.MustClose(db) drv.migrationsTableName = "simple_name" name, err := drv.quotedMigrationsTableName(db) require.NoError(t, err) require.Equal(t, "public.simple_name", name) }) t.Run("custom table name quoted", func(t *testing.T) { drv := testPostgresDriver(t) db := prepTestPostgresDB(t) defer dbutil.MustClose(db) // this table name will need quoting drv.migrationsTableName = "camelCase" name, err := drv.quotedMigrationsTableName(db) require.NoError(t, err) require.Equal(t, "public.\"camelCase\"", name) }) t.Run("custom table name with custom schema", func(t *testing.T) { drv := testPostgresDriver(t) u, err := url.Parse(drv.databaseURL.String() + "&search_path=foo") require.NoError(t, err) drv.databaseURL = u db := prepTestPostgresDB(t) defer dbutil.MustClose(db) _, err = db.Exec("create schema if not exists foo") require.NoError(t, err) drv.migrationsTableName = "simple_name" name, err := drv.quotedMigrationsTableName(db) require.NoError(t, err) require.Equal(t, "foo.simple_name", name) }) t.Run("custom table name overrides schema", func(t *testing.T) { drv := testPostgresDriver(t) u, err := url.Parse(drv.databaseURL.String() + "&search_path=foo") require.NoError(t, err) drv.databaseURL = u db := prepTestPostgresDB(t) defer dbutil.MustClose(db) _, err = db.Exec("create schema if not exists foo") require.NoError(t, err) _, err = db.Exec("create schema if not exists bar") require.NoError(t, err) // if schema is specified as part of table name, it should override search_path drv.migrationsTableName = "bar.simple_name" name, err := drv.quotedMigrationsTableName(db) require.NoError(t, err) require.Equal(t, "bar.simple_name", name) // schema and table name should be quoted if necessary drv.migrationsTableName = "barName.camelTable" name, err = drv.quotedMigrationsTableName(db) require.NoError(t, err) require.Equal(t, "\"barName\".\"camelTable\"", name) // more than 2 components is unexpected but we will quote and pass it along anyway drv.migrationsTableName = "whyWould.i.doThis" name, err = drv.quotedMigrationsTableName(db) require.NoError(t, err) require.Equal(t, "\"whyWould\".i.\"doThis\"", name) }) }
[ "\"POSTGRES_TEST_URL\"" ]
[]
[ "POSTGRES_TEST_URL" ]
[]
["POSTGRES_TEST_URL"]
go
1
0
src/main/java/ch11holding/D30_EnvironmentVariables.java
package ch11holding;

import java.util.*;

/**
 * <pre>
 * (Execute to see output)
 * </pre>
 */
public class D30_EnvironmentVariables {
	public static void main(String[] args) {
		for (Map.Entry entry : System.getenv().entrySet()) {
			System.out.println(entry.getKey() + ": " + entry.getValue());
		}
	}
}
[]
[]
[]
[]
[]
java
0
0
visualize.py
""" Mask R-CNN Display and Visualization Functions. Copyright (c) 2017 Matterport, Inc. Licensed under the MIT License (see LICENSE for details) Written by Waleed Abdulla """ import os import random import itertools import colorsys import numpy as np from skimage.measure import find_contours import matplotlib.pyplot as plt if "DISPLAY" not in os.environ: plt.switch_backend('agg') import matplotlib.patches as patches import matplotlib.lines as lines from matplotlib.patches import Polygon import utils ############################################################ # Visualization ############################################################ def display_images(images, titles=None, cols=4, cmap=None, norm=None, interpolation=None): """Display the given set of images, optionally with titles. images: list or array of image tensors in HWC format. titles: optional. A list of titles to display with each image. cols: number of images per row cmap: Optional. Color map to use. For example, "Blues". norm: Optional. A Normalize instance to map values to colors. interpolation: Optional. Image interporlation to use for display. """ titles = titles if titles is not None else [""] * len(images) rows = len(images) // cols + 1 plt.figure(figsize=(14, 14 * rows // cols)) i = 1 for image, title in zip(images, titles): plt.subplot(rows, cols, i) plt.title(title, fontsize=9) plt.axis('off') plt.imshow(image.astype(np.uint8), cmap=cmap, norm=norm, interpolation=interpolation) i += 1 plt.show() def random_colors(N, bright=True): """ Generate random colors. To get visually distinct colors, generate them in HSV space then convert to RGB. """ brightness = 1.0 if bright else 0.7 hsv = [(i / N, 1, brightness) for i in range(N)] colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv)) random.shuffle(colors) return colors def apply_mask(image, mask, color, alpha=0.5): """Apply the given mask to the image. """ for c in range(3): image[:, :, c] = np.where(mask == 1, image[:, :, c] * (1 - alpha) + alpha * color[c] * 255, image[:, :, c]) return image def display_instances(image, boxes, masks, class_ids, class_names, scores=None, title="", figsize=(16, 16), ax=None): """ boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates. masks: [height, width, num_instances] class_ids: [num_instances] class_names: list of class names of the dataset scores: (optional) confidence scores for each box figsize: (optional) the size of the image. """ # Number of instances N = boxes.shape[0] if not N: print("\n*** No instances to display *** \n") else: assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0] if not ax: _, ax = plt.subplots(1, figsize=figsize) # Generate random colors colors = random_colors(N) # Show area outside image boundaries. height, width = image.shape[:2] ax.set_ylim(height + 10, -10) ax.set_xlim(-10, width + 10) ax.axis('off') ax.set_title(title) masked_image = image.astype(np.uint32).copy() for i in range(N): color = colors[i] # Bounding box if not np.any(boxes[i]): # Skip this instance. Has no bbox. Likely lost in image cropping. 
continue y1, x1, y2, x2 = boxes[i] p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, alpha=0.7, linestyle="dashed", edgecolor=color, facecolor='none') ax.add_patch(p) # Label class_id = class_ids[i] score = scores[i] if scores is not None else None label = class_names[class_id] x = random.randint(x1, (x1 + x2) // 2) caption = "{} {:.3f}".format(label, score) if score else label ax.text(x1, y1 + 8, caption, color='w', size=11, backgroundcolor="none") # Mask mask = masks[:, :, i] masked_image = apply_mask(masked_image, mask, color) # Mask Polygon # Pad to ensure proper polygons for masks that touch image edges. padded_mask = np.zeros( (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8) padded_mask[1:-1, 1:-1] = mask contours = find_contours(padded_mask, 0.5) for verts in contours: # Subtract the padding and flip (y, x) to (x, y) verts = np.fliplr(verts) - 1 p = Polygon(verts, facecolor="none", edgecolor=color) ax.add_patch(p) ax.imshow(masked_image.astype(np.uint8)) plt.show() def draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10): """ anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates. proposals: [n, 4] the same anchors but refined to fit objects better. """ masked_image = image.copy() # Pick random anchors in case there are too many. ids = np.arange(rois.shape[0], dtype=np.int32) ids = np.random.choice( ids, limit, replace=False) if ids.shape[0] > limit else ids fig, ax = plt.subplots(1, figsize=(12, 12)) if rois.shape[0] > limit: plt.title("Showing {} random ROIs out of {}".format( len(ids), rois.shape[0])) else: plt.title("{} ROIs".format(len(ids))) # Show area outside image boundaries. ax.set_ylim(image.shape[0] + 20, -20) ax.set_xlim(-50, image.shape[1] + 20) ax.axis('off') for i, id in enumerate(ids): color = np.random.rand(3) class_id = class_ids[id] # ROI y1, x1, y2, x2 = rois[id] p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, edgecolor=color if class_id else "gray", facecolor='none', linestyle="dashed") ax.add_patch(p) # Refined ROI if class_id: ry1, rx1, ry2, rx2 = refined_rois[id] p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2, edgecolor=color, facecolor='none') ax.add_patch(p) # Connect the top-left corners of the anchor and proposal for easy visualization ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color)) # Label label = class_names[class_id] ax.text(rx1, ry1 + 8, "{}".format(label), color='w', size=11, backgroundcolor="none") # Mask m = utils.unmold_mask(mask[id], rois[id] [:4].astype(np.int32), image.shape) masked_image = apply_mask(masked_image, m, color) ax.imshow(masked_image) # Print stats print("Positive ROIs: ", class_ids[class_ids > 0].shape[0]) print("Negative ROIs: ", class_ids[class_ids == 0].shape[0]) print("Positive Ratio: {:.2f}".format( class_ids[class_ids > 0].shape[0] / class_ids.shape[0])) # TODO: Replace with matplotlib equivalent? def draw_box(image, box, color): """Draw 3-pixel width bounding boxes on the given image array. color: list of 3 int values for RGB. 
""" y1, x1, y2, x2 = box image[y1:y1 + 2, x1:x2] = color image[y2:y2 + 2, x1:x2] = color image[y1:y2, x1:x1 + 2] = color image[y1:y2, x2:x2 + 2] = color return image def display_top_masks(image, mask, class_ids, class_names, limit=4): """Display the given image and the top few class masks.""" to_display = [] titles = [] to_display.append(image) titles.append("H x W={}x{}".format(image.shape[0], image.shape[1])) # Pick top prominent classes in this image unique_class_ids = np.unique(class_ids) mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]]) for i in unique_class_ids] top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area), key=lambda r: r[1], reverse=True) if v[1] > 0] # Generate images and titles for i in range(limit): class_id = top_ids[i] if i < len(top_ids) else -1 # Pull masks of instances belonging to the same class. m = mask[:, :, np.where(class_ids == class_id)[0]] m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1) to_display.append(m) titles.append(class_names[class_id] if class_id != -1 else "-") display_images(to_display, titles=titles, cols=limit + 1, cmap="Blues_r") def plot_precision_recall(AP, precisions, recalls): """Draw the precision-recall curve. AP: Average precision at IoU >= 0.5 precisions: list of precision values recalls: list of recall values """ # Plot the Precision-Recall curve _, ax = plt.subplots(1) ax.set_title("Precision-Recall Curve. AP@50 = {:.3f}".format(AP)) ax.set_ylim(0, 1.1) ax.set_xlim(0, 1.1) _ = ax.plot(recalls, precisions) def plot_overlaps(gt_class_ids, pred_class_ids, pred_scores, overlaps, class_names, threshold=0.5): """Draw a grid showing how ground truth objects are classified. gt_class_ids: [N] int. Ground truth class IDs pred_class_id: [N] int. Predicted class IDs pred_scores: [N] float. The probability scores of predicted classes overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictins and GT boxes. class_names: list of all class names in the dataset threshold: Float. The prediction probability required to predict a class """ gt_class_ids = gt_class_ids[gt_class_ids != 0] pred_class_ids = pred_class_ids[pred_class_ids != 0] plt.figure(figsize=(12, 10)) plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues) plt.yticks(np.arange(len(pred_class_ids)), ["{} ({:.2f})".format(class_names[int(id)], pred_scores[i]) for i, id in enumerate(pred_class_ids)]) plt.xticks(np.arange(len(gt_class_ids)), [class_names[int(id)] for id in gt_class_ids], rotation=90) thresh = overlaps.max() / 2. for i, j in itertools.product(range(overlaps.shape[0]), range(overlaps.shape[1])): text = "" if overlaps[i, j] > threshold: text = "match" if gt_class_ids[j] == pred_class_ids[i] else "wrong" color = ("white" if overlaps[i, j] > thresh else "black" if overlaps[i, j] > 0 else "grey") plt.text(j, i, "{:.3f}\n{}".format(overlaps[i, j], text), horizontalalignment="center", verticalalignment="center", fontsize=9, color=color) plt.tight_layout() plt.xlabel("Ground Truth") plt.ylabel("Predictions") def draw_boxes(image, boxes=None, refined_boxes=None, masks=None, captions=None, visibilities=None, title="", ax=None): """Draw bounding boxes and segmentation masks with differnt customizations. boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates. refined_boxes: Like boxes, but draw with solid lines to show that they're the result of refining 'boxes'. masks: [N, height, width] captions: List of N titles to display on each box visibilities: (optional) List of values of 0, 1, or 2. Determine how prominant each bounding box should be. 
title: An optional title to show over the image ax: (optional) Matplotlib axis to draw on. """ # Number of boxes assert boxes is not None or refined_boxes is not None N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0] # Matplotlib Axis if not ax: _, ax = plt.subplots(1, figsize=(12, 12)) # Generate random colors colors = random_colors(N) # Show area outside image boundaries. margin = image.shape[0] // 10 ax.set_ylim(image.shape[0] + margin, -margin) ax.set_xlim(-margin, image.shape[1] + margin) ax.axis('off') ax.set_title(title) masked_image = image.astype(np.uint32).copy() for i in range(N): # Box visibility visibility = visibilities[i] if visibilities is not None else 1 if visibility == 0: color = "gray" style = "dotted" alpha = 0.5 elif visibility == 1: color = colors[i] style = "dotted" alpha = 1 elif visibility == 2: color = colors[i] style = "solid" alpha = 1 # Boxes if boxes is not None: if not np.any(boxes[i]): # Skip this instance. Has no bbox. Likely lost in cropping. continue y1, x1, y2, x2 = boxes[i] p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, alpha=alpha, linestyle=style, edgecolor=color, facecolor='none') ax.add_patch(p) # Refined boxes if refined_boxes is not None and visibility > 0: ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32) p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2, edgecolor=color, facecolor='none') ax.add_patch(p) # Connect the top-left corners of the anchor and proposal if boxes is not None: ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color)) # Captions if captions is not None: caption = captions[i] # If there are refined boxes, display captions on them if refined_boxes is not None: y1, x1, y2, x2 = ry1, rx1, ry2, rx2 x = random.randint(x1, (x1 + x2) // 2) ax.text(x1, y1, caption, size=11, verticalalignment='top', color='w', backgroundcolor="none", bbox={'facecolor': color, 'alpha': 0.5, 'pad': 2, 'edgecolor': 'none'}) # Masks if masks is not None: mask = masks[:, :, i] masked_image = apply_mask(masked_image, mask, color) # Mask Polygon # Pad to ensure proper polygons for masks that touch image edges. 
padded_mask = np.zeros( (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8) padded_mask[1:-1, 1:-1] = mask contours = find_contours(padded_mask, 0.5) for verts in contours: # Subtract the padding and flip (y, x) to (x, y) verts = np.fliplr(verts) - 1 p = Polygon(verts, facecolor="none", edgecolor=color) ax.add_patch(p) ax.imshow(masked_image.astype(np.uint8)) def plot_loss(loss, val_loss, save=True, log_dir=None): loss = np.array(loss) val_loss = np.array(val_loss) plt.figure("loss") plt.gcf().clear() plt.plot(loss[:, 0], label='train') plt.plot(val_loss[:, 0], label='valid') plt.xlabel('epoch') plt.ylabel('loss') plt.legend() if save: save_path = os.path.join(log_dir, "loss.png") plt.savefig(save_path) else: plt.show(block=False) plt.pause(0.1) plt.figure("rpn_class_loss") plt.gcf().clear() plt.plot(loss[:, 1], label='train') plt.plot(val_loss[:, 1], label='valid') plt.xlabel('epoch') plt.ylabel('loss') plt.legend() if save: save_path = os.path.join(log_dir, "rpn_class_loss.png") plt.savefig(save_path) else: plt.show(block=False) plt.pause(0.1) plt.figure("rpn_bbox_loss") plt.gcf().clear() plt.plot(loss[:, 2], label='train') plt.plot(val_loss[:, 2], label='valid') plt.xlabel('epoch') plt.ylabel('loss') plt.legend() if save: save_path = os.path.join(log_dir, "rpn_bbox_loss.png") plt.savefig(save_path) else: plt.show(block=False) plt.pause(0.1) plt.figure("mrcnn_class_loss") plt.gcf().clear() plt.plot(loss[:, 3], label='train') plt.plot(val_loss[:, 3], label='valid') plt.xlabel('epoch') plt.ylabel('loss') plt.legend() if save: save_path = os.path.join(log_dir, "mrcnn_class_loss.png") plt.savefig(save_path) else: plt.show(block=False) plt.pause(0.1) plt.figure("mrcnn_bbox_loss") plt.gcf().clear() plt.plot(loss[:, 4], label='train') plt.plot(val_loss[:, 4], label='valid') plt.xlabel('epoch') plt.ylabel('loss') plt.legend() if save: save_path = os.path.join(log_dir, "mrcnn_bbox_loss.png") plt.savefig(save_path) else: plt.show(block=False) plt.pause(0.1) plt.figure("mrcnn_mask_loss") plt.gcf().clear() plt.plot(loss[:, 5], label='train') plt.plot(val_loss[:, 5], label='valid') plt.xlabel('epoch') plt.ylabel('loss') plt.legend() if save: save_path = os.path.join(log_dir, "mrcnn_mask_loss.png") plt.savefig(save_path) else: plt.show(block=False) plt.pause(0.1)
[]
[]
[]
[]
[]
python
0
0
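A minimal usage sketch for the visualization module above (not part of the original file). It assumes the module is saved as visualize.py and that a Matterport-style Mask R-CNN model object with a detect() method and a matching class_names list already exist; model, class_names, and the image path are placeholders.

# Hypothetical usage sketch; `model` and `class_names` are assumed to exist
# (e.g. a trained Matterport Mask R-CNN model); they are not defined above.
import skimage.io
import visualize  # the module above, saved as visualize.py

image = skimage.io.imread("example.jpg")    # placeholder image path
results = model.detect([image], verbose=0)  # assumed Mask R-CNN model object
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                            class_names, r['scores'],
                            title="Predictions")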
bot.go
package main

import (
    "encoding/json"
    "flag"
    "github.com/ChimeraCoder/anaconda"
    "log"
    "net/url"
    "os"
)

type TwitterConfig struct {
    ConsumerKey       string `json:"ConsumerKey"`
    ConsumerSecret    string `json:"ConsumerSecret"`
    AccessToken       string `json:"AccessToken"`
    AccessTokenSecret string `json:"AccessTokenSecret"`
}

var api *anaconda.TwitterApi

func main() {
    twitterConfig := TwitterConfig{}
    loadTwitterConfig(&twitterConfig)

    anaconda.SetConsumerKey(twitterConfig.ConsumerKey)
    anaconda.SetConsumerSecret(twitterConfig.ConsumerSecret)
    api = anaconda.NewTwitterApi(twitterConfig.AccessToken, twitterConfig.AccessTokenSecret)

    // Read from flags
    queryPtr := flag.String("q", "none", "Flag to specify twitter query.")
    actionPtr := flag.String("a", "none", "Flag to specify twitter action.")
    countPtr := flag.String("c", "0", "Flag to specify count of query items.")
    flag.Parse()

    encodedQuery := url.QueryEscape(*queryPtr)

    switch *actionPtr {
    case "favorite":
        favorite(encodedQuery, *countPtr)
    case "retweet":
        retweet(encodedQuery, *countPtr)
    case "follow":
        follow(encodedQuery, *countPtr)
    }
}

// Favorite all tweets for matching query.
func favorite(query string, count string) {
    searchResult, err := api.GetSearch(query, url.Values{"count": []string{count}})
    if err != nil {
        log.Println("Error querying search API.", err)
    }
    for _, tweet := range searchResult.Statuses {
        rt, rtErr := api.Favorite(tweet.Id)
        if rtErr != nil {
            log.Println("Error while favoriting.", rtErr)
        } else {
            log.Println("Favorited: twitter.com/" + rt.User.ScreenName + "/status/" + rt.IdStr)
        }
    }
}

// Re-tweet all tweets for matching query.
func retweet(query string, count string) {
    searchResult, err := api.GetSearch(query, url.Values{"count": []string{count}})
    if err != nil {
        log.Println("Error querying search API.", err)
    }
    for _, tweet := range searchResult.Statuses {
        rt, rtErr := api.Retweet(tweet.Id, false)
        if rtErr != nil {
            log.Println("Error while retweeting.", rtErr)
        } else {
            log.Println("Retweeted: twitter.com/" + rt.User.ScreenName + "/status/" + rt.IdStr)
        }
    }
}

// Follow users for matching query.
func follow(query string, count string) {
    searchResult, err := api.GetSearch(query, url.Values{"count": []string{count}})
    if err != nil {
        log.Println("Error querying search API.", err)
    }
    for _, tweet := range searchResult.Statuses {
        user, userErr := api.FollowUser(tweet.User.ScreenName)
        if userErr != nil {
            log.Println("Error while following.", userErr)
        } else {
            log.Println("Followed: twitter.com/" + user.ScreenName)
        }
    }
}

// Load the twitter config from the path given in an environment variable.
func loadTwitterConfig(config *TwitterConfig) {
    configFilePath := os.Getenv("TWITTER_CONFIG_FILE_PATH")

    file, fileErr := os.Open(configFilePath)
    if fileErr != nil {
        log.Fatal("Error opening file.", fileErr)
    }

    jsonDecoder := json.NewDecoder(file)
    decodeErr := jsonDecoder.Decode(&config)
    if decodeErr != nil {
        log.Fatal("Error loading twitter json configuration.", decodeErr)
    }
}
[ "\"TWITTER_CONFIG_FILE_PATH\"" ]
[]
[ "TWITTER_CONFIG_FILE_PATH" ]
[]
["TWITTER_CONFIG_FILE_PATH"]
go
1
0
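A short, hypothetical launch sketch for bot.go, written in Python for consistency with the other snippets. The JSON keys mirror the TwitterConfig struct and TWITTER_CONFIG_FILE_PATH is the variable read by loadTwitterConfig; the binary name ./bot, the config path, and the credential values are placeholders.

# Hypothetical launcher: writes the JSON config that loadTwitterConfig expects,
# points TWITTER_CONFIG_FILE_PATH at it, then runs the compiled bot binary.
import json
import os
import subprocess

config = {
    "ConsumerKey": "<consumer-key>",            # placeholder credentials
    "ConsumerSecret": "<consumer-secret>",
    "AccessToken": "<access-token>",
    "AccessTokenSecret": "<access-token-secret>",
}
with open("twitter_config.json", "w") as f:
    json.dump(config, f)

env = dict(os.environ, TWITTER_CONFIG_FILE_PATH="twitter_config.json")
subprocess.run(["./bot", "-q", "golang", "-a", "favorite", "-c", "5"], env=env)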
saws/models.py
from sqlalchemy.orm import backref
from . import db
from werkzeug.security import check_password_hash, generate_password_hash
from flask_login import UserMixin


class User(db.Model, UserMixin):
    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    password = db.Column(db.String)
    email = db.Column(db.String)
    account = db.relationship('Account', backref='users', uselist=False)

    def check_password(self, password):
        """Check hashed password."""
        return check_password_hash(self.password, password)

    def set_password(self, password):
        """Create hashed password."""
        self.password = generate_password_hash(password, method='sha256')


class Account(db.Model):
    __tablename__ = 'accounts'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)
    access_key = db.Column(db.String)
    secret_key = db.Column(db.String)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
[]
[]
[]
[]
[]
python
null
null
null
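A minimal usage sketch for the models above. It assumes the saws package provides the shared db instance (implied by "from . import db") and an application factory named create_app; the factory name, e-mail, password, and key strings are placeholders, not part of the original project.

# Hypothetical usage; `create_app` is an assumed Flask application factory in
# the saws package, and the credential strings below are placeholders.
from saws import create_app, db
from saws.models import Account, User

app = create_app()
with app.app_context():
    user = User(email="alice@example.com")
    user.set_password("s3cret")                 # stored as a sha256 hash
    user.account = Account(name="personal",
                           access_key="<aws-access-key>",
                           secret_key="<aws-secret-key>")
    db.session.add(user)
    db.session.commit()

    assert user.check_password("s3cret")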