Dataset columns (name: type and observed range):

  filename          string, length 4 to 198
  content           string, length 25 to 939k
  environment       list
  variablearg       list
  constarg          list
  variableargjson   string, 1 distinct value
  constargjson      string, length 2 to 3.9k
  lang              string, 3 distinct values
  constargcount     float64, 0 to 129
  variableargcount  float64, 0 to 0
  sentence          string, 1 distinct value
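Each row below follows this schema, one field per line in the same column order, with the content field holding the full source file. As a reading aid, here is a sketch of the first row (appengine_flexible/redis/redis.go) written out as a Python dict. The field-to-value mapping is an assumption based on the column order above, the content value is abbreviated, and the sentence column (a single repeated value) does not appear in the preview rows.

# Hypothetical view of one record; values copied from the redis.go row below.
record = {
    "filename": "appengine_flexible/redis/redis.go",
    "content": "// Copyright 2015 Google Inc. ...",  # full Go source, abbreviated here
    "environment": ['"REDIS_ADDR"', '"REDIS_PASSWORD"'],
    "variablearg": [],
    "constarg": ["REDIS_PASSWORD", "REDIS_ADDR"],
    "variableargjson": "[]",
    "constargjson": '["REDIS_PASSWORD", "REDIS_ADDR"]',
    "lang": "go",
    "constargcount": 2.0,
    "variableargcount": 0.0,
}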
appengine_flexible/redis/redis.go
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

// Sample redis demonstrates use of a redis client from App Engine flexible environment.
package main

import (
	"fmt"
	"net/http"
	"os"

	"github.com/gomodule/redigo/redis"
	"google.golang.org/appengine"
)

var redisPool *redis.Pool

func main() {
	redisAddr := os.Getenv("REDIS_ADDR")
	redisPassword := os.Getenv("REDIS_PASSWORD")

	redisPool = &redis.Pool{
		Dial: func() (redis.Conn, error) {
			conn, err := redis.Dial("tcp", redisAddr)
			if redisPassword == "" {
				return conn, err
			}
			if err != nil {
				return nil, err
			}
			if _, err := conn.Do("AUTH", redisPassword); err != nil {
				conn.Close()
				return nil, err
			}
			return conn, nil
		},
		// TODO: Tune other settings, like IdleTimeout, MaxActive, MaxIdle, TestOnBorrow.
	}

	http.HandleFunc("/", handle)
	appengine.Main()
}

func handle(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path != "/" {
		http.NotFound(w, r)
		return
	}

	redisConn := redisPool.Get()
	defer redisConn.Close()

	count, err := redisConn.Do("INCR", "count")
	if err != nil {
		msg := fmt.Sprintf("Could not increment count: %v", err)
		http.Error(w, msg, http.StatusInternalServerError)
		return
	}
	fmt.Fprintf(w, "Count: %d", count)
}
[ "\"REDIS_ADDR\"", "\"REDIS_PASSWORD\"" ]
[]
[ "REDIS_PASSWORD", "REDIS_ADDR" ]
[]
["REDIS_PASSWORD", "REDIS_ADDR"]
go
2
0
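The constarg and constargjson fields hold the constant names passed to environment-variable lookups in the content field. As a hedged illustration of that relationship (not the extraction code actually used to build this dataset), the following sketch pulls such names out of a source string with simple regular expressions; run against the Go sample above it returns REDIS_ADDR and REDIS_PASSWORD, the same names as that row's constarg field, though not necessarily in the same order.

import re

# Hypothetical helper: recover constant env-var names from source text,
# in the spirit of the constarg column. Illustration only.
ENV_PATTERNS = [
    r'os\.Getenv\("([^"]+)"\)',        # Go: os.Getenv("NAME")
    r'os\.environ\.get\("([^"]+)"\)',  # Python: os.environ.get("NAME")
    r'os\.environ\["([^"]+)"\]',       # Python: os.environ["NAME"]
]

def constant_env_args(source: str) -> list:
    """Return constant names passed to common env-var lookups."""
    names = []
    for pattern in ENV_PATTERNS:
        names.extend(re.findall(pattern, source))
    # de-duplicate while keeping first-seen order
    return list(dict.fromkeys(names))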
main.go
package main

import (
	"fmt"
	"github.com/buger/jsonparser"
	"github.com/cavaliercoder/grab"
	"github.com/fatih/color"
	"github.com/joho/godotenv"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"time"
)

const second = time.Second

func main() {
	err := godotenv.Load()
	if err != nil {
		log.Fatal("Error loading .env file")
		color.Red("Error loading .env file")
	}
	serverID := os.Getenv("SERVERID")
	apiKEY := os.Getenv("APIKEY")
	backupNUM := os.Getenv("BACKUPNUM")
	panelURL := os.Getenv("PANELURL")

	req, err := http.NewRequest("GET", "https://"+panelURL+"/api/client/servers/"+serverID+"/backups", nil)
	if err != nil {
		// handle err
	}
	req.Header.Set("Accept", "application/json")
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+apiKEY)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		// handle err
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)

	uuid, _ := jsonparser.GetString(body, "data", "["+backupNUM+"]", "attributes", "uuid")
	//fmt.Println(uuid)

	req1, err1 := http.NewRequest("GET", "https://"+panelURL+"/api/client/servers/"+serverID+"/backups/"+uuid+"/download", nil)
	if err1 != nil {
		// handle err
	}
	req1.Header.Set("Accept", "application/json")
	req1.Header.Set("Content-Type", "application/json")
	req1.Header.Set("Authorization", "Bearer "+apiKEY)

	resp1, err1 := http.DefaultClient.Do(req1)
	if err1 != nil {
		// handle err
	}
	defer resp1.Body.Close()
	body1, _ := ioutil.ReadAll(resp1.Body)

	dlLink, _ := jsonparser.GetString(body1, "attributes", "url")

	////////////////////////////////////////////

	// create client
	client := grab.NewClient()
	req2, _ := grab.NewRequest(".", dlLink)

	// color defining
	blueText := color.New(color.FgCyan, color.Bold)
	greenText := color.New(color.FgHiGreen, color.Bold)

	// start download
	blueText.Printf("Downloading %v...\n", req2.URL())
	resp2 := client.Do(req2)
	greenText.Printf(" %v\n", resp2.HTTPResponse.Status)

	// start UI loop
	t := time.NewTicker(5000 * time.Millisecond)
	defer t.Stop()

Loop:
	for {
		select {
		case <-t.C:
			greenText.Printf(" transferred %v / %v megabytes (%.2f%%)\n",
				resp2.BytesComplete()/1048576,
				resp2.Size()/1048576,
				100*resp2.Progress())
		case <-resp2.Done:
			// download is complete
			break Loop
		}
	}

	// check for errors
	if err := resp2.Err(); err != nil {
		fmt.Fprintf(os.Stderr, "Download failed: %v\n", err)
		os.Exit(1)
	}

	fmt.Printf("Download saved to ./%v \n", resp2.Filename)
}
[ "\"SERVERID\"", "\"APIKEY\"", "\"BACKUPNUM\"", "\"PANELURL\"" ]
[]
[ "APIKEY", "PANELURL", "SERVERID", "BACKUPNUM" ]
[]
["APIKEY", "PANELURL", "SERVERID", "BACKUPNUM"]
go
4
0
services/runs/project/config.py
import os


class BaseConfig:
    """Base configuration"""

    TESTING = False
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SECRET_KEY = "my_precious"
    DEBUG_TB_ENABLED = False
    DEBUG_TB_INTERCEPT_REDIRECTS = False


class DevelopmentConfig(BaseConfig):
    """Development configuration"""

    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
    DEBUG_TB_ENABLED = True


class TestingConfig(BaseConfig):
    """Testing configuration"""

    TESTING = True
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_TEST_URL")


class ProductionConfig(BaseConfig):
    """Production configuration"""

    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
[]
[]
[ "DATABASE_URL", "DATABASE_TEST_URL" ]
[]
["DATABASE_URL", "DATABASE_TEST_URL"]
python
2
0
apps/slack.py
from talon.voice import Context, Key

from ..utils import text

ctx = Context("slack", bundle="com.tinyspeck.slackmacgap")

keymap = {
    # Channel
    "channel": Key("cmd-k"),
    "channel <dgndictation>": [Key("cmd-k"), text],
    "([channel] unread last | gopreev)": Key("alt-shift-up"),
    "([channel] unread next | goneck)": Key("alt-shift-down"),
    "(slack | lack) [channel] info": Key("cmd-shift-i"),
    "channel up": Key("alt-up"),
    "channel down": Key("alt-down"),
    # Navigation
    "(move | next) focus": Key("ctrl-`"),
    "[next] (section | zone)": Key("f6"),
    "(previous | last) (section | zone)": Key("shift-f6"),
    "(slack | lack) [direct] messages": Key("cmd-shift-k"),
    "(slack | lack) threads": Key("cmd-shift-t"),
    "(slack | lack) (history [next] | back | backward)": Key("cmd-["),
    "(slack | lack) forward": Key("cmd-]"),
    "[next] (element | bit)": Key("tab"),
    "(previous | last) (element | bit)": Key("shift-tab"),
    "(slack | lack) (my stuff | activity)": Key("cmd-shift-m"),
    "(slack | lack) directory": Key("cmd-shift-e"),
    "(slack | lack) (starred [items] | stars)": Key("cmd-shift-s"),
    "(slack | lack) unread [messages]": Key("cmd-j"),
    "(go | undo | toggle) full": Key("ctrl-cmd-f"),
    "(slack | lack) (find | search)": Key("cmd-f"),
    # Messaging
    "grab left": Key("shift-up"),
    "grab right": Key("shift-down"),
    "add line": Key("shift-enter"),
    "(slack | lack) (slap | slaw | slapper)": [Key("cmd-right"), Key("shift-enter")],
    "(slack | lack) (react | reaction)": Key("cmd-shift-\\"),
    "(insert command | commandify)": Key("cmd-shift-c"),
    "insert code": [
        "``````",
        Key("left left left"),
        Key("shift-enter"),
        Key("shift-enter"),
        Key("up"),
    ],
    "(slack | lack) (bull | bullet | bulleted) [list]": Key("cmd-shift-8"),
    "(slack | lack) (number | numbered) [list]": Key("cmd-shift-7"),
    "(slack | lack) (quotes | quotation)": Key("cmd-shift->"),
    "bold": Key("cmd-b"),
    "(italic | italicize)": Key("cmd-i"),
    "(strike | strikethrough)": Key("cmd-shift-x"),
    "mark all read": Key("shift-esc"),
    "mark channel read": Key("esc"),
    "(clear | scrap | scratch)": Key("cmd-a backspace"),
    # Files and Snippets
    "(slack | lack) upload": Key("cmd-u"),
    "(slack | lack) snippet": Key("cmd-shift-enter"),
    # Calls
    "([toggle] mute | unmute)": Key("m"),
    "(slack | lack) ([toggle] video)": Key("v"),
    "(slack | lack) invite": Key("a"),
    # Miscellaneous
    "(slack | lack) shortcuts": Key("cmd-/"),
}

ctx.keymap(keymap)
[]
[]
[]
[]
[]
python
null
null
null
tests/test_cli_timewarp.py
""" Tests which require user interaction to run for osxphotos timewarp command """ import os import time import pytest from click.testing import CliRunner from osxphotos import PhotosDB from osxphotos.exiftool import ExifTool from tests.conftest import ( get_os_version, ) from tests.parse_timewarp_output import parse_compare_exif, parse_inspect_output # set timezone to avoid issues with comparing dates os.environ["TZ"] = "US/Pacific" time.tzset() TERMINAL_WIDTH = 250 OS_VER = get_os_version()[1] if OS_VER == "15": from tests.config_timewarp_catalina import CATALINA_PHOTOS_5 as TEST_DATA else: pytest.skip(allow_module_level=True) TEST_DATA = {} def say(msg: str) -> None: """Say message with text to speech""" os.system(f"say {msg}") def ask_user_to_make_selection( photoslib, suspend_capture, photo_name: str, retry=3, video=False ) -> bool: """Ask user to make selection Args: photoslib: photoscript.PhotosLibrary instance passed from fixture suspend_capture: suspend capture fixture photo_name: name of the photo ask user for retry: number of times to retry before failing video: set to True if asking for a video instead of a photo """ # needs to be called with a suspend_capture fixture photo_or_video = "video" if video else "photo" tries = 0 while tries < retry: with suspend_capture: prompt = f"Select the {photo_or_video} of the {photo_name} then press Enter in the Terminal." say(prompt) input(f"\n{prompt}") selection = photoslib.selection if ( len(selection) == 1 and selection[0].filename == TEST_DATA["filenames"][photo_name] ): return True tries += 1 return False ########## Interactive tests run first ########## @pytest.mark.timewarp def test_select_pears(photoslib, suspend_capture): """Force user to select the right photo for following tests""" assert ask_user_to_make_selection(photoslib, suspend_capture, "pears") @pytest.mark.timewarp def test_inspect(photoslib, suspend_capture, output_file): """Test --inspect. 
NOTE: this test requires user interaction""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, ["--inspect", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 values = parse_inspect_output(output_file) assert TEST_DATA["inspect"]["expected"] == values @pytest.mark.timewarp def test_date(photoslib, suspend_capture): """Test --date""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, [ "--date", TEST_DATA["date"]["value"], "--plain", "--force", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 photo = photoslib.selection[0] assert photo.date == TEST_DATA["date"]["date"] @pytest.mark.timewarp @pytest.mark.parametrize("input_value,expected", TEST_DATA["date_delta"]["parameters"]) def test_date_delta(photoslib, suspend_capture, input_value, expected, output_file): """Test --date-delta""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, [ "--date-delta", input_value, "--plain", "--force", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--inspect", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_inspect_output(output_file) assert output_values[0].date_tz == expected @pytest.mark.timewarp @pytest.mark.parametrize("input_value,expected", TEST_DATA["time"]["parameters"]) def test_time(photoslib, suspend_capture, input_value, expected, output_file): """Test --time""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, [ "--time", input_value, "--plain", "--force", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 # inspect to get the updated times # don't use photo.date as it will return local time instead of the time in the timezone result = runner.invoke( timewarp, ["--inspect", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_inspect_output(output_file) assert output_values[0].date_tz == expected @pytest.mark.timewarp @pytest.mark.parametrize("input_value,expected", TEST_DATA["time_delta"]["parameters"]) def test_time_delta(photoslib, suspend_capture, input_value, expected, output_file): """Test --time-delta""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, [ "--time-delta", input_value, "--plain", "--force", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--inspect", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_inspect_output(output_file) assert output_values[0].date_tz == expected @pytest.mark.timewarp @pytest.mark.parametrize( "input_value,expected_date,expected_tz", TEST_DATA["time_zone"]["parameters"] ) def test_time_zone( photoslib, suspend_capture, input_value, expected_date, expected_tz, output_file ): """Test --time-zone""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, [ "--timezone", input_value, "--plain", "--force", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--inspect", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_inspect_output(output_file) assert output_values[0].date_tz == expected_date assert output_values[0].tz_offset == expected_tz 
@pytest.mark.timewarp @pytest.mark.parametrize("expected", TEST_DATA["compare_exif"]["expected"]) def test_compare_exif(photoslib, suspend_capture, expected, output_file): """Test --compare-exif""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, [ "--compare-exif", "--plain", "--force", "-o", output_file, ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 output_values = parse_compare_exif(output_file) assert output_values[0] == expected @pytest.mark.timewarp @pytest.mark.parametrize( "expected,album", TEST_DATA["compare_exif_add_to_album"]["expected"] ) def test_compare_exif_add_to_album(photoslib, suspend_capture, expected, album): """Test --compare-exif --add-to-album""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, [ "--compare-exif", "--add-to-album", album, "--plain", "--force", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 assert expected in result.output photo = photoslib.selection[0] assert album in [album.name for album in photo.albums] @pytest.mark.timewarp def test_select_sunflowers(photoslib, suspend_capture): """Force user to select the right photo for following tests""" assert ask_user_to_make_selection(photoslib, suspend_capture, "sunflowers") @pytest.mark.timewarp @pytest.mark.parametrize("expected", TEST_DATA["compare_exif_3"]["expected"]) def test_compare_exif_3(photoslib, suspend_capture, expected, output_file): """Test --compare-exif""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, ["--compare-exif", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 output_values = parse_compare_exif(output_file) assert output_values[0] == expected @pytest.mark.timewarp @pytest.mark.parametrize("input_value,expected", TEST_DATA["match"]["parameters"]) def test_match(photoslib, suspend_capture, input_value, expected, output_file): """Test --timezone --match""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, [ "--timezone", input_value, "--match-time", "--plain", "--force", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--inspect", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_inspect_output(output_file) assert output_values[0].date_tz == expected @pytest.mark.timewarp def test_push_exif_missing_file(): """Test --push-exif when an original file is missing""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, ["--push-exif", "--plain", "--force", "--verbose"], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 assert "Skipping EXIF update for missing photo" in result.output @pytest.mark.timewarp def test_select_pumpkins(photoslib, suspend_capture): """Force user to select the right photo for following tests""" assert ask_user_to_make_selection(photoslib, suspend_capture, "pumpkins") @pytest.mark.timewarp @pytest.mark.parametrize( "match,tz_value,time_delta_value,expected_date,exif_date,exif_offset", TEST_DATA["exiftool"]["parameters"], ) def test_push_exif_1( photoslib, match, tz_value, time_delta_value, expected_date, exif_date, exif_offset, output_file, ): """Test --timezone --match with --push-exif""" from osxphotos.cli.timewarp import timewarp cli_args = [ "--timezone", tz_value, "--time-delta", time_delta_value, "--push-exif", 
"--plain", "--force", ] if match: cli_args.append("--match-time") runner = CliRunner() result = runner.invoke(timewarp, cli_args, terminal_width=TERMINAL_WIDTH) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--inspect", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_inspect_output(output_file) assert output_values[0].date_tz == expected_date photo = photoslib.selection[0] uuid = photo.uuid path = PhotosDB().get_photo(uuid).path exif = ExifTool(path) exifdict = exif.asdict() assert exifdict["EXIF:DateTimeOriginal"] == exif_date assert exifdict["EXIF:OffsetTimeOriginal"] == exif_offset @pytest.mark.timewarp def test_select_pears_2(photoslib, suspend_capture): """Force user to select the right photo for following tests""" assert ask_user_to_make_selection(photoslib, suspend_capture, "pears") @pytest.mark.timewarp def test_push_exif_2(photoslib, suspend_capture, output_file): """Test --push-exif""" pre_test = TEST_DATA["push_exif"]["pre"] post_test = TEST_DATA["push_exif"]["post"] from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, ["--compare-exif", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_compare_exif(output_file) assert output_values[0] == pre_test result = runner.invoke( timewarp, [ "--push-exif", "--plain", "--force", "--verbose", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--compare-exif", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_compare_exif(output_file) assert output_values[0] == post_test @pytest.mark.timewarp def test_pull_exif_1(photoslib, suspend_capture, output_file): """Test --pull-exif""" pre_test = TEST_DATA["pull_exif_1"]["pre"] post_test = TEST_DATA["pull_exif_1"]["post"] from osxphotos.cli.timewarp import timewarp runner = CliRunner() # update the photo so we know if the data is updated result = runner.invoke( timewarp, ["-z", "-0400", "-D", "+1 day", "-m", "-V", "--plain", "--force"], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--compare-exif", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_compare_exif(output_file) assert output_values[0] == pre_test result = runner.invoke( timewarp, [ "--pull-exif", "--plain", "--force", "--verbose", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--compare-exif", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_compare_exif(output_file) assert output_values[0] == post_test @pytest.mark.timewarp def test_select_apple_tree(photoslib, suspend_capture): """Force user to select the right photo for following tests""" assert ask_user_to_make_selection(photoslib, suspend_capture, "apple tree") @pytest.mark.timewarp def test_pull_exif_no_time(photoslib, suspend_capture, output_file): """Test --pull-exif when photo has invalid date/time in EXIF""" pre_test = TEST_DATA["pull_exif_no_time"]["pre"] post_test = TEST_DATA["pull_exif_no_time"]["post"] from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, ["--compare-exif", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_compare_exif(output_file) assert output_values[0] == pre_test result = runner.invoke( timewarp, [ "--pull-exif", 
"--plain", "--force", "--verbose", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--compare-exif", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_compare_exif(output_file) assert output_values[0] == post_test @pytest.mark.timewarp def test_select_marigolds(photoslib, suspend_capture): """Force user to select the right photo for following tests""" assert ask_user_to_make_selection(photoslib, suspend_capture, "marigold flowers") @pytest.mark.timewarp def test_pull_exif_no_offset(photoslib, suspend_capture, output_file): """Test --pull-exif when photo has no offset in EXIF""" pre_test = TEST_DATA["pull_exif_no_offset"]["pre"] post_test = TEST_DATA["pull_exif_no_offset"]["post"] from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, ["--compare-exif", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_compare_exif(output_file) assert output_values[0] == pre_test result = runner.invoke( timewarp, [ "--pull-exif", "--plain", "--force", "--verbose", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--compare-exif", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_compare_exif(output_file) assert output_values[0] == post_test @pytest.mark.timewarp def test_select_zinnias(photoslib, suspend_capture): """Force user to select the right photo for following tests""" assert ask_user_to_make_selection( photoslib, suspend_capture, "multi-colored zinnia flowers" ) @pytest.mark.timewarp def test_pull_exif_no_data(photoslib, suspend_capture, output_file): """Test --pull-exif when photo has no data in EXIF""" pre_test = TEST_DATA["pull_exif_no_data"]["pre"] post_test = TEST_DATA["pull_exif_no_data"]["post"] from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, ["--compare-exif", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_compare_exif(output_file) assert output_values[0] == pre_test result = runner.invoke( timewarp, [ "--pull-exif", "--plain", "--force", "--verbose", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 assert "Skipping update for missing EXIF data in photo" in result.output result = runner.invoke( timewarp, ["--compare-exif", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_compare_exif(output_file) assert output_values[0] == post_test @pytest.mark.timewarp def test_pull_exif_no_data_use_file_time(photoslib, suspend_capture, output_file): """Test --pull-exif when photo has no data in EXIF with --use-file-time""" pre_test = TEST_DATA["pull_exif_no_data_use_file_time"]["pre"] post_test = TEST_DATA["pull_exif_no_data_use_file_time"]["post"] from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, ["--compare-exif", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_compare_exif(output_file) assert output_values[0] == pre_test result = runner.invoke( timewarp, [ "--pull-exif", "--plain", "--force", "--verbose", "--use-file-time", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 assert "EXIF date/time missing, using file modify date/time" in result.output result = runner.invoke( timewarp, ["--compare-exif", "--plain", "--force", "-o", output_file], 
terminal_width=TERMINAL_WIDTH, ) output_values = parse_compare_exif(output_file) assert output_values[0] == post_test @pytest.mark.timewarp def test_select_sunset_video(photoslib, suspend_capture): """Force user to select the right photo for following tests""" assert ask_user_to_make_selection(photoslib, suspend_capture, "sunset", video=True) @pytest.mark.timewarp @pytest.mark.parametrize("expected", TEST_DATA["compare_video_1"]["expected"]) def test_video_compare_exif(photoslib, suspend_capture, expected, output_file): """Test --compare-exif with video""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, [ "--compare-exif", "--plain", "--force", "-o", output_file, ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 output_values = parse_compare_exif(output_file) assert output_values[0] == expected @pytest.mark.timewarp @pytest.mark.parametrize( "input_value,expected", TEST_DATA["video_date_delta"]["parameters"] ) def test_video_date_delta( photoslib, suspend_capture, input_value, expected, output_file ): """Test --date-delta with video""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, [ "--date-delta", input_value, "--plain", "--force", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--inspect", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_inspect_output(output_file) assert output_values[0].date_tz == expected @pytest.mark.timewarp @pytest.mark.parametrize( "input_value,expected", TEST_DATA["video_time_delta"]["parameters"] ) def test_video_time_delta( photoslib, suspend_capture, input_value, expected, output_file ): """Test --time-delta with video""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, [ "--time-delta", input_value, "--plain", "--force", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--inspect", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_inspect_output(output_file) assert output_values[0].date_tz == expected @pytest.mark.timewarp @pytest.mark.parametrize("input_value,expected", TEST_DATA["video_date"]["parameters"]) def test_video_date(photoslib, suspend_capture, input_value, expected, output_file): """Test --date with video""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, [ "--date", input_value, "--plain", "--force", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 # inspect to get the updated times # don't use photo.date as it will return local time instead of the time in the timezone result = runner.invoke( timewarp, ["--inspect", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_inspect_output(output_file) assert output_values[0].date_tz == expected @pytest.mark.timewarp @pytest.mark.parametrize("input_value,expected", TEST_DATA["video_time"]["parameters"]) def test_video_time(photoslib, suspend_capture, input_value, expected, output_file): """Test --time with video""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, [ "--time", input_value, "--plain", "--force", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 # inspect to get the updated times # don't use photo.date as it will return local time instead of the time in 
the timezone result = runner.invoke( timewarp, ["--inspect", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_inspect_output(output_file) assert output_values[0].date_tz == expected @pytest.mark.timewarp @pytest.mark.parametrize( "input_value,expected_date,expected_tz", TEST_DATA["video_time_zone"]["parameters"] ) def test_video_time_zone( photoslib, suspend_capture, input_value, expected_date, expected_tz, output_file ): """Test --time-zone""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, [ "--timezone", input_value, "--plain", "--force", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--inspect", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_inspect_output(output_file) assert output_values[0].date_tz == expected_date assert output_values[0].tz_offset == expected_tz @pytest.mark.timewarp @pytest.mark.parametrize("input_value,expected", TEST_DATA["video_match"]["parameters"]) def test_video_match(photoslib, suspend_capture, input_value, expected, output_file): """Test --timezone --match with video""" from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, [ "--timezone", input_value, "--match-time", "--plain", "--force", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--inspect", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_inspect_output(output_file) assert output_values[0].date_tz == expected @pytest.mark.timewarp def test_video_push_exif(photoslib, suspend_capture, output_file): """Test --push-exif with video""" pre_test = TEST_DATA["video_push_exif"]["pre"] post_test = TEST_DATA["video_push_exif"]["post"] from osxphotos.cli.timewarp import timewarp runner = CliRunner() result = runner.invoke( timewarp, ["--compare-exif", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_compare_exif(output_file) assert output_values[0] == pre_test result = runner.invoke( timewarp, [ "--push-exif", "--plain", "--force", "--verbose", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--compare-exif", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_compare_exif(output_file) assert output_values[0] == post_test @pytest.mark.timewarp def test_video_pull_exif(photoslib, suspend_capture, output_file): """Test --pull-exif with video""" pre_test = TEST_DATA["video_pull_exif"]["pre"] post_test = TEST_DATA["video_pull_exif"]["post"] from osxphotos.cli.timewarp import timewarp runner = CliRunner() # update the photo so we know if the data is updated result = runner.invoke( timewarp, [ "-z", "-0500", "-D", "+1 day", "-T", "-10 hours", "-m", "-V", "--plain", "--force", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--compare-exif", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_compare_exif(output_file) assert output_values[0] == pre_test result = runner.invoke( timewarp, [ "--pull-exif", "--plain", "--force", "--verbose", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--compare-exif", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) 
output_values = parse_compare_exif(output_file) assert output_values[0] == post_test @pytest.mark.timewarp def test_select_pears_3(photoslib, suspend_capture): """Force user to select the right photo for following tests""" assert ask_user_to_make_selection(photoslib, suspend_capture, "pears") @pytest.mark.timewarp def test_function(photoslib, suspend_capture, output_file): """Test timewarp function""" from osxphotos.cli.timewarp import timewarp expected = TEST_DATA["function"]["expected"] runner = CliRunner() result = runner.invoke( timewarp, [ "--function", "tests/timewarp_function_example.py::get_date_time_timezone", "--force", ], terminal_width=TERMINAL_WIDTH, ) assert result.exit_code == 0 result = runner.invoke( timewarp, ["--inspect", "--plain", "--force", "-o", output_file], terminal_width=TERMINAL_WIDTH, ) output_values = parse_inspect_output(output_file) assert output_values[0] == expected
[]
[]
[ "TZ" ]
[]
["TZ"]
python
1
0
etstool.py
# (C) Copyright 2007-2021 Enthought, Inc., Austin, TX # All rights reserved. # # This software is provided without warranty under the terms of the BSD # license included in LICENSE.txt and may be redistributed only under # the conditions described in the aforementioned license. The license # is also available online at http://www.enthought.com/licenses/BSD.txt # # Thanks for using Enthought open source! """ Tasks for Test Runs =================== This file is intended to be used with a python environment with the click library to automate the process of setting up test environments and running the test within them. This improves repeatability and reliability of tests be removing many of the variables around the developer's particular Python environment. Test environment setup and package management is performed using `EDM <http://docs.enthought.com/edm/>`_ To use this to run your tests, you will need to install EDM and click into your working environment. You will also need to have git installed to access required source code from github repositories. You can then do:: python etstool.py install --runtime=... --toolkit=... to create a test environment from the current codebase and:: python etstool.py test --runtime=... --toolkit=... to run tests in that environment. You can remove the environment with:: python etstool.py cleanup --runtime=... --toolkit=... If you make changes you will either need to remove and re-install the environment or manually update the environment using ``edm``, as the install performs a ``python setup.py install`` rather than a ``develop``, so changes in your code will not be automatically mirrored in the test environment. You can update with a command like:: edm run --environment ... -- python setup.py install You can run all three tasks at once with:: python etstool.py test_clean --runtime=... --toolkit=... which will create, install, run tests, and then clean-up the environment. And you can run tests in all supported runtimes and toolkits (with cleanup) using:: python etstool.py test_all The only currently supported runtime value is ``3.6``, and currently supported toolkits are ``pyside2``, ``pyqt5``, ``wx`` and ``null``. Not all combinations of toolkits and runtimes will work, but the tasks will fail with a clear error if that is the case. Tests can still be run via the usual means in other environments if that suits a developer's purpose. Changing This File ------------------ To change the packages installed during a test run, change the dependencies variable below. To install a package from github, or one which is not yet available via EDM, add it to the `ci-src-requirements.txt` file (these will be installed by `pip`). Other changes to commands should be a straightforward change to the listed commands for each task. See the EDM documentation for more information about how to run commands within an EDM enviornment. """ import glob import os import subprocess import sys from shutil import rmtree, copy as copyfile, which from tempfile import mkdtemp from contextlib import contextmanager import click # Python runtime versions supported by this tool. available_runtimes = ["3.6"] # Python runtime used by default. default_runtime = "3.6" # Toolkits supported by this tool. available_toolkits = ["pyside2", "pyqt5", "wx", "null"] # Toolkit used by default. 
default_toolkit = "null" supported_combinations = { "3.6": {"pyside2", "pyqt5", "wx", "null"}, } dependencies = { "apptools", "coverage", "enthought_sphinx_theme", "flake8", "flake8_ets", "ipykernel", "pyface", "sphinx", "traits", "traitsui", } # Dependencies we install from source for cron tests # Order from packages with the most dependencies to one with the least # dependencies. Packages are forced re-installed in this order. source_dependencies = [ "apptools", "traitsui", "pyface", "traits", ] toolkit_dependencies = { # XXX once pyside2 is available in EDM, we will want it here. For now # we do a pip install. "pyside2": set(), "pyqt5": {"pyqt5"}, # XXX once wxPython 4 is available in EDM, we will want it here "wx": set(), "null": set(), } runtime_dependencies = {} environment_vars = { "pyside2": {"ETS_TOOLKIT": "qt4", "QT_API": "pyside2"}, "pyqt5": {"ETS_TOOLKIT": "qt4", "QT_API": "pyqt5"}, "wx": {"ETS_TOOLKIT": "wx"}, "null": {"ETS_TOOLKIT": "null"}, } github_url_fmt = "git+http://github.com/enthought/{0}.git#egg={0}" # Options shared between different click commands. edm_option = click.option( "--edm", help=( "Path to the EDM executable to use. The default is to use the first " "EDM found in the path. The EDM executable can also be specified " "by setting the ETSTOOL_EDM environment variable." ), envvar="ETSTOOL_EDM", ) runtime_option = click.option( "--runtime", default=default_runtime, type=click.Choice(available_runtimes), show_default=True, help="Python runtime version", ) toolkit_option = click.option( "--toolkit", default=default_toolkit, type=click.Choice(available_toolkits), show_default=True, help="GUI toolkit", ) environment_option = click.option( "--environment", default=None, help=( "EDM environment name to use in place of the " "automatically constructed name" ), ) source_option = click.option( "--source/--no-source", default=False, help="Install ETS packages from source", ) editable_option = click.option( "--editable/--not-editable", default=False, help="Install main package in 'editable' mode? [default: --not-editable]", ) @click.group() def cli(): pass @cli.command() @edm_option @runtime_option @toolkit_option @environment_option @editable_option @source_option def install(edm, runtime, toolkit, environment, editable, source): """ Install project and dependencies into a clean EDM environment. 
""" parameters = get_parameters(edm, runtime, toolkit, environment) packages = " ".join( dependencies | toolkit_dependencies.get(toolkit, set()) | runtime_dependencies.get(runtime, set()) ) # edm commands to setup the development environment commands = [ "{edm} environments create {environment} --force --version={runtime}", "{edm} install -y -e {environment} " + packages, ( "{edm} run -e {environment} -- " "pip install -r ci-src-requirements.txt --no-dependencies" ), ] # pip install pyside2, because we don't have it in EDM yet if toolkit == "pyside2": commands.append( "{edm} run -e {environment} -- pip install pyside2" ) # install wxPython with pip, because we don't have it in EDM yet elif toolkit == "wx": if sys.platform == "darwin": commands.append( "{edm} run -e {environment} -- python -m pip install wxPython<4.1" # noqa: E501 ) elif sys.platform == "linux": # XXX this is mainly for CI workers; need a generic solution commands.append( "{edm} run -e {environment} -- pip install -f https://extras.wxpython.org/wxPython4/extras/linux/gtk3/ubuntu-18.04/ wxPython<4.1" # noqa: E501 ) else: commands.append( "{edm} run -e {environment} -- python -m pip install wxPython" ) click.echo("Creating environment '{environment}'".format(**parameters)) execute(commands, parameters) if source: # Remove EDM ETS packages and install them from source cmd_fmt = ( "{edm} plumbing remove-package " "--environment {environment} --force " ) commands = [cmd_fmt + source_pkg for source_pkg in source_dependencies] execute(commands, parameters) source_pkgs = [ github_url_fmt.format(pkg) for pkg in source_dependencies ] # Without the --no-dependencies flag such that new dependencies on # main branch are brought in. commands = [ "python -m pip install --force-reinstall {pkg}".format(pkg=pkg) for pkg in source_pkgs ] commands = [ "{edm} run -e {environment} -- " + command for command in commands ] execute(commands, parameters) # Always install local source at the end to mitigate risk of testing # against a distributed release. if editable: install_cmd = ( "{edm} run -e {environment} -- pip " "install --editable . --no-dependencies" ) else: install_cmd = ( "{edm} run -e {environment} -- pip install . --no-dependencies" ) execute([install_cmd], parameters) click.echo("Done install") @cli.command() @edm_option @runtime_option @toolkit_option @environment_option def shell(edm, runtime, toolkit, environment): """ Create a shell into the EDM development environment (aka 'activate' it). """ parameters = get_parameters(edm, runtime, toolkit, environment) commands = [ "{edm} shell -e {environment}", ] execute(commands, parameters) @cli.command() @edm_option @runtime_option @toolkit_option @environment_option def flake8(edm, runtime, toolkit, environment): """ Run a flake8 check in a given environment. """ parameters = get_parameters(edm, runtime, toolkit, environment) commands = ["{edm} run -e {environment} -- python -m flake8 "] execute(commands, parameters) @cli.command() @edm_option @runtime_option @toolkit_option @environment_option def test(edm, runtime, toolkit, environment): """ Run the test suite in a given environment with the specified toolkit. """ parameters = get_parameters(edm, runtime, toolkit, environment) environ = environment_vars.get(toolkit, {}).copy() environ["PYTHONUNBUFFERED"] = "1" commands = [ ( "{edm} run -e {environment} -- python -W default -m " "coverage run -p -m unittest discover -v envisage" ), ] # We run in a tempdir to avoid accidentally picking up wrong envisage # code from a local dir. 
We need to ensure a good .coveragerc is in # that directory, plus coverage has a bug that means a non-local coverage # file doesn't get populated correctly. click.echo("Running tests in '{environment}'".format(**parameters)) with do_in_tempdir(files=[".coveragerc"], capture_files=["./.coverage*"]): os.environ.update(environ) execute(commands, parameters) click.echo("Done test") @cli.command() @edm_option @runtime_option @toolkit_option @environment_option def cleanup(edm, runtime, toolkit, environment): """ Remove a development environment. """ parameters = get_parameters(edm, runtime, toolkit, environment) commands = [ "{edm} run -e {environment} -- python setup.py clean", "{edm} environments remove {environment} --purge -y", ] click.echo("Cleaning up environment '{environment}'".format(**parameters)) execute(commands, parameters) click.echo("Done cleanup") @cli.command() @edm_option @runtime_option @toolkit_option def test_clean(edm, runtime, toolkit): """ Run tests in a clean environment, cleaning up afterwards """ args = ["--toolkit={}".format(toolkit), "--runtime={}".format(runtime)] if edm is not None: args.append("--edm={}".format(edm)) try: install(args=args, standalone_mode=False) test(args=args, standalone_mode=False) finally: cleanup(args=args, standalone_mode=False) @cli.command() @edm_option @runtime_option @toolkit_option @environment_option @editable_option def update(edm, runtime, toolkit, environment, editable): """ Update/Reinstall package into environment. """ parameters = get_parameters(edm, runtime, toolkit, environment) if editable: install_cmd = ( "{edm} run -e {environment} -- " "pip install --editable . --no-dependencies" ) else: install_cmd = ( "{edm} run -e {environment} -- pip install . --no-dependencies" ) commands = [install_cmd] click.echo("Re-installing in '{environment}'".format(**parameters)) execute(commands, parameters) click.echo("Done update") @cli.command() @edm_option def test_all(edm): """ Run test_clean across all supported environment combinations. """ failed_command = False for runtime, toolkits in supported_combinations.items(): for toolkit in toolkits: args = [ "--toolkit={}".format(toolkit), "--runtime={}".format(runtime), ] if edm is not None: args.append("--edm={}".format(edm)) try: test_clean(args, standalone_mode=True) except SystemExit: failed_command = True if failed_command: sys.exit(1) @cli.command() @edm_option @runtime_option @toolkit_option @environment_option def docs(edm, runtime, toolkit, environment): """ Build HTML documentation. """ parameters = get_parameters(edm, runtime, toolkit, environment) parameters["docs_source"] = "docs/source" parameters["docs_build"] = "docs/build" parameters["docs_source_api"] = docs_source_api = "docs/source/api" parameters["templates_dir"] = "docs/source/api/templates/" # Remove any previously autogenerated API documentation. 
doc_api_files = os.listdir(docs_source_api) permanent = ['envisage.api.rst', 'templates'] previously_autogenerated = \ [file for file in doc_api_files if file not in permanent] for file in previously_autogenerated: os.remove(os.path.join(docs_source_api, file)) apidoc_command = ( "{edm} run -e {environment} -- python -m sphinx.ext.apidoc --separate " "--no-toc -o {docs_source_api} -t {templates_dir} envisage */tests" ) html_build_command = ( "{edm} run -e {environment} -- python -m sphinx -b html " "{docs_source} {docs_build}" ) commands = [apidoc_command, html_build_command] execute(commands, parameters) # Utility routines def get_parameters(edm, runtime, toolkit, environment): """ Set up parameters dictionary for format() substitution """ if edm is None: edm = locate_edm() if environment is None: environment = "envisage-test-{runtime}-{toolkit}".format( runtime=runtime, toolkit=toolkit ) parameters = { "edm": edm, "runtime": runtime, "toolkit": toolkit, "environment": environment, } if toolkit not in supported_combinations[runtime]: msg = ( "Python {runtime} and toolkit {toolkit} not supported by " + "test environments" ) raise RuntimeError(msg.format(**parameters)) return parameters @contextmanager def do_in_tempdir(files=(), capture_files=()): """ Create a temporary directory, cleaning up after done. Creates the temporary directory, and changes into it. On exit returns to original directory and removes temporary dir. Parameters ---------- files : sequence of filenames Files to be copied across to temporary directory. capture_files : sequence of filenames Files to be copied back from temporary directory. """ path = mkdtemp() old_path = os.getcwd() # send across any files we need for filepath in files: click.echo("copying file to tempdir: {}".format(filepath)) copyfile(filepath, path) os.chdir(path) try: yield path # retrieve any result files we want for pattern in capture_files: for filepath in glob.iglob(pattern): click.echo("copying file back: {}".format(filepath)) copyfile(filepath, old_path) finally: os.chdir(old_path) rmtree(path) def execute(commands, parameters): for command in commands: click.echo("[EXECUTING] {}".format(command.format(**parameters))) try: subprocess.check_call( [arg.format(**parameters) for arg in command.split()] ) except subprocess.CalledProcessError as exc: click.echo(str(exc)) sys.exit(1) def locate_edm(): """ Locate an EDM executable if it exists, else raise an exception. Returns the first EDM executable found on the path. On Windows, if that executable turns out to be the "edm.bat" batch file, replaces it with the executable that it wraps: the batch file adds another level of command-line mangling that interferes with things like specifying version restrictions. Returns ------- edm : str Path to the EDM executable to use. Raises ------ click.ClickException If no EDM executable is found in the path. """ edm = which("edm") if edm is None: raise click.ClickException( "This script requires EDM, but no EDM executable " "was found on the path." ) # Resolve edm.bat on Windows. if sys.platform == "win32" and os.path.basename(edm) == "edm.bat": edm = os.path.join(os.path.dirname(edm), "embedded", "edm.exe") return edm if __name__ == "__main__": cli()
[]
[]
[]
[]
[]
python
0
0
xmmdet/tools/train.py
import argparse import copy import os import os.path as osp import time import warnings import mmcv import torch from mmcv import Config, DictAction from mmcv.runner import get_dist_info, init_dist from mmcv.utils import get_git_hash from mmdet import __version__ from xmmdet.apis import set_random_seed, train_detector from xmmdet.datasets import build_dataset from xmmdet.models import build_detector from xmmdet.utils import collect_env, get_root_logger, get_model_complexity_info, \ LoggerStream, XMMDetQuantTrainModule, XMMDetQuantCalibrateModule def parse_args(): parser = argparse.ArgumentParser(description='Train a detector') parser.add_argument('config', help='train config file path') parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument( '--resume-from', help='the checkpoint file to resume from') parser.add_argument( '--no-validate', action='store_true', help='whether not to evaluate the checkpoint during training') group_gpus = parser.add_mutually_exclusive_group() group_gpus.add_argument( '--gpus', type=int, help='number of gpus to use ' '(only applicable to non-distributed training)') group_gpus.add_argument( '--gpu-ids', type=int, nargs='+', help='ids of gpus to use ' '(only applicable to non-distributed training)') parser.add_argument('--seed', type=int, default=None, help='random seed') parser.add_argument( '--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.') parser.add_argument( '--options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file (deprecate), ' 'change to --cfg-options instead.') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file.') parser.add_argument( '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher') parser.add_argument('--local_rank', type=int, default=0) args = parser.parse_args() if 'LOCAL_RANK' not in os.environ: os.environ['LOCAL_RANK'] = str(args.local_rank) if args.options and args.cfg_options: raise ValueError( '--options and --cfg-options cannot be both ' 'specified, --options is deprecated in favor of --cfg-options') if args.options: warnings.warn('--options is deprecated in favor of --cfg-options') args.cfg_options = args.options return args def main(args=None): args = args or parse_args() cfg = Config.fromfile(args.config) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) # import modules from string list. 
if cfg.get('custom_imports', None): from mmcv.utils import import_modules_from_strings import_modules_from_strings(**cfg['custom_imports']) # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True # work_dir is determined in this priority: CLI > segment in file > filename if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None cfg.work_dir = args.work_dir elif cfg.get('work_dir', None) is None: # use config filename as default work_dir if cfg.work_dir is None cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0]) if args.resume_from is not None: cfg.resume_from = args.resume_from if args.gpu_ids is not None: cfg.gpu_ids = args.gpu_ids else: cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus) # init distributed env first, since logger depends on the dist info. if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) # re-set gpu_ids with distributed training mode _, world_size = get_dist_info() cfg.gpu_ids = range(world_size) # create work_dir mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) # dump config cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) # init the logger before other steps timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) log_file = osp.join(cfg.work_dir, f'{timestamp}.log') logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) # init the meta dict to record some important information such as # environment info and seed, which will be logged meta = dict() # log env info env_info_dict = collect_env() env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()]) dash_line = '-' * 60 + '\n' logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line) meta['env_info'] = env_info meta['config'] = cfg.pretty_text # log some basic info logger.info(f'Distributed training: {distributed}') logger.info(f'Config:\n{cfg.pretty_text}') # set random seeds if args.seed is not None: logger.info(f'Set random seed to {args.seed}, ' f'deterministic: {args.deterministic}') set_random_seed(args.seed, deterministic=args.deterministic) cfg.seed = args.seed meta['seed'] = args.seed meta['exp_name'] = osp.basename(args.config) model = build_detector( cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg) if hasattr(cfg, 'print_model_complexity') and cfg.print_model_complexity: input_res = (3, *cfg.input_size) if isinstance(cfg.input_size, (list, tuple)) else \ (3, cfg.input_size, cfg.input_size) logger_stream = LoggerStream(logger) macs_count, params_count = get_model_complexity_info(model, input_res, ost=logger_stream) logger.info(f'Compute : {macs_count}') logger.info(f'Parameters: {params_count}') if hasattr(cfg, 'quantize') and cfg.quantize: input_res = (3, *cfg.input_size) if isinstance(cfg.input_size, (list, tuple)) else \ (3, cfg.input_size, cfg.input_size) assert cfg.quantize in ('calibration', 'training', True, False, None), \ f'invalid value for quantize {cfg.quantize}' dummy_input = torch.zeros(*(1,*input_res)) if cfg.quantize == 'calibration': # calibration doesn't support multi-gpu for now, so switch it off cfg.gpu_ids = cfg.gpu_ids[:1] model = XMMDetQuantCalibrateModule(model, dummy_input) elif cfg.quantize: model = XMMDetQuantTrainModule(model, dummy_input) # datasets = [build_dataset(cfg.data.train)] if len(cfg.workflow) == 2: val_dataset = copy.deepcopy(cfg.data.val) val_dataset.pipeline = cfg.data.train.pipeline 
datasets.append(build_dataset(val_dataset)) if cfg.checkpoint_config is not None: # save mmdet version, config file content and class names in # checkpoints as meta data cfg.checkpoint_config.meta = dict( mmdet_version=__version__ + get_git_hash()[:7], CLASSES=datasets[0].CLASSES) # add an attribute for visualization convenience model.CLASSES = datasets[0].CLASSES train_detector( model, datasets, cfg, distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta) if __name__ == '__main__': main()
[]
[]
[ "LOCAL_RANK" ]
[]
["LOCAL_RANK"]
python
1
0
tests/docs/test_consoletest.py
import os
import sys
import inspect
import pathlib
import tempfile
import unittest
import contextlib
import unittest.mock
import importlib.util

from dffml.util.asynctestcase import AsyncTestCase
from dffml.util.testing.consoletest.commands import *


ROOT_PATH = pathlib.Path(__file__).parent.parent.parent
DOCS_PATH = ROOT_PATH / "docs"


# Load files by path. We have to import literalinclude_diff for diff-files
for module_name in ["literalinclude_diff"]:
    spec = importlib.util.spec_from_file_location(
        module_name, str(ROOT_PATH / "docs" / "_ext" / f"{module_name}.py"),
    )
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    setattr(sys.modules[__name__], module_name, module)


class TestDocs(unittest.TestCase):
    """
    A testcase for each doc will be added to this class
    """

    TESTABLE_DOCS = []

    def test__all_docs_being_tested(self):
        """
        Make sure that there is a jobs.tutorials.strategy.matrix.docs entry
        for each testable doc.
        """
        # Ensure that we identified some docs to test
        should_have = sorted(self.TESTABLE_DOCS)
        self.assertTrue(should_have)
        # Load the ci testing workflow avoid requiring the yaml module as that
        # has C dependencies
        docs = list(
            sorted(
                map(
                    lambda i: str(
                        pathlib.Path(ROOT_PATH, i.strip()[2:])
                        .relative_to(DOCS_PATH)
                        .with_suffix("")
                    ),
                    filter(
                        lambda line: line.strip().startswith("- docs/"),
                        pathlib.Path(
                            ROOT_PATH, ".github", "workflows", "testing.yml"
                        )
                        .read_text()
                        .split("\n"),
                    ),
                )
            )
        )
        # Make sure that we have an entry for all the docs we can test
        self.assertListEqual(should_have, docs)


def mktestcase(filepath: pathlib.Path, relative: pathlib.Path):
    # The test case itself, assigned to test_doctest of each class
    def testcase(self):
        from sphinx.cmd.build import (
            get_parser,
            Tee,
            color_terminal,
            patch_docutils,
            docutils_namespace,
            Sphinx,
        )
        from sphinx.environment import BuildEnvironment

        os.chdir(str(ROOT_PATH))

        filenames = [str(relative)]

        class SubSetBuildEnvironment(BuildEnvironment):
            def get_outdated_files(self, updated):
                added, changed, removed = super().get_outdated_files(updated)
                added.clear()
                changed.clear()
                removed.clear()
                added.add("index")
                for filename in filenames:
                    added.add(filename)
                return added, changed, removed

        class SubSetSphinx(Sphinx):
            def _init_env(self, freshenv: bool) -> None:
                self.env = SubSetBuildEnvironment()
                self.env.setup(self)
                self.env.find_files(self.config, self.builder)

        confdir = str(ROOT_PATH / "docs")

        pickled_objs = {}

        def pickle_dump(obj, fileobj, _protocol):
            pickled_objs[fileobj.name] = obj

        def pickle_load(fileobj):
            return pickled_objs[fileobj.name]

        with patch_docutils(
            confdir
        ), docutils_namespace(), unittest.mock.patch(
            "pickle.dump", new=pickle_dump
        ), unittest.mock.patch(
            "pickle.load", new=pickle_load
        ), tempfile.TemporaryDirectory() as tempdir:
            app = SubSetSphinx(
                str(ROOT_PATH / "docs"),
                confdir,
                os.path.join(tempdir, "consoletest"),
                os.path.join(tempdir, "consoletest", ".doctrees"),
                "consoletest",
                {},
                sys.stdout,
                sys.stderr,
                True,
                False,
                [],
                0,
                1,
                False,
            )
            app.build(False, [])
        self.assertFalse(app.statuscode)

    return testcase


SKIP_DOCS = ["plugins/dffml_model"]

for filepath in DOCS_PATH.rglob("*.rst"):
    if b":test:" not in pathlib.Path(filepath).read_bytes():
        continue
    relative = filepath.relative_to(DOCS_PATH).with_suffix("")
    if str(relative) in SKIP_DOCS:
        continue
    TestDocs.TESTABLE_DOCS.append(str(relative))
    name = "test_" + str(relative).replace(os.sep, "_")
    # Do not add the tests if we are running with GitHub Actions for the main
    # package. This is because there are separate jobs for each tutorial test
    # and the TestDocs.test__all_docs_being_tested ensures that we are running a
    # job for each tutorial
    if (
        "GITHUB_ACTIONS" in os.environ
        and "PLUGIN" in os.environ
        and os.environ["PLUGIN"] == "."
    ):
        continue
    setattr(
        TestDocs,
        name,
        unittest.skipIf(
            "RUN_CONSOLETESTS" not in os.environ,
            "RUN_CONSOLETESTS environment variable not set",
        )(mktestcase(filepath, relative)),
    )
[]
[]
[ "PLUGIN" ]
[]
["PLUGIN"]
python
1
0
docker/docker.go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"log"
	"net"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/docker/docker/api"
	"github.com/docker/docker/api/client"
	"github.com/docker/docker/builtins"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/engine"
	"github.com/docker/docker/opts"
	flag "github.com/docker/docker/pkg/mflag"
	"github.com/docker/docker/sysinit"
	"github.com/docker/docker/utils"
)

const (
	defaultCaFile   = "ca.pem"
	defaultKeyFile  = "key.pem"
	defaultCertFile = "cert.pem"
)

var (
	dockerConfDir = os.Getenv("DOCKER_CONFIG")
)

func main() {
	if len(dockerConfDir) == 0 {
		dockerConfDir = filepath.Join(os.Getenv("HOME"), ".docker")
	}
	if selfPath := utils.SelfPath(); strings.Contains(selfPath, ".dockerinit") {
		// Running in init mode
		sysinit.SysInit()
		return
	}

	var (
		flVersion            = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit")
		flDaemon             = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode")
		flGraphOpts          opts.ListOpts
		flDebug              = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode")
		flAutoRestart        = flag.Bool([]string{"r", "-restart"}, true, "Restart previously running containers")
		bridgeName           = flag.String([]string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking")
		bridgeIp             = flag.String([]string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
		pidfile              = flag.String([]string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file")
		flRoot               = flag.String([]string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the Docker runtime")
		flSocketGroup        = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group")
		flEnableCors         = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API")
		flDns                = opts.NewListOpts(opts.ValidateIPAddress)
		flDnsSearch          = opts.NewListOpts(opts.ValidateDnsSearch)
		flEnableIptables     = flag.Bool([]string{"#iptables", "-iptables"}, true, "Enable Docker's addition of iptables rules")
		flEnableIpForward    = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
		flDefaultIp          = flag.String([]string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports")
		flInterContainerComm = flag.Bool([]string{"#icc", "-icc"}, true, "Enable inter-container communication")
		flGraphDriver        = flag.String([]string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver")
		flExecDriver         = flag.String([]string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver")
		flHosts              = opts.NewListOpts(api.ValidateHost)
		flMtu                = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU\nif no value is provided: default to the default route MTU or 1500 if no default route is available")
		flTls                = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags")
		flTlsVerify          = flag.Bool([]string{"-tlsverify"}, false, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)")
		flCa                 = flag.String([]string{"-tlscacert"}, filepath.Join(dockerConfDir, defaultCaFile), "Trust only remotes providing a certificate signed by the CA given here")
		flCert               = flag.String([]string{"-tlscert"}, filepath.Join(dockerConfDir, defaultCertFile), "Path to TLS certificate file")
		flKey                = flag.String([]string{"-tlskey"}, filepath.Join(dockerConfDir, defaultKeyFile), "Path to TLS key file")
		flSelinuxEnabled     = flag.Bool([]string{"-selinux-enabled"}, false, "Enable selinux support. SELinux does not presently support the BTRFS storage driver")
	)
	flag.Var(&flDns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers")
	flag.Var(&flDnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
	flag.Var(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode\nspecified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.")
	flag.Var(&flGraphOpts, []string{"-storage-opt"}, "Set storage driver options")

	flag.Parse()

	if *flVersion {
		showVersion()
		return
	}
	if flHosts.Len() == 0 {
		defaultHost := os.Getenv("DOCKER_HOST")
		if defaultHost == "" || *flDaemon {
			// If we do not have a host, default to unix socket
			defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET)
		}
		if _, err := api.ValidateHost(defaultHost); err != nil {
			log.Fatal(err)
		}
		flHosts.Set(defaultHost)
	}

	if *bridgeName != "" && *bridgeIp != "" {
		log.Fatal("You specified -b & --bip, mutually exclusive options. Please specify only one.")
	}

	if !*flEnableIptables && !*flInterContainerComm {
		log.Fatal("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.")
	}

	if net.ParseIP(*flDefaultIp) == nil {
		log.Fatalf("Specified --ip=%s is not in correct format \"0.0.0.0\".", *flDefaultIp)
	}

	if *flDebug {
		os.Setenv("DEBUG", "1")
	}
	if *flDaemon {
		if runtime.GOOS != "linux" {
			log.Fatalf("The Docker daemon is only supported on linux")
		}
		if os.Geteuid() != 0 {
			log.Fatalf("The Docker daemon needs to be run as root")
		}

		if flag.NArg() != 0 {
			flag.Usage()
			return
		}

		// set up the TempDir to use a canonical path
		tmp := os.TempDir()
		realTmp, err := utils.ReadSymlinkedDirectory(tmp)
		if err != nil {
			log.Fatalf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
		}
		os.Setenv("TMPDIR", realTmp)

		// get the canonical path to the Docker root directory
		root := *flRoot
		var realRoot string
		if _, err := os.Stat(root); err != nil && os.IsNotExist(err) {
			realRoot = root
		} else {
			realRoot, err = utils.ReadSymlinkedDirectory(root)
			if err != nil {
				log.Fatalf("Unable to get the full path to root (%s): %s", root, err)
			}
		}

		if err := checkKernelAndArch(); err != nil {
			log.Fatal(err)
		}

		eng := engine.New()
		// Load builtins
		if err := builtins.Register(eng); err != nil {
			log.Fatal(err)
		}

		// handle the pidfile early. https://github.com/docker/docker/issues/6973
		if len(*pidfile) > 0 {
			job := eng.Job("initserverpidfile", *pidfile)
			if err := job.Run(); err != nil {
				log.Fatal(err)
			}
		}

		// load the daemon in the background so we can immediately start
		// the http api so that connections don't fail while the daemon
		// is booting
		go func() {
			// Load plugin: httpapi
			job := eng.Job("initserver")
			// include the variable here too, for the server config
			job.Setenv("Pidfile", *pidfile)
			job.Setenv("Root", realRoot)
			job.SetenvBool("AutoRestart", *flAutoRestart)
			job.SetenvList("Dns", flDns.GetAll())
			job.SetenvList("DnsSearch", flDnsSearch.GetAll())
			job.SetenvBool("EnableIptables", *flEnableIptables)
			job.SetenvBool("EnableIpForward", *flEnableIpForward)
			job.Setenv("BridgeIface", *bridgeName)
			job.Setenv("BridgeIP", *bridgeIp)
			job.Setenv("DefaultIp", *flDefaultIp)
			job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
			job.Setenv("GraphDriver", *flGraphDriver)
			job.SetenvList("GraphOptions", flGraphOpts.GetAll())
			job.Setenv("ExecDriver", *flExecDriver)
			job.SetenvInt("Mtu", *flMtu)
			job.SetenvBool("EnableSelinuxSupport", *flSelinuxEnabled)
			job.SetenvList("Sockets", flHosts.GetAll())
			if err := job.Run(); err != nil {
				log.Fatal(err)
			}
			// after the daemon is done setting up we can tell the api to start
			// accepting connections
			if err := eng.Job("acceptconnections").Run(); err != nil {
				log.Fatal(err)
			}
		}()

		// TODO actually have a resolved graphdriver to show?
		log.Printf("docker daemon: %s %s; execdriver: %s; graphdriver: %s", dockerversion.VERSION, dockerversion.GITCOMMIT, *flExecDriver, *flGraphDriver)

		// Serve api
		job := eng.Job("serveapi", flHosts.GetAll()...)
		job.SetenvBool("Logging", true)
		job.SetenvBool("EnableCors", *flEnableCors)
		job.Setenv("Version", dockerversion.VERSION)
		job.Setenv("SocketGroup", *flSocketGroup)

		job.SetenvBool("Tls", *flTls)
		job.SetenvBool("TlsVerify", *flTlsVerify)
		job.Setenv("TlsCa", *flCa)
		job.Setenv("TlsCert", *flCert)
		job.Setenv("TlsKey", *flKey)
		job.SetenvBool("BufferRequests", true)
		if err := job.Run(); err != nil {
			log.Fatal(err)
		}
	} else {
		if flHosts.Len() > 1 {
			log.Fatal("Please specify only one -H")
		}
		protoAddrParts := strings.SplitN(flHosts.GetAll()[0], "://", 2)

		var (
			cli       *client.DockerCli
			tlsConfig tls.Config
		)
		tlsConfig.InsecureSkipVerify = true

		// If we should verify the server, we need to load a trusted ca
		if *flTlsVerify {
			*flTls = true
			certPool := x509.NewCertPool()
			file, err := ioutil.ReadFile(*flCa)
			if err != nil {
				log.Fatalf("Couldn't read ca cert %s: %s", *flCa, err)
			}
			certPool.AppendCertsFromPEM(file)
			tlsConfig.RootCAs = certPool
			tlsConfig.InsecureSkipVerify = false
		}

		// If tls is enabled, try to load and send client certificates
		if *flTls || *flTlsVerify {
			_, errCert := os.Stat(*flCert)
			_, errKey := os.Stat(*flKey)
			if errCert == nil && errKey == nil {
				*flTls = true
				cert, err := tls.LoadX509KeyPair(*flCert, *flKey)
				if err != nil {
					log.Fatalf("Couldn't load X509 key pair: %s. Key encrypted?", err)
				}
				tlsConfig.Certificates = []tls.Certificate{cert}
			}
		}

		if *flTls || *flTlsVerify {
			cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], &tlsConfig)
		} else {
			cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], nil)
		}

		if err := cli.ParseCommands(flag.Args()...); err != nil {
			if sterr, ok := err.(*utils.StatusError); ok {
				if sterr.Status != "" {
					log.Println(sterr.Status)
				}
				os.Exit(sterr.StatusCode)
			}
			log.Fatal(err)
		}
	}
}

func showVersion() {
	fmt.Printf("Docker version %s, build %s\n", dockerversion.VERSION, dockerversion.GITCOMMIT)
}

func checkKernelAndArch() error {
	// Check for unsupported architectures
	if runtime.GOARCH != "amd64" {
		return fmt.Errorf("The Docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
	}
	// Check for unsupported kernel versions
	// FIXME: it would be cleaner to not test for specific versions, but rather
	// test for specific functionalities.
	// Unfortunately we can't test for the feature "does not cause a kernel panic"
	// without actually causing a kernel panic, so we need this workaround until
	// the circumstances of pre-3.8 crashes are clearer.
	// For details see http://github.com/docker/docker/issues/407
	if k, err := utils.GetKernelVersion(); err != nil {
		log.Printf("WARNING: %s\n", err)
	} else {
		if utils.CompareKernelVersion(k, &utils.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
			if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
				log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
			}
		}
	}
	return nil
}
[ "\"DOCKER_CONFIG\"", "\"HOME\"", "\"DOCKER_HOST\"", "\"DOCKER_NOWARN_KERNEL_VERSION\"" ]
[]
[ "DOCKER_HOST", "DOCKER_CONFIG", "DOCKER_NOWARN_KERNEL_VERSION", "HOME" ]
[]
["DOCKER_HOST", "DOCKER_CONFIG", "DOCKER_NOWARN_KERNEL_VERSION", "HOME"]
go
4
0
quidlib/conf/env.go
package conf

import (
	"fmt"
	"os"
	"strings"
)

// DefaultAdminUser :
var DefaultAdminUser string = ""

// DefaultAdminPassword :
var DefaultAdminPassword string = ""

// InitFromEnv : get the config from environment variables
func InitFromEnv(isDevMode bool) bool {
	if isDevMode {
		fmt.Println("Dev mode is not authorized when initializing from env variables")
		os.Exit(1)
	}
	Port = os.Getenv("PORT")
	connStr := os.Getenv("DATABASE_URL")
	ConnStr = strings.Replace(connStr, "postgresql://", "", 1)
	EncodingKey = os.Getenv("QUID_KEY")
	DefaultAdminUser = os.Getenv("QUID_ADMIN_USER")
	DefaultAdminPassword = os.Getenv("QUID_ADMIN_PWD")
	mustRunAutoconf := false
	if DefaultAdminUser != "" && DefaultAdminPassword != "" {
		mustRunAutoconf = true
	}
	return mustRunAutoconf
}
[ "\"PORT\"", "\"DATABASE_URL\"", "\"QUID_KEY\"", "\"QUID_ADMIN_USER\"", "\"QUID_ADMIN_PWD\"" ]
[]
[ "PORT", "DATABASE_URL", "QUID_ADMIN_USER", "QUID_KEY", "QUID_ADMIN_PWD" ]
[]
["PORT", "DATABASE_URL", "QUID_ADMIN_USER", "QUID_KEY", "QUID_ADMIN_PWD"]
go
5
0
tests/system_tests_link_routes.py
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # from __future__ import unicode_literals from __future__ import division from __future__ import absolute_import from __future__ import print_function import os from time import sleep, time from threading import Event from subprocess import PIPE, STDOUT from system_test import TestCase, Qdrouterd, main_module, TIMEOUT, Process, TestTimeout, \ AsyncTestSender, AsyncTestReceiver, MgmtMsgProxy, unittest, QdManager from test_broker import FakeBroker from test_broker import FakeService from proton import Delivery from proton import Message from proton.handlers import MessagingHandler from proton.reactor import AtMostOnce, Container, DynamicNodeProperties, LinkOption, AtLeastOnce from proton.reactor import ApplicationEvent from proton.reactor import EventInjector from proton.utils import BlockingConnection from system_tests_drain_support import DrainMessagesHandler, DrainOneMessageHandler, DrainNoMessagesHandler, DrainNoMoreMessagesHandler from qpid_dispatch.management.client import Node from qpid_dispatch.management.error import NotFoundStatus, BadRequestStatus class LinkRouteTest(TestCase): """ Tests the linkRoute property of the dispatch router. Sets up 4 routers (two of which are acting as brokers (QDR.A, QDR.D)). The other two routers have linkRoutes configured such that matching traffic will be directed to/from the 'fake' brokers. (please see configs in the setUpClass method to get a sense of how the routers and their connections are configured) The tests in this class send and receive messages across this network of routers to link routable addresses. Uses the Python Blocking API to send/receive messages. The blocking api plays neatly into the synchronous nature of system tests. 
QDR.A acting broker #1 +---------+ +---------+ +---------+ +-----------------+ | | <------ | | <----- | |<----| blocking_sender | | QDR.A | | QDR.B | | QDR.C | +-----------------+ | | ------> | | ------> | | +-------------------+ +---------+ +---------+ +---------+---->| blocking_receiver | ^ | +-------------------+ | | | V +---------+ | | | QDR.D | | | +---------+ QDR.D acting broker #2 """ @classmethod def get_router(cls, index): return cls.routers[index] @classmethod def setUpClass(cls): """Start three routers""" super(LinkRouteTest, cls).setUpClass() def router(name, connection): config = [ ('router', {'mode': 'interior', 'id': 'QDR.%s'%name}), ] + connection config = Qdrouterd.Config(config) cls.routers.append(cls.tester.qdrouterd(name, config, wait=False)) cls.routers = [] a_listener_port = cls.tester.get_port() b_listener_port = cls.tester.get_port() c_listener_port = cls.tester.get_port() d_listener_port = cls.tester.get_port() test_tag_listener_port = cls.tester.get_port() router('A', [ ('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': a_listener_port, 'saslMechanisms': 'ANONYMOUS'}), ]) router('B', [ # Listener for clients, note that the tests assume this listener is first in this list: ('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': b_listener_port, 'saslMechanisms': 'ANONYMOUS'}), ('listener', {'name': 'test-tag', 'role': 'route-container', 'host': '0.0.0.0', 'port': test_tag_listener_port, 'saslMechanisms': 'ANONYMOUS'}), # This is an route-container connection made from QDR.B's ephemeral port to a_listener_port ('connector', {'name': 'broker', 'role': 'route-container', 'host': '0.0.0.0', 'port': a_listener_port, 'saslMechanisms': 'ANONYMOUS'}), # Only inter router communication must happen on 'inter-router' connectors. This connector makes # a connection from the router B's ephemeral port to c_listener_port ('connector', {'name': 'routerC', 'role': 'inter-router', 'host': '0.0.0.0', 'port': c_listener_port}), # This is an on-demand connection made from QDR.B's ephemeral port to d_listener_port ('connector', {'name': 'routerD', 'role': 'route-container', 'host': '0.0.0.0', 'port': d_listener_port, 'saslMechanisms': 'ANONYMOUS'}), #('linkRoute', {'prefix': 'org.apache', 'connection': 'broker', 'direction': 'in'}), ('linkRoute', {'prefix': 'org.apache', 'containerId': 'QDR.A', 'direction': 'in'}), ('linkRoute', {'prefix': 'org.apache', 'containerId': 'QDR.A', 'direction': 'out'}), ('linkRoute', {'prefix': 'pulp.task', 'connection': 'test-tag', 'direction': 'in'}), ('linkRoute', {'prefix': 'pulp.task', 'connection': 'test-tag', 'direction': 'out'}), # addresses matching pattern 'a.*.toA.#' route to QDR.A ('linkRoute', {'pattern': 'a.*.toA.#', 'containerId': 'QDR.A', 'direction': 'in'}), ('linkRoute', {'pattern': 'a.*.toA.#', 'containerId': 'QDR.A', 'direction': 'out'}), # addresses matching pattern 'a.*.toD.#' route to QDR.D # Dont change dir to direction here so we can make sure that the dir attribute is still working. 
('linkRoute', {'pattern': 'a.*.toD.#', 'containerId': 'QDR.D', 'dir': 'in'}), ('linkRoute', {'pattern': 'a.*.toD.#', 'containerId': 'QDR.D', 'dir': 'out'}) ] ) router('C', [ # The client will exclusively use the following listener to # connect to QDR.C, the tests assume this is the first entry # in the list ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(), 'saslMechanisms': 'ANONYMOUS'}), ('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': c_listener_port, 'saslMechanisms': 'ANONYMOUS'}), # The dot(.) at the end is ignored by the address hashing scheme. ('linkRoute', {'prefix': 'org.apache.', 'direction': 'in'}), ('linkRoute', {'prefix': 'org.apache.', 'direction': 'out'}), ('linkRoute', {'prefix': 'pulp.task', 'direction': 'in'}), ('linkRoute', {'prefix': 'pulp.task', 'direction': 'out'}), ('linkRoute', {'pattern': 'a.*.toA.#', 'direction': 'in'}), ('linkRoute', {'pattern': 'a.*.toA.#', 'direction': 'out'}), ('linkRoute', {'pattern': 'a.*.toD.#', 'direction': 'in'}), ('linkRoute', {'pattern': 'a.*.toD.#', 'direction': 'out'}) ] ) router('D', # sink for QDR.D routes [ ('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': d_listener_port, 'saslMechanisms': 'ANONYMOUS'}), ]) # Wait for the routers to locate each other, and for route propagation # to settle cls.routers[1].wait_router_connected('QDR.C') cls.routers[2].wait_router_connected('QDR.B') cls.routers[2].wait_address("org.apache", remotes=1, delay=0.5, count=2) # This is not a classic router network in the sense that QDR.A and D are acting as brokers. We allow a little # bit more time for the routers to stabilize. sleep(2) def run_qdstat_linkRoute(self, address, args=None): cmd = ['qdstat', '--bus', str(address), '--timeout', str(TIMEOUT) ] + ['--linkroute'] if args: cmd = cmd + args p = self.popen( cmd, name='qdstat-'+self.id(), stdout=PIPE, expect=None, universal_newlines=True) out = p.communicate()[0] assert p.returncode == 0, "qdstat exit status %s, output:\n%s" % (p.returncode, out) return out def run_qdmanage(self, cmd, input=None, expect=Process.EXIT_OK, address=None): p = self.popen( ['qdmanage'] + cmd.split(' ') + ['--bus', address or self.address(), '--indent=-1', '--timeout', str(TIMEOUT)], stdin=PIPE, stdout=PIPE, stderr=STDOUT, expect=expect, universal_newlines=True) out = p.communicate(input)[0] try: p.teardown() except Exception as e: raise Exception("%s\n%s" % (e, out)) return out def test_aaa_qdmanage_query_link_route(self): """ qdmanage converts short type to long type and this test specifically tests if qdmanage is actually doing the type conversion correctly by querying with short type and long type. """ cmd = 'QUERY --type=linkRoute' out = self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0]) # Make sure there is a dir of in and out. self.assertIn('"direction": "in"', out) self.assertIn('"direction": "out"', out) self.assertIn('"containerId": "QDR.A"', out) # Use the long type and make sure that qdmanage does not mess up the long type cmd = 'QUERY --type=org.apache.qpid.dispatch.router.config.linkRoute' out = self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0]) # Make sure there is a dir of in and out. 
self.assertIn('"direction": "in"', out) self.assertIn('"direction": "out"', out) self.assertIn('"containerId": "QDR.A"', out) identity = out[out.find("identity") + 12: out.find("identity") + 13] cmd = 'READ --type=linkRoute --identity=' + identity out = self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0]) self.assertIn(identity, out) exception_occurred = False try: # This identity should not be found cmd = 'READ --type=linkRoute --identity=9999' out = self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0]) except Exception as e: exception_occurred = True self.assertIn("NotFoundStatus: Not Found", str(e)) self.assertTrue(exception_occurred) exception_occurred = False try: # There is no identity specified, this is a bad request cmd = 'READ --type=linkRoute' out = self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0]) except Exception as e: exception_occurred = True self.assertIn("BadRequestStatus: No name or identity provided", str(e)) self.assertTrue(exception_occurred) cmd = 'CREATE --type=autoLink address=127.0.0.1 direction=in connection=routerC' out = self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0]) identity = out[out.find("identity") + 12: out.find("identity") + 14] cmd = 'READ --type=autoLink --identity=' + identity out = self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0]) self.assertIn(identity, out) def test_bbb_qdstat_link_routes_routerB(self): """ Runs qdstat on router B to make sure that router B has 4 link routes, each having one 'in' and one 'out' entry """ out = self.run_qdstat_linkRoute(self.routers[1].addresses[0]) for route in ['a.*.toA.#', 'a.*.toD.#', 'org.apache', 'pulp.task']: self.assertIn(route, out) out_list = out.split() self.assertEqual(out_list.count('in'), 4) self.assertEqual(out_list.count('out'), 4) parts = out.split("\n") self.assertEqual(len(parts), 15) out = self.run_qdstat_linkRoute(self.routers[1].addresses[0], args=['--limit=1']) parts = out.split("\n") self.assertEqual(len(parts), 8) def test_ccc_qdstat_link_routes_routerC(self): """ Runs qdstat on router C to make sure that router C has 4 link routes, each having one 'in' and one 'out' entry """ out = self.run_qdstat_linkRoute(self.routers[2].addresses[0]) out_list = out.split() self.assertEqual(out_list.count('in'), 4) self.assertEqual(out_list.count('out'), 4) def test_ddd_partial_link_route_match(self): """ The linkRoute on Routers C and B is set to org.apache. Creates a receiver listening on the address 'org.apache.dev' and a sender that sends to address 'org.apache.dev'. Sends a message to org.apache.dev via router QDR.C and makes sure that the message was successfully routed (using partial address matching) and received using pre-created links that were created as a result of specifying addresses in the linkRoute attribute('org.apache.'). """ hello_world_1 = "Hello World_1!" 
# Connects to listener #2 on QDR.C addr = self.routers[2].addresses[0] blocking_connection = BlockingConnection(addr) # Receive on org.apache.dev blocking_receiver = blocking_connection.create_receiver(address="org.apache.dev") apply_options = AtMostOnce() # Sender to org.apache.dev blocking_sender = blocking_connection.create_sender(address="org.apache.dev", options=apply_options) msg = Message(body=hello_world_1) # Send a message blocking_sender.send(msg) received_message = blocking_receiver.receive() self.assertEqual(hello_world_1, received_message.body) # Connect to the router acting like the broker (QDR.A) and check the deliveriesIngress and deliveriesEgress local_node = Node.connect(self.routers[0].addresses[0], timeout=TIMEOUT) self.assertEqual(u'QDR.A', local_node.query(type='org.apache.qpid.dispatch.router', attribute_names=[u'id']).results[0][0]) self.assertEqual(1, local_node.read(type='org.apache.qpid.dispatch.router.address', name='M0org.apache.dev').deliveriesEgress) self.assertEqual(1, local_node.read(type='org.apache.qpid.dispatch.router.address', name='M0org.apache.dev').deliveriesIngress) # There should be 4 links - # 1. outbound receiver link on org.apache.dev # 2. inbound sender link on blocking_sender # 3. inbound link to the $management # 4. outbound link to $management # self.assertEqual(4, len() self.assertEqual(4, len(local_node.query(type='org.apache.qpid.dispatch.router.link').results)) blocking_connection.close() def test_partial_link_route_match_1(self): """ This test is pretty much the same as the previous test (test_partial_link_route_match) but the connection is made to router QDR.B instead of QDR.C and we expect to see the same behavior. """ hello_world_2 = "Hello World_2!" addr = self.routers[1].addresses[0] blocking_connection = BlockingConnection(addr) # Receive on org.apache.dev blocking_receiver = blocking_connection.create_receiver(address="org.apache.dev.1") apply_options = AtMostOnce() # Sender to to org.apache.dev blocking_sender = blocking_connection.create_sender(address="org.apache.dev.1", options=apply_options) msg = Message(body=hello_world_2) # Send a message blocking_sender.send(msg) received_message = blocking_receiver.receive() self.assertEqual(hello_world_2, received_message.body) local_node = Node.connect(self.routers[0].addresses[0], timeout=TIMEOUT) # Make sure that the router node acting as the broker (QDR.A) had one message routed through it. This confirms # that the message was link routed self.assertEqual(1, local_node.read(type='org.apache.qpid.dispatch.router.address', name='M0org.apache.dev.1').deliveriesEgress) self.assertEqual(1, local_node.read(type='org.apache.qpid.dispatch.router.address', name='M0org.apache.dev.1').deliveriesIngress) blocking_connection.close() def test_full_link_route_match(self): """ The linkRoute on Routers C and B is set to org.apache. Creates a receiver listening on the address 'org.apache' and a sender that sends to address 'org.apache'. Sends a message to org.apache via router QDR.C and makes sure that the message was successfully routed (using full address matching) and received using pre-created links that were created as a result of specifying addresses in the linkRoute attribute('org.apache.'). """ hello_world_3 = "Hello World_3!" 
# Connects to listener #2 on QDR.C addr = self.routers[2].addresses[0] blocking_connection = BlockingConnection(addr) # Receive on org.apache blocking_receiver = blocking_connection.create_receiver(address="org.apache") apply_options = AtMostOnce() # Sender to to org.apache blocking_sender = blocking_connection.create_sender(address="org.apache", options=apply_options) msg = Message(body=hello_world_3) # Send a message blocking_sender.send(msg) received_message = blocking_receiver.receive() self.assertEqual(hello_world_3, received_message.body) local_node = Node.connect(self.routers[0].addresses[0], timeout=TIMEOUT) # Make sure that the router node acting as the broker (QDR.A) had one message routed through it. This confirms # that the message was link routed self.assertEqual(1, local_node.read(type='org.apache.qpid.dispatch.router.address', name='M0org.apache').deliveriesEgress) self.assertEqual(1, local_node.read(type='org.apache.qpid.dispatch.router.address', name='M0org.apache').deliveriesIngress) blocking_connection.close() def _link_route_pattern_match(self, connect_node, include_host, exclude_host, test_address, expected_pattern): """ This helper function ensures that messages sent to 'test_address' pass through 'include_host', and are *not* routed to 'exclude_host' """ hello_pattern = "Hello Pattern!" route = 'M0' + test_address # Connect to the two 'waypoints', ensure the route is not present on # either node_A = Node.connect(include_host, timeout=TIMEOUT) node_B = Node.connect(exclude_host, timeout=TIMEOUT) for node in [node_A, node_B]: self.assertRaises(NotFoundStatus, node.read, type='org.apache.qpid.dispatch.router.address', name=route) # wait until the host we're connecting to gets its next hop for the # pattern we're connecting to connect_node.wait_address(expected_pattern, remotes=1, delay=0.1, count=2) # Connect to 'connect_node' and send message to 'address' blocking_connection = BlockingConnection(connect_node.addresses[0]) blocking_receiver = blocking_connection.create_receiver(address=test_address) blocking_sender = blocking_connection.create_sender(address=test_address, options=AtMostOnce()) msg = Message(body=hello_pattern) blocking_sender.send(msg) received_message = blocking_receiver.receive() self.assertEqual(hello_pattern, received_message.body) # verify test_address is only present on include_host and not on exclude_host self.assertRaises(NotFoundStatus, node_B.read, type='org.apache.qpid.dispatch.router.address', name=route) self.assertEqual(1, node_A.read(type='org.apache.qpid.dispatch.router.address', name=route).deliveriesIngress) self.assertEqual(1, node_A.read(type='org.apache.qpid.dispatch.router.address', name=route).deliveriesIngress) # drop the connection and verify that test_address is no longer on include_host blocking_connection.close() timeout = time() + TIMEOUT while True: try: node_A.read(type='org.apache.qpid.dispatch.router.address', name=route) if time() > timeout: raise Exception("Expected route '%s' to expire!" % route) sleep(0.1) except NotFoundStatus: break; node_A.close() node_B.close() def test_link_route_pattern_match(self): """ Verify the addresses match the proper patterns and are routed to the proper 'waypoint' only """ qdr_A = self.routers[0].addresses[0] qdr_D = self.routers[3].addresses[0] qdr_C = self.routers[2] # note: the node, not the address! 
self._link_route_pattern_match(connect_node=qdr_C, include_host=qdr_A, exclude_host=qdr_D, test_address='a.notD.toA', expected_pattern='a.*.toA.#') self._link_route_pattern_match(connect_node=qdr_C, include_host=qdr_D, exclude_host=qdr_A, test_address='a.notA.toD', expected_pattern='a.*.toD.#') self._link_route_pattern_match(connect_node=qdr_C, include_host=qdr_A, exclude_host=qdr_D, test_address='a.toD.toA.xyz', expected_pattern='a.*.toA.#') self._link_route_pattern_match(connect_node=qdr_C, include_host=qdr_D, exclude_host=qdr_A, test_address='a.toA.toD.abc', expected_pattern='a.*.toD.#') def test_custom_annotations_match(self): """ The linkRoute on Routers C and B is set to org.apache. Creates a receiver listening on the address 'org.apache' and a sender that sends to address 'org.apache'. Sends a message with custom annotations to org.apache via router QDR.C and makes sure that the message was successfully routed (using full address matching) and received using pre-created links that were created as a result of specifying addresses in the linkRoute attribute('org.apache.'). Make sure custom annotations arrived as well. """ hello_world_3 = "Hello World_3!" # Connects to listener #2 on QDR.C addr = self.routers[2].addresses[0] blocking_connection = BlockingConnection(addr) # Receive on org.apache blocking_receiver = blocking_connection.create_receiver(address="org.apache.2") apply_options = AtMostOnce() # Sender to to org.apache blocking_sender = blocking_connection.create_sender(address="org.apache.2", options=apply_options) msg = Message(body=hello_world_3) annotations = {'custom-annotation': '1/Custom_Annotation'} msg.annotations = annotations # Send a message blocking_sender.send(msg) received_message = blocking_receiver.receive() self.assertEqual(hello_world_3, received_message.body) self.assertEqual(received_message.annotations, annotations) blocking_connection.close() def test_full_link_route_match_1(self): """ This test is pretty much the same as the previous test (test_full_link_route_match) but the connection is made to router QDR.B instead of QDR.C and we expect the message to be link routed successfully. """ hello_world_4 = "Hello World_4!" addr = self.routers[1].addresses[0] blocking_connection = BlockingConnection(addr) # Receive on org.apache blocking_receiver = blocking_connection.create_receiver(address="org.apache.1") apply_options = AtMostOnce() # Sender to to org.apache blocking_sender = blocking_connection.create_sender(address="org.apache.1", options=apply_options) msg = Message(body=hello_world_4) # Send a message blocking_sender.send(msg) received_message = blocking_receiver.receive() self.assertEqual(hello_world_4, received_message.body) local_node = Node.connect(self.routers[0].addresses[0], timeout=TIMEOUT) # Make sure that the router node acting as the broker (QDR.A) had one message routed through it. This confirms # that the message was link routed self.assertEqual(1, local_node.read(type='org.apache.qpid.dispatch.router.address', name='M0org.apache.1').deliveriesEgress) self.assertEqual(1, local_node.read(type='org.apache.qpid.dispatch.router.address', name='M0org.apache.1').deliveriesIngress) blocking_connection.close() def test_zzz_qdmanage_delete_link_route(self): """ We are deleting the link route using qdmanage short name. 
This should be the last test to run """ local_node = Node.connect(self.routers[1].addresses[0], timeout=TIMEOUT) res = local_node.query(type='org.apache.qpid.dispatch.router') results = res.results[0] attribute_list = res.attribute_names result_list = local_node.query(type='org.apache.qpid.dispatch.router.config.linkRoute').results self.assertEqual(results[attribute_list.index('linkRouteCount')], len(result_list)) # First delete linkRoutes on QDR.B for rid in range(8): cmd = 'DELETE --type=linkRoute --identity=' + result_list[rid][1] self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0]) cmd = 'QUERY --type=linkRoute' out = self.run_qdmanage(cmd=cmd, address=self.routers[1].addresses[0]) self.assertEqual(out.rstrip(), '[]') # linkRoutes now gone on QDR.B but remember that it still exist on QDR.C # We will now try to create a receiver on address org.apache.dev on QDR.C. # Since the linkRoute on QDR.B is gone, QDR.C # will not allow a receiver to be created since there is no route to destination. # Connects to listener #2 on QDR.C addr = self.routers[2].addresses[0] # Now delete linkRoutes on QDR.C to eradicate linkRoutes completely local_node = Node.connect(addr, timeout=TIMEOUT) result_list = local_node.query(type='org.apache.qpid.dispatch.router.config.linkRoute').results # QDR.C has 8 link routes configured, nuke 'em: self.assertEqual(8, len(result_list)) for rid in range(8): cmd = 'DELETE --type=linkRoute --identity=' + result_list[rid][1] self.run_qdmanage(cmd=cmd, address=addr) cmd = 'QUERY --type=linkRoute' out = self.run_qdmanage(cmd=cmd, address=addr) self.assertEqual(out.rstrip(), '[]') res = local_node.query(type='org.apache.qpid.dispatch.router') results = res.results[0] attribute_list = res.attribute_names self.assertEqual(results[attribute_list.index('linkRouteCount')], 0) blocking_connection = BlockingConnection(addr, timeout=3) # Receive on org.apache.dev (this address used to be linkRouted but not anymore since we deleted linkRoutes # on both QDR.C and QDR.B) blocking_receiver = blocking_connection.create_receiver(address="org.apache.dev") apply_options = AtMostOnce() hello_world_1 = "Hello World_1!" 
# Sender to org.apache.dev blocking_sender = blocking_connection.create_sender(address="org.apache.dev", options=apply_options) msg = Message(body=hello_world_1) # Send a message blocking_sender.send(msg) received_message = blocking_receiver.receive(timeout=5) self.assertEqual(hello_world_1, received_message.body) def test_yyy_delivery_tag(self): """ Tests that the router carries over the delivery tag on a link routed delivery """ listening_address = self.routers[1].addresses[1] sender_address = self.routers[2].addresses[0] qdstat_address = self.routers[2].addresses[0] test = DeliveryTagsTest(sender_address, listening_address, qdstat_address) test.run() self.assertEqual(None, test.error) def test_yyy_invalid_delivery_tag(self): test = InvalidTagTest(self.routers[2].addresses[0]) test.run() self.assertEqual(None, test.error) def test_close_with_unsettled(self): test = CloseWithUnsettledTest(self.routers[1].addresses[0], self.routers[1].addresses[1]) test.run() self.assertEqual(None, test.error) def test_www_drain_support_all_messages(self): drain_support = DrainMessagesHandler(self.routers[2].addresses[0]) drain_support.run() self.assertEqual(None, drain_support.error) def test_www_drain_support_one_message(self): drain_support = DrainOneMessageHandler(self.routers[2].addresses[0]) drain_support.run() self.assertEqual(None, drain_support.error) def test_www_drain_support_no_messages(self): drain_support = DrainNoMessagesHandler(self.routers[2].addresses[0]) drain_support.run() self.assertEqual(None, drain_support.error) def test_www_drain_support_no_more_messages(self): drain_support = DrainNoMoreMessagesHandler(self.routers[2].addresses[0]) drain_support.run() self.assertEqual(None, drain_support.error) def test_link_route_terminus_address(self): # The receiver is attaching to router B to a listener that has link route for address 'pulp.task' setup. 
listening_address = self.routers[1].addresses[1] # Run the query on a normal port query_address_listening = self.routers[1].addresses[0] # Sender is attaching to router C sender_address = self.routers[2].addresses[0] query_address_sending = self.routers[2].addresses[0] test = TerminusAddrTest(sender_address, listening_address, query_address_sending, query_address_listening) test.run() self.assertTrue(test.in_receiver_found) self.assertTrue(test.out_receiver_found) self.assertTrue(test.in_sender_found) self.assertTrue(test.out_sender_found) def test_dynamic_source(self): test = DynamicSourceTest(self.routers[1].addresses[0], self.routers[1].addresses[1]) test.run() self.assertEqual(None, test.error) def test_dynamic_target(self): test = DynamicTargetTest(self.routers[1].addresses[0], self.routers[1].addresses[1]) test.run() self.assertEqual(None, test.error) def test_detach_without_close(self): test = DetachNoCloseTest(self.routers[1].addresses[0], self.routers[1].addresses[1]) test.run() self.assertEqual(None, test.error) def test_detach_mixed_close(self): test = DetachMixedCloseTest(self.routers[1].addresses[0], self.routers[1].addresses[1]) test.run() self.assertEqual(None, test.error) def _multi_link_send_receive(self, send_host, receive_host, name): senders = ["%s/%s" % (send_host, address) for address in ["org.apache.foo", "org.apache.bar"]] receivers = ["%s/%s" % (receive_host, address) for address in ["org.apache.foo", "org.apache.bar"]] test = MultiLinkSendReceive(senders, receivers, name) test.run() self.assertEqual(None, test.error) def test_same_name_route_receivers_through_B(self): self._multi_link_send_receive(self.routers[0].addresses[0], self.routers[1].addresses[0], "recv_through_B") def test_same_name_route_senders_through_B(self): self._multi_link_send_receive(self.routers[1].addresses[0], self.routers[0].addresses[0], "send_through_B") def test_same_name_route_receivers_through_C(self): self._multi_link_send_receive(self.routers[0].addresses[0], self.routers[2].addresses[0], "recv_through_C") def test_same_name_route_senders_through_C(self): self._multi_link_send_receive(self.routers[2].addresses[0], self.routers[0].addresses[0], "send_through_C") def test_echo_detach_received(self): """ Create two receivers to link routed address org.apache.dev Create a sender to the same address that the receiver is listening on and send 100 messages. After the receivers receive 10 messages each, the receivers will detach and expect to receive ten detaches in response. """ test = EchoDetachReceived(self.routers[2].addresses[0], self.routers[2].addresses[0]) test.run() self.assertEqual(None, test.error) def test_bad_link_route_config(self): """ What happens when the link route create request is malformed? 
""" mgmt = self.routers[1].management # zero length prefix self.assertRaises(BadRequestStatus, mgmt.create, type="org.apache.qpid.dispatch.router.config.linkRoute", name="bad-1", attributes={'prefix': '', 'containerId': 'FakeBroker', 'direction': 'in'}) # pattern wrong type self.assertRaises(BadRequestStatus, mgmt.create, type="org.apache.qpid.dispatch.router.config.linkRoute", name="bad-2", attributes={'pattern': 666, 'containerId': 'FakeBroker', 'direction': 'in'}) # invalid pattern (no tokens) self.assertRaises(BadRequestStatus, mgmt.create, type="org.apache.qpid.dispatch.router.config.linkRoute", name="bad-3", attributes={'pattern': '///', 'containerId': 'FakeBroker', 'direction': 'in'}) # empty attributes self.assertRaises(BadRequestStatus, mgmt.create, type="org.apache.qpid.dispatch.router.config.linkRoute", name="bad-4", attributes={}) # both pattern and prefix self.assertRaises(BadRequestStatus, mgmt.create, type="org.apache.qpid.dispatch.router.config.linkRoute", name="bad-5", attributes={'prefix': 'a1', 'pattern': 'b2', 'containerId': 'FakeBroker', 'direction': 'in'}) # bad direction self.assertRaises(BadRequestStatus, mgmt.create, type="org.apache.qpid.dispatch.router.config.linkRoute", name="bad-6", attributes={'pattern': 'b2', 'containerId': 'FakeBroker', 'direction': 'nowhere'}) # bad distribution self.assertRaises(BadRequestStatus, mgmt.create, type="org.apache.qpid.dispatch.router.config.linkRoute", name="bad-7", attributes={'pattern': 'b2', 'containerId': 'FakeBroker', 'direction': 'in', "distribution": "dilly dilly"}) # no direction self.assertRaises(BadRequestStatus, mgmt.create, type="org.apache.qpid.dispatch.router.config.linkRoute", name="bad-8", attributes={'prefix': 'b2', 'containerId': 'FakeBroker'}) # neither pattern nor prefix self.assertRaises(BadRequestStatus, mgmt.create, type="org.apache.qpid.dispatch.router.config.linkRoute", name="bad-9", attributes={'direction': 'out', 'containerId': 'FakeBroker'}) class DeliveryTagsTest(MessagingHandler): def __init__(self, sender_address, listening_address, qdstat_address): super(DeliveryTagsTest, self).__init__() self.sender_address = sender_address self.listening_address = listening_address self.sender = None self.receiver_connection = None self.sender_connection = None self.qdstat_address = qdstat_address self.id = '1235' self.times = 1 self.sent = 0 self.rcvd = 0 self.delivery_tag_verified = False # The delivery tag we are going to send in the transfer frame # We will later make sure that the same delivery tag shows up on the receiving end in the link routed case. # KAG: force the literal to type 'str' due to SWIG weirdness: on 2.X a # delivery tag cannot be unicode (must be binary), but on 3.X it must # be unicode! See https://issues.apache.org/jira/browse/PROTON-1843 self.delivery_tag = str('92319') self.error = None def timeout(self): self.error = "Timeout expired: sent=%d rcvd=%d" % (self.sent, self.rcvd) if self.receiver_connection: self.receiver_connection.close() if self.sender_connection: self.sender_connection.close() def on_start(self, event): self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self)) self.receiver_connection = event.container.connect(self.listening_address) def on_connection_remote_open(self, event): if event.connection == self.receiver_connection: continue_loop = True # Don't open the sender connection unless we can make sure that there is a remote receiver ready to # accept the message. 
# If there is no remote receiver, the router will throw a 'No route to destination' error when # creating sender connection. # The following loops introduces a wait before creating the sender connection. It gives time to the # router so that the address Dpulp.task can show up on the remoteCount i = 0 while continue_loop: if i > 100: # If we have run the read command for more than hundred times and we still do not have # the remoteCount set to 1, there is a problem, just exit out of the function instead # of looping to infinity. self.receiver_connection.close() return local_node = Node.connect(self.qdstat_address, timeout=TIMEOUT) out = local_node.read(type='org.apache.qpid.dispatch.router.address', name='Dpulp.task').remoteCount if out == 1: continue_loop = False else: i += 1 sleep(0.25) self.sender_connection = event.container.connect(self.sender_address) self.sender = event.container.create_sender(self.sender_connection, "pulp.task", options=AtMostOnce()) def on_sendable(self, event): if self.times == 1: msg = Message(body="Hello World") self.sender.send(msg, tag=self.delivery_tag) self.times += 1 self.sent += 1 def on_message(self, event): if "Hello World" == event.message.body: self.rcvd += 1 # If the tag on the delivery is the same as the tag we sent with the initial transfer, it means # that the router has propagated the delivery tag successfully because of link routing. if self.delivery_tag != event.delivery.tag: self.error = "Delivery-tag: expected:%r got:%r" % (self.delivery_tag, event.delivery.tag) self.receiver_connection.close() self.sender_connection.close() self.timer.cancel() def run(self): Container(self).run() class CloseWithUnsettledTest(MessagingHandler): ## ## This test sends a message across an attach-routed link. While the message ## is unsettled, the client link is closed. The test is ensuring that the ## router does not crash during the closing of the links. ## def __init__(self, normal_addr, route_addr): super(CloseWithUnsettledTest, self).__init__(prefetch=0, auto_accept=False) self.normal_addr = normal_addr self.route_addr = route_addr self.dest = "pulp.task.CWUtest" self.error = None def timeout(self): self.error = "Timeout Expired - Check for cores" self.conn_normal.close() self.conn_route.close() def on_start(self, event): self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self)) self.conn_route = event.container.connect(self.route_addr) def on_connection_opened(self, event): if event.connection == self.conn_route: self.conn_normal = event.container.connect(self.normal_addr) elif event.connection == self.conn_normal: self.sender = event.container.create_sender(self.conn_normal, self.dest) def on_connection_closed(self, event): self.conn_route.close() self.timer.cancel() def on_link_opened(self, event): if event.receiver: self.receiver = event.receiver self.receiver.flow(1) def on_sendable(self, event): msg = Message(body="CloseWithUnsettled") event.sender.send(msg) def on_message(self, event): self.conn_normal.close() def run(self): Container(self).run() class DynamicSourceTest(MessagingHandler): ## ## This test verifies that a dynamic source can be propagated via link-route to ## a route-container. 
## def __init__(self, normal_addr, route_addr): super(DynamicSourceTest, self).__init__(prefetch=0, auto_accept=False) self.normal_addr = normal_addr self.route_addr = route_addr self.dest = "pulp.task.DynamicSource" self.address = "DynamicSourceAddress" self.error = None def timeout(self): self.error = "Timeout Expired - Check for cores" self.conn_normal.close() self.conn_route.close() def on_start(self, event): self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self)) self.conn_route = event.container.connect(self.route_addr) def on_connection_opened(self, event): if event.connection == self.conn_route: self.conn_normal = event.container.connect(self.normal_addr) elif event.connection == self.conn_normal: self.receiver = event.container.create_receiver(self.conn_normal, None, dynamic=True,options=DynamicNodeProperties({"x-opt-qd.address":u"pulp.task.abc"})) def on_link_opened(self, event): if event.receiver == self.receiver: if self.receiver.remote_source.address != self.address: self.error = "Expected %s, got %s" % (self.address, self.receiver.remote_source.address) self.conn_normal.close() self.conn_route.close() self.timer.cancel() def on_link_opening(self, event): if event.sender: self.sender = event.sender if not self.sender.remote_source.dynamic: self.error = "Expected sender with dynamic source" self.conn_normal.close() self.conn_route.close() self.timer.cancel() self.sender.source.address = self.address self.sender.open() def run(self): Container(self).run() class DynamicTarget(LinkOption): def apply(self, link): link.target.dynamic = True link.target.address = None class DynamicTargetTest(MessagingHandler): ## ## This test verifies that a dynamic source can be propagated via link-route to ## a route-container. ## def __init__(self, normal_addr, route_addr): super(DynamicTargetTest, self).__init__(prefetch=0, auto_accept=False) self.normal_addr = normal_addr self.route_addr = route_addr self.dest = "pulp.task.DynamicTarget" self.address = "DynamicTargetAddress" self.error = None def timeout(self): self.error = "Timeout Expired - Check for cores" self.conn_normal.close() self.conn_route.close() def on_start(self, event): self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self)) self.conn_route = event.container.connect(self.route_addr) def on_connection_opened(self, event): if event.connection == self.conn_route: self.conn_normal = event.container.connect(self.normal_addr) elif event.connection == self.conn_normal: self.sender = event.container.create_sender(self.conn_normal, None, options=\ [DynamicTarget(), DynamicNodeProperties({"x-opt-qd.address":u"pulp.task.abc"})]) def on_link_opened(self, event): if event.sender == self.sender: if self.sender.remote_target.address != self.address: self.error = "Expected %s, got %s" % (self.address, self.receiver.remote_source.address) self.conn_normal.close() self.conn_route.close() self.timer.cancel() def on_link_opening(self, event): if event.receiver: self.receiver = event.receiver if not self.receiver.remote_target.dynamic: self.error = "Expected receiver with dynamic source" self.conn_normal.close() self.conn_route.close() self.timer.cancel() self.receiver.target.address = self.address self.receiver.open() def run(self): Container(self).run() class DetachNoCloseTest(MessagingHandler): ## ## This test verifies that link-detach (not close) is propagated properly ## def __init__(self, normal_addr, route_addr): super(DetachNoCloseTest, self).__init__(prefetch=0, auto_accept=False) self.normal_addr = normal_addr self.route_addr 
= route_addr self.dest = "pulp.task.DetachNoClose" self.error = None def timeout(self): self.error = "Timeout Expired - Check for cores" self.conn_normal.close() self.conn_route.close() def stop(self): self.conn_normal.close() self.conn_route.close() self.timer.cancel() def on_start(self, event): self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self)) self.conn_route = event.container.connect(self.route_addr) def on_connection_opened(self, event): if event.connection == self.conn_route: self.conn_normal = event.container.connect(self.normal_addr) elif event.connection == self.conn_normal: self.receiver = event.container.create_receiver(self.conn_normal, self.dest) def on_link_opened(self, event): if event.receiver == self.receiver: self.receiver.detach() def on_link_remote_detach(self, event): if event.sender == self.sender: self.sender.detach() if event.receiver == self.receiver: ## ## Test passed, we expected a detach on the propagated sender and back ## self.stop() def on_link_closing(self, event): if event.sender == self.sender: self.error = 'Propagated link was closed. Expected it to be detached' self.stop() if event.receiver == self.receiver: self.error = 'Client link was closed. Expected it to be detached' self.stop() def on_link_opening(self, event): if event.sender: self.sender = event.sender self.sender.source.address = self.sender.remote_source.address self.sender.open() def run(self): Container(self).run() class DetachMixedCloseTest(MessagingHandler): ## ## This test verifies that link-detach (not close) is propagated properly ## def __init__(self, normal_addr, route_addr): super(DetachMixedCloseTest, self).__init__(prefetch=0, auto_accept=False) self.normal_addr = normal_addr self.route_addr = route_addr self.dest = "pulp.task.DetachMixedClose" self.error = None def timeout(self): self.error = "Timeout Expired - Check for cores" self.conn_normal.close() self.conn_route.close() def stop(self): self.conn_normal.close() self.conn_route.close() self.timer.cancel() def on_start(self, event): self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self)) self.conn_route = event.container.connect(self.route_addr) def on_connection_opened(self, event): if event.connection == self.conn_route: self.conn_normal = event.container.connect(self.normal_addr) elif event.connection == self.conn_normal: self.receiver = event.container.create_receiver(self.conn_normal, self.dest) def on_link_opened(self, event): if event.receiver == self.receiver: self.receiver.detach() def on_link_remote_detach(self, event): if event.sender == self.sender: self.sender.close() if event.receiver == self.receiver: self.error = 'Client link was detached. Expected it to be closed' self.stop() def on_link_closing(self, event): if event.sender == self.sender: self.error = 'Propagated link was closed. 
Expected it to be detached' self.stop() if event.receiver == self.receiver: ## ## Test Passed ## self.stop() def on_link_opening(self, event): if event.sender: self.sender = event.sender self.sender.source.address = self.sender.remote_source.address self.sender.open() def run(self): Container(self).run() # Test to validate fix for DISPATCH-927 class EchoDetachReceived(MessagingHandler): def __init__(self, sender_address, recv_address): super(EchoDetachReceived, self).__init__() self.sender_address = sender_address self.recv_address = recv_address self.dest = "org.apache.dev" self.num_msgs = 100 self.num_receivers = 10 self.msgs_sent = 0 self.receiver_conn = None self.sender_conn = None self.sender = None self.receiver_dict = {} self.error = None self.receiver_attaches = 0 self.timer = None self.sender_attached = False self.received_msgs_dict = {} self.receiver_detach_dict = {} self.num_detaches_echoed = 0 @property def msgs_received(self): return sum(self.received_msgs_dict.values()) def timeout(self): self.bail("Timeout Expired: msgs_sent=%d msgs_received=%d, number of detaches received=%d" % (self.msgs_sent, self.msgs_received, self.num_detaches_echoed)) def on_start(self, event): self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self)) # Create two separate connections for sender and receivers self.receiver_conn = event.container.connect(self.recv_address) self.sender_conn = event.container.connect(self.sender_address) for i in range(self.num_receivers): name = "R%d" % i self.receiver_dict[name] = event.container.create_receiver(self.receiver_conn, self.dest, name=name) self.received_msgs_dict[name] = 0 def bail(self, text=None): self.error = text self.sender_conn.close() self.receiver_conn.close() self.timer.cancel() def on_link_opened(self, event): if event.receiver: if event.receiver.name in list(self.receiver_dict): self.receiver_attaches+=1 # The response receiver attaches have been received. The receiver sent attaches which was link routed # all the way to the 'broker' router and the response attaches have come back. # It is now time to create the sender. if self.receiver_attaches == self.num_receivers: self.sender = event.container.create_sender(self.sender_conn, self.dest) elif event.sender: if not self.sender_attached: if event.sender == self.sender: # The sender attaches were link routed as well and the response attach has been received. self.sender_attached = True def on_sendable(self, event): # The sender will send 100 messages if self.receiver_attaches == self.num_receivers and self.sender_attached: if self.msgs_sent < self.num_msgs: msg = Message(body="Hello World") self.sender.send(msg) self.msgs_sent += 1 def on_message(self, event): if event.receiver and event.receiver.name in list(self.receiver_dict): self.received_msgs_dict[event.receiver.name] += 1 if sum(self.received_msgs_dict.values()) == self.num_msgs: # The receivers have received a total of 100 messages. Close the receivers. The detach sent by these # receivers will travel all the way over the link route and the 'broker' router will respond with a # detach for receiver in list(self.receiver_dict): self.receiver_dict[receiver].close() def on_link_closed(self, event): if event.receiver.name in list(self.receiver_dict) and event.receiver.name not in list(self.receiver_detach_dict): self.receiver_detach_dict[event.receiver.name] = event.receiver self.num_detaches_echoed += 1 # Terminate the test only if both detach frames have been received. 
if all(receiver in list(self.receiver_detach_dict) for receiver in list(self.receiver_dict)): self.bail() def run(self): Container(self).run() class TerminusAddrTest(MessagingHandler): """ This tests makes sure that the link route address is visible in the output of qdstat -l command. Sets up a sender on address pulp.task.terminusTestSender and a receiver on pulp.task.terminusTestReceiver. Connects to the router to which the sender is attached and makes sure that the pulp.task.terminusTestSender address shows up with an 'in' and 'out' Similarly connects to the router to which the receiver is attached and makes sure that the pulp.task.terminusTestReceiver address shows up with an 'in' and 'out' """ def __init__(self, sender_address, listening_address, query_address_sending, query_address_listening): super(TerminusAddrTest, self).__init__() self.sender_address = sender_address self.listening_address = listening_address self.sender = None self.receiver = None self.message_received = False self.receiver_connection = None self.sender_connection = None # We will run a query on the same router where the sender is attached self.query_address_sending = query_address_sending # We will run a query on the same router where the receiver is attached self.query_address_listening = query_address_listening self.count = 0 self.in_receiver_found = False self.out_receiver_found = False self.in_sender_found = False self.out_sender_found = False self.receiver_link_opened = False self.sender_link_opened = False def on_start(self, event): self.receiver_connection = event.container.connect(self.listening_address) def on_connection_remote_open(self, event): if event.connection == self.receiver_connection: continue_loop = True # The following loops introduces a wait. It gives time to the # router so that the address Dpulp.task can show up on the remoteCount i = 0 while continue_loop: if i > 100: # If we have run the read command for more than hundred times and we still do not have # the remoteCount set to 1, there is a problem, just exit out of the function instead # of looping to infinity. self.receiver_connection.close() return local_node = Node.connect(self.query_address_sending, timeout=TIMEOUT) out = local_node.read(type='org.apache.qpid.dispatch.router.address', name='Dpulp.task').remoteCount if out == 1: continue_loop = False i += 1 sleep(0.25) self.sender_connection = event.container.connect(self.sender_address) # Notice here that the receiver and sender are listening on different addresses. Receiver on # pulp.task.terminusTestReceiver and the sender on pulp.task.terminusTestSender self.receiver = event.container.create_receiver(self.receiver_connection, "pulp.task.terminusTestReceiver") self.sender = event.container.create_sender(self.sender_connection, "pulp.task.terminusTestSender", options=AtMostOnce()) def on_link_opened(self, event): if event.receiver == self.receiver: self.receiver_link_opened = True local_node = Node.connect(self.query_address_listening, timeout=TIMEOUT) out = local_node.query(type='org.apache.qpid.dispatch.router.link') link_dir_index = out.attribute_names.index("linkDir") owning_addr_index = out.attribute_names.index("owningAddr") # Make sure that the owningAddr M0pulp.task.terminusTestReceiver shows up on both in and out. 
# The 'out' link is on address M0pulp.task.terminusTestReceiver outgoing from the router B to the receiver # The 'in' link is on address M0pulp.task.terminusTestReceiver incoming from router C to router B for result in out.results: if result[link_dir_index] == 'in' and result[owning_addr_index] == 'M0pulp.task.terminusTestReceiver': self.in_receiver_found = True if result[link_dir_index] == 'out' and result[owning_addr_index] == 'M0pulp.task.terminusTestReceiver': self.out_receiver_found = True if event.sender == self.sender: self.sender_link_opened = True local_node = Node.connect(self.query_address_sending, timeout=TIMEOUT) out = local_node.query(type='org.apache.qpid.dispatch.router.link') link_dir_index = out.attribute_names.index("linkDir") owning_addr_index = out.attribute_names.index("owningAddr") # Make sure that the owningAddr M0pulp.task.terminusTestSender shows up on both in and out. # The 'in' link is on address M0pulp.task.terminusTestSender incoming from sender to router # The 'out' link is on address M0pulp.task.terminusTestSender outgoing from router C to router B for result in out.results: if result[link_dir_index] == 'in' and result[owning_addr_index] == 'M0pulp.task.terminusTestSender': self.in_sender_found = True if result[link_dir_index] == 'out' and result[owning_addr_index] == 'M0pulp.task.terminusTestSender': self.out_sender_found = True # Shutdown the connections only if the on_link_opened has been called for sender and receiver links. if self.sender_link_opened and self.receiver_link_opened: self.sender.close() self.receiver.close() self.sender_connection.close() self.receiver_connection.close() def run(self): Container(self).run() class MultiLinkSendReceive(MessagingHandler): class SendState(object): def __init__(self, link): self.link = link self.sent = False self.accepted = False self.done = False self.closed = False def send(self, subject, body): if not self.sent: self.link.send(Message(subject=subject,body=body,address=self.link.target.address)) self.sent = True def on_accepted(self): self.accepted = True self.done = True def close(self): if not self.closed: self.closed = True self.link.close() self.link.connection.close() class RecvState(object): def __init__(self, link): self.link = link self.received = False self.done = False self.closed = False def on_message(self): self.received = True self.done = True def close(self): if not self.closed: self.closed = True self.link.close() self.link.connection.close() def __init__(self, send_urls, recv_urls, name, message=None): super(MultiLinkSendReceive, self).__init__() self.send_urls = send_urls self.recv_urls = recv_urls self.senders = {} self.receivers = {} self.message = message or "SendReceiveTest" self.sent = False self.error = None self.name = name def close(self): for sender in self.senders.values(): sender.close() for receiver in self.receivers.values(): receiver.close() def all_done(self): for sender in self.senders.values(): if not sender.done: return False for receiver in self.receivers.values(): if not receiver.done: return False return True def timeout(self): self.error = "Timeout Expired" self.close() def stop_if_all_done(self): if self.all_done(): self.stop() def stop(self): self.close() self.timer.cancel() def on_start(self, event): self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self)) event.container.container_id = None for u in self.send_urls: s = self.SendState(event.container.create_sender(u, name=self.name)) self.senders[s.link.connection.container] = s for u in self.recv_urls: r = 
self.RecvState(event.container.create_receiver(u, name=self.name)) self.receivers[r.link.connection.container] = r def on_sendable(self, event): self.senders[event.connection.container].send(self.name, self.message) def on_message(self, event): if self.message != event.message.body: error = "Incorrect message. Got %s, expected %s" % (event.message.body, self.message.body) self.receivers[event.connection.container].on_message() self.stop_if_all_done() def on_accepted(self, event): self.senders[event.connection.container].on_accepted() self.stop_if_all_done() def run(self): Container(self).run() class LinkRouteProtocolTest(TestCase): """ Test link route implementation against "misbehaving" containers Uses a custom fake broker (not a router) that can do weird things at the protocol level. +-------------+ +---------+ +-----------------+ | | <------ | | <----- | blocking_sender | | fake broker | | QDR.A | +-----------------+ | | ------> | | ------> +-------------------+ +-------------+ +---------+ | blocking_receiver | +-------------------+ """ @classmethod def setUpClass(cls): """Configure and start QDR.A""" super(LinkRouteProtocolTest, cls).setUpClass() config = [ ('router', {'mode': 'standalone', 'id': 'QDR.A'}), # for client connections: ('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': cls.tester.get_port(), 'saslMechanisms': 'ANONYMOUS'}), # to connect to the fake broker ('connector', {'name': 'broker', 'role': 'route-container', 'host': '127.0.0.1', 'port': cls.tester.get_port(), 'saslMechanisms': 'ANONYMOUS'}), # forward 'org.apache' messages to + from fake broker: ('linkRoute', {'prefix': 'org.apache', 'containerId': 'FakeBroker', 'direction': 'in'}), ('linkRoute', {'prefix': 'org.apache', 'containerId': 'FakeBroker', 'direction': 'out'}) ] config = Qdrouterd.Config(config) cls.router = cls.tester.qdrouterd('A', config, wait=False) def _fake_broker(self, cls): """Spawn a fake broker listening on the broker's connector """ fake_broker = cls(self.router.connector_addresses[0]) # wait until the connection to the fake broker activates self.router.wait_connectors() return fake_broker def test_DISPATCH_1092(self): # This fake broker will force the session closed after the link # detaches. Verify that the session comes back up correctly when the # next client attaches killer = self._fake_broker(SessionKiller) for i in range(2): bconn = BlockingConnection(self.router.addresses[0]) bsender = bconn.create_sender(address="org.apache", options=AtLeastOnce()) msg = Message(body="Hey!") bsender.send(msg) bsender.close() bconn.close() killer.join() class SessionKiller(FakeBroker): """DISPATCH-1092: force a session close when the link closes. This should cause the router to re-create the session when the next client attaches. 
""" def __init__(self, url): super(SessionKiller, self).__init__(url) def on_link_closing(self, event): event.link.close() event.session.close() class FakeBrokerDrain(FakeBroker): """ DISPATCH-1496 - Make sure that the router does not grant additional credit when drain is issued by a receiver connected to the router on a link routed address """ def __init__(self, url): super(FakeBrokerDrain, self).__init__(url) self.first_flow_received = False self.first_drain_mode = False self.second_drain_mode = False self.error = None self.num_flows = 0 self.success = False def on_link_flow(self, event): if event.link.is_sender: if event.sender.drain_mode: if not self.first_drain_mode: self.first_drain_mode = True event.sender.drained() elif not self.second_drain_mode: self.second_drain_mode = True if event.link.credit == 1000: # Without the patch for DISPATCH-1496, # the event.link.credit value would be 2000 self.success = True else: self.success = False event.sender.drained() else: if not self.first_flow_received: self.first_flow_received = True msg = Message(body="First Drain Transfer") event.link.send(msg) class DrainReceiver(MessagingHandler): def __init__(self, url, fake_broker): super(DrainReceiver, self).__init__(prefetch=0, auto_accept=False) self.url = url self.received = 0 self.receiver = None self.first_drain_sent = False self.second_drain_sent = False self.first_flow_sent = False self.receiver_conn = None self.error = None self.num_flows = 0 self.fake_broker = fake_broker def on_start(self, event): self.receiver_conn = event.container.connect(self.url) self.receiver = event.container.create_receiver(self.receiver_conn, "org.apache") # Step 1: Send a flow of 1000 to the router. The router will forward this # flow to the FakeBroker self.receiver.flow(1000) self.first_flow_sent = True def on_link_flow(self, event): if event.receiver == self.receiver: self.num_flows += 1 if self.num_flows == 1: # Step 4: The response drain received from the FakeBroker # Step 5: Send second flow of 1000 credits. This is forwarded to the FakeBroker self.receiver.flow(1000) self.timer = event.reactor.schedule(3, TestTimeout(self)) elif self.num_flows == 2: if not self.fake_broker.success: self.error = "The FakeBroker did not receive correct credit of 1000" self.receiver_conn.close() def timeout(self): # Step 6: The second drain is sent to the router. The router was forwarding the wrong credit (2000) to the FakeBroker # but with the fix for DISPATCH-1496, the correct credit is forwarded (1000) self.receiver.drain(0) self.second_drain_sent = True def on_message(self, event): if event.receiver == self.receiver: self.received += 1 # Step 2: In response to Step 1, the broker has sent the only message in its queue if self.received == 1: self.first_drain_sent = True #print ("First message received. Doing first drain") # Step 3: The receiver drains after receiving the first message. # This drain is forwarded to the FakeBroker self.receiver.drain(0) def run(self): Container(self).run() class LinkRouteDrainTest(TestCase): """ Test link route drain implementation. DISPATCH-1496 alleges that the router is granting extra credit when forwarding the drain. Uses a router which connects to a FakeBroker (FB) +-------------+ +---------+ | | <------ | | | fake broker | | QDR.A | | | ------> | | ------> +-------------------+ +-------------+ +---------+ | receiver | +-------------------+ The router will grant extra credit when the following sequence is used 1. 
The receiver attaches to the router on a a link routed address called "org.apache" 2. Receiver issues a flow of 1000. The FakeBroker has only one message in its "examples" queue and it sends it over to the router which forwards it to the receiver 3. After receiving the message the receiver issues a drain(0). This drain is forwarded to the FakeBroker by the router and the FB responds. There is not problem with this drain 4. The receiver again gives a flow of 1000 and it is forwarded to the FB. There are no messages in the broker queue, so the FB sends no messages 5. The receiver again issues a drain(0). At this time, without the fix for DISPATCH-1496, the router issues double the credit to the FB. Instead of issuing a credit of 1000, it issues a credit of 2000. """ @classmethod def setUpClass(cls): """Configure and start QDR.A""" super(LinkRouteDrainTest, cls).setUpClass() config = [ ('router', {'mode': 'standalone', 'id': 'QDR.A'}), # for client connections: ('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': cls.tester.get_port(), 'saslMechanisms': 'ANONYMOUS'}), # to connect to the fake broker ('connector', {'name': 'broker', 'role': 'route-container', 'host': '127.0.0.1', 'port': cls.tester.get_port(), 'saslMechanisms': 'ANONYMOUS'}), # forward 'org.apache' messages to + from fake broker: ('linkRoute', {'prefix': 'org.apache', 'containerId': 'FakeBroker', 'direction': 'in'}), ('linkRoute', {'prefix': 'org.apache', 'containerId': 'FakeBroker', 'direction': 'out'}) ] config = Qdrouterd.Config(config) cls.router = cls.tester.qdrouterd('A', config, wait=False) def _fake_broker(self, cls): """Spawn a fake broker listening on the broker's connector """ fake_broker = cls(self.router.connector_addresses[0]) # wait until the connection to the fake broker activates self.router.wait_connectors() return fake_broker def test_DISPATCH_1496(self): fake_broker = self._fake_broker(FakeBrokerDrain) drain_receiver = DrainReceiver(self.router.addresses[0], fake_broker) drain_receiver.run() self.assertEquals(drain_receiver.error, None) class ConnectionLinkRouteTest(TestCase): """ Test connection scoped link route implementation Base configuration: +-----------------+ +---------+ +---------+<--| blocking_sender | +-----------------+ | | | | +-----------------+ | Fake LR Service |<==>| QDR.A |<==>| QDR.B | +-----------------+ | | | | +-------------------+ +---------+ +---------+-->| blocking_receiver | +-------------------+ The Fake Link Route Service will create connection-scoped link routes to QDR.A, while blocking sender/receivers on QDR.B will send/receive messages via the link route. 
""" _AS_TYPE = "org.apache.qpid.dispatch.router.connection.linkRoute" @classmethod def setUpClass(cls): super(ConnectionLinkRouteTest, cls).setUpClass() b_port = cls.tester.get_port() configs = [ # QDR.A: [('router', {'mode': 'interior', 'id': 'QDR.A'}), # for fake connection-scoped LRs: ('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': cls.tester.get_port(), 'saslMechanisms': 'ANONYMOUS'}), # for fake route-container LR connections: ('listener', {'role': 'route-container', 'host': '0.0.0.0', 'port': cls.tester.get_port(), 'saslMechanisms': 'ANONYMOUS'}), # to connect to the QDR.B ('connector', {'role': 'inter-router', 'host': '127.0.0.1', 'port': b_port, 'saslMechanisms': 'ANONYMOUS'})], # QDR.B: [('router', {'mode': 'interior', 'id': 'QDR.B'}), # for client connections ('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': cls.tester.get_port(), 'saslMechanisms': 'ANONYMOUS'}), # for connection to QDR.A ('listener', {'role': 'inter-router', 'host': '0.0.0.0', 'port': b_port, 'saslMechanisms': 'ANONYMOUS'})] ] cls.routers=[] for c in configs: config = Qdrouterd.Config(c) cls.routers.append(cls.tester.qdrouterd(config=config, wait=False)) cls.QDR_A = cls.routers[0] cls.QDR_B = cls.routers[1] cls.QDR_A.wait_router_connected('QDR.B') cls.QDR_B.wait_router_connected('QDR.A') def _get_address(self, mgmt, addr): a_type = 'org.apache.qpid.dispatch.router.address' return list(filter(lambda a: a['name'].endswith(addr), mgmt.query(a_type))) def test_config_file_bad(self): # verify that specifying a connection link route in the configuration # file fails config = [('router', {'mode': 'interior', 'id': 'QDR.X'}), ('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': self.tester.get_port(), 'saslMechanisms': 'ANONYMOUS'}), ('connection.linkRoute', {'pattern': "i/am/bad", 'direction': "out"}) ] cfg = Qdrouterd.Config(config) # we expect the router to fail router = self.tester.qdrouterd("X", cfg, wait=False, expect=Process.EXIT_FAIL) def test_mgmt(self): # test create, delete, and query mgmt_conn = BlockingConnection(self.QDR_A.addresses[0]) mgmt_proxy = ConnLinkRouteMgmtProxy(mgmt_conn) for i in range(10): rsp = mgmt_proxy.create_conn_link_route("lr1-%d" % i, {'pattern': "*/hi/there/%d" % i, 'direction': 'out' if i % 2 else 'in'}) self.assertEqual(201, rsp.status_code) # test query rsp = mgmt_proxy.query_conn_link_routes() self.assertEqual(200, rsp.status_code) self.assertEqual(10, len(rsp.results)) entities = rsp.results # test read rsp = mgmt_proxy.read_conn_link_route('lr1-5') self.assertEqual(200, rsp.status_code) self.assertEqual("lr1-5", rsp.attrs['name']) self.assertEqual("*/hi/there/5", rsp.attrs['pattern']) self.assertEqual(mgmt_conn.container.container_id, rsp.attrs['containerId']) # bad creates attrs = [{'pattern': "bad", 'direction': "bad"}, {'direction': 'in'}, {}, {'pattern': ''}, {'pattern': 7}] for a in attrs: rsp = mgmt_proxy.create_conn_link_route("iamnoone", a) self.assertEqual(400, rsp.status_code) # bad read rsp = mgmt_proxy.read_conn_link_route('iamnoone') self.assertEqual(404, rsp.status_code) # bad delete rsp = mgmt_proxy.delete_conn_link_route('iamnoone') self.assertEqual(404, rsp.status_code) # delete all for r in entities: self.assertEqual(200, r.status_code) rsp = mgmt_proxy.delete_conn_link_route(r.attrs['name']) self.assertEqual(204, rsp.status_code) # query - should be none left rsp = mgmt_proxy.query_conn_link_routes() self.assertEqual(200, rsp.status_code) self.assertEqual(0, len(rsp.results)) def test_address_propagation(self): # test service that 
creates and deletes connection link routes fs = ConnLinkRouteService(self.QDR_A.addresses[1], container_id="FakeService", config = [("clr1", {"pattern": "flea.*", "direction": "out"}), ("clr2", {"pattern": "flea.*", "direction": "in"})]) self.assertEqual(2, len(fs.values)) # the address should propagate to A and B self.QDR_A.wait_address(address="flea.*", count=2) self.QDR_B.wait_address(address="flea.*", count=2) # now have the service delete the config fs.delete_config() # eventually the addresses will be un-published mgmt_A = QdManager(self, address=self.QDR_A.addresses[0]) mgmt_B = QdManager(self, address=self.QDR_B.addresses[0]) deadline = time() + TIMEOUT while (self._get_address(mgmt_A, "flea.*") or self._get_address(mgmt_B, "flea.*")): self.assertTrue(time() < deadline) sleep(0.1) fs.join(); # simple forwarding tests with auto delete def test_send_receive(self): COUNT = 5 mgmt_A = QdManager(self, address=self.QDR_A.addresses[0]) mgmt_B = QdManager(self, address=self.QDR_B.addresses[0]) # connect broker to A route-container fs = ConnLinkRouteService(self.QDR_A.addresses[1], container_id="FakeService", config = [("clr1", {"pattern": "flea.*", "direction": "out"}), ("clr2", {"pattern": "flea.*", "direction": "in"})]) self.assertEqual(2, len(fs.values)) # wait for the address to propagate to B self.QDR_B.wait_address(address="flea.*", count=2) # ensure the link routes are not visible via other connections clrs = mgmt_A.query(self._AS_TYPE) self.assertEqual(0, len(clrs)) # send from A to B r = AsyncTestReceiver(self.QDR_B.addresses[0], "flea.B", container_id="flea.BReceiver") s = AsyncTestSender(self.QDR_A.addresses[0], "flea.B", container_id="flea.BSender", message=Message(body="SENDING TO flea.B"), count=COUNT) s.wait() # for sender to complete for i in range(COUNT): self.assertEqual("SENDING TO flea.B", r.queue.get(timeout=TIMEOUT).body) r.stop() self.assertEqual(COUNT, fs.in_count) # send from B to A r = AsyncTestReceiver(self.QDR_A.addresses[0], "flea.A", container_id="flea.AReceiver") s = AsyncTestSender(self.QDR_B.addresses[0], "flea.A", container_id="flea.ASender", message=Message(body="SENDING TO flea.A"), count=COUNT) s.wait() for i in range(COUNT): self.assertEqual("SENDING TO flea.A", r.queue.get(timeout=TIMEOUT).body) r.stop() self.assertEqual(2 * COUNT, fs.in_count) # once the fake service closes its conn the link routes # are removed so the link route addresses must be gone fs.join() mgmt_A = QdManager(self, address=self.QDR_A.addresses[0]) mgmt_B = QdManager(self, address=self.QDR_B.addresses[0]) deadline = time() + TIMEOUT while (self._get_address(mgmt_A, "flea.*") or self._get_address(mgmt_B, "flea.*")): self.assertTrue(time() < deadline) sleep(0.1) class ConnLinkRouteService(FakeBroker): def __init__(self, url, container_id, config, timeout=TIMEOUT): self.conn = None self.mgmt_proxy = None self.mgmt_sender = None self.mgmt_receiver = None self._config = config self._config_index = 0 self._config_done = Event() self._config_error = None self._config_values = [] self._cleaning_up = False self._delete_done = Event() self._delete_count = 0 self._event_injector = EventInjector() self._delete_event = ApplicationEvent("delete_config") super(ConnLinkRouteService, self).__init__(url, container_id) if self._config_done.wait(timeout) is False: raise Exception("Timed out waiting for configuration setup") if self._config_error is not None: raise Exception("Error: %s" % self._config_error) @property def values(self): return self._config_values def delete_config(self): 
self._event_injector.trigger(self._delete_event) if self._delete_done.wait(TIMEOUT) is False: raise Exception("Timed out waiting for configuration delete") def on_start(self, event): """ Do not create an acceptor, actively connect instead """ event.container.selectable(self._event_injector) self.conn = event.container.connect(self.url) def on_connection_opened(self, event): if event.connection == self.conn: if self.mgmt_receiver is None: self.mgmt_receiver = event.container.create_receiver(self.conn, dynamic=True) super(ConnLinkRouteService, self).on_connection_opened(event) def on_connection_closed(self, event): if self._event_injector: self._event_injector.close() self._event_injector = None super(ConnLinkRouteService, self).on_connection_closed(event) def on_link_opened(self, event): if event.link == self.mgmt_receiver: self.mgmt_proxy = MgmtMsgProxy(self.mgmt_receiver.remote_source.address) self.mgmt_sender = event.container.create_sender(self.conn, target="$management") def on_link_error(self, event): # when a remote client disconnects the service will get a link error # that is expected - simply clean up the link self.on_link_closing(event) def on_sendable(self, event): if event.sender == self.mgmt_sender: if not self._cleaning_up: if self._config_index < len(self._config): cfg = self._config[self._config_index] msg = self.mgmt_proxy.create_conn_link_route(cfg[0], cfg[1]) self.mgmt_sender.send(msg) self._config_index += 1 elif self._config_values: cv = self._config_values.pop() msg = self.mgmt_proxy.delete_conn_link_route(cv['name']) self._delete_count += 1 else: super(ConnLinkRouteService, self).on_sendable(event) def on_message(self, event): if event.receiver == self.mgmt_receiver: response = self.mgmt_proxy.response(event.message) if response.status_code == 201: # created: self._config_values.append(response.attrs) if len(self._config_values) == len(self._config): self._config_done.set() elif response.status_code == 204: # deleted self._delete_count -= 1 if (not self._config_values) and self._delete_count == 0: self._delete_done.set() else: # error self._config_error = ("mgmt failed: %s" % response.status_description) self._config_done.set() self._delete_done.set() else: super(ConnLinkRouteService, self).on_message(event) def on_delete_config(self, event): if not self._cleaning_up: self._cleaning_up = True if not self._config_values: self._delete_done.set() else: try: while self.mgmt_sender.credit > 0: cv = self._config_values.pop() msg = self.mgmt_proxy.delete_conn_link_route(cv["name"]) self.mgmt_sender.send(msg) self._delete_count += 1 except IndexError: pass class ConnLinkRouteMgmtProxy(object): """ Manage connection scoped link routes over a given connection. 
While the connection remains open the connection scoped links will remain configured and active """ def __init__(self, bconn, credit=250): self._receiver = bconn.create_receiver(address=None, dynamic=True, credit=credit) self._sender = bconn.create_sender(address="$management") self._proxy = MgmtMsgProxy(self._receiver.link.remote_source.address) def __getattr__(self, key): # wrap accesses to the management message functions so we can send and # receive the messages using the blocking links f = getattr(self._proxy, key) if not callable(f): return f def _func(*args, **kwargs): self._sender.send(f(*args, **kwargs)) return self._proxy.response(self._receiver.receive()) return _func class InvalidTagTest(MessagingHandler): """Verify that a message with an invalid tag length is rejected """ def __init__(self, router_addr): super(InvalidTagTest, self).__init__(auto_accept=False, auto_settle=False) self.test_conn = None self.test_address = router_addr self.tx_ct = 0; self.accept_ct = 0; self.reject_ct = 0; self.error = None def timeout(self): self.error = "Timeout expired: sent=%d rcvd=%d" % (self.tx_ct, self.accept_ct + self.reject_ct) if self.test_conn: self.test_conn.close() def on_start(self, event): self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self)) self.test_conn = event.container.connect(self.test_address) rx = event.container.create_receiver(self.test_conn, "org.apache.foo") def on_link_opened(self, event): if event.receiver: event.receiver.flow(100) event.container.create_sender(event.connection, "org.apache.foo") def on_sendable(self, event): if self.tx_ct < 10: self.tx_ct += 1 if self.tx_ct == 5: event.sender.send(Message(body="YO"), tag=str("X" * 64)) else: event.sender.send(Message(body="YO"), tag=str("BLAH%d" % self.tx_ct)) def on_accepted(self, event): self.accept_ct += 1 event.delivery.settle() if self.accept_ct == 9 and self.reject_ct == 1: event.connection.close() self.timer.cancel() def on_rejected(self, event): self.reject_ct += 1 event.delivery.settle() def on_message(self, event): event.delivery.update(Delivery.ACCEPTED) event.delivery.settle() def run(self): Container(self).run() class Dispatch1428(TestCase): """ Sets up 2 routers (one of which are acting as brokers (QDR.A)). 
QDR.A acting broker #1 +---------+ +---------+ | | <------ | | | QDR.A | | QDR.B | | | ------> | | +---------+ +---------+ """ @classmethod def get_router(cls, index): return cls.routers[index] @classmethod def setUpClass(cls): """Start two routers""" super(Dispatch1428, cls).setUpClass() def router(name, connection): config = [ ('router', {'mode': 'interior', 'id': 'QDR.%s'%name}), ] + connection config = Qdrouterd.Config(config) cls.routers.append(cls.tester.qdrouterd(name, config, wait=False)) cls.routers = [] a_listener_port = cls.tester.get_port() b_listener_port = cls.tester.get_port() router('A', [ ('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': a_listener_port, 'saslMechanisms': 'ANONYMOUS'}), ]) router('B', [ ('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': b_listener_port, 'saslMechanisms': 'ANONYMOUS'}), ('connector', {'name': 'one', 'role': 'route-container', 'host': '0.0.0.0', 'port': a_listener_port, 'saslMechanisms': 'ANONYMOUS'}), ('connector', {'name': 'two', 'role': 'route-container', 'host': '0.0.0.0', 'port': a_listener_port, 'saslMechanisms': 'ANONYMOUS'}) ] ) sleep(2) def run_qdmanage(self, cmd, input=None, expect=Process.EXIT_OK, address=None): p = self.popen( ['qdmanage'] + cmd.split(' ') + ['--bus', address or self.address(), '--indent=-1', '--timeout', str(TIMEOUT)], stdin=PIPE, stdout=PIPE, stderr=STDOUT, expect=expect, universal_newlines=True) out = p.communicate(input)[0] try: p.teardown() except Exception as e: raise Exception("%s\n%s" % (e, out)) return out def test_both_link_routes_active(self): cmds = [ 'CREATE --type=linkRoute name=foo prefix=foo direction=in connection=one', 'CREATE --type=linkRoute name=bar prefix=bar direction=in connection=two', 'CREATE --type=linkRoute name=baz prefix=baz direction=in containerId=QDR.A' ] for c in cmds: self.run_qdmanage(cmd=c, address=self.routers[1].addresses[0]) # Now that the qdmanage has run, query the link routes and make sure that their "operStatus" is "active" before # running any of the tests. long_type = 'org.apache.qpid.dispatch.router.config.linkRoute' qd_manager = QdManager(self, address=self.routers[1].addresses[0]) for i in range(5): all_link_routes_activated = True link_routes = qd_manager.query(long_type) for link_route in link_routes: oper_status = link_route['operStatus'] if oper_status != "active": all_link_routes_activated = False break if not all_link_routes_activated: # One or more of the link routes have not been activated. # Check after one second. sleep(1) else: break # All link routes created in this test MUST be activated before # we can continue further testing. 
self.assertTrue(all_link_routes_activated) first = SendReceive("%s/foo" % self.routers[1].addresses[0], "%s/foo" % self.routers[0].addresses[0]) first.run() self.assertEqual(None, first.error) second = SendReceive("%s/bar" % self.routers[1].addresses[0], "%s/bar" % self.routers[0].addresses[0]) second.run() self.assertEqual(None, second.error) third = SendReceive("%s/baz" % self.routers[1].addresses[0], "%s/baz" % self.routers[0].addresses[0]) third.run() self.assertEqual(None, third.error) class SendReceive(MessagingHandler): def __init__(self, send_url, recv_url, message=None): super(SendReceive, self).__init__() self.send_url = send_url self.recv_url = recv_url self.message = message or Message(body="SendReceiveTest") self.sent = False self.error = None def close(self): self.sender.close() self.receiver.close() self.sender.connection.close() self.receiver.connection.close() def timeout(self): self.error = "Timeout Expired - Check for cores" self.close() def stop(self): self.close() self.timer.cancel() def on_start(self, event): self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self)) event.container.container_id = "SendReceiveTestClient" self.sender = event.container.create_sender(self.send_url) self.receiver = event.container.create_receiver(self.recv_url) def on_sendable(self, event): if not self.sent: event.sender.send(self.message) self.sent = True def on_message(self, event): if self.message.body != event.message.body: self.error = "Incorrect message. Got %s, expected %s" % (event.message.body, self.message.body) def on_accepted(self, event): self.stop() def run(self): Container(self).run() class LinkRoute3Hop(TestCase): """ Sets up a linear 3 hop router network for testing multi-hop link routes. +---------+ +---------+ +---------+ +------------------+ | | <------ | | <----- | |<----| blocking_senders | | QDR.A | | QDR.B | | QDR.C | +------------------+ | | ------> | | ------> | | +--------------------+ +---------+ +---------+ +---------+---->| blocking_receivers | ^ +--------------------+ | V +-------------+ | FakeService | +-------------+ """ @classmethod def setUpClass(cls): super(LinkRoute3Hop, cls).setUpClass() b_port = cls.tester.get_port() configs = [ # QDR.A: [('router', {'mode': 'interior', 'id': 'QDR.A'}), # for client access ('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': cls.tester.get_port(), 'saslMechanisms': 'ANONYMOUS'}), # for fake service: ('listener', {'role': 'route-container', 'host': '0.0.0.0', 'port': cls.tester.get_port(), 'saslMechanisms': 'ANONYMOUS'}), # to connect to the QDR.B ('connector', {'role': 'inter-router', 'host': '127.0.0.1', 'port': b_port, 'saslMechanisms': 'ANONYMOUS'}), # the routes ('linkRoute', {'prefix': 'closest/test-client', 'containerId': 'FakeService', 'direction': 'in'}), ('linkRoute', {'prefix': 'closest/test-client', 'containerId': 'FakeService', 'direction': 'out'}) ], # QDR.B: [('router', {'mode': 'interior', 'id': 'QDR.B'}), # for client connections ('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': cls.tester.get_port(), 'saslMechanisms': 'ANONYMOUS'}), # for inter-router connections from QDR.A and QDR.C ('listener', {'role': 'inter-router', 'host': '0.0.0.0', 'port': b_port, 'saslMechanisms': 'ANONYMOUS'}), ('linkRoute', {'prefix': 'closest/test-client', 'direction': 'in'}), ('linkRoute', {'prefix': 'closest/test-client', 'direction': 'out'}) ], # QDR.C [('router', {'mode': 'interior', 'id': 'QDR.C'}), # for client connections ('listener', {'role': 'normal', 'host': '0.0.0.0', 'port': 
cls.tester.get_port(), 'saslMechanisms': 'ANONYMOUS'}), # to connect to the QDR.B ('connector', {'role': 'inter-router', 'host': '127.0.0.1', 'port': b_port, 'saslMechanisms': 'ANONYMOUS'}), ('linkRoute', {'prefix': 'closest/test-client', 'direction': 'in'}), ('linkRoute', {'prefix': 'closest/test-client', 'direction': 'out'}) ] ] cls.routers=[] for c in configs: config = Qdrouterd.Config(c) cls.routers.append(cls.tester.qdrouterd(config=config, wait=False)) cls.QDR_A = cls.routers[0] cls.QDR_B = cls.routers[1] cls.QDR_C = cls.routers[2] cls.QDR_A.wait_router_connected('QDR.B') cls.QDR_B.wait_router_connected('QDR.A') cls.QDR_B.wait_router_connected('QDR.C') cls.QDR_C.wait_router_connected('QDR.B') cls.QDR_C.wait_router_connected('QDR.A') cls.QDR_A.wait_router_connected('QDR.C') cls.fake_service = FakeService(cls.QDR_A.addresses[1], container_id="FakeService") cls.QDR_C.wait_address("closest/test-client", remotes=1, count=2) def test_01_parallel_link_routes(self): """ Verify Q2/Q3 recovery in the case of multiple link-routes sharing the same session. """ send_clients = 10 send_batch = 10 total = send_clients * send_batch start_in = self.fake_service.in_count start_out = self.fake_service.out_count env = dict(os.environ, PN_TRACE_FRM="1") rx = self.popen(["test-receiver", "-a", self.QDR_C.addresses[0], "-c", str(total), "-s", "closest/test-client"], env=env, expect=Process.EXIT_OK) def _spawn_sender(x): return self.popen(["test-sender", "-a", self.QDR_C.addresses[0], "-c", str(send_batch), "-i", "TestSender-%s" % x, "-sx", # huge message size to trigger Q2/Q3 "-t", "closest/test-client"], env=env, expect=Process.EXIT_OK) senders = [_spawn_sender(s) for s in range(send_clients)] for tx in senders: out_text, out_err = tx.communicate(timeout=TIMEOUT) if tx.returncode: raise Exception("Sender failed: %s %s" % (out_text, out_err)) if rx.wait(timeout=TIMEOUT): raise Exception("Receiver failed to consume all messages in=%s out=%s", self.fake_service.in_count, self.fake_service.out_count) self.assertEqual(start_in + total, self.fake_service.in_count) self.assertEqual(start_out + total, self.fake_service.out_count) if __name__ == '__main__': unittest.main(main_module())
[]
[]
[]
[]
[]
python
0
0
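The qpid-dispatch row above ends with the DISPATCH-1496 drain tests, which verify that a router forwards only the outstanding credit (and does not double it) when a receiver drains a link-routed address. As a quick illustration of the client-side flow()/drain() sequence those tests drive, here is a minimal sketch using the same python-qpid-proton reactive API that appears in the row; the router URL, the `org.apache` address, and the credit value are assumptions for illustration only and are not part of the dataset row or of the router's actual behavior.

```python
# Minimal sketch, not part of the dataset. Assumptions: a router reachable at
# ROUTER_URL that link-routes the "org.apache" prefix to some broker which will
# deliver at least one message. Mirrors the flow()/drain() pattern used by the
# DrainReceiver test in the row above.
from proton.handlers import MessagingHandler
from proton.reactor import Container

ROUTER_URL = "amqp://127.0.0.1:5672"   # hypothetical listener address


class FlowDrainSketch(MessagingHandler):
    def __init__(self, url, address="org.apache", credit=1000):
        # prefetch=0 so credit is granted manually via flow()/drain()
        super(FlowDrainSketch, self).__init__(prefetch=0)
        self.url = url
        self.address = address
        self.credit = credit
        self.received = 0

    def on_start(self, event):
        self.conn = event.container.connect(self.url)
        self.receiver = event.container.create_receiver(self.conn, self.address)
        # Step 1: grant credit; the router forwards this toward the broker end
        self.receiver.flow(self.credit)

    def on_message(self, event):
        self.received += 1
        # Step 2: after the first delivery, drain the remaining credit.
        # Per DISPATCH-1496 the router should forward the drain without
        # inflating the credit it passes along.
        self.receiver.drain(0)

    def on_link_flow(self, event):
        # Step 3: when the drain completes, local credit drops to zero;
        # close the connection at that point.
        if (event.receiver == self.receiver
                and self.received > 0
                and event.receiver.credit == 0):
            self.conn.close()


Container(FlowDrainSketch(ROUTER_URL)).run()
```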
mod/github.com/hashicorp/[email protected]/builtin/logical/transit/backend_test.go
package transit import ( "context" "encoding/base64" "fmt" "math/rand" "os" "path" "reflect" "strconv" "strings" "sync" "testing" "time" uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/keysutil" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" logicaltest "github.com/hashicorp/vault/logical/testing" "github.com/mitchellh/mapstructure" ) const ( testPlaintext = "the quick brown fox" ) func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} b := Backend(config) if b == nil { t.Fatalf("failed to create backend") } err := b.Backend.Setup(context.Background(), config) if err != nil { t.Fatal(err) } return b, config.StorageView } func createBackendWithSysView(t *testing.T) (*backend, logical.Storage) { sysView := logical.TestSystemView() storage := &logical.InmemStorage{} conf := &logical.BackendConfig{ StorageView: storage, System: sysView, } b := Backend(conf) if b == nil { t.Fatal("failed to create backend") } err := b.Backend.Setup(context.Background(), conf) if err != nil { t.Fatal(err) } return b, storage } func TestTransit_RSA(t *testing.T) { testTransit_RSA(t, "rsa-2048") testTransit_RSA(t, "rsa-4096") } func testTransit_RSA(t *testing.T, keyType string) { var resp *logical.Response var err error b, storage := createBackendWithStorage(t) keyReq := &logical.Request{ Path: "keys/rsa", Operation: logical.UpdateOperation, Data: map[string]interface{}{ "type": keyType, }, Storage: storage, } resp, err = b.HandleRequest(context.Background(), keyReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v\nresp: %#v", err, resp) } plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA==" // "the quick brown fox" encryptReq := &logical.Request{ Path: "encrypt/rsa", Operation: logical.UpdateOperation, Storage: storage, Data: map[string]interface{}{ "plaintext": plaintext, }, } resp, err = b.HandleRequest(context.Background(), encryptReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v\nresp: %#v", err, resp) } ciphertext1 := resp.Data["ciphertext"].(string) decryptReq := &logical.Request{ Path: "decrypt/rsa", Operation: logical.UpdateOperation, Storage: storage, Data: map[string]interface{}{ "ciphertext": ciphertext1, }, } resp, err = b.HandleRequest(context.Background(), decryptReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v\nresp: %#v", err, resp) } decryptedPlaintext := resp.Data["plaintext"] if plaintext != decryptedPlaintext { t.Fatalf("bad: plaintext; expected: %q\nactual: %q", plaintext, decryptedPlaintext) } // Rotate the key rotateReq := &logical.Request{ Path: "keys/rsa/rotate", Operation: logical.UpdateOperation, Storage: storage, } resp, err = b.HandleRequest(context.Background(), rotateReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v\nresp: %#v", err, resp) } // Encrypt again resp, err = b.HandleRequest(context.Background(), encryptReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v\nresp: %#v", err, resp) } ciphertext2 := resp.Data["ciphertext"].(string) if ciphertext1 == ciphertext2 { t.Fatalf("expected different ciphertexts") } // See if the older ciphertext can still be decrypted resp, err = b.HandleRequest(context.Background(), decryptReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v\nresp: %#v", err, resp) } if resp.Data["plaintext"].(string) != plaintext { t.Fatal("failed to 
decrypt old ciphertext after rotating the key") } // Decrypt the new ciphertext decryptReq.Data = map[string]interface{}{ "ciphertext": ciphertext2, } resp, err = b.HandleRequest(context.Background(), decryptReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v\nresp: %#v", err, resp) } if resp.Data["plaintext"].(string) != plaintext { t.Fatal("failed to decrypt ciphertext after rotating the key") } signReq := &logical.Request{ Path: "sign/rsa", Operation: logical.UpdateOperation, Storage: storage, Data: map[string]interface{}{ "input": plaintext, }, } resp, err = b.HandleRequest(context.Background(), signReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v\nresp: %#v", err, resp) } signature := resp.Data["signature"].(string) verifyReq := &logical.Request{ Path: "verify/rsa", Operation: logical.UpdateOperation, Storage: storage, Data: map[string]interface{}{ "input": plaintext, "signature": signature, }, } resp, err = b.HandleRequest(context.Background(), verifyReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v\nresp: %#v", err, resp) } if !resp.Data["valid"].(bool) { t.Fatalf("failed to verify the RSA signature") } signReq.Data = map[string]interface{}{ "input": plaintext, "hash_algorithm": "invalid", } resp, err = b.HandleRequest(context.Background(), signReq) if err == nil { t.Fatal(err) } signReq.Data = map[string]interface{}{ "input": plaintext, "hash_algorithm": "sha2-512", } resp, err = b.HandleRequest(context.Background(), signReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v\nresp: %#v", err, resp) } signature = resp.Data["signature"].(string) verifyReq.Data = map[string]interface{}{ "input": plaintext, "signature": signature, } resp, err = b.HandleRequest(context.Background(), verifyReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v\nresp: %#v", err, resp) } if resp.Data["valid"].(bool) { t.Fatalf("expected validation to fail") } verifyReq.Data = map[string]interface{}{ "input": plaintext, "signature": signature, "hash_algorithm": "sha2-512", } resp, err = b.HandleRequest(context.Background(), verifyReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v\nresp: %#v", err, resp) } if !resp.Data["valid"].(bool) { t.Fatalf("failed to verify the RSA signature") } } func TestBackend_basic(t *testing.T) { decryptData := make(map[string]interface{}) logicaltest.Test(t, logicaltest.TestCase{ LogicalFactory: Factory, Steps: []logicaltest.TestStep{ testAccStepListPolicy(t, "test", true), testAccStepWritePolicy(t, "test", false), testAccStepListPolicy(t, "test", false), testAccStepReadPolicy(t, "test", false, false), testAccStepEncrypt(t, "test", testPlaintext, decryptData), testAccStepDecrypt(t, "test", testPlaintext, decryptData), testAccStepEncrypt(t, "test", "", decryptData), testAccStepDecrypt(t, "test", "", decryptData), testAccStepDeleteNotDisabledPolicy(t, "test"), testAccStepEnableDeletion(t, "test"), testAccStepDeletePolicy(t, "test"), testAccStepWritePolicy(t, "test", false), testAccStepEnableDeletion(t, "test"), testAccStepDisableDeletion(t, "test"), testAccStepDeleteNotDisabledPolicy(t, "test"), testAccStepEnableDeletion(t, "test"), testAccStepDeletePolicy(t, "test"), testAccStepReadPolicy(t, "test", true, false), }, }) } func TestBackend_upsert(t *testing.T) { decryptData := make(map[string]interface{}) logicaltest.Test(t, logicaltest.TestCase{ LogicalFactory: Factory, Steps: []logicaltest.TestStep{ 
testAccStepReadPolicy(t, "test", true, false), testAccStepListPolicy(t, "test", true), testAccStepEncryptUpsert(t, "test", testPlaintext, decryptData), testAccStepListPolicy(t, "test", false), testAccStepReadPolicy(t, "test", false, false), testAccStepDecrypt(t, "test", testPlaintext, decryptData), }, }) } func TestBackend_datakey(t *testing.T) { dataKeyInfo := make(map[string]interface{}) logicaltest.Test(t, logicaltest.TestCase{ LogicalFactory: Factory, Steps: []logicaltest.TestStep{ testAccStepListPolicy(t, "test", true), testAccStepWritePolicy(t, "test", false), testAccStepListPolicy(t, "test", false), testAccStepReadPolicy(t, "test", false, false), testAccStepWriteDatakey(t, "test", false, 256, dataKeyInfo), testAccStepDecryptDatakey(t, "test", dataKeyInfo), testAccStepWriteDatakey(t, "test", true, 128, dataKeyInfo), }, }) } func TestBackend_rotation(t *testing.T) { defer os.Setenv("TRANSIT_ACC_KEY_TYPE", "") testBackendRotation(t) os.Setenv("TRANSIT_ACC_KEY_TYPE", "CHACHA") testBackendRotation(t) } func testBackendRotation(t *testing.T) { decryptData := make(map[string]interface{}) encryptHistory := make(map[int]map[string]interface{}) logicaltest.Test(t, logicaltest.TestCase{ LogicalFactory: Factory, Steps: []logicaltest.TestStep{ testAccStepListPolicy(t, "test", true), testAccStepWritePolicy(t, "test", false), testAccStepListPolicy(t, "test", false), testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 0, encryptHistory), testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 1, encryptHistory), testAccStepRotate(t, "test"), // now v2 testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 2, encryptHistory), testAccStepRotate(t, "test"), // now v3 testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 3, encryptHistory), testAccStepRotate(t, "test"), // now v4 testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 4, encryptHistory), testAccStepDecrypt(t, "test", testPlaintext, decryptData), testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 99, encryptHistory), testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData), testAccStepLoadVX(t, "test", decryptData, 0, encryptHistory), testAccStepDecrypt(t, "test", testPlaintext, decryptData), testAccStepLoadVX(t, "test", decryptData, 1, encryptHistory), testAccStepDecrypt(t, "test", testPlaintext, decryptData), testAccStepLoadVX(t, "test", decryptData, 2, encryptHistory), testAccStepDecrypt(t, "test", testPlaintext, decryptData), testAccStepLoadVX(t, "test", decryptData, 3, encryptHistory), testAccStepDecrypt(t, "test", testPlaintext, decryptData), testAccStepLoadVX(t, "test", decryptData, 99, encryptHistory), testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData), testAccStepLoadVX(t, "test", decryptData, 4, encryptHistory), testAccStepDecrypt(t, "test", testPlaintext, decryptData), testAccStepDeleteNotDisabledPolicy(t, "test"), testAccStepAdjustPolicyMinDecryption(t, "test", 3), testAccStepAdjustPolicyMinEncryption(t, "test", 4), testAccStepReadPolicyWithVersions(t, "test", false, false, 3, 4), testAccStepLoadVX(t, "test", decryptData, 0, encryptHistory), testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData), testAccStepLoadVX(t, "test", decryptData, 1, encryptHistory), testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData), testAccStepLoadVX(t, "test", decryptData, 2, encryptHistory), testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData), testAccStepLoadVX(t, "test", decryptData, 3, encryptHistory), 
testAccStepDecrypt(t, "test", testPlaintext, decryptData), testAccStepLoadVX(t, "test", decryptData, 4, encryptHistory), testAccStepDecrypt(t, "test", testPlaintext, decryptData), testAccStepAdjustPolicyMinDecryption(t, "test", 1), testAccStepReadPolicyWithVersions(t, "test", false, false, 1, 4), testAccStepLoadVX(t, "test", decryptData, 0, encryptHistory), testAccStepDecrypt(t, "test", testPlaintext, decryptData), testAccStepLoadVX(t, "test", decryptData, 1, encryptHistory), testAccStepDecrypt(t, "test", testPlaintext, decryptData), testAccStepLoadVX(t, "test", decryptData, 2, encryptHistory), testAccStepDecrypt(t, "test", testPlaintext, decryptData), testAccStepRewrap(t, "test", decryptData, 4), testAccStepDecrypt(t, "test", testPlaintext, decryptData), testAccStepEnableDeletion(t, "test"), testAccStepDeletePolicy(t, "test"), testAccStepReadPolicy(t, "test", true, false), testAccStepListPolicy(t, "test", true), }, }) } func TestBackend_basic_derived(t *testing.T) { decryptData := make(map[string]interface{}) logicaltest.Test(t, logicaltest.TestCase{ LogicalFactory: Factory, Steps: []logicaltest.TestStep{ testAccStepListPolicy(t, "test", true), testAccStepWritePolicy(t, "test", true), testAccStepListPolicy(t, "test", false), testAccStepReadPolicy(t, "test", false, true), testAccStepEncryptContext(t, "test", testPlaintext, "my-cool-context", decryptData), testAccStepDecrypt(t, "test", testPlaintext, decryptData), testAccStepEnableDeletion(t, "test"), testAccStepDeletePolicy(t, "test"), testAccStepReadPolicy(t, "test", true, true), }, }) } func testAccStepWritePolicy(t *testing.T, name string, derived bool) logicaltest.TestStep { ts := logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "keys/" + name, Data: map[string]interface{}{ "derived": derived, }, } if os.Getenv("TRANSIT_ACC_KEY_TYPE") == "CHACHA" { ts.Data["type"] = "chacha20-poly1305" } return ts } func testAccStepListPolicy(t *testing.T, name string, expectNone bool) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.ListOperation, Path: "keys", Check: func(resp *logical.Response) error { if resp == nil { return fmt.Errorf("missing response") } if expectNone { keysRaw, ok := resp.Data["keys"] if ok || keysRaw != nil { return fmt.Errorf("response data when expecting none") } return nil } if len(resp.Data) == 0 { return fmt.Errorf("no data returned") } var d struct { Keys []string `mapstructure:"keys"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err } if len(d.Keys) > 0 && d.Keys[0] != name { return fmt.Errorf("bad name: %#v", d) } if len(d.Keys) != 1 { return fmt.Errorf("only 1 key expected, %d returned", len(d.Keys)) } return nil }, } } func testAccStepAdjustPolicyMinDecryption(t *testing.T, name string, minVer int) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "keys/" + name + "/config", Data: map[string]interface{}{ "min_decryption_version": minVer, }, } } func testAccStepAdjustPolicyMinEncryption(t *testing.T, name string, minVer int) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "keys/" + name + "/config", Data: map[string]interface{}{ "min_encryption_version": minVer, }, } } func testAccStepDisableDeletion(t *testing.T, name string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "keys/" + name + "/config", Data: map[string]interface{}{ "deletion_allowed": false, }, } } func testAccStepEnableDeletion(t *testing.T, name string) 
logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "keys/" + name + "/config", Data: map[string]interface{}{ "deletion_allowed": true, }, } } func testAccStepDeletePolicy(t *testing.T, name string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.DeleteOperation, Path: "keys/" + name, } } func testAccStepDeleteNotDisabledPolicy(t *testing.T, name string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.DeleteOperation, Path: "keys/" + name, ErrorOk: true, Check: func(resp *logical.Response) error { if resp == nil { return fmt.Errorf("got nil response instead of error") } if resp.IsError() { return nil } return fmt.Errorf("expected error but did not get one") }, } } func testAccStepReadPolicy(t *testing.T, name string, expectNone, derived bool) logicaltest.TestStep { return testAccStepReadPolicyWithVersions(t, name, expectNone, derived, 1, 0) } func testAccStepReadPolicyWithVersions(t *testing.T, name string, expectNone, derived bool, minDecryptionVersion int, minEncryptionVersion int) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.ReadOperation, Path: "keys/" + name, Check: func(resp *logical.Response) error { if resp == nil && !expectNone { return fmt.Errorf("missing response") } else if expectNone { if resp != nil { return fmt.Errorf("response when expecting none") } return nil } var d struct { Name string `mapstructure:"name"` Key []byte `mapstructure:"key"` Keys map[string]int64 `mapstructure:"keys"` Type string `mapstructure:"type"` Derived bool `mapstructure:"derived"` KDF string `mapstructure:"kdf"` DeletionAllowed bool `mapstructure:"deletion_allowed"` ConvergentEncryption bool `mapstructure:"convergent_encryption"` MinDecryptionVersion int `mapstructure:"min_decryption_version"` MinEncryptionVersion int `mapstructure:"min_encryption_version"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err } if d.Name != name { return fmt.Errorf("bad name: %#v", d) } if os.Getenv("TRANSIT_ACC_KEY_TYPE") == "CHACHA" { if d.Type != keysutil.KeyType(keysutil.KeyType_ChaCha20_Poly1305).String() { return fmt.Errorf("bad key type: %#v", d) } } else if d.Type != keysutil.KeyType(keysutil.KeyType_AES256_GCM96).String() { return fmt.Errorf("bad key type: %#v", d) } // Should NOT get a key back if d.Key != nil { return fmt.Errorf("bad: %#v", d) } if d.Keys == nil { return fmt.Errorf("bad: %#v", d) } if d.MinDecryptionVersion != minDecryptionVersion { return fmt.Errorf("bad: %#v", d) } if d.MinEncryptionVersion != minEncryptionVersion { return fmt.Errorf("bad: %#v", d) } if d.DeletionAllowed == true { return fmt.Errorf("bad: %#v", d) } if d.Derived != derived { return fmt.Errorf("bad: %#v", d) } if derived && d.KDF != "hkdf_sha256" { return fmt.Errorf("bad: %#v", d) } return nil }, } } func testAccStepEncrypt( t *testing.T, name, plaintext string, decryptData map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "encrypt/" + name, Data: map[string]interface{}{ "plaintext": base64.StdEncoding.EncodeToString([]byte(plaintext)), }, Check: func(resp *logical.Response) error { var d struct { Ciphertext string `mapstructure:"ciphertext"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err } if d.Ciphertext == "" { return fmt.Errorf("missing ciphertext") } decryptData["ciphertext"] = d.Ciphertext return nil }, } } func testAccStepEncryptUpsert( t *testing.T, name, plaintext string, decryptData 
map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.CreateOperation, Path: "encrypt/" + name, Data: map[string]interface{}{ "plaintext": base64.StdEncoding.EncodeToString([]byte(plaintext)), }, Check: func(resp *logical.Response) error { var d struct { Ciphertext string `mapstructure:"ciphertext"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err } if d.Ciphertext == "" { return fmt.Errorf("missing ciphertext") } decryptData["ciphertext"] = d.Ciphertext return nil }, } } func testAccStepEncryptContext( t *testing.T, name, plaintext, context string, decryptData map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "encrypt/" + name, Data: map[string]interface{}{ "plaintext": base64.StdEncoding.EncodeToString([]byte(plaintext)), "context": base64.StdEncoding.EncodeToString([]byte(context)), }, Check: func(resp *logical.Response) error { var d struct { Ciphertext string `mapstructure:"ciphertext"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err } if d.Ciphertext == "" { return fmt.Errorf("missing ciphertext") } decryptData["ciphertext"] = d.Ciphertext decryptData["context"] = base64.StdEncoding.EncodeToString([]byte(context)) return nil }, } } func testAccStepDecrypt( t *testing.T, name, plaintext string, decryptData map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "decrypt/" + name, Data: decryptData, Check: func(resp *logical.Response) error { var d struct { Plaintext string `mapstructure:"plaintext"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err } // Decode the base64 plainRaw, err := base64.StdEncoding.DecodeString(d.Plaintext) if err != nil { return err } if string(plainRaw) != plaintext { return fmt.Errorf("plaintext mismatch: %s expect: %s, decryptData was %#v", plainRaw, plaintext, decryptData) } return nil }, } } func testAccStepRewrap( t *testing.T, name string, decryptData map[string]interface{}, expectedVer int) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "rewrap/" + name, Data: decryptData, Check: func(resp *logical.Response) error { var d struct { Ciphertext string `mapstructure:"ciphertext"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err } if d.Ciphertext == "" { return fmt.Errorf("missing ciphertext") } splitStrings := strings.Split(d.Ciphertext, ":") verString := splitStrings[1][1:] ver, err := strconv.Atoi(verString) if err != nil { return fmt.Errorf("error pulling out version from verString '%s', ciphertext was %s", verString, d.Ciphertext) } if ver != expectedVer { return fmt.Errorf("did not get expected version") } decryptData["ciphertext"] = d.Ciphertext return nil }, } } func testAccStepEncryptVX( t *testing.T, name, plaintext string, decryptData map[string]interface{}, ver int, encryptHistory map[int]map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "encrypt/" + name, Data: map[string]interface{}{ "plaintext": base64.StdEncoding.EncodeToString([]byte(plaintext)), }, Check: func(resp *logical.Response) error { var d struct { Ciphertext string `mapstructure:"ciphertext"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err } if d.Ciphertext == "" { return fmt.Errorf("missing ciphertext") } splitStrings := strings.Split(d.Ciphertext, ":") splitStrings[1] = "v" + 
strconv.Itoa(ver) ciphertext := strings.Join(splitStrings, ":") decryptData["ciphertext"] = ciphertext encryptHistory[ver] = map[string]interface{}{ "ciphertext": ciphertext, } return nil }, } } func testAccStepLoadVX( t *testing.T, name string, decryptData map[string]interface{}, ver int, encryptHistory map[int]map[string]interface{}) logicaltest.TestStep { // This is really a no-op to allow us to do data manip in the check function return logicaltest.TestStep{ Operation: logical.ReadOperation, Path: "keys/" + name, Check: func(resp *logical.Response) error { decryptData["ciphertext"] = encryptHistory[ver]["ciphertext"].(string) return nil }, } } func testAccStepDecryptExpectFailure( t *testing.T, name, plaintext string, decryptData map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "decrypt/" + name, Data: decryptData, ErrorOk: true, Check: func(resp *logical.Response) error { if !resp.IsError() { return fmt.Errorf("expected error") } return nil }, } } func testAccStepRotate(t *testing.T, name string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "keys/" + name + "/rotate", } } func testAccStepWriteDatakey(t *testing.T, name string, noPlaintext bool, bits int, dataKeyInfo map[string]interface{}) logicaltest.TestStep { data := map[string]interface{}{} subPath := "plaintext" if noPlaintext { subPath = "wrapped" } if bits != 256 { data["bits"] = bits } return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "datakey/" + subPath + "/" + name, Data: data, Check: func(resp *logical.Response) error { var d struct { Plaintext string `mapstructure:"plaintext"` Ciphertext string `mapstructure:"ciphertext"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err } if noPlaintext && len(d.Plaintext) != 0 { return fmt.Errorf("received plaintxt when we disabled it") } if !noPlaintext { if len(d.Plaintext) == 0 { return fmt.Errorf("did not get plaintext when we expected it") } dataKeyInfo["plaintext"] = d.Plaintext plainBytes, err := base64.StdEncoding.DecodeString(d.Plaintext) if err != nil { return fmt.Errorf("could not base64 decode plaintext string '%s'", d.Plaintext) } if len(plainBytes)*8 != bits { return fmt.Errorf("returned key does not have correct bit length") } } dataKeyInfo["ciphertext"] = d.Ciphertext return nil }, } } func testAccStepDecryptDatakey(t *testing.T, name string, dataKeyInfo map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "decrypt/" + name, Data: dataKeyInfo, Check: func(resp *logical.Response) error { var d struct { Plaintext string `mapstructure:"plaintext"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err } if d.Plaintext != dataKeyInfo["plaintext"].(string) { return fmt.Errorf("plaintext mismatch: got '%s', expected '%s', decryptData was %#v", d.Plaintext, dataKeyInfo["plaintext"].(string), resp.Data) } return nil }, } } func TestKeyUpgrade(t *testing.T) { key, _ := uuid.GenerateRandomBytes(32) p := &keysutil.Policy{ Name: "test", Key: key, Type: keysutil.KeyType_AES256_GCM96, } p.MigrateKeyToKeysMap() if p.Key != nil || p.Keys == nil || len(p.Keys) != 1 || !reflect.DeepEqual(p.Keys[strconv.Itoa(1)].Key, key) { t.Errorf("bad key migration, result is %#v", p.Keys) } } func TestDerivedKeyUpgrade(t *testing.T) { testDerivedKeyUpgrade(t, keysutil.KeyType_AES256_GCM96) testDerivedKeyUpgrade(t, keysutil.KeyType_ChaCha20_Poly1305) } func 
testDerivedKeyUpgrade(t *testing.T, keyType keysutil.KeyType) { storage := &logical.InmemStorage{} key, _ := uuid.GenerateRandomBytes(32) keyContext, _ := uuid.GenerateRandomBytes(32) p := &keysutil.Policy{ Name: "test", Key: key, Type: keyType, Derived: true, } p.MigrateKeyToKeysMap() p.Upgrade(context.Background(), storage) // Need to run the upgrade code to make the migration stick if p.KDF != keysutil.Kdf_hmac_sha256_counter { t.Fatalf("bad KDF value by default; counter val is %d, KDF val is %d, policy is %#v", keysutil.Kdf_hmac_sha256_counter, p.KDF, *p) } derBytesOld, err := p.DeriveKey(keyContext, 1, 0) if err != nil { t.Fatal(err) } derBytesOld2, err := p.DeriveKey(keyContext, 1, 0) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(derBytesOld, derBytesOld2) { t.Fatal("mismatch of same context alg") } p.KDF = keysutil.Kdf_hkdf_sha256 if p.NeedsUpgrade() { t.Fatal("expected no upgrade needed") } derBytesNew, err := p.DeriveKey(keyContext, 1, 64) if err != nil { t.Fatal(err) } derBytesNew2, err := p.DeriveKey(keyContext, 1, 64) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(derBytesNew, derBytesNew2) { t.Fatal("mismatch of same context alg") } if reflect.DeepEqual(derBytesOld, derBytesNew) { t.Fatal("match of different context alg") } } func TestConvergentEncryption(t *testing.T) { testConvergentEncryptionCommon(t, 0, keysutil.KeyType_AES256_GCM96) testConvergentEncryptionCommon(t, 2, keysutil.KeyType_AES256_GCM96) testConvergentEncryptionCommon(t, 2, keysutil.KeyType_ChaCha20_Poly1305) testConvergentEncryptionCommon(t, 3, keysutil.KeyType_AES256_GCM96) testConvergentEncryptionCommon(t, 3, keysutil.KeyType_ChaCha20_Poly1305) } func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyType) { b, storage := createBackendWithSysView(t) req := &logical.Request{ Storage: storage, Operation: logical.UpdateOperation, Path: "keys/testkeynonderived", Data: map[string]interface{}{ "derived": false, "convergent_encryption": true, }, } resp, err := b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) } if resp == nil { t.Fatal("expected non-nil response") } if !resp.IsError() { t.Fatalf("bad: expected error response, got %#v", *resp) } req = &logical.Request{ Storage: storage, Operation: logical.UpdateOperation, Path: "keys/testkey", Data: map[string]interface{}{ "derived": true, "convergent_encryption": true, }, } resp, err = b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) } if resp != nil { t.Fatal("expected nil response") } p, err := keysutil.LoadPolicy(context.Background(), storage, path.Join("policy", "testkey")) if err != nil { t.Fatal(err) } if p == nil { t.Fatal("got nil policy") } if ver > 2 { p.ConvergentVersion = -1 } else { p.ConvergentVersion = ver } err = p.Persist(context.Background(), storage) if err != nil { t.Fatal(err) } b.invalidate(context.Background(), "policy/testkey") if ver < 3 { // There will be an embedded key version of 3, so specifically clear it key := p.Keys[strconv.Itoa(p.LatestVersion)] key.ConvergentVersion = 0 p.Keys[strconv.Itoa(p.LatestVersion)] = key err = p.Persist(context.Background(), storage) if err != nil { t.Fatal(err) } b.invalidate(context.Background(), "policy/testkey") // Verify it p, err = keysutil.LoadPolicy(context.Background(), storage, path.Join(p.StoragePrefix, "policy", "testkey")) if err != nil { t.Fatal(err) } if p == nil { t.Fatal("got nil policy") } if p.ConvergentVersion != ver { t.Fatalf("bad convergent version %d", p.ConvergentVersion) } key = 
p.Keys[strconv.Itoa(p.LatestVersion)] if key.ConvergentVersion != 0 { t.Fatalf("bad convergent key version %d", key.ConvergentVersion) } } // First, test using an invalid length of nonce -- this is only used for v1 convergent req.Path = "encrypt/testkey" if ver < 2 { req.Data = map[string]interface{}{ "plaintext": "emlwIHphcA==", // "zip zap" "nonce": "Zm9vIGJhcg==", // "foo bar" "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } resp, err = b.HandleRequest(context.Background(), req) if err == nil { t.Fatalf("expected error, got nil, version is %d", ver) } if resp == nil { t.Fatal("expected non-nil response") } if !resp.IsError() { t.Fatalf("expected error response, got %#v", *resp) } // Ensure we fail if we do not provide a nonce req.Data = map[string]interface{}{ "plaintext": "emlwIHphcA==", // "zip zap" "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } resp, err = b.HandleRequest(context.Background(), req) if err == nil && (resp == nil || !resp.IsError()) { t.Fatal("expected error response") } } // Now test encrypting the same value twice req.Data = map[string]interface{}{ "plaintext": "emlwIHphcA==", // "zip zap" "nonce": "b25ldHdvdGhyZWVl", // "onetwothreee" "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } resp, err = b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) } if resp == nil { t.Fatal("expected non-nil response") } if resp.IsError() { t.Fatalf("got error response: %#v", *resp) } ciphertext1 := resp.Data["ciphertext"].(string) resp, err = b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) } if resp == nil { t.Fatal("expected non-nil response") } if resp.IsError() { t.Fatalf("got error response: %#v", *resp) } ciphertext2 := resp.Data["ciphertext"].(string) if ciphertext1 != ciphertext2 { t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext1, ciphertext2) } // For sanity, also check a different nonce value... 
req.Data = map[string]interface{}{ "plaintext": "emlwIHphcA==", // "zip zap" "nonce": "dHdvdGhyZWVmb3Vy", // "twothreefour" "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } if ver < 2 { req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour" } else { req.Data["context"] = "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOldandSdd7S" } resp, err = b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) } if resp == nil { t.Fatal("expected non-nil response") } if resp.IsError() { t.Fatalf("got error response: %#v", *resp) } ciphertext3 := resp.Data["ciphertext"].(string) resp, err = b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) } if resp == nil { t.Fatal("expected non-nil response") } if resp.IsError() { t.Fatalf("got error response: %#v", *resp) } ciphertext4 := resp.Data["ciphertext"].(string) if ciphertext3 != ciphertext4 { t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext3, ciphertext4) } if ciphertext1 == ciphertext3 { t.Fatalf("expected different ciphertexts") } // ...and a different context value req.Data = map[string]interface{}{ "plaintext": "emlwIHphcA==", // "zip zap" "nonce": "dHdvdGhyZWVmb3Vy", // "twothreefour" "context": "qV4h9iQyvn+raODOer4JNAsOhkXBwdT4HZ677Ql4KLqXSU+Jk4C/fXBWbv6xkSYT", } resp, err = b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) } if resp == nil { t.Fatal("expected non-nil response") } if resp.IsError() { t.Fatalf("got error response: %#v", *resp) } ciphertext5 := resp.Data["ciphertext"].(string) resp, err = b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) } if resp == nil { t.Fatal("expected non-nil response") } if resp.IsError() { t.Fatalf("got error response: %#v", *resp) } ciphertext6 := resp.Data["ciphertext"].(string) if ciphertext5 != ciphertext6 { t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext5, ciphertext6) } if ciphertext1 == ciphertext5 { t.Fatalf("expected different ciphertexts") } if ciphertext3 == ciphertext5 { t.Fatalf("expected different ciphertexts") } // If running version 2, check upgrade handling if ver == 2 { curr, err := keysutil.LoadPolicy(context.Background(), storage, path.Join(p.StoragePrefix, "policy", "testkey")) if err != nil { t.Fatal(err) } if curr == nil { t.Fatal("got nil policy") } if curr.ConvergentVersion != 2 { t.Fatalf("bad convergent version %d", curr.ConvergentVersion) } key := curr.Keys[strconv.Itoa(curr.LatestVersion)] if key.ConvergentVersion != 0 { t.Fatalf("bad convergent key version %d", key.ConvergentVersion) } curr.ConvergentVersion = 3 err = curr.Persist(context.Background(), storage) if err != nil { t.Fatal(err) } b.invalidate(context.Background(), "policy/testkey") // Different algorithm, should be different value resp, err = b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) } if resp == nil { t.Fatal("expected non-nil response") } if resp.IsError() { t.Fatalf("got error response: %#v", *resp) } ciphertext7 := resp.Data["ciphertext"].(string) // Now do it via key-specified version if len(curr.Keys) != 1 { t.Fatalf("unexpected length of keys %d", len(curr.Keys)) } key = curr.Keys[strconv.Itoa(curr.LatestVersion)] key.ConvergentVersion = 3 curr.Keys[strconv.Itoa(curr.LatestVersion)] = key curr.ConvergentVersion = 2 err = curr.Persist(context.Background(), storage) if err != nil { t.Fatal(err) } b.invalidate(context.Background(), "policy/testkey") resp, err = b.HandleRequest(context.Background(), req) if err != nil { 
t.Fatal(err) } if resp == nil { t.Fatal("expected non-nil response") } if resp.IsError() { t.Fatalf("got error response: %#v", *resp) } ciphertext8 := resp.Data["ciphertext"].(string) if ciphertext7 != ciphertext8 { t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext7, ciphertext8) } if ciphertext6 == ciphertext7 { t.Fatalf("expected different ciphertexts") } if ciphertext3 == ciphertext7 { t.Fatalf("expected different ciphertexts") } } // Finally, check operations on empty values // First, check without setting a plaintext at all req.Data = map[string]interface{}{ "nonce": "b25ldHdvdGhyZWVl", // "onetwothreee" "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } resp, err = b.HandleRequest(context.Background(), req) if err == nil { t.Fatal("expected error, got nil") } if resp == nil { t.Fatal("expected non-nil response") } if !resp.IsError() { t.Fatalf("expected error response, got: %#v", *resp) } // Now set plaintext to empty req.Data = map[string]interface{}{ "plaintext": "", "nonce": "b25ldHdvdGhyZWVl", // "onetwothreee" "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } resp, err = b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) } if resp == nil { t.Fatal("expected non-nil response") } if resp.IsError() { t.Fatalf("got error response: %#v", *resp) } ciphertext7 := resp.Data["ciphertext"].(string) resp, err = b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) } if resp == nil { t.Fatal("expected non-nil response") } if resp.IsError() { t.Fatalf("got error response: %#v", *resp) } ciphertext8 := resp.Data["ciphertext"].(string) if ciphertext7 != ciphertext8 { t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext7, ciphertext8) } } func TestPolicyFuzzing(t *testing.T) { var be *backend sysView := logical.TestSystemView() conf := &logical.BackendConfig{ System: sysView, } be = Backend(conf) be.Setup(context.Background(), conf) testPolicyFuzzingCommon(t, be) sysView.CachingDisabledVal = true be = Backend(conf) be.Setup(context.Background(), conf) testPolicyFuzzingCommon(t, be) } func testPolicyFuzzingCommon(t *testing.T, be *backend) { storage := &logical.InmemStorage{} wg := sync.WaitGroup{} funcs := []string{"encrypt", "decrypt", "rotate", "change_min_version"} //keys := []string{"test1", "test2", "test3", "test4", "test5"} keys := []string{"test1", "test2", "test3"} // This is the goroutine loop doFuzzy := func(id int) { // Check for panics, otherwise notify we're done defer func() { wg.Done() }() // Holds the latest encrypted value for each key latestEncryptedText := map[string]string{} startTime := time.Now() req := &logical.Request{ Storage: storage, Data: map[string]interface{}{}, } fd := &framework.FieldData{} var chosenFunc, chosenKey string //t.Errorf("Starting %d", id) for { // Stop after 10 seconds if time.Now().Sub(startTime) > 10*time.Second { return } // Pick a function and a key chosenFunc = funcs[rand.Int()%len(funcs)] chosenKey = keys[rand.Int()%len(keys)] fd.Raw = map[string]interface{}{ "name": chosenKey, } fd.Schema = be.pathKeys().Fields // Try to write the key to make sure it exists _, err := be.pathPolicyWrite(context.Background(), req, fd) if err != nil { t.Errorf("got an error: %v", err) } switch chosenFunc { // Encrypt our plaintext and store the result case "encrypt": //t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id) fd.Raw["plaintext"] = base64.StdEncoding.EncodeToString([]byte(testPlaintext)) fd.Schema = 
be.pathEncrypt().Fields resp, err := be.pathEncryptWrite(context.Background(), req, fd) if err != nil { t.Errorf("got an error: %v, resp is %#v", err, *resp) } latestEncryptedText[chosenKey] = resp.Data["ciphertext"].(string) // Rotate to a new key version case "rotate": //t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id) fd.Schema = be.pathRotate().Fields resp, err := be.pathRotateWrite(context.Background(), req, fd) if err != nil { t.Errorf("got an error: %v, resp is %#v, chosenKey is %s", err, *resp, chosenKey) } // Decrypt the ciphertext and compare the result case "decrypt": //t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id) ct := latestEncryptedText[chosenKey] if ct == "" { continue } fd.Raw["ciphertext"] = ct fd.Schema = be.pathDecrypt().Fields resp, err := be.pathDecryptWrite(context.Background(), req, fd) if err != nil { // This could well happen since the min version is jumping around if resp.Data["error"].(string) == keysutil.ErrTooOld { continue } t.Errorf("got an error: %v, resp is %#v, ciphertext was %s, chosenKey is %s, id is %d", err, *resp, ct, chosenKey, id) } ptb64, ok := resp.Data["plaintext"].(string) if !ok { t.Errorf("no plaintext found, response was %#v", *resp) return } pt, err := base64.StdEncoding.DecodeString(ptb64) if err != nil { t.Errorf("got an error decoding base64 plaintext: %v", err) return } if string(pt) != testPlaintext { t.Errorf("got bad plaintext back: %s", pt) } // Change the min version, which also tests the archive functionality case "change_min_version": //t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id) resp, err := be.pathPolicyRead(context.Background(), req, fd) if err != nil { t.Errorf("got an error reading policy %s: %v", chosenKey, err) } latestVersion := resp.Data["latest_version"].(int) // keys start at version 1 so we want [1, latestVersion] not [0, latestVersion) setVersion := (rand.Int() % latestVersion) + 1 fd.Raw["min_decryption_version"] = setVersion fd.Schema = be.pathConfig().Fields resp, err = be.pathConfigWrite(context.Background(), req, fd) if err != nil { t.Errorf("got an error setting min decryption version: %v", err) } } } } // Spawn 1000 of these workers for 10 seconds for i := 0; i < 1000; i++ { wg.Add(1) go doFuzzy(i) } // Wait for them all to finish wg.Wait() } func TestBadInput(t *testing.T) { b, storage := createBackendWithSysView(t) req := &logical.Request{ Storage: storage, Operation: logical.UpdateOperation, Path: "keys/test", } resp, err := b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) } if resp != nil { t.Fatal("expected nil response") } req.Path = "decrypt/test" req.Data = map[string]interface{}{ "ciphertext": "vault:v1:abcd", } _, err = b.HandleRequest(context.Background(), req) if err == nil { t.Fatal("expected error") } }
[ "\"TRANSIT_ACC_KEY_TYPE\"", "\"TRANSIT_ACC_KEY_TYPE\"" ]
[]
[ "TRANSIT_ACC_KEY_TYPE" ]
[]
["TRANSIT_ACC_KEY_TYPE"]
go
1
0
kivy/core/text/__init__.py
''' Text ==== An abstraction of text creation. Depending of the selected backend, the accuracy of text rendering may vary. .. versionchanged:: 1.5.0 :attr:`LabelBase.line_height` added. .. versionchanged:: 1.0.7 The :class:`LabelBase` does not generate any texture if the text has a width <= 1. This is the backend layer for getting text out of different text providers, you should only be using this directly if your needs aren't fulfilled by the :class:`~kivy.uix.label.Label`. Usage example:: from kivy.core.text import Label as CoreLabel ... ... my_label = CoreLabel() my_label.text = 'hello' # the label is usually not drawn until needed, so force it to draw. my_label.refresh() # Now access the texture of the label and use it wherever and # however you may please. hello_texture = my_label.texture ''' __all__ = ('LabelBase', 'Label') import re import os from functools import partial from copy import copy from kivy import kivy_data_dir from kivy.utils import platform from kivy.graphics.texture import Texture from kivy.core import core_select_lib from kivy.core.text.text_layout import layout_text, LayoutWord from kivy.resources import resource_find, resource_add_path from kivy.compat import PY2 from kivy.setupconfig import USE_SDL2 DEFAULT_FONT = 'Roboto' FONT_REGULAR = 0 FONT_ITALIC = 1 FONT_BOLD = 2 FONT_BOLDITALIC = 3 whitespace_pat = re.compile('( +)') class LabelBase(object): '''Core text label. This is the abstract class used by different backends to render text. .. warning:: The core text label can't be changed at runtime. You must recreate one. :Parameters: `font_size`: int, defaults to 12 Font size of the text `font_name`: str, defaults to DEFAULT_FONT Font name of the text `bold`: bool, defaults to False Activate "bold" text style `italic`: bool, defaults to False Activate "italic" text style `text_size`: tuple, defaults to (None, None) Add constraint to render the text (inside a bounding box). If no size is given, the label size will be set to the text size. `padding`: float, defaults to None If it's a float, it will set padding_x and padding_y `padding_x`: float, defaults to 0.0 Left/right padding `padding_y`: float, defaults to 0.0 Top/bottom padding `halign`: str, defaults to "left" Horizontal text alignment inside the bounding box `valign`: str, defaults to "bottom" Vertical text alignment inside the bounding box `shorten`: bool, defaults to False Indicate whether the label should attempt to shorten its textual contents as much as possible if a `size` is given. Setting this to True without an appropriately set size will lead to unexpected results. `shorten_from`: str, defaults to `center` The side from which we should shorten the text from, can be left, right, or center. E.g. if left, the ellipsis will appear towards the left side and it will display as much text starting from the right as possible. `split_str`: string, defaults to `' '` (space) The string to use to split the words by when shortening. If empty, we can split after every character filling up the line as much as possible. `max_lines`: int, defaults to 0 (unlimited) If set, this indicate how maximum line are allowed to render the text. Works only if a limitation on text_size is set. `mipmap` : bool, defaults to False Create a mipmap for the texture `strip` : bool, defaults to False Whether each row of text has its leading and trailing spaces stripped. If `halign` is `justify` it is implicitly True. 
`strip_reflow` : bool, defaults to True Whether text that has been reflowed into a second line should be striped, even if `strip` is False. This is only in effect when `size_hint_x` is not None, because otherwise lines are never split. `unicode_errors` : str, defaults to `'replace'` How to handle unicode decode errors. Can be `'strict'`, `'replace'` or `'ignore'`. .. versionchanged:: 1.9.0 `strip`, `strip_reflow`, `shorten_from`, `split_str`, and `unicode_errors` were added. .. versionchanged:: 1.9.0 `padding_x` and `padding_y` has been fixed to work as expected. In the past, the text was padded by the negative of their values. .. versionchanged:: 1.8.0 `max_lines` parameters has been added. .. versionchanged:: 1.0.8 `size` have been deprecated and replaced with `text_size`. .. versionchanged:: 1.0.7 The `valign` is now respected. This wasn't the case previously so you might have an issue in your application if you have not considered this. ''' __slots__ = ('options', 'texture', '_label', '_text_size') _cached_lines = [] _fonts = {} _fonts_cache = {} _fonts_dirs = [] _font_dirs_files = [] _texture_1px = None def __init__( self, text='', font_size=12, font_name=DEFAULT_FONT, bold=False, italic=False, underline=False, strikethrough=False, halign='left', valign='bottom', shorten=False, text_size=None, mipmap=False, color=None, line_height=1.0, strip=False, strip_reflow=True, shorten_from='center', split_str=' ', unicode_errors='replace', font_hinting='normal', font_kerning=True, font_blended=True, **kwargs): # Include system fonts_dir in resource paths. # This allows us to specify a font from those dirs. LabelBase.get_system_fonts_dir() options = {'text': text, 'font_size': font_size, 'font_name': font_name, 'bold': bold, 'italic': italic, 'underline': underline, 'strikethrough': strikethrough, 'halign': halign, 'valign': valign, 'shorten': shorten, 'mipmap': mipmap, 'line_height': line_height, 'strip': strip, 'strip_reflow': strip_reflow, 'shorten_from': shorten_from, 'split_str': split_str, 'unicode_errors': unicode_errors, 'font_hinting': font_hinting, 'font_kerning': font_kerning, 'font_blended': font_blended} options['color'] = color or (1, 1, 1, 1) options['padding'] = kwargs.get('padding', (0, 0)) if not isinstance(options['padding'], (list, tuple)): options['padding'] = (options['padding'], options['padding']) options['padding_x'] = kwargs.get('padding_x', options['padding'][0]) options['padding_y'] = kwargs.get('padding_y', options['padding'][1]) if 'size' in kwargs: options['text_size'] = kwargs['size'] else: if text_size is None: options['text_size'] = (None, None) else: options['text_size'] = text_size self._text_size = options['text_size'] self._text = options['text'] self._internal_size = 0, 0 # the real computed text size (inclds pad) self._cached_lines = [] self.options = options self.texture = None self.resolve_font_name() @staticmethod def register(name, fn_regular, fn_italic=None, fn_bold=None, fn_bolditalic=None): '''Register an alias for a Font. .. versionadded:: 1.1.0 If you're using a ttf directly, you might not be able to use the bold/italic properties of the ttf version. If the font is delivered in multiple files (one regular, one italic and one bold), then you need to register these files and use the alias instead. All the fn_regular/fn_italic/fn_bold parameters are resolved with :func:`kivy.resources.resource_find`. If fn_italic/fn_bold are None, fn_regular will be used instead. 
''' fonts = [] for font_type in fn_regular, fn_italic, fn_bold, fn_bolditalic: if font_type is not None: font = resource_find(font_type) if font is None: raise IOError('File {0}s not found'.format(font_type)) else: fonts.append(font) else: fonts.append(fonts[-1]) # add regular font to list again LabelBase._fonts[name] = tuple(fonts) def resolve_font_name(self): options = self.options fontname = options['font_name'] fonts = self._fonts fontscache = self._fonts_cache # is the font registered? if fontname in fonts: # return the preferred font for the current bold/italic combination italic = int(options['italic']) if options['bold']: bold = FONT_BOLD else: bold = FONT_REGULAR options['font_name_r'] = fonts[fontname][italic | bold] elif fontname in fontscache: options['font_name_r'] = fontscache[fontname] else: filename = resource_find(fontname) if not filename and not fontname.endswith('.ttf'): fontname = '{}.ttf'.format(fontname) filename = resource_find(fontname) if filename is None: # XXX for compatibility, check directly in the data dir filename = os.path.join(kivy_data_dir, fontname) if not os.path.exists(filename): raise IOError('Label: File %r not found' % fontname) fontscache[fontname] = filename options['font_name_r'] = filename @staticmethod def get_system_fonts_dir(): '''Return the directories used by the system for fonts. ''' if LabelBase._fonts_dirs: return LabelBase._fonts_dirs fdirs = [] if platform == 'linux': fdirs = [ '/usr/share/fonts', '/usr/local/share/fonts', os.path.expanduser('~/.fonts'), os.path.expanduser('~/.local/share/fonts')] elif platform == 'macosx': fdirs = ['/Library/Fonts', '/System/Library/Fonts', os.path.expanduser('~/Library/Fonts')] elif platform == 'win': fdirs = [os.path.join(os.environ['SYSTEMROOT'], 'Fonts')] elif platform == 'ios': fdirs = ['/System/Library/Fonts'] elif platform == 'android': fdirs = ['/system/fonts'] else: raise Exception("Unknown platform: {}".format(platform)) fdirs.append(os.path.join(kivy_data_dir, 'fonts')) # register the font dirs rdirs = [] _font_dir_files = [] for fdir in fdirs: for _dir, dirs, files in os.walk(fdir): _font_dir_files.extend(files) resource_add_path(_dir) rdirs.append(_dir) LabelBase._fonts_dirs = rdirs LabelBase._font_dirs_files = _font_dir_files return rdirs def get_extents(self, text): '''Return a tuple (width, height) indicating the size of the specified text''' return (0, 0) def get_cached_extents(self): '''Returns a cached version of the :meth:`get_extents` function. :: >>> func = self._get_cached_extents() >>> func <built-in method size of pygame.font.Font object at 0x01E45650> >>> func('a line') (36, 18) .. warning:: This method returns a size measuring function that is valid for the font settings used at the time :meth:`get_cached_extents` was called. Any change in the font settings will render the returned function incorrect. You should only use this if you know what you're doing. .. versionadded:: 1.9.0 ''' return self.get_extents def _render_begin(self): pass def _render_text(self, text, x, y): pass def _render_end(self): pass def shorten(self, text, margin=2): ''' Shortens the text to fit into a single line by the width specified by :attr:`text_size` [0]. If :attr:`text_size` [0] is None, it returns text text unchanged. :attr:`split_str` and :attr:`shorten_from` determines how the text is shortened. :params: `text` str, the text to be shortened. `margin` int, the amount of space to leave between the margins and the text. This is in addition to :attr:`padding_x`. 
:retruns: the text shortened to fit into a single line. ''' textwidth = self.get_cached_extents() uw = self.text_size[0] if uw is None or not text: return text opts = self.options uw = max(0, int(uw - opts['padding_x'] * 2 - margin)) # if larger, it won't fit so don't even try extents chr = type(text) text = text.replace(chr('\n'), chr(' ')) if len(text) <= uw and textwidth(text)[0] <= uw: return text c = opts['split_str'] offset = 0 if len(c) else 1 dir = opts['shorten_from'][0] elps = textwidth('...')[0] if elps > uw: if textwidth('..')[0] <= uw: return '..' else: return '.' uw -= elps f = partial(text.find, c) f_rev = partial(text.rfind, c) # now find the first and last word e1, s2 = f(), f_rev() if dir != 'l': # center or right # no split, or the first word doesn't even fit if e1 != -1: l1 = textwidth(text[:e1])[0] l2 = textwidth(text[s2 + 1:])[0] if e1 == -1 or l1 + l2 > uw: if len(c): opts['split_str'] = '' res = self.shorten(text, margin) opts['split_str'] = c return res # at this point we do char by char so e1 must be zero if l1 <= uw: return chr('{0}...').format(text[:e1]) return chr('...') # both word fits, and there's at least on split_str if s2 == e1: # there's only on split_str return chr('{0}...{1}').format(text[:e1], text[s2 + 1:]) # both the first and last word fits, and they start/end at diff pos if dir == 'r': ee1 = f(e1 + 1) while l2 + textwidth(text[:ee1])[0] <= uw: e1 = ee1 if e1 == s2: break ee1 = f(e1 + 1) else: while True: if l1 <= l2: ee1 = f(e1 + 1) l1 = textwidth(text[:ee1])[0] if l2 + l1 > uw: break e1 = ee1 if e1 == s2: break else: ss2 = f_rev(0, s2 - offset) l2 = textwidth(text[ss2 + 1:])[0] if l2 + l1 > uw: break s2 = ss2 if e1 == s2: break else: # left # no split, or the last word doesn't even fit if s2 != -1: l2 = textwidth(text[s2 + (1 if len(c) else -1):])[0] l1 = textwidth(text[:max(0, e1)])[0] # if split_str if s2 == -1 or l2 + l1 > uw: if len(c): opts['split_str'] = '' res = self.shorten(text, margin) opts['split_str'] = c return res return chr('...') # both word fits, and there's at least on split_str if s2 == e1: # there's only on split_str return chr('{0}...{1}').format(text[:e1], text[s2 + 1:]) # both the first and last word fits, and they start/end at diff pos ss2 = f_rev(0, s2 - offset) while l1 + textwidth(text[ss2 + 1:])[0] <= uw: s2 = ss2 if s2 == e1: break ss2 = f_rev(0, s2 - offset) return chr('{0}...{1}').format(text[:e1], text[s2 + 1:]) def _default_line_options(self, lines): for line in lines: if len(line.words): # get opts from first line, first word return line.words[0].options return None def clear_texture(self): self._render_begin() data = self._render_end() assert(data) if data is not None and data.width > 1: self.texture.blit_data(data) return def render_lines(self, lines, options, render_text, y, size): get_extents = self.get_cached_extents() uw, uh = options['text_size'] xpad = options['padding_x'] if uw is not None: uww = uw - 2 * xpad # real width of just text w = size[0] sw = options['space_width'] halign = options['halign'] split = re.split for layout_line in lines: # for plain label each line has only one str lw, lh = layout_line.w, layout_line.h line = '' assert len(layout_line.words) < 2 if len(layout_line.words): last_word = layout_line.words[0] line = last_word.text x = xpad if halign == 'center': x = int((w - lw) / 2.) elif halign == 'right': x = max(0, int(w - lw - xpad)) # right left justify # divide left over space between `spaces` # TODO implement a better method of stretching glyphs? 
if (uw is not None and halign == 'justify' and line and not layout_line.is_last_line): # number spaces needed to fill, and remainder n, rem = divmod(max(uww - lw, 0), sw) n = int(n) words = None if n or rem: # there's no trailing space when justify is selected words = split(whitespace_pat, line) if words is not None and len(words) > 1: space = type(line)(' ') # words: every even index is spaces, just add ltr n spaces for i in range(n): idx = (2 * i + 1) % (len(words) - 1) words[idx] = words[idx] + space if rem: # render the last word at the edge, also add it to line ext = get_extents(words[-1]) word = LayoutWord(last_word.options, ext[0], ext[1], words[-1]) layout_line.words.append(word) last_word.lw = uww - ext[0] # word was stretched render_text(words[-1], x + last_word.lw, y) last_word.text = line = ''.join(words[:-2]) else: last_word.lw = uww # word was stretched last_word.text = line = ''.join(words) layout_line.w = uww # the line occupies full width if len(line): layout_line.x = x layout_line.y = y render_text(line, x, y) y += lh return y def _render_real(self): lines = self._cached_lines options = self._default_line_options(lines) if options is None: # there was no text to render return self.clear_texture() old_opts = self.options ih = self._internal_size[1] # the real size of text, not texture size = self.size valign = options['valign'] y = ypad = options['padding_y'] # pos in the texture if valign == 'bottom': y = size[1] - ih + ypad elif valign == 'middle' or valign == 'center': y = int((size[1] - ih) / 2 + ypad) self._render_begin() self.render_lines(lines, options, self._render_text, y, size) # get data from provider data = self._render_end() assert(data) self.options = old_opts # If the text is 1px width, usually, the data is black. # Don't blit that kind of data, otherwise, you have a little black bar. if data is not None and data.width > 1: self.texture.blit_data(data) def render(self, real=False): '''Return a tuple (width, height) to create the image with the user constraints. (width, height) includes the padding. 
''' if real: return self._render_real() options = copy(self.options) options['space_width'] = self.get_extents(' ')[0] options['strip'] = strip = (options['strip'] or options['halign'] == 'justify') uw, uh = options['text_size'] = self._text_size text = self.text if strip: text = text.strip() if uw is not None and options['shorten']: text = self.shorten(text) self._cached_lines = lines = [] if not text: return 0, 0 if uh is not None and (options['valign'] == 'middle' or options['valign'] == 'center'): center = -1 # pos of newline if len(text) > 1: middle = int(len(text) // 2) l, r = text.rfind('\n', 0, middle), text.find('\n', middle) if l != -1 and r != -1: center = l if center - l <= r - center else r elif l != -1: center = l elif r != -1: center = r # if a newline split text, render from center down and up til uh if center != -1: # layout from center down until half uh w, h, clipped = layout_text(text[center + 1:], lines, (0, 0), (uw, uh / 2), options, self.get_cached_extents(), True, True) # now layout from center upwards until uh is reached w, h, clipped = layout_text(text[:center + 1], lines, (w, h), (uw, uh), options, self.get_cached_extents(), False, True) else: # if there's no new line, layout everything w, h, clipped = layout_text(text, lines, (0, 0), (uw, None), options, self.get_cached_extents(), True, True) else: # top or bottom w, h, clipped = layout_text(text, lines, (0, 0), (uw, uh), options, self.get_cached_extents(), options['valign'] == 'top', True) self._internal_size = w, h if uw: w = uw if uh: h = uh if h > 1 and w < 2: w = 2 return int(w), int(h) def _texture_refresh(self, *l): self.refresh() def _texture_fill(self, texture): # second pass, render for real self.render(real=True) def refresh(self): '''Force re-rendering of the text ''' self.resolve_font_name() # first pass, calculating width/height sz = self.render() self._size_texture = sz self._size = (sz[0], sz[1]) # if no text are rendered, return nothing. width, height = self._size if width <= 1 or height <= 1: self.texture = self.texture_1px return # create a delayed texture texture = self.texture if texture is None or \ width != texture.width or \ height != texture.height: texture = Texture.create(size=(width, height), mipmap=self.options['mipmap'], callback=self._texture_fill) texture.flip_vertical() texture.add_reload_observer(self._texture_refresh) self.texture = texture else: texture.ask_update(self._texture_fill) def _get_text(self): if PY2: try: if isinstance(self._text, unicode): return self._text return self._text.decode('utf8') except AttributeError: # python 3 support return str(self._text) except UnicodeDecodeError: return self._text else: return self._text def _set_text(self, text): if text != self._text: self._text = text text = property(_get_text, _set_text, doc='Get/Set the text') label = property(_get_text, _set_text, doc='Get/Set the text') @property def texture_1px(self): if LabelBase._texture_1px is None: tex = Texture.create(size=(1, 1), colorfmt='rgba') tex.blit_buffer(b'\x00\x00\x00\x00', colorfmt='rgba') LabelBase._texture_1px = tex return LabelBase._texture_1px @property def size(self): return self._size @property def width(self): return self._size[0] @property def height(self): return self._size[1] @property def content_width(self): '''Return the content width; i.e. the width of the text without any padding.''' if self.texture is None: return 0 return self.texture.width - 2 * self.options['padding_x'] @property def content_height(self): '''Return the content height; i.e. 
the height of the text without any padding.''' if self.texture is None: return 0 return self.texture.height - 2 * self.options['padding_y'] @property def content_size(self): '''Return the content size (width, height)''' if self.texture is None: return (0, 0) return (self.content_width, self.content_height) @property def fontid(self): '''Return a unique id for all font parameters''' return str([self.options[x] for x in ( 'font_size', 'font_name_r', 'bold', 'italic', 'underline', 'strikethrough')]) def _get_text_size(self): return self._text_size def _set_text_size(self, x): self._text_size = x text_size = property(_get_text_size, _set_text_size, doc='''Get/set the (width, height) of the ' 'contrained rendering box''') usersize = property(_get_text_size, _set_text_size, doc='''(deprecated) Use text_size instead.''') # Load the appropriate provider label_libs = [] if USE_SDL2: label_libs += [('sdl2', 'text_sdl2', 'LabelSDL2')] else: label_libs += [('pygame', 'text_pygame', 'LabelPygame')] label_libs += [ ('pil', 'text_pil', 'LabelPIL')] Text = Label = core_select_lib('text', label_libs) if 'KIVY_DOC' not in os.environ: if not Label: from kivy.logger import Logger import sys Logger.critical('App: Unable to get a Text provider, abort.') sys.exit(1) # For the first initalization, register the default font Label.register('Roboto', 'data/fonts/Roboto-Regular.ttf', 'data/fonts/Roboto-Italic.ttf', 'data/fonts/Roboto-Bold.ttf', 'data/fonts/Roboto-BoldItalic.ttf')
[]
[]
[ "SYSTEMROOT" ]
[]
["SYSTEMROOT"]
python
1
0
pkg/plugin/client/client.go
package client

import (
    "context"
    "io/ioutil"
    "net"
    "os"
    "os/signal"
    "reflect"
    "syscall"

    v1 "github.com/csweichel/werft/pkg/api/v1"
    "github.com/csweichel/werft/pkg/plugin/common"
    log "github.com/sirupsen/logrus"
    "golang.org/x/xerrors"
    "google.golang.org/grpc"
    "gopkg.in/yaml.v3"
)

type Services struct {
    v1.WerftServiceClient
    v1.WerftUIClient
}

// IntegrationPlugin works on the public werft API
type IntegrationPlugin interface {
    // Run runs the plugin. Once this function returns the plugin stops running.
    // Implementors must respect the context deadline as that's the signal for graceful shutdown.
    Run(ctx context.Context, config interface{}, srv *Services) error
}

// RepositoryPlugin adds support for a repository host
type RepositoryPlugin interface {
    // Run runs the plugin. The plugin runs until the context is canceled and the server returned
    // by this function is expected to remain functional until then.
    Run(ctx context.Context, config interface{}) (common.RepositoryPluginServer, error)
}

// AuthenticationPlugin adds support for API authentication
type AuthenticationPlugin interface {
    // Run runs the plugin. The plugin runs until the context is canceled and the server returned
    // by this function is expected to remain functional until then.
    Run(ctx context.Context, config interface{}) (common.AuthenticationPluginServer, error)
}

// ServeOpt configures a plugin serve
type ServeOpt struct {
    Type common.Type
    Run  func(ctx context.Context, config interface{}, socket string) error
}

// WithIntegrationPlugin registers integration plugin capabilities
func WithIntegrationPlugin(p IntegrationPlugin) ServeOpt {
    return ServeOpt{
        Type: common.TypeIntegration,
        Run: func(ctx context.Context, config interface{}, socket string) error {
            conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(unixConnect))
            if err != nil {
                return xerrors.Errorf("did not connect: %v", err)
            }
            defer conn.Close()

            return p.Run(ctx, config, &Services{
                WerftServiceClient: v1.NewWerftServiceClient(conn),
                WerftUIClient:      v1.NewWerftUIClient(conn),
            })
        },
    }
}

// WithRepositoryPlugin registers repo plugin capabilities
func WithRepositoryPlugin(p RepositoryPlugin) ServeOpt {
    return ServeOpt{
        Type: common.TypeRepository,
        Run: func(ctx context.Context, config interface{}, socket string) error {
            lis, err := net.Listen("unix", socket)
            if err != nil {
                return err
            }
            service, err := p.Run(ctx, config)
            if err != nil {
                return err
            }
            s := grpc.NewServer()
            common.RegisterRepositoryPluginServer(s, service)
            return s.Serve(lis)
        },
    }
}

// WithAuthenticationPlugin registers authentication plugin capabilities
func WithAuthenticationPlugin(p AuthenticationPlugin) ServeOpt {
    return ServeOpt{
        Type: common.TypeAuthentication,
        Run: func(ctx context.Context, config interface{}, socket string) error {
            lis, err := net.Listen("unix", socket)
            if err != nil {
                return err
            }
            service, err := p.Run(ctx, config)
            if err != nil {
                return err
            }
            s := grpc.NewServer()
            common.RegisterAuthenticationPluginServer(s, service)
            return s.Serve(lis)
        },
    }
}

const proxyPassPluginType common.Type = "proxy-pass"

// ProxyPassPlugin adds additional support for proxied webhooks
type ProxyPassPlugin interface {
    Serve(ctx context.Context, l net.Listener) error
}

// WithProxyPass enables a "through werft" proxy route to the plugin.
// The route will be available at "http://<werft-location>/plugins/<plugin-name>"
func WithProxyPass(p ProxyPassPlugin) ServeOpt {
    return ServeOpt{
        Type: proxyPassPluginType,
        Run: func(ctx context.Context, config interface{}, socket string) error {
            l, err := net.Listen("unix", socket)
            if err != nil {
                return err
            }
            return p.Serve(ctx, l)
        },
    }
}

// Serve is the main entry point for plugins
func Serve(configType interface{}, opts ...ServeOpt) {
    if typ := reflect.TypeOf(configType); typ.Kind() != reflect.Ptr {
        log.Fatal("configType is not a pointer")
    }

    log.SetLevel(log.DebugLevel)
    log.SetFormatter(&log.TextFormatter{
        DisableColors:    true,
        DisableTimestamp: true,
    })
    log.SetOutput(os.Stdout)

    errchan := make(chan error)

    if len(os.Args) != 4 {
        log.Fatalf("usage: %s <type> <cfgfile.yaml> <socket>", os.Args[0])
        return
    }
    tpe, cfgfn, socketfn := os.Args[1], os.Args[2], os.Args[3]

    // load config
    cfgraw, err := ioutil.ReadFile(cfgfn)
    if err != nil {
        log.Fatalf("cannot read config file: %v", err)
    }
    err = yaml.Unmarshal(cfgraw, configType)
    if err != nil {
        log.Fatalf("cannot unmarshal config: %v", err)
    }
    config := configType

    ctx, cancel := context.WithCancel(context.Background())
    for _, o := range opts {
        if o.Type != proxyPassPluginType {
            continue
        }
        o := o
        go func() {
            err = o.Run(ctx, nil, os.Getenv("WERFT_PLUGIN_PROXY_SOCKET"))
            if err != nil && err != context.Canceled {
                errchan <- err
            }
        }()
        break
    }

    var sv *ServeOpt
    for _, o := range opts {
        if string(o.Type) == tpe {
            sv = &o
            break
        }
    }
    if sv == nil {
        log.Fatalf("cannot serve as %s plugin", tpe)
    }
    go func() {
        err := sv.Run(ctx, config, socketfn)
        if err != nil && err != context.Canceled {
            errchan <- err
        }
    }()

    sigchan := make(chan os.Signal, 1)
    signal.Notify(sigchan, os.Interrupt, syscall.SIGTERM)

    log.Info("plugin is running")
    select {
    case <-sigchan:
    case err := <-errchan:
        log.Fatal(err)
    }
    cancel()
}

func unixConnect(ctx context.Context, addr string) (net.Conn, error) {
    return net.Dial("unix", addr)
}
[ "\"WERFT_PLUGIN_PROXY_SOCKET\"" ]
[]
[ "WERFT_PLUGIN_PROXY_SOCKET" ]
[]
["WERFT_PLUGIN_PROXY_SOCKET"]
go
1
0
redis.go
package tools

import (
    "encoding/json"
    "fmt"
    "github.com/gomodule/redigo/redis"
    "io/ioutil"
    "log"
    "net/http"
    "os"
    "strconv"
    "strings"
)

type CommandLine struct {
    Data []struct {
        DoOrSend string `json:"do_or_send"`
        Command  string `json:"command"`
        Params   string `json:"params"`
    } `json:"data"`
}

var conn redis.Conn

func init() {
    var err error
    address := os.Getenv("CACHE_ADDRESS")
    dbNum, _ := strconv.Atoi(os.Getenv("CACHE_DB_NUM"))
    conn, err = redis.Dial("tcp", address, redis.DialDatabase(dbNum))
    if err != nil {
        log.Println(err.Error())
        return
    }
}

func parseParams(params string) []interface{} {
    args := []interface{}{}
    if len(params) != 0 {
        paramSlice := strings.Split(params, ",")
        for _, param := range paramSlice {
            args = append(args, param)
        }
    }
    return args
}

func errorRespond(w http.ResponseWriter, err string) {
    log.Println(err)
    http.Error(w, err, http.StatusBadRequest)
}

func ExecCloudRedis(w http.ResponseWriter, r *http.Request) {
    data, err := ioutil.ReadAll(r.Body)
    if err != nil {
        errorRespond(w, err.Error())
        return
    }
    cmd := CommandLine{}
    if err := json.Unmarshal(data, &cmd); err != nil {
        errorRespond(w, err.Error())
        return
    }
    if len(cmd.Data) == 0 {
        errorRespond(w, "data is empty")
        return
    }
    for _, val := range cmd.Data {
        args := parseParams(val.Params)
        switch strings.ToLower(val.DoOrSend) {
        case "send":
            redis.Values(conn.Send(val.Command, args...), nil)
            continue
        case "do":
            reply, err := conn.Do(val.Command, args...)
            result, err := redis.Values(reply, err)
            if err != nil {
                if strings.Contains(err.Error(), "unexpected type for Values") {
                    w.Write([]byte(fmt.Sprintf("%s\n", reply)))
                } else {
                    errorRespond(w, err.Error())
                }
                return
            }
            var response string
            for _, str := range result {
                tmpStr, _ := redis.String(str, nil)
                response += tmpStr
                response += "\n"
            }
            w.Write([]byte(response))
            return
        default:
            errorRespond(w, "wrong param " + val.DoOrSend)
            return
        }
    }
}
[ "\"CACHE_ADDRESS\"", "\"CACHE_DB_NUM\"" ]
[]
[ "CACHE_ADDRESS", "CACHE_DB_NUM" ]
[]
["CACHE_ADDRESS", "CACHE_DB_NUM"]
go
2
0
internal/docker/builder.go
// Copyright 2020 The Matrix.org Foundation C.I.C. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package docker import ( "archive/tar" "bytes" "context" "errors" "fmt" "io/ioutil" "log" "net/http" "net/url" "os" "path" "runtime" "strings" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/volume" client "github.com/docker/docker/client" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/go-connections/nat" "github.com/matrix-org/complement/internal/b" "github.com/matrix-org/complement/internal/config" "github.com/matrix-org/complement/internal/instruction" ) var ( // HostnameRunningComplement is the hostname of Complement from the perspective of a Homeserver. HostnameRunningComplement = "host.docker.internal" // HostnameRunningDocker is the hostname of the docker daemon from the perspective of Complement. HostnameRunningDocker = "localhost" ) func init() { if os.Getenv("CI") == "true" { log.Println("Running under CI: redirecting localhost to docker host on 172.17.0.1") // this assumes we are running inside docker so they have // forwarded the docker socket to us and we're in a container. HostnameRunningDocker = "172.17.0.1" } } const complementLabel = "complement_context" type Builder struct { BaseImage string ImageArgs []string KeepBlueprints []string CSAPIPort int FederationPort int Docker *client.Client debugLogging bool bestEffort bool config *config.Complement } func NewBuilder(cfg *config.Complement) (*Builder, error) { cli, err := client.NewEnvClient() if err != nil { return nil, err } return &Builder{ Docker: cli, BaseImage: cfg.BaseImageURI, ImageArgs: cfg.BaseImageArgs, KeepBlueprints: cfg.KeepBlueprints, CSAPIPort: 8008, FederationPort: 8448, debugLogging: cfg.DebugLoggingEnabled, bestEffort: cfg.BestEffort, config: cfg, }, nil } func (d *Builder) log(str string, args ...interface{}) { if !d.debugLogging { return } log.Printf(str, args...) } func (d *Builder) Cleanup() { err := d.removeContainers() if err != nil { d.log("Cleanup: Failed to remove containers: %s", err) } err = d.removeImages() if err != nil { d.log("Cleanup: Failed to remove images: %s", err) } err = d.removeNetworks() if err != nil { d.log("Cleanup: Failed to remove networks: %s", err) } } // removeImages removes all images with `complementLabel`. func (d *Builder) removeNetworks() error { networks, err := d.Docker.NetworkList(context.Background(), types.NetworkListOptions{ Filters: label(complementLabel), }) if err != nil { return err } for _, nw := range networks { err = d.Docker.NetworkRemove(context.Background(), nw.ID) if err != nil { return err } } return nil } // removeImages removes all images with `complementLabel`. 
func (d *Builder) removeImages() error { images, err := d.Docker.ImageList(context.Background(), types.ImageListOptions{ Filters: label(complementLabel), }) if err != nil { return err } for _, img := range images { // we only clean up localhost/complement images else if someone docker pulls // an anonymous snapshot we might incorrectly nuke it :( any non-localhost // tag marks this image as safe (as images can have multiple tags) isLocalhost := true for _, rt := range img.RepoTags { if !strings.HasPrefix(rt, "localhost/complement") { isLocalhost = false break } } if !isLocalhost { d.log("Not cleaning up image with tags: %v", img.RepoTags) continue } bprintName := img.Labels["complement_blueprint"] keep := false for _, keepBprint := range d.KeepBlueprints { if bprintName == keepBprint { keep = true break } } if keep { d.log("Keeping image created from blueprint %s", bprintName) continue } _, err = d.Docker.ImageRemove(context.Background(), img.ID, types.ImageRemoveOptions{ Force: true, }) if err != nil { return err } } return nil } // removeContainers removes all containers with `complementLabel`. func (d *Builder) removeContainers() error { containers, err := d.Docker.ContainerList(context.Background(), types.ContainerListOptions{ All: true, Filters: label(complementLabel), }) if err != nil { return err } for _, c := range containers { err = d.Docker.ContainerRemove(context.Background(), c.ID, types.ContainerRemoveOptions{ Force: true, }) if err != nil { return err } } return nil } func (d *Builder) ConstructBlueprintsIfNotExist(bs []b.Blueprint) error { var blueprintsToBuild []b.Blueprint for _, bprint := range bs { images, err := d.Docker.ImageList(context.Background(), types.ImageListOptions{ Filters: label("complement_blueprint=" + bprint.Name), }) if err != nil { return fmt.Errorf("ConstructBlueprintsIfNotExist: failed to ImageList: %w", err) } if len(images) == 0 { blueprintsToBuild = append(blueprintsToBuild, bprint) } } return d.ConstructBlueprints(blueprintsToBuild) } func (d *Builder) ConstructBlueprints(bs []b.Blueprint) error { errc := make(chan []error, len(bs)) for _, bprint := range bs { go (func(bprint b.Blueprint) { errc <- d.construct(bprint) })(bprint) } var errs []error for i := 0; i < len(bs); i++ { // the channel returns a slice of errors; // spread and append them to the error slice // (nothing will be appended if the slice is empty) errs = append(errs, <-errc...) } close(errc) if len(errs) > 0 { for _, err := range errs { d.log("could not construct blueprint: %s", err) } return errs[0] } // wait a bit for images/containers to show up in 'image ls' foundImages := false for i := 0; i < 50; i++ { // max 5s images, err := d.Docker.ImageList(context.Background(), types.ImageListOptions{ Filters: label(complementLabel), }) if err != nil { return err } if len(images) < len(bs) { time.Sleep(100 * time.Millisecond) } else { foundImages = true break } } // do this after we have found images so we know that the containers have been detached so // we can actually remove the networks. 
d.removeNetworks() if !foundImages { return fmt.Errorf("failed to find built images via ImageList: did they all build ok?") } return nil } // construct all Homeservers sequentially then commits them func (d *Builder) construct(bprint b.Blueprint) (errs []error) { networkID, err := CreateNetwork(d.Docker, bprint.Name) if err != nil { return []error{err} } runner := instruction.NewRunner(bprint.Name, d.bestEffort, d.debugLogging) results := make([]result, len(bprint.Homeservers)) for i, hs := range bprint.Homeservers { res := d.constructHomeserver(bprint.Name, runner, hs, networkID) if res.err != nil { errs = append(errs, res.err) if res.containerID != "" { // something went wrong, but we have a container which may have interesting logs printLogs(d.Docker, res.containerID, res.contextStr) } } // kill the container defer func(r result) { killErr := d.Docker.ContainerKill(context.Background(), r.containerID, "KILL") if killErr != nil { d.log("%s : Failed to kill container %s: %s\n", r.contextStr, r.containerID, killErr) } }(res) results[i] = res } // commit containers for _, res := range results { if res.err != nil { continue } // collect and store access tokens as labels 'access_token_$userid: $token' labels := make(map[string]string) accessTokens := runner.AccessTokens(res.homeserver.Name) if len(bprint.KeepAccessTokensForUsers) > 0 { // only keep access tokens for specified users for _, userID := range bprint.KeepAccessTokensForUsers { tok, ok := accessTokens[userID] if ok { labels["access_token_"+userID] = tok } } } else { // keep all tokens for k, v := range accessTokens { labels["access_token_"+k] = v } } // Combine the labels for tokens and application services asLabels := labelsForApplicationServices(res.homeserver) for k, v := range asLabels { labels[k] = v } // commit the container commit, err := d.Docker.ContainerCommit(context.Background(), res.containerID, types.ContainerCommitOptions{ Author: "Complement", Pause: true, Reference: "localhost/complement:" + res.contextStr, Config: &container.Config{ Labels: labels, }, }) if err != nil { d.log("%s : failed to ContainerCommit: %s\n", res.contextStr, err) errs = append(errs, fmt.Errorf("%s : failed to ContainerCommit: %w", res.contextStr, err)) continue } imageID := strings.Replace(commit.ID, "sha256:", "", 1) d.log("%s => %s\n", res.contextStr, imageID) } return errs } // construct this homeserver and execute its instructions, keeping the container alive. func (d *Builder) constructHomeserver(blueprintName string, runner *instruction.Runner, hs b.Homeserver, networkID string) result { contextStr := fmt.Sprintf("%s.%s", blueprintName, hs.Name) d.log("%s : constructing homeserver...\n", contextStr) dep, err := d.deployBaseImage(blueprintName, hs, contextStr, networkID) if err != nil { log.Printf("%s : failed to deployBaseImage: %s\n", contextStr, err) containerID := "" if dep != nil { containerID = dep.ContainerID } return result{ err: err, containerID: containerID, contextStr: contextStr, homeserver: hs, } } d.log("%s : deployed base image to %s (%s)\n", contextStr, dep.BaseURL, dep.ContainerID) err = runner.Run(hs, dep.BaseURL) if err != nil { d.log("%s : failed to run instructions: %s\n", contextStr, err) } return result{ err: err, containerID: dep.ContainerID, contextStr: contextStr, homeserver: hs, } } // deployBaseImage runs the base image and returns the baseURL, containerID or an error. 
func (d *Builder) deployBaseImage(blueprintName string, hs b.Homeserver, contextStr, networkID string) (*HomeserverDeployment, error) { asIDToRegistrationMap := asIDToRegistrationFromLabels(labelsForApplicationServices(hs)) return deployImage( d.Docker, d.BaseImage, d.CSAPIPort, fmt.Sprintf("complement_%s", contextStr), blueprintName, hs.Name, asIDToRegistrationMap, contextStr, networkID, d.config.VersionCheckIterations, ) } // getCaVolume returns the correct volume mount for providing a CA to homeserver containers. // If running CI, returns an error if it's unable to find a volume that has /ca // Otherwise, returns an error if we're unable to find the <cwd>/ca directory on the local host func getCaVolume(ctx context.Context, docker *client.Client) (caMount mount.Mount, err error) { if os.Getenv("CI") == "true" { // When in CI, Complement itself is a container with the CA volume mounted at /ca. // We need to mount this volume to all homeserver containers to synchronize the CA cert. // This is needed to establish trust among all containers. // Get volume mounted at /ca. First we get the container ID // /proc/1/cpuset should be /docker/<containerID> cpuset, err := ioutil.ReadFile("/proc/1/cpuset") if err != nil { return caMount, err } if !strings.Contains(string(cpuset), "docker") { return caMount, errors.New("Could not identify container ID using /proc/1/cpuset") } cpusetList := strings.Split(strings.TrimSpace(string(cpuset)), "/") containerID := cpusetList[len(cpusetList)-1] container, err := docker.ContainerInspect(ctx, containerID) if err != nil { return caMount, err } // Get the volume that matches the destination in our complement container var volumeName string for i := range container.Mounts { if container.Mounts[i].Destination == "/ca" { volumeName = container.Mounts[i].Name } } if volumeName == "" { // We did not find a volume. This container might be created without a volume, // or CI=true is passed but we are not running in a container. // todo: log that we do not provide a CA volume mount? return caMount, nil } caMount = mount.Mount{ Type: mount.TypeVolume, Source: volumeName, Target: "/ca", } } else { // When not in CI, our CA cert is placed in the current working dir. // We bind mount this directory to all homeserver containers. cwd, err := os.Getwd() if err != nil { return caMount, err } caCertificateDirHost := path.Join(cwd, "ca") if _, err := os.Stat(caCertificateDirHost); os.IsNotExist(err) { err = os.Mkdir(caCertificateDirHost, 0770) if err != nil { return caMount, err } } caMount = mount.Mount{ Type: mount.TypeBind, Source: path.Join(cwd, "ca"), Target: "/ca", } } return caMount, nil } // getAppServiceVolume returns a volume mount for providing the `/appservice` directory to homeserver containers. // This directory will contain application service registration config files. 
// Returns an error if the volume failed to create func getAppServiceVolume(ctx context.Context, docker *client.Client) (asMount mount.Mount, err error) { asVolume, err := docker.VolumeCreate(context.Background(), volume.VolumesCreateBody{ Name: "appservices", }) if err != nil { return asMount, err } asMount = mount.Mount{ Type: mount.TypeVolume, Source: asVolume.Name, Target: "/appservices", } return asMount, err } func generateASRegistrationYaml(as b.ApplicationService) string { return fmt.Sprintf("id: %s\n", as.ID) + fmt.Sprintf("hs_token: %s\n", as.HSToken) + fmt.Sprintf("as_token: %s\n", as.ASToken) + fmt.Sprintf("url: '%s'\n", as.URL) + fmt.Sprintf("sender_localpart: %s\n", as.SenderLocalpart) + fmt.Sprintf("rate_limited: %v\n", as.RateLimited) + "namespaces:\n" + " users: []\n" + " rooms: []\n" + " aliases: []\n" } func deployImage( docker *client.Client, imageID string, csPort int, containerName, blueprintName, hsName string, asIDToRegistrationMap map[string]string, contextStr, networkID string, versionCheckIterations int, ) (*HomeserverDeployment, error) { ctx := context.Background() var extraHosts []string var mounts []mount.Mount var err error if runtime.GOOS == "linux" { // By default docker for linux does not expose this, so do it now. // When https://github.com/moby/moby/pull/40007 lands in Docker 20, we should // change this to be `host.docker.internal:host-gateway` extraHosts = []string{HostnameRunningComplement + ":172.17.0.1"} } if os.Getenv("COMPLEMENT_CA") == "true" { var caMount mount.Mount caMount, err = getCaVolume(ctx, docker) if err != nil { return nil, err } mounts = append(mounts, caMount) } asMount, err := getAppServiceVolume(ctx, docker) if err != nil { return nil, err } mounts = append(mounts, asMount) env := []string{ "SERVER_NAME=" + hsName, "COMPLEMENT_CA=" + os.Getenv("COMPLEMENT_CA"), } body, err := docker.ContainerCreate(ctx, &container.Config{ Image: imageID, Env: env, //Cmd: d.ImageArgs, Labels: map[string]string{ complementLabel: contextStr, "complement_blueprint": blueprintName, "complement_hs_name": hsName, }, }, &container.HostConfig{ PublishAllPorts: true, ExtraHosts: extraHosts, Mounts: mounts, }, &network.NetworkingConfig{ EndpointsConfig: map[string]*network.EndpointSettings{ hsName: { NetworkID: networkID, Aliases: []string{hsName}, }, }, }, containerName) if err != nil { return nil, err } containerID := body.ID // Create the application service files for asID, registration := range asIDToRegistrationMap { // Create a fake/virtual file in memory that we can copy to the container // via https://stackoverflow.com/a/52131297/796832 var buf bytes.Buffer tw := tar.NewWriter(&buf) err = tw.WriteHeader(&tar.Header{ Name: fmt.Sprintf("/appservices/%s.yaml", url.PathEscape(asID)), Mode: 0777, Size: int64(len(registration)), }) if err != nil { return nil, fmt.Errorf("Failed to copy regstration to container: %v", err) } tw.Write([]byte(registration)) tw.Close() // Put our new fake file in the container volume err = docker.CopyToContainer(context.Background(), containerID, "/", &buf, types.CopyToContainerOptions{ AllowOverwriteDirWithFile: false, }) if err != nil { return nil, err } } err = docker.ContainerStart(ctx, containerID, types.ContainerStartOptions{}) if err != nil { return nil, err } inspect, err := docker.ContainerInspect(ctx, containerID) if err != nil { return nil, err } baseURL, fedBaseURL, err := endpoints(inspect.NetworkSettings.Ports, 8008, 8448) if err != nil { return nil, fmt.Errorf("%s : image %s : %w", contextStr, imageID, err) } 
versionsURL := fmt.Sprintf("%s/_matrix/client/versions", baseURL) // hit /versions to check it is up var lastErr error for i := 0; i < versionCheckIterations; i++ { res, err := http.Get(versionsURL) if err != nil { lastErr = fmt.Errorf("GET %s => error: %s", versionsURL, err) time.Sleep(50 * time.Millisecond) continue } if res.StatusCode != 200 { lastErr = fmt.Errorf("GET %s => HTTP %s", versionsURL, res.Status) time.Sleep(50 * time.Millisecond) continue } lastErr = nil break } d := &HomeserverDeployment{ BaseURL: baseURL, FedBaseURL: fedBaseURL, ContainerID: containerID, AccessTokens: tokensFromLabels(inspect.Config.Labels), ApplicationServices: asIDToRegistrationFromLabels(inspect.Config.Labels), } if lastErr != nil { return d, fmt.Errorf("%s: failed to check server is up. %w", contextStr, lastErr) } return d, nil } // CreateNetwork creates a docker network and returns its id. // ID is guaranteed not to be empty when err == nil func CreateNetwork(docker *client.Client, blueprintName string) (networkID string, err error) { // make a user-defined network so we get DNS based on the container name nw, err := docker.NetworkCreate(context.Background(), "complement_"+blueprintName, types.NetworkCreate{ Labels: map[string]string{ complementLabel: blueprintName, "complement_blueprint": blueprintName, }, }) if err != nil { return "", fmt.Errorf("%s: failed to create docker network. %w", blueprintName, err) } if nw.Warning != "" { if nw.ID == "" { return "", fmt.Errorf("%s: fatal warning while creating docker network. %s", blueprintName, nw.Warning) } log.Printf("WARNING: %s\n", nw.Warning) } if nw.ID == "" { return "", fmt.Errorf("%s: unexpected empty ID while creating networkID", blueprintName) } return nw.ID, nil } func printLogs(docker *client.Client, containerID, contextStr string) { reader, err := docker.ContainerLogs(context.Background(), containerID, types.ContainerLogsOptions{ ShowStderr: true, ShowStdout: true, Follow: false, }) if err != nil { log.Printf("%s : Failed to extract container logs: %s\n", contextStr, err) return } log.Printf("============================================\n\n\n") log.Printf("%s : Server logs:\n", contextStr) stdcopy.StdCopy(log.Writer(), log.Writer(), reader) log.Printf("============== %s : END LOGS ==============\n\n\n", contextStr) } func label(in string) filters.Args { f := filters.NewArgs() f.Add("label", in) return f } func tokensFromLabels(labels map[string]string) map[string]string { userIDToToken := make(map[string]string) for k, v := range labels { if strings.HasPrefix(k, "access_token_") { userIDToToken[strings.TrimPrefix(k, "access_token_")] = v } } return userIDToToken } func asIDToRegistrationFromLabels(labels map[string]string) map[string]string { asMap := make(map[string]string) for k, v := range labels { if strings.HasPrefix(k, "application_service_") { asMap[strings.TrimPrefix(k, "application_service_")] = v } } return asMap } func labelsForApplicationServices(hs b.Homeserver) map[string]string { labels := make(map[string]string) // collect and store app service registrations as labels 'application_service_$as_id: $registration' // collect and store app service access tokens as labels 'access_token_$sender_localpart: $as_token' for _, as := range hs.ApplicationServices { labels["application_service_"+as.ID] = generateASRegistrationYaml(as) labels["access_token_@"+as.SenderLocalpart+":"+hs.Name] = as.ASToken } return labels } func endpoints(p nat.PortMap, csPort, ssPort int) (baseURL, fedBaseURL string, err error) { csapiPort := 
fmt.Sprintf("%d/tcp", csPort) csapiPortInfo, ok := p[nat.Port(csapiPort)] if !ok { return "", "", fmt.Errorf("port %s not exposed - exposed ports: %v", csapiPort, p) } baseURL = fmt.Sprintf("http://"+HostnameRunningDocker+":%s", csapiPortInfo[0].HostPort) ssapiPort := fmt.Sprintf("%d/tcp", ssPort) ssapiPortInfo, ok := p[nat.Port(ssapiPort)] if !ok { return "", "", fmt.Errorf("port %s not exposed - exposed ports: %v", ssapiPort, p) } fedBaseURL = fmt.Sprintf("https://"+HostnameRunningDocker+":%s", ssapiPortInfo[0].HostPort) return } type result struct { err error containerID string contextStr string homeserver b.Homeserver }
[ "\"CI\"", "\"CI\"", "\"COMPLEMENT_CA\"", "\"COMPLEMENT_CA\"" ]
[]
[ "COMPLEMENT_CA", "CI" ]
[]
["COMPLEMENT_CA", "CI"]
go
2
0
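The deployImage routine in the entry above starts a container and then polls its /_matrix/client/versions endpoint a bounded number of times before declaring the deployment healthy. Below is a minimal, self-contained sketch of that bounded-retry readiness check using only the Go standard library; the URL, attempt count, and delay are illustrative assumptions, not values taken from the file.

package main

import (
	"fmt"
	"net/http"
	"time"
)

// waitUntilReady polls url until it answers HTTP 200 or the attempts run out.
// It mirrors the bounded retry loop used after the container is started.
func waitUntilReady(url string, attempts int, delay time.Duration) error {
	var lastErr error
	for i := 0; i < attempts; i++ {
		res, err := http.Get(url)
		if err != nil {
			lastErr = fmt.Errorf("GET %s: %w", url, err)
			time.Sleep(delay)
			continue
		}
		res.Body.Close()
		if res.StatusCode != http.StatusOK {
			lastErr = fmt.Errorf("GET %s: unexpected status %s", url, res.Status)
			time.Sleep(delay)
			continue
		}
		return nil // the server answered 200, it is up
	}
	return lastErr
}

func main() {
	// Hypothetical local port mapping for the homeserver's client API.
	err := waitUntilReady("http://localhost:8008/_matrix/client/versions", 20, 50*time.Millisecond)
	if err != nil {
		fmt.Println("server never became ready:", err)
		return
	}
	fmt.Println("server is up")
}

Returning the last error keeps the most recent failure reason for the caller, the same way the loop in the file above tracks lastErr.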
server/container_create_linux.go
// +build linux package server import ( "encoding/json" "fmt" "os" "path/filepath" "sort" "strings" "syscall" "time" "github.com/containers/buildah/pkg/secrets" "github.com/containers/libpod/pkg/annotations" "github.com/containers/libpod/pkg/apparmor" "github.com/containers/libpod/pkg/rootless" createconfig "github.com/containers/libpod/pkg/spec" libconfig "github.com/cri-o/cri-o/internal/lib/config" "github.com/cri-o/cri-o/internal/lib/sandbox" "github.com/cri-o/cri-o/internal/oci" "github.com/cri-o/cri-o/internal/pkg/log" "github.com/cri-o/cri-o/internal/pkg/storage" "github.com/cri-o/cri-o/utils" dockermounts "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/symlink" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/devices" rspec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "golang.org/x/net/context" pb "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" ) // minMemoryLimit is the minimum memory that must be set for a container. // A lower value would result in the container failing to start. const minMemoryLimit = 12582912 type configDevice struct { Device rspec.LinuxDevice Resource rspec.LinuxDeviceCgroup } func findCgroupMountpoint(name string) error { // Set up pids limit if pids cgroup is mounted _, err := cgroups.FindCgroupMountpoint("", name) return err } func addDevicesPlatform(ctx context.Context, sb *sandbox.Sandbox, containerConfig *pb.ContainerConfig, specgen *generate.Generator) error { sp := specgen.Config if containerConfig.GetLinux().GetSecurityContext().GetPrivileged() { hostDevices, err := devices.HostDevices() if err != nil { return err } for _, hostDevice := range hostDevices { rd := rspec.LinuxDevice{ Path: hostDevice.Path, Type: string(hostDevice.Type), Major: hostDevice.Major, Minor: hostDevice.Minor, UID: &hostDevice.Uid, GID: &hostDevice.Gid, } if hostDevice.Major == 0 && hostDevice.Minor == 0 { // Invalid device, most likely a symbolic link, skip it. continue } specgen.AddDevice(rd) } sp.Linux.Resources.Devices = []rspec.LinuxDeviceCgroup{ { Allow: true, Access: "rwm", }, } } for _, device := range containerConfig.GetDevices() { // pin the device to avoid using `device` within the range scope as // wrong function literal device := device // If we are privileged, we have access to devices on the host. // If the requested container path already exists on the host, the container won't see the expected host path. 
// Therefore, we must error out if the container path already exists privileged := containerConfig.GetLinux().GetSecurityContext() != nil && containerConfig.GetLinux().GetSecurityContext().GetPrivileged() if privileged && device.ContainerPath != device.HostPath { // we expect this to not exist _, err := os.Stat(device.ContainerPath) if err == nil { return errors.Errorf("privileged container was configured with a device container path that already exists on the host.") } if !os.IsNotExist(err) { return errors.Wrapf(err, "error checking if container path exists on host") } } path, err := resolveSymbolicLink(device.HostPath, "/") if err != nil { return err } dev, err := devices.DeviceFromPath(path, device.Permissions) // if there was no error, return the device if err == nil { rd := rspec.LinuxDevice{ Path: device.ContainerPath, Type: string(dev.Type), Major: dev.Major, Minor: dev.Minor, UID: &dev.Uid, GID: &dev.Gid, } specgen.AddDevice(rd) sp.Linux.Resources.Devices = append(sp.Linux.Resources.Devices, rspec.LinuxDeviceCgroup{ Allow: true, Type: string(dev.Type), Major: &dev.Major, Minor: &dev.Minor, Access: dev.Permissions, }) continue } // if the device is not a device node // try to see if it's a directory holding many devices if err == devices.ErrNotADevice { // check if it is a directory if e := utils.IsDirectory(path); e == nil { // mount the internal devices recursively // nolint: errcheck filepath.Walk(path, func(dpath string, f os.FileInfo, e error) error { if e != nil { log.Debugf(ctx, "addDevice walk: %v", e) } childDevice, e := devices.DeviceFromPath(dpath, device.Permissions) if e != nil { // ignore the device return nil } cPath := strings.Replace(dpath, path, device.ContainerPath, 1) rd := rspec.LinuxDevice{ Path: cPath, Type: string(childDevice.Type), Major: childDevice.Major, Minor: childDevice.Minor, UID: &childDevice.Uid, GID: &childDevice.Gid, } specgen.AddDevice(rd) sp.Linux.Resources.Devices = append(sp.Linux.Resources.Devices, rspec.LinuxDeviceCgroup{ Allow: true, Type: string(childDevice.Type), Major: &childDevice.Major, Minor: &childDevice.Minor, Access: childDevice.Permissions, }) return nil }) } } } return nil } // createContainerPlatform performs platform dependent intermediate steps before calling the container's oci.Runtime().CreateContainer() func (s *Server) createContainerPlatform(container, infraContainer *oci.Container, cgroupParent string) error { if s.defaultIDMappings != nil && !s.defaultIDMappings.Empty() { rootPair := s.defaultIDMappings.RootPair() for _, path := range []string{container.BundlePath(), container.MountPoint()} { if err := os.Chown(path, rootPair.UID, rootPair.GID); err != nil { return errors.Wrapf(err, "cannot chown %s to %d:%d", path, rootPair.UID, rootPair.GID) } if err := makeAccessible(path, rootPair.UID, rootPair.GID); err != nil { return errors.Wrapf(err, "cannot make %s accessible to %d:%d", path, rootPair.UID, rootPair.GID) } } } return s.Runtime().CreateContainer(container, cgroupParent) } // makeAccessible changes the path permission and each parent directory to have --x--x--x func makeAccessible(path string, uid, gid int) error { for ; path != "/"; path = filepath.Dir(path) { st, err := os.Stat(path) if err != nil { if os.IsNotExist(err) { return nil } return err } if int(st.Sys().(*syscall.Stat_t).Uid) == uid && int(st.Sys().(*syscall.Stat_t).Gid) == gid { continue } if st.Mode()&0111 != 0111 { if err := os.Chmod(path, st.Mode()|0111); err != nil { return err } } } return nil } // nolint:gocyclo func (s *Server) 
createSandboxContainer(ctx context.Context, containerID, containerName string, sb *sandbox.Sandbox, sandboxConfig *pb.PodSandboxConfig, containerConfig *pb.ContainerConfig) (*oci.Container, error) { if sb == nil { return nil, errors.New("createSandboxContainer needs a sandbox") } // TODO: simplify this function (cyclomatic complexity here is high) // TODO: factor generating/updating the spec into something other projects can vendor // creates a spec Generator with the default spec. specgen, err := generate.New("linux") if err != nil { return nil, err } specgen.HostSpecific = true specgen.ClearProcessRlimits() ulimits, err := getUlimitsFromConfig(&s.config) if err != nil { return nil, err } for _, u := range ulimits { specgen.AddProcessRlimits(u.name, u.hard, u.soft) } readOnlyRootfs := s.config.ReadOnly var privileged bool if containerConfig.GetLinux().GetSecurityContext() != nil { if containerConfig.GetLinux().GetSecurityContext().GetPrivileged() { privileged = true } if privileged { if !sandboxConfig.GetLinux().GetSecurityContext().GetPrivileged() { return nil, errors.New("no privileged container allowed in sandbox") } } if containerConfig.GetLinux().GetSecurityContext().GetReadonlyRootfs() { readOnlyRootfs = true } } specgen.SetRootReadonly(readOnlyRootfs) if s.config.ReadOnly { // tmpcopyup is a runc extension and is not part of the OCI spec. // WORK ON: Use "overlay" mounts as an alternative to tmpfs with tmpcopyup // Look at https://github.com/cri-o/cri-o/pull/1434#discussion_r177200245 for more info on this options := []string{"rw", "noexec", "nosuid", "nodev", "tmpcopyup"} if !isInCRIMounts("/run", containerConfig.GetMounts()) { mnt := rspec.Mount{ Destination: "/run", Type: "tmpfs", Source: "tmpfs", Options: append(options, "mode=0755"), } // Add tmpfs mount on /run specgen.AddMount(mnt) } if !isInCRIMounts("/tmp", containerConfig.GetMounts()) { mnt := rspec.Mount{ Destination: "/tmp", Type: "tmpfs", Source: "tmpfs", Options: append(options, "mode=1777"), } // Add tmpfs mount on /tmp specgen.AddMount(mnt) } if !isInCRIMounts("/var/tmp", containerConfig.GetMounts()) { mnt := rspec.Mount{ Destination: "/var/tmp", Type: "tmpfs", Source: "tmpfs", Options: append(options, "mode=1777"), } // Add tmpfs mount on /var/tmp specgen.AddMount(mnt) } } imageSpec := containerConfig.GetImage() if imageSpec == nil { return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Image is nil") } image := imageSpec.Image if image == "" { return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Image.Image is empty") } images, err := s.StorageImageServer().ResolveNames(s.systemContext, image) if err != nil { if err == storage.ErrCannotParseImageID { images = append(images, image) } else { return nil, err } } // Get imageName and imageRef that are later requested in container status var ( imgResult *storage.ImageResult imgResultErr error ) for _, img := range images { imgResult, imgResultErr = s.StorageImageServer().ImageStatus(s.systemContext, img) if imgResultErr == nil { break } } if imgResultErr != nil { return nil, imgResultErr } imageName := imgResult.Name imageRef := imgResult.ID if len(imgResult.RepoDigests) > 0 { imageRef = imgResult.RepoDigests[0] } specgen.AddAnnotation(annotations.Image, image) specgen.AddAnnotation(annotations.ImageName, imageName) specgen.AddAnnotation(annotations.ImageRef, imageRef) selinuxConfig := containerConfig.GetLinux().GetSecurityContext().GetSelinuxOptions() var labelOptions []string if selinuxConfig == nil { labelOptions, err = 
label.DupSecOpt(sb.ProcessLabel()) if err != nil { return nil, err } } else { labelOptions = getLabelOptions(selinuxConfig) } containerIDMappings := s.defaultIDMappings metadata := containerConfig.GetMetadata() containerInfo, err := s.StorageRuntimeServer().CreateContainer(s.systemContext, sb.Name(), sb.ID(), image, imgResult.ID, containerName, containerID, metadata.Name, metadata.Attempt, containerIDMappings, labelOptions) if err != nil { return nil, err } mountLabel := containerInfo.MountLabel var processLabel string if !privileged { processLabel = containerInfo.ProcessLabel } hostIPC := containerConfig.GetLinux().GetSecurityContext().GetNamespaceOptions().GetIpc() == pb.NamespaceMode_NODE hostPID := containerConfig.GetLinux().GetSecurityContext().GetNamespaceOptions().GetPid() == pb.NamespaceMode_NODE // Don't use SELinux separation with Host Pid or IPC Namespace or privileged. if hostPID || hostIPC { processLabel, mountLabel = "", "" } defer func() { if err != nil { err2 := s.StorageRuntimeServer().DeleteContainer(containerInfo.ID) if err2 != nil { log.Warnf(ctx, "Failed to cleanup container directory: %v", err2) } } }() specgen.SetLinuxMountLabel(mountLabel) specgen.SetProcessSelinuxLabel(processLabel) containerVolumes, ociMounts, err := addOCIBindMounts(ctx, mountLabel, containerConfig, &specgen, s.config.RuntimeConfig.BindMountPrefix) if err != nil { return nil, err } volumesJSON, err := json.Marshal(containerVolumes) if err != nil { return nil, err } specgen.AddAnnotation(annotations.Volumes, string(volumesJSON)) configuredDevices, err := getDevicesFromConfig(ctx, &s.config) if err != nil { return nil, err } for i := range configuredDevices { d := &configuredDevices[i] specgen.AddDevice(d.Device) specgen.AddLinuxResourcesDevice(d.Resource.Allow, d.Resource.Type, d.Resource.Major, d.Resource.Minor, d.Resource.Access) } if err := addDevices(ctx, sb, containerConfig, &specgen); err != nil { return nil, err } labels := containerConfig.GetLabels() if err := validateLabels(labels); err != nil { return nil, err } kubeAnnotations := containerConfig.GetAnnotations() for k, v := range kubeAnnotations { specgen.AddAnnotation(k, v) } for k, v := range labels { specgen.AddAnnotation(k, v) } // set this container's apparmor profile if it is set by sandbox if s.appArmorEnabled && !privileged { appArmorProfileName := s.getAppArmorProfileName(containerConfig.GetLinux().GetSecurityContext().GetApparmorProfile()) if appArmorProfileName != "" { // reload default apparmor profile if it is unloaded. if s.appArmorProfile == libconfig.DefaultApparmorProfile { isLoaded, err := apparmor.IsLoaded(libconfig.DefaultApparmorProfile) if err != nil { return nil, err } if !isLoaded { if err := apparmor.InstallDefault(libconfig.DefaultApparmorProfile); err != nil { return nil, err } } } specgen.SetProcessApparmorProfile(appArmorProfileName) } } logPath := containerConfig.GetLogPath() sboxLogDir := sandboxConfig.GetLogDirectory() if sboxLogDir == "" { sboxLogDir = sb.LogDir() } if logPath == "" { logPath = filepath.Join(sboxLogDir, containerID+".log") } if !filepath.IsAbs(logPath) { // XXX: It's not really clear what this should be versus the sbox logDirectory. 
log.Warnf(ctx, "requested logPath for ctr id %s is a relative path: %s", containerID, logPath) logPath = filepath.Join(sboxLogDir, logPath) log.Warnf(ctx, "logPath from relative path is now absolute: %s", logPath) } // Handle https://issues.k8s.io/44043 if err := ensureSaneLogPath(logPath); err != nil { return nil, err } log.Debugf(ctx, "setting container's log_path = %s, sbox.logdir = %s, ctr.logfile = %s", sboxLogDir, containerConfig.GetLogPath(), logPath, ) specgen.SetProcessTerminal(containerConfig.Tty) if containerConfig.Tty { specgen.AddProcessEnv("TERM", "xterm") } linux := containerConfig.GetLinux() if linux != nil { resources := linux.GetResources() if resources != nil { specgen.SetLinuxResourcesCPUPeriod(uint64(resources.GetCpuPeriod())) specgen.SetLinuxResourcesCPUQuota(resources.GetCpuQuota()) specgen.SetLinuxResourcesCPUShares(uint64(resources.GetCpuShares())) memoryLimit := resources.GetMemoryLimitInBytes() if memoryLimit != 0 && memoryLimit < minMemoryLimit { return nil, fmt.Errorf("set memory limit %v too low; should be at least %v", memoryLimit, minMemoryLimit) } specgen.SetLinuxResourcesMemoryLimit(memoryLimit) specgen.SetProcessOOMScoreAdj(int(resources.GetOomScoreAdj())) specgen.SetLinuxResourcesCPUCpus(resources.GetCpusetCpus()) specgen.SetLinuxResourcesCPUMems(resources.GetCpusetMems()) } var cgPath string parent := defaultCgroupfsParent useSystemd := s.config.CgroupManager == oci.SystemdCgroupsManager if useSystemd { parent = defaultSystemdParent } if sb.CgroupParent() != "" { parent = sb.CgroupParent() } if useSystemd { cgPath = parent + ":" + scopePrefix + ":" + containerID } else { cgPath = filepath.Join(parent, scopePrefix+"-"+containerID) } specgen.SetLinuxCgroupsPath(cgPath) if privileged { specgen.SetupPrivileged(true) } else { capabilities := linux.GetSecurityContext().GetCapabilities() // Ensure we don't get a nil pointer error if the config // doesn't set any capabilities if capabilities == nil { capabilities = &pb.Capability{} } // Clear default capabilities from spec specgen.ClearProcessCapabilities() capabilities.AddCapabilities = append(capabilities.AddCapabilities, s.config.DefaultCapabilities...) err = setupCapabilities(&specgen, capabilities) if err != nil { return nil, err } } specgen.SetProcessNoNewPrivileges(linux.GetSecurityContext().GetNoNewPrivs()) if containerConfig.GetLinux().GetSecurityContext() != nil && !containerConfig.GetLinux().GetSecurityContext().Privileged { // TODO(runcom): have just one of this var at the top of the function securityContext := containerConfig.GetLinux().GetSecurityContext() for _, mp := range []string{ "/proc/acpi", "/proc/kcore", "/proc/keys", "/proc/latency_stats", "/proc/timer_list", "/proc/timer_stats", "/proc/sched_debug", "/proc/scsi", "/sys/firmware", } { specgen.AddLinuxMaskedPaths(mp) } if securityContext.GetMaskedPaths() != nil { specgen.Config.Linux.MaskedPaths = nil for _, path := range securityContext.GetMaskedPaths() { specgen.AddLinuxMaskedPaths(path) } } for _, rp := range []string{ "/proc/asound", "/proc/bus", "/proc/fs", "/proc/irq", "/proc/sys", "/proc/sysrq-trigger", } { specgen.AddLinuxReadonlyPaths(rp) } if securityContext.GetReadonlyPaths() != nil { specgen.Config.Linux.ReadonlyPaths = nil for _, path := range securityContext.GetReadonlyPaths() { specgen.AddLinuxReadonlyPaths(path) } } } } // Join the namespace paths for the pod sandbox container. 
podInfraState := sb.InfraContainer().State() log.Debugf(ctx, "pod container state %+v", podInfraState) ipcNsPath := fmt.Sprintf("/proc/%d/ns/ipc", podInfraState.Pid) if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.IPCNamespace), ipcNsPath); err != nil { return nil, err } utsNsPath := fmt.Sprintf("/proc/%d/ns/uts", podInfraState.Pid) if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.UTSNamespace), utsNsPath); err != nil { return nil, err } if containerConfig.GetLinux().GetSecurityContext().GetNamespaceOptions().GetPid() == pb.NamespaceMode_NODE { // kubernetes PodSpec specify to use Host PID namespace if err := specgen.RemoveLinuxNamespace(string(rspec.PIDNamespace)); err != nil { return nil, err } } else if containerConfig.GetLinux().GetSecurityContext().GetNamespaceOptions().GetPid() == pb.NamespaceMode_POD { // share Pod PID namespace pidNsPath := fmt.Sprintf("/proc/%d/ns/pid", podInfraState.Pid) if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.PIDNamespace), pidNsPath); err != nil { return nil, err } } // If the sandbox is configured to run in the host network, do not create a new network namespace if sb.HostNetwork() { if err := specgen.RemoveLinuxNamespace(string(rspec.NetworkNamespace)); err != nil { return nil, err } if !isInCRIMounts("/sys", containerConfig.GetMounts()) { specgen.RemoveMount("/sys") specgen.RemoveMount("/sys/fs/cgroup") sysMnt := rspec.Mount{ Destination: "/sys", Type: "bind", Source: "/sys", Options: []string{"nosuid", "noexec", "nodev", "ro", "rbind"}, } specgen.AddMount(sysMnt) } } else { netNsPath := sb.NetNsPath() if netNsPath == "" { // The sandbox does not have a permanent namespace, // it's on the host one. netNsPath = fmt.Sprintf("/proc/%d/ns/net", podInfraState.Pid) } if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.NetworkNamespace), netNsPath); err != nil { return nil, err } if privileged { specgen.RemoveMount("/sys") specgen.RemoveMount("/sys/fs/cgroup") sysMnt := rspec.Mount{ Destination: "/sys", Type: "bind", Source: "/sys", Options: []string{"nosuid", "noexec", "nodev", "rw", "rbind"}, } specgen.AddMount(sysMnt) } } for idx, ip := range sb.IPs() { specgen.AddAnnotation(fmt.Sprintf("%s.%d", annotations.IP, idx), ip) } // Remove the default /dev/shm mount to ensure we overwrite it specgen.RemoveMount("/dev/shm") mnt := rspec.Mount{ Type: "bind", Source: sb.ShmPath(), Destination: "/dev/shm", Options: []string{"rw", "bind"}, } // bind mount the pod shm specgen.AddMount(mnt) options := []string{"rw"} if readOnlyRootfs { options = []string{"ro"} } if sb.ResolvPath() != "" { if err := securityLabel(sb.ResolvPath(), mountLabel, false); err != nil { return nil, err } mnt = rspec.Mount{ Type: "bind", Source: sb.ResolvPath(), Destination: "/etc/resolv.conf", Options: []string{"bind", "nodev", "nosuid", "noexec"}, } // bind mount the pod resolver file specgen.AddMount(mnt) } if sb.HostnamePath() != "" { if err := securityLabel(sb.HostnamePath(), mountLabel, false); err != nil { return nil, err } mnt = rspec.Mount{ Type: "bind", Source: sb.HostnamePath(), Destination: "/etc/hostname", Options: append(options, "bind"), } specgen.AddMount(mnt) } if !isInCRIMounts("/etc/hosts", containerConfig.GetMounts()) && hostNetwork(containerConfig) { // Only bind mount for host netns and when CRI does not give us any hosts file mnt = rspec.Mount{ Type: "bind", Source: "/etc/hosts", Destination: "/etc/hosts", Options: append(options, "bind"), } specgen.AddMount(mnt) } if privileged { setOCIBindMountsPrivileged(&specgen) } // Set hostname 
and add env for hostname specgen.SetHostname(sb.Hostname()) specgen.AddProcessEnv("HOSTNAME", sb.Hostname()) specgen.AddAnnotation(annotations.Name, containerName) specgen.AddAnnotation(annotations.ContainerID, containerID) specgen.AddAnnotation(annotations.SandboxID, sb.ID()) specgen.AddAnnotation(annotations.SandboxName, sb.InfraContainer().Name()) specgen.AddAnnotation(annotations.ContainerType, annotations.ContainerTypeContainer) specgen.AddAnnotation(annotations.LogPath, logPath) specgen.AddAnnotation(annotations.TTY, fmt.Sprintf("%v", containerConfig.Tty)) specgen.AddAnnotation(annotations.Stdin, fmt.Sprintf("%v", containerConfig.Stdin)) specgen.AddAnnotation(annotations.StdinOnce, fmt.Sprintf("%v", containerConfig.StdinOnce)) specgen.AddAnnotation(annotations.ResolvPath, sb.InfraContainer().CrioAnnotations()[annotations.ResolvPath]) created := time.Now() specgen.AddAnnotation(annotations.Created, created.Format(time.RFC3339Nano)) metadataJSON, err := json.Marshal(metadata) if err != nil { return nil, err } specgen.AddAnnotation(annotations.Metadata, string(metadataJSON)) labelsJSON, err := json.Marshal(labels) if err != nil { return nil, err } specgen.AddAnnotation(annotations.Labels, string(labelsJSON)) kubeAnnotationsJSON, err := json.Marshal(kubeAnnotations) if err != nil { return nil, err } specgen.AddAnnotation(annotations.Annotations, string(kubeAnnotationsJSON)) spp := containerConfig.GetLinux().GetSecurityContext().GetSeccompProfilePath() if !privileged { if err := s.setupSeccomp(ctx, &specgen, spp); err != nil { return nil, err } } specgen.AddAnnotation(annotations.SeccompProfilePath, spp) mountPoint, err := s.StorageRuntimeServer().StartContainer(containerID) if err != nil { return nil, fmt.Errorf("failed to mount container %s(%s): %v", containerName, containerID, err) } specgen.AddAnnotation(annotations.MountPoint, mountPoint) containerImageConfig := containerInfo.Config if containerImageConfig == nil { err = fmt.Errorf("empty image config for %s", image) return nil, err } if containerImageConfig.Config.StopSignal != "" { // this key is defined in image-spec conversion document at https://github.com/opencontainers/image-spec/pull/492/files#diff-8aafbe2c3690162540381b8cdb157112R57 specgen.AddAnnotation("org.opencontainers.image.stopSignal", containerImageConfig.Config.StopSignal) } // Setup user and groups if linux != nil { if err := setupContainerUser(ctx, &specgen, mountPoint, mountLabel, containerInfo.RunDir, linux.GetSecurityContext(), containerImageConfig); err != nil { return nil, err } } // Add image volumes volumeMounts, err := addImageVolumes(ctx, mountPoint, s, &containerInfo, mountLabel, &specgen) if err != nil { return nil, err } processArgs, err := buildOCIProcessArgs(ctx, containerConfig, containerImageConfig) if err != nil { return nil, err } specgen.SetProcessArgs(processArgs) envs := mergeEnvs(containerImageConfig, containerConfig.GetEnvs()) for _, e := range envs { parts := strings.SplitN(e, "=", 2) specgen.AddProcessEnv(parts[0], parts[1]) } // Set working directory // Pick it up from image config first and override if specified in CRI containerCwd := "/" imageCwd := containerImageConfig.Config.WorkingDir if imageCwd != "" { containerCwd = imageCwd } runtimeCwd := containerConfig.WorkingDir if runtimeCwd != "" { containerCwd = runtimeCwd } specgen.SetProcessCwd(containerCwd) if err := setupWorkingDirectory(mountPoint, mountLabel, containerCwd); err != nil { if err1 := s.StorageRuntimeServer().StopContainer(containerID); err1 != nil { return nil, 
fmt.Errorf("can't umount container after cwd error %v: %v", err, err1) } return nil, err } var secretMounts []rspec.Mount if len(s.config.DefaultMounts) > 0 { // This option has been deprecated, once it is removed in the later versions, delete the server/secrets.go file as well log.Warnf(ctx, "--default-mounts has been deprecated and will be removed in future versions. Add mounts to either %q or %q", secrets.DefaultMountsFile, secrets.OverrideMountsFile) var err error secretMounts, err = addSecretsBindMounts(ctx, mountLabel, containerInfo.RunDir, s.config.DefaultMounts, specgen) if err != nil { return nil, fmt.Errorf("failed to mount secrets: %v", err) } } // Add secrets from the default and override mounts.conf files secretMounts = append(secretMounts, secrets.SecretMounts(mountLabel, containerInfo.RunDir, s.config.DefaultMountsFile, rootless.IsRootless())...) mounts := []rspec.Mount{} mounts = append(mounts, ociMounts...) mounts = append(mounts, volumeMounts...) mounts = append(mounts, secretMounts...) sort.Sort(orderedMounts(mounts)) for _, m := range mounts { mnt = rspec.Mount{ Type: "bind", Source: m.Source, Destination: m.Destination, Options: append(m.Options, "bind"), } specgen.AddMount(mnt) } newAnnotations := map[string]string{} for key, value := range containerConfig.GetAnnotations() { newAnnotations[key] = value } for key, value := range sb.Annotations() { newAnnotations[key] = value } if s.ContainerServer.Hooks != nil { if _, err := s.ContainerServer.Hooks.Hooks(specgen.Config, newAnnotations, len(containerConfig.GetMounts()) > 0); err != nil { return nil, err } } // Set up pids limit if pids cgroup is mounted if findCgroupMountpoint("pids") == nil { specgen.SetLinuxResourcesPidsLimit(s.config.PidsLimit) } // by default, the root path is an empty string. set it now. 
specgen.SetRootPath(mountPoint) crioAnnotations := specgen.Config.Annotations container, err := oci.NewContainer(containerID, containerName, containerInfo.RunDir, logPath, sb.NetNs().Path(), labels, crioAnnotations, kubeAnnotations, image, imageName, imageRef, metadata, sb.ID(), containerConfig.Tty, containerConfig.Stdin, containerConfig.StdinOnce, sb.Privileged(), sb.RuntimeHandler(), containerInfo.Dir, created, containerImageConfig.Config.StopSignal) if err != nil { return nil, err } container.SetIDMappings(containerIDMappings) if s.defaultIDMappings != nil && !s.defaultIDMappings.Empty() { userNsPath := sb.UserNsPath() if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.UserNamespace), userNsPath); err != nil { return nil, err } for _, uidmap := range s.defaultIDMappings.UIDs() { specgen.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size)) } for _, gidmap := range s.defaultIDMappings.GIDs() { specgen.AddLinuxGIDMapping(uint32(gidmap.HostID), uint32(gidmap.ContainerID), uint32(gidmap.Size)) } } if os.Getenv("_CRIO_ROOTLESS") != "" { makeOCIConfigurationRootless(&specgen) } saveOptions := generate.ExportOptions{} if err := specgen.SaveToFile(filepath.Join(containerInfo.Dir, "config.json"), saveOptions); err != nil { return nil, err } if err := specgen.SaveToFile(filepath.Join(containerInfo.RunDir, "config.json"), saveOptions); err != nil { return nil, err } container.SetSpec(specgen.Config) container.SetMountPoint(mountPoint) container.SetSeccompProfilePath(spp) for _, cv := range containerVolumes { container.AddVolume(cv) } return container, nil } func setupWorkingDirectory(rootfs, mountLabel, containerCwd string) error { fp, err := symlink.FollowSymlinkInScope(filepath.Join(rootfs, containerCwd), rootfs) if err != nil { return err } if err := os.MkdirAll(fp, 0755); err != nil { return err } if mountLabel != "" { if err1 := securityLabel(fp, mountLabel, false); err1 != nil { return err1 } } return nil } func setOCIBindMountsPrivileged(g *generate.Generator) { spec := g.Config // clear readonly for /sys and cgroup for i := range spec.Mounts { clearReadOnly(&spec.Mounts[i]) } spec.Linux.ReadonlyPaths = nil spec.Linux.MaskedPaths = nil } func clearReadOnly(m *rspec.Mount) { var opt []string for _, o := range m.Options { if o == "rw" { return } else if o != "ro" { opt = append(opt, o) } } m.Options = opt m.Options = append(m.Options, "rw") } func addOCIBindMounts(ctx context.Context, mountLabel string, containerConfig *pb.ContainerConfig, specgen *generate.Generator, bindMountPrefix string) ([]oci.ContainerVolume, []rspec.Mount, error) { volumes := []oci.ContainerVolume{} ociMounts := []rspec.Mount{} mounts := containerConfig.GetMounts() // Sort mounts in number of parts. This ensures that high level mounts don't // shadow other mounts. sort.Sort(criOrderedMounts(mounts)) // Copy all mounts from default mounts, except for // - mounts overridden by supplied mount; // - all mounts under /dev if a supplied /dev is present. 
mountSet := make(map[string]struct{}) for _, m := range mounts { mountSet[filepath.Clean(m.ContainerPath)] = struct{}{} } defaultMounts := specgen.Mounts() specgen.ClearMounts() for _, m := range defaultMounts { dst := filepath.Clean(m.Destination) if _, ok := mountSet[dst]; ok { // filter out mount overridden by a supplied mount continue } if _, mountDev := mountSet["/dev"]; mountDev && strings.HasPrefix(dst, "/dev/") { // filter out everything under /dev if /dev is a supplied mount continue } if _, mountSys := mountSet["/sys"]; mountSys && strings.HasPrefix(dst, "/sys/") { // filter out everything under /sys if /sys is a supplied mount continue } specgen.AddMount(m) } for _, mount := range mounts { dest := mount.GetContainerPath() if dest == "" { return nil, nil, fmt.Errorf("mount.ContainerPath is empty") } if mount.HostPath == "" { return nil, nil, fmt.Errorf("mount.HostPath is empty") } src := filepath.Join(bindMountPrefix, mount.GetHostPath()) resolvedSrc, err := resolveSymbolicLink(src, bindMountPrefix) if err == nil { src = resolvedSrc } else { if !os.IsNotExist(err) { return nil, nil, fmt.Errorf("failed to resolve symlink %q: %v", src, err) } else if err = os.MkdirAll(src, 0755); err != nil { return nil, nil, fmt.Errorf("failed to mkdir %s: %s", src, err) } } options := []string{"rw"} if mount.Readonly { options = []string{"ro"} } options = append(options, "rbind") // mount propagation mountInfos, err := dockermounts.GetMounts(nil) if err != nil { return nil, nil, err } switch mount.GetPropagation() { case pb.MountPropagation_PROPAGATION_PRIVATE: options = append(options, "rprivate") // Since default root propagation in runc is rprivate ignore // setting the root propagation case pb.MountPropagation_PROPAGATION_BIDIRECTIONAL: if err := ensureShared(src, mountInfos); err != nil { return nil, nil, err } options = append(options, "rshared") if err := specgen.SetLinuxRootPropagation("rshared"); err != nil { return nil, nil, err } case pb.MountPropagation_PROPAGATION_HOST_TO_CONTAINER: if err := ensureSharedOrSlave(src, mountInfos); err != nil { return nil, nil, err } options = append(options, "rslave") if specgen.Config.Linux.RootfsPropagation != "rshared" && specgen.Config.Linux.RootfsPropagation != "rslave" { if err := specgen.SetLinuxRootPropagation("rslave"); err != nil { return nil, nil, err } } default: log.Warnf(ctx, "unknown propagation mode for hostPath %q", mount.HostPath) options = append(options, "rprivate") } if mount.SelinuxRelabel { if err := securityLabel(src, mountLabel, false); err != nil { return nil, nil, err } } volumes = append(volumes, oci.ContainerVolume{ ContainerPath: dest, HostPath: src, Readonly: mount.Readonly, }) ociMounts = append(ociMounts, rspec.Mount{ Source: src, Destination: dest, Options: options, }) } if _, mountSys := mountSet["/sys"]; !mountSys { m := rspec.Mount{ Destination: "/sys/fs/cgroup", Type: "cgroup", Source: "cgroup", Options: []string{"nosuid", "noexec", "nodev", "relatime", "ro"}, } specgen.AddMount(m) } return volumes, ociMounts, nil } func getDevicesFromConfig(ctx context.Context, config *libconfig.Config) ([]configDevice, error) { linuxdevs := make([]configDevice, 0, len(config.RuntimeConfig.AdditionalDevices)) for _, d := range config.RuntimeConfig.AdditionalDevices { src, dst, permissions, err := createconfig.ParseDevice(d) if err != nil { return nil, err } log.Debugf(ctx, "adding device src=%s dst=%s mode=%s", src, dst, permissions) dev, err := devices.DeviceFromPath(src, permissions) if err != nil { return nil, 
errors.Wrapf(err, "%s is not a valid device", src) } dev.Path = dst linuxdevs = append(linuxdevs, configDevice{ Device: rspec.LinuxDevice{ Path: dev.Path, Type: string(dev.Type), Major: dev.Major, Minor: dev.Minor, FileMode: &dev.FileMode, UID: &dev.Uid, GID: &dev.Gid, }, Resource: rspec.LinuxDeviceCgroup{ Allow: true, Type: string(dev.Type), Major: &dev.Major, Minor: &dev.Minor, Access: permissions, }, }) } return linuxdevs, nil }
[ "\"_CRIO_ROOTLESS\"" ]
[]
[ "_CRIO_ROOTLESS" ]
[]
["_CRIO_ROOTLESS"]
go
1
0
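setOCIBindMountsPrivileged and clearReadOnly in the file above flip bind mounts to read-write by rewriting their option lists. Here is a small standalone sketch of that option-rewriting idea, operating on a plain string slice instead of the runtime-spec Mount type; the option names are the only detail carried over from the file.

package main

import "fmt"

// clearReadOnly rewrites a mount's option list so the mount is read-write:
// an existing "rw" keeps the options unchanged, any "ro" is dropped, and
// "rw" is appended when it was not already present.
func clearReadOnly(options []string) []string {
	out := make([]string, 0, len(options)+1)
	for _, o := range options {
		if o == "rw" {
			return options // already read-write, nothing to change
		}
		if o != "ro" {
			out = append(out, o)
		}
	}
	return append(out, "rw")
}

func main() {
	fmt.Println(clearReadOnly([]string{"nosuid", "noexec", "ro"})) // [nosuid noexec rw]
	fmt.Println(clearReadOnly([]string{"nosuid", "rw"}))           // [nosuid rw]
}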
pkg/fission-cli/util/util.go
/* Copyright 2018 The Fission Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package util import ( "fmt" "github.com/fission/fission/pkg/controller/client/rest" "os" "os/user" "path/filepath" "regexp" "strconv" "strings" "github.com/hashicorp/go-multierror" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "github.com/fission/fission/pkg/controller/client" "github.com/fission/fission/pkg/fission-cli/cliwrapper/cli" "github.com/fission/fission/pkg/fission-cli/console" flagkey "github.com/fission/fission/pkg/fission-cli/flag/key" "github.com/fission/fission/pkg/info" "github.com/fission/fission/pkg/plugin" "github.com/fission/fission/pkg/utils" ) func GetFissionNamespace() string { fissionNamespace := os.Getenv("FISSION_NAMESPACE") return fissionNamespace } func GetApplicationUrl(selector string) (string, error) { var serverUrl string // Use FISSION_URL env variable if set; otherwise, port-forward to controller. fissionUrl := os.Getenv("FISSION_URL") if len(fissionUrl) == 0 { fissionNamespace := GetFissionNamespace() localPort, err := SetupPortForward(fissionNamespace, selector) if err != nil { return "", err } serverUrl = "http://127.0.0.1:" + localPort } else { serverUrl = fissionUrl } return serverUrl, nil } // KubifyName make a kubernetes compliant name out of an arbitrary string func KubifyName(old string) string { // Kubernetes maximum name length (for some names; others can be 253 chars) maxLen := 63 newName := strings.ToLower(old) // replace disallowed chars with '-' inv, _ := regexp.Compile("[^-a-z0-9]") newName = string(inv.ReplaceAll([]byte(newName), []byte("-"))) // trim leading non-alphabetic leadingnonalpha, _ := regexp.Compile("^[^a-z]+") newName = string(leadingnonalpha.ReplaceAll([]byte(newName), []byte{})) // trim trailing trailing, _ := regexp.Compile("[^a-z0-9]+$") newName = string(trailing.ReplaceAll([]byte(newName), []byte{})) // truncate to length if len(newName) > maxLen { newName = newName[0:maxLen] } // if we removed everything, call this thing "default". maybe // we should generate a unique name... if len(newName) == 0 { newName = "default" } return newName } // GetKubernetesClient builds a new kubernetes client. If the KUBECONFIG // environment variable is empty or doesn't exist, ~/.kube/config is used for // the kube config path func GetKubernetesClient() (*restclient.Config, *kubernetes.Clientset, error) { loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() kubeConfigPath := os.Getenv("KUBECONFIG") if len(kubeConfigPath) == 0 { var homeDir string usr, err := user.Current() if err != nil { // In case that user.Current() may be unable to work under some circumstances and return errors like // "user: Current not implemented on darwin/amd64" due to cross-compilation problem. (https://github.com/golang/go/issues/6376). 
// Instead of doing fatal here, we fallback to get home directory from the environment $HOME. console.Warn(fmt.Sprintf("Could not get the current user's directory (%s), fallback to get it from env $HOME", err)) homeDir = os.Getenv("HOME") } else { homeDir = usr.HomeDir } kubeConfigPath = filepath.Join(homeDir, ".kube", "config") if _, err := os.Stat(kubeConfigPath); os.IsNotExist(err) { return nil, nil, errors.New("Couldn't find kubeconfig file. " + "Set the KUBECONFIG environment variable to your kubeconfig's path.") } loadingRules.ExplicitPath = kubeConfigPath console.Verbose(2, "Using kubeconfig from %q", kubeConfigPath) } else { console.Verbose(2, "Using kubeconfig from environment %q", kubeConfigPath) } config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( loadingRules, &clientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return nil, nil, errors.Wrap(err, "Failed to build Kubernetes config") } clientset, err := kubernetes.NewForConfig(config) if err != nil { return nil, nil, errors.Wrap(err, "Failed to connect to Kubernetes") } return config, clientset, nil } // given a list of functions, this checks if the functions actually exist on the cluster func CheckFunctionExistence(client client.Interface, functions []string, fnNamespace string) (err error) { fnMissing := make([]string, 0) for _, fnName := range functions { meta := &metav1.ObjectMeta{ Name: fnName, Namespace: fnNamespace, } _, err := client.V1().Function().Get(meta) if err != nil { fnMissing = append(fnMissing, fnName) } } if len(fnMissing) > 0 { return fmt.Errorf("function(s) %s, not present in namespace : %s", fnMissing, fnNamespace) } return nil } func GetVersion(client client.Interface) info.Versions { // Fetch client versions versions := info.Versions{ Client: map[string]info.BuildMeta{ "fission/core": info.BuildInfo(), }, } for _, pmd := range plugin.FindAll() { versions.Client[pmd.Name] = info.BuildMeta{ Version: pmd.Version, } } serverInfo, err := client.V1().Misc().ServerInfo() if err != nil { console.Warn(fmt.Sprintf("Error getting Fission API version: %v", err)) serverInfo = &info.ServerInfo{} } // Fetch server versions versions.Server = map[string]info.BuildMeta{ "fission/core": serverInfo.Build, } // FUTURE: fetch versions of plugins server-side return versions } func GetServer(input cli.Input) (c client.Interface, err error) { serverUrl, err := GetServerURL(input) if err != nil { return nil, err } return client.MakeClientset(rest.NewRESTClient(serverUrl)), nil } func GetServerURL(input cli.Input) (serverUrl string, err error) { serverUrl = input.GlobalString(flagkey.Server) if len(serverUrl) == 0 { // starts local portforwarder etc. 
serverUrl, err = GetApplicationUrl("application=fission-api") if err != nil { return "", err } } isHTTPS := strings.Index(serverUrl, "https://") == 0 isHTTP := strings.Index(serverUrl, "http://") == 0 if !(isHTTP || isHTTPS) { serverUrl = "http://" + serverUrl } return serverUrl, nil } func GetResourceReqs(input cli.Input, resReqs *v1.ResourceRequirements) (*v1.ResourceRequirements, error) { r := &v1.ResourceRequirements{} if resReqs != nil { r.Requests = resReqs.Requests r.Limits = resReqs.Limits } if len(r.Requests) == 0 { r.Requests = make(map[v1.ResourceName]resource.Quantity) } if len(r.Limits) == 0 { r.Limits = make(map[v1.ResourceName]resource.Quantity) } e := utils.MultiErrorWithFormat() if input.IsSet(flagkey.RuntimeMincpu) { mincpu := input.Int(flagkey.RuntimeMincpu) cpuRequest, err := resource.ParseQuantity(strconv.Itoa(mincpu) + "m") if err != nil { e = multierror.Append(e, errors.Wrap(err, "Failed to parse mincpu")) } r.Requests[v1.ResourceCPU] = cpuRequest } if input.IsSet(flagkey.RuntimeMinmemory) { minmem := input.Int(flagkey.RuntimeMinmemory) memRequest, err := resource.ParseQuantity(strconv.Itoa(minmem) + "Mi") if err != nil { e = multierror.Append(e, errors.Wrap(err, "Failed to parse minmemory")) } r.Requests[v1.ResourceMemory] = memRequest } if input.IsSet(flagkey.RuntimeMaxcpu) { maxcpu := input.Int(flagkey.RuntimeMaxcpu) cpuLimit, err := resource.ParseQuantity(strconv.Itoa(maxcpu) + "m") if err != nil { e = multierror.Append(e, errors.Wrap(err, "Failed to parse maxcpu")) } r.Limits[v1.ResourceCPU] = cpuLimit } if input.IsSet(flagkey.RuntimeMaxmemory) { maxmem := input.Int(flagkey.RuntimeMaxmemory) memLimit, err := resource.ParseQuantity(strconv.Itoa(maxmem) + "Mi") if err != nil { e = multierror.Append(e, errors.Wrap(err, "Failed to parse maxmemory")) } r.Limits[v1.ResourceMemory] = memLimit } limitCPU := r.Limits[v1.ResourceCPU] requestCPU := r.Requests[v1.ResourceCPU] if limitCPU.IsZero() && !requestCPU.IsZero() { r.Limits[v1.ResourceCPU] = requestCPU } else if limitCPU.Cmp(requestCPU) < 0 { e = multierror.Append(e, fmt.Errorf("MinCPU (%v) cannot be greater than MaxCPU (%v)", requestCPU.String(), limitCPU.String())) } limitMem := r.Limits[v1.ResourceMemory] requestMem := r.Requests[v1.ResourceMemory] if limitMem.IsZero() && !requestMem.IsZero() { r.Limits[v1.ResourceMemory] = requestMem } else if limitMem.Cmp(requestMem) < 0 { e = multierror.Append(e, fmt.Errorf("MinMemory (%v) cannot be greater than MaxMemory (%v)", requestMem.String(), limitMem.String())) } if e.ErrorOrNil() != nil { return nil, e } return &v1.ResourceRequirements{ Requests: r.Requests, Limits: r.Limits, }, nil } func GetSpecDir(input cli.Input) string { specDir := input.String(flagkey.SpecDir) if len(specDir) == 0 { specDir = "specs" } return specDir }
[ "\"FISSION_NAMESPACE\"", "\"FISSION_URL\"", "\"KUBECONFIG\"", "\"HOME\"" ]
[]
[ "FISSION_URL", "FISSION_NAMESPACE", "HOME", "KUBECONFIG" ]
[]
["FISSION_URL", "FISSION_NAMESPACE", "HOME", "KUBECONFIG"]
go
4
0
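KubifyName in the utility file above squeezes arbitrary strings into the character set and length Kubernetes accepts for object names. A compact sketch of the same sanitization steps (lowercase, replace, trim, truncate, fall back to "default"), written against the standard regexp package:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// kubifyName lowercases a string, replaces disallowed characters with '-',
// trims characters that cannot start or end a Kubernetes name, truncates to
// 63 characters, and falls back to "default" when nothing is left.
func kubifyName(old string) string {
	const maxLen = 63
	name := strings.ToLower(old)
	name = regexp.MustCompile("[^-a-z0-9]").ReplaceAllString(name, "-")
	name = regexp.MustCompile("^[^a-z]+").ReplaceAllString(name, "")
	name = regexp.MustCompile("[^a-z0-9]+$").ReplaceAllString(name, "")
	if len(name) > maxLen {
		name = name[:maxLen]
	}
	if name == "" {
		name = "default"
	}
	return name
}

func main() {
	fmt.Println(kubifyName("My_Function.v2")) // my-function-v2
	fmt.Println(kubifyName("123###"))         // default
}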
cmd/k3s/main.go
package main import ( "bytes" "os" "os/exec" "path/filepath" "strings" "syscall" "github.com/pkg/errors" "github.com/rancher/k3s/pkg/cli/cmds" "github.com/rancher/k3s/pkg/configfilearg" "github.com/rancher/k3s/pkg/data" "github.com/rancher/k3s/pkg/datadir" "github.com/rancher/k3s/pkg/dataverify" "github.com/rancher/k3s/pkg/flock" "github.com/rancher/k3s/pkg/untar" "github.com/rancher/k3s/pkg/version" "github.com/rancher/wrangler/pkg/resolvehome" "github.com/sirupsen/logrus" "github.com/urfave/cli" ) var criDefaultConfigPath = "/etc/crictl.yaml" // main entrypoint for the k3s multicall binary func main() { dataDir := findDataDir() // Handle direct invocation via symlink alias (multicall binary behavior) if runCLIs(dataDir) { return } etcdsnapshotCommand := internalCLIAction(version.Program+"-"+cmds.EtcdSnapshotCommand, dataDir, os.Args) // Handle subcommand invocation (k3s server, k3s crictl, etc) app := cmds.NewApp() app.Commands = []cli.Command{ cmds.NewServerCommand(internalCLIAction(version.Program+"-server", dataDir, os.Args)), cmds.NewAgentCommand(internalCLIAction(version.Program+"-agent", dataDir, os.Args)), cmds.NewKubectlCommand(externalCLIAction("kubectl", dataDir)), cmds.NewCRICTL(externalCLIAction("crictl", dataDir)), cmds.NewCtrCommand(externalCLIAction("ctr", dataDir)), cmds.NewCheckConfigCommand(externalCLIAction("check-config", dataDir)), cmds.NewEtcdSnapshotCommand(etcdsnapshotCommand, cmds.NewEtcdSnapshotSubcommands( etcdsnapshotCommand, etcdsnapshotCommand, etcdsnapshotCommand, etcdsnapshotCommand), ), } if err := app.Run(os.Args); err != nil { logrus.Fatal(err) } } // findDataDir reads data-dir settings from the CLI args and config file. // If not found, the default will be used, which varies depending on whether // k3s is being run as root or not. func findDataDir() string { for i, arg := range os.Args { for _, flagName := range []string{"--data-dir", "-d"} { if flagName == arg { if len(os.Args) > i+1 { return os.Args[i+1] } } else if strings.HasPrefix(arg, flagName+"=") { return arg[len(flagName)+1:] } } } dataDir := configfilearg.MustFindString(os.Args, "data-dir") if d, err := datadir.Resolve(dataDir); err == nil { dataDir = d } else { logrus.Warnf("Failed to resolve user home directory: %s", err) } return dataDir } // runCLIs handles the case where the binary is being executed as a symlink alias, // /usr/local/bin/crictl for example. If the executable name is one of the external // binaries, it calls it directly and returns true. If it's not an external binary, // it returns false so that standard CLI wrapping can occur. func runCLIs(dataDir string) bool { progName := filepath.Base(os.Args[0]) switch progName { case "crictl", "ctr", "kubectl": if err := externalCLI(progName, dataDir, os.Args[1:]); err != nil { logrus.Fatal(err) } return true } return false } // externalCLIAction returns a function that will call an external binary, be used as the Action of a cli.Command. func externalCLIAction(cmd, dataDir string) func(cli *cli.Context) error { return func(cli *cli.Context) error { return externalCLI(cmd, dataDir, cli.Args()) } } // externalCLI calls an external binary, fixing up argv[0] to the correct name. // crictl needs extra help to find its config file so we do that here too. 
func externalCLI(cli, dataDir string, args []string) error { if cli == "crictl" { if os.Getenv("CRI_CONFIG_FILE") == "" { os.Setenv("CRI_CONFIG_FILE", findCriConfig(dataDir)) } } return stageAndRun(dataDir, cli, append([]string{cli}, args...)) } // internalCLIAction returns a function that will call a K3s internal command, be used as the Action of a cli.Command. func internalCLIAction(cmd, dataDir string, args []string) func(ctx *cli.Context) error { return func(ctx *cli.Context) error { return stageAndRunCLI(ctx, cmd, dataDir, args) } } // stageAndRunCLI calls an external binary. func stageAndRunCLI(cli *cli.Context, cmd string, dataDir string, args []string) error { return stageAndRun(dataDir, cmd, args) } // stageAndRun does the actual work of setting up and calling an external binary. func stageAndRun(dataDir, cmd string, args []string) error { dir, err := extract(dataDir) if err != nil { return errors.Wrap(err, "extracting data") } logrus.Debugf("Asset dir %s", dir) if err := os.Setenv("PATH", filepath.Join(dir, "bin")+":"+os.Getenv("PATH")+":"+filepath.Join(dir, "bin/aux")); err != nil { return err } if err := os.Setenv(version.ProgramUpper+"_DATA_DIR", dir); err != nil { return err } cmd, err = exec.LookPath(cmd) if err != nil { return err } logrus.Debugf("Running %s %v", cmd, args) return syscall.Exec(cmd, args, os.Environ()) } // getAssetAndDir returns the name of the bindata asset, along with a directory path // derived from the data-dir and bindata asset name. func getAssetAndDir(dataDir string) (string, string) { asset := data.AssetNames()[0] dir := filepath.Join(dataDir, "data", strings.SplitN(filepath.Base(asset), ".", 2)[0]) return asset, dir } // extract checks for and if necessary unpacks the bindata archive, returning the unique path // to the extracted bindata asset. func extract(dataDir string) (string, error) { // first look for global asset folder so we don't create a HOME version if not needed _, dir := getAssetAndDir(datadir.DefaultDataDir) if _, err := os.Stat(filepath.Join(dir, "bin", "containerd")); err == nil { return dir, nil } asset, dir := getAssetAndDir(dataDir) // check if target content already exists if _, err := os.Stat(filepath.Join(dir, "bin", "containerd")); err == nil { return dir, nil } // acquire a data directory lock os.MkdirAll(filepath.Join(dataDir, "data"), 0755) lockFile := filepath.Join(dataDir, "data", ".lock") logrus.Infof("Acquiring lock file %s", lockFile) lock, err := flock.Acquire(lockFile) if err != nil { return "", err } defer flock.Release(lock) // check again if target directory exists if _, err := os.Stat(dir); err == nil { return dir, nil } logrus.Infof("Preparing data dir %s", dir) content, err := data.Asset(asset) if err != nil { return "", err } buf := bytes.NewBuffer(content) tempDest := dir + "-tmp" defer os.RemoveAll(tempDest) os.RemoveAll(tempDest) if err := untar.Untar(buf, tempDest); err != nil { return "", err } if err := dataverify.Verify(filepath.Join(tempDest, "bin")); err != nil { return "", err } currentSymLink := filepath.Join(dataDir, "data", "current") previousSymLink := filepath.Join(dataDir, "data", "previous") if _, err := os.Lstat(currentSymLink); err == nil { if err := os.Rename(currentSymLink, previousSymLink); err != nil { return "", err } } if err := os.Symlink(dir, currentSymLink); err != nil { return "", err } return dir, os.Rename(tempDest, dir) } // findCriConfig returns the path to crictl.yaml // crictl won't search multiple locations for a config file. 
It will fall back to looking in // the same directory as the crictl binary, but that's it. We need to check the various possible // data-dir locations ourselves and then point it at the right one. We check: // - the configured data-dir // - the default user data-dir (assuming we can find the user's home directory) // - the default system data-dir // - the default path from upstream crictl func findCriConfig(dataDir string) string { searchList := []string{filepath.Join(dataDir, "agent", criDefaultConfigPath)} if homeDataDir, err := resolvehome.Resolve(datadir.DefaultHomeDataDir); err == nil { searchList = append(searchList, filepath.Join(homeDataDir, "agent", criDefaultConfigPath)) } else { logrus.Warnf("Failed to resolve user home directory: %s", err) } searchList = append(searchList, filepath.Join(datadir.DefaultDataDir, "agent", criDefaultConfigPath)) searchList = append(searchList, criDefaultConfigPath) for _, path := range searchList { _, err := os.Stat(path) if err == nil { return path } if !errors.Is(err, os.ErrNotExist) { logrus.Warnf("Failed to %s", err) } } return "" }
[ "\"CRI_CONFIG_FILE\"", "\"PATH\"" ]
[]
[ "CRI_CONFIG_FILE", "PATH" ]
[]
["CRI_CONFIG_FILE", "PATH"]
go
2
0
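
The k3s entry above centres on the multicall-binary pattern: the executable dispatches on filepath.Base(os.Args[0]), prepends a staged bin directory to PATH, and then replaces itself with the external tool via syscall.Exec. Below is a minimal, Unix-only sketch of that flow; the staging path and the "not invoked" fallback are placeholders, not the real k3s layout.

package main

import (
    "fmt"
    "os"
    "os/exec"
    "path/filepath"
    "syscall"
)

func main() {
    progName := filepath.Base(os.Args[0])
    switch progName {
    case "crictl", "ctr", "kubectl":
        // Invoked through a symlink alias: hand off to the bundled tool.
        if err := execExternal(progName, os.Args[1:]); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
    default:
        fmt.Println("not invoked via a known symlink alias; normal CLI handling would run here")
    }
}

func execExternal(name string, args []string) error {
    // Prepend the staged bin directory so LookPath finds the bundled binary first.
    // The path below is a made-up placeholder.
    stagedBin := "/var/lib/example/data/current/bin"
    if err := os.Setenv("PATH", stagedBin+string(os.PathListSeparator)+os.Getenv("PATH")); err != nil {
        return err
    }
    path, err := exec.LookPath(name)
    if err != nil {
        return err
    }
    // syscall.Exec (Unix only) replaces the current process image; argv[0] is the tool's name.
    return syscall.Exec(path, append([]string{name}, args...), os.Environ())
}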
discord_test.go
package discord

import (
    "context"
    "os"
    "testing"

    "golang.org/x/oauth2"
    "gotest.tools/v3/assert"
)

func TestDiscord(t *testing.T) {
    clientID := os.Getenv("DISCORD_CLIENT_ID")
    clientSecret := os.Getenv("DISCORD_CLIENT_SECRET")
    redirectURL := os.Getenv("DISCORD_REDIRECT_URL")
    token := os.Getenv("DISCORD_TOKEN")
    assert.Assert(t, clientID != "" && clientSecret != "" && redirectURL != "" && token != "", "missing options")

    scopes := []string{"guilds", "identify"}
    cfg := &oauth2.Config{
        ClientID:     clientID,
        ClientSecret: clientSecret,
        RedirectURL:  redirectURL,
        Endpoint:     Endpoint,
        Scopes:       scopes,
    }

    cli, err := New(WithOAuth2Config(cfg))
    assert.NilError(t, err, "creating new client")

    ctx := context.Background()

    t.Run("get user", func(t *testing.T) {
        user, err := cli.User(ctx, token)
        assert.NilError(t, err)
        t.Log(user)
    })

    t.Run("get user guilds", func(t *testing.T) {
        guilds, err := cli.UserGuilds(ctx, token)
        assert.NilError(t, err)
        t.Log(guilds)
    })
}
[ "\"DISCORD_CLIENT_ID\"", "\"DISCORD_CLIENT_SECRET\"", "\"DISCORD_REDIRECT_URL\"", "\"DISCORD_TOKEN\"" ]
[]
[ "DISCORD_REDIRECT_URL", "DISCORD_TOKEN", "DISCORD_CLIENT_ID", "DISCORD_CLIENT_SECRET" ]
[]
["DISCORD_REDIRECT_URL", "DISCORD_TOKEN", "DISCORD_CLIENT_ID", "DISCORD_CLIENT_SECRET"]
go
4
0
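
The test above fails outright when any of the four DISCORD_* variables is missing. A common variant is to skip instead of fail so the suite still passes without credentials; the sketch below assumes a hypothetical requireEnv helper and is not part of the package under test.

package discord_test

import (
    "os"
    "testing"
)

// requireEnv returns the value of key, or skips the calling test if it is unset.
func requireEnv(t *testing.T, key string) string {
    t.Helper()
    v := os.Getenv(key)
    if v == "" {
        t.Skipf("skipping: %s is not set", key)
    }
    return v
}

func TestWithRealCredentials(t *testing.T) {
    clientID := requireEnv(t, "DISCORD_CLIENT_ID")
    clientSecret := requireEnv(t, "DISCORD_CLIENT_SECRET")
    _ = clientID
    _ = clientSecret
    // ... build the OAuth2 config and exercise the client here ...
}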
module_api/iaas/mongo_client.py
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making
蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
ANY KIND, either express or implied. See the License for the specific language
governing permissions and limitations under the License.
"""
import os

from pymongo import MongoClient

from settings import ENVIRONMENT


class MongoDB:
    url = ""

    def __init__(self, db_name):
        if ENVIRONMENT == "dev":
            self._client = MongoClient(host="127.0.0.1", port=27017)
        else:
            self.host = os.environ.get("BKAPP_MONGO_DB_IP")
            self.port = os.environ.get("BKAPP_MONGO_DB_PORT")
            self.user = os.environ.get("BKAPP_MONGO_DB_NAME")
            self.password = os.environ.get("BKAPP_MONGO_DB_PASSWORD")
            self.url = "mongodb://{}:{}@{}:{}".format(
                self.user,
                self.password,
                self.host,
                self.port,
            )
            self._client = MongoClient(self.url)
        self._db = self._client[db_name]

    def insert(self, data, collection_name):
        collection = self._db[collection_name]
        result = collection.insert_many(data)
        return result.inserted_ids

    def search(self, condition, collection_name):
        collection = self._db[collection_name]
        cursor = collection.find_one(condition)
        return cursor

    def search_all(self, condition, collection_name):
        collection = self._db[collection_name]
        cursor = collection.find(condition)
        return cursor

    def delete(self, condition, collection_name):
        collection = self._db[collection_name]
        result = collection.delete_many(condition)
        return result.deleted_count

    def update(self, condition, collection_name, **kwargs):
        collection = self._db[collection_name]
        obj = collection.find_one(condition)
        if obj:
            obj.update(kwargs)
            result = collection.update(condition, obj)
            return result
        else:
            condition.update(kwargs)
            return self.insert([condition], collection_name)

    def close(self):
        self._client.close()
[]
[]
[ "BKAPP_MONGO_DB_NAME", "BKAPP_MONGO_DB_PORT", "BKAPP_MONGO_DB_IP", "BKAPP_MONGO_DB_PASSWORD" ]
[]
["BKAPP_MONGO_DB_NAME", "BKAPP_MONGO_DB_PORT", "BKAPP_MONGO_DB_IP", "BKAPP_MONGO_DB_PASSWORD"]
python
4
0
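
The Python client above switches between a hard-coded local MongoDB in development and a URL assembled from the BKAPP_MONGO_DB_* variables otherwise. A Go sketch of the same selection logic follows, for comparison with the other examples here; reading ENVIRONMENT from the environment is an assumption for illustration, since the source imports it from settings.

package main

import (
    "fmt"
    "os"
)

func mongoURL() string {
    if os.Getenv("ENVIRONMENT") == "dev" {
        return "mongodb://127.0.0.1:27017"
    }
    return fmt.Sprintf("mongodb://%s:%s@%s:%s",
        os.Getenv("BKAPP_MONGO_DB_NAME"), // used as the user name in the source file
        os.Getenv("BKAPP_MONGO_DB_PASSWORD"),
        os.Getenv("BKAPP_MONGO_DB_IP"),
        os.Getenv("BKAPP_MONGO_DB_PORT"),
    )
}

func main() {
    fmt.Println(mongoURL())
}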
witches_rule_the_world/settings.py
""" Settings for witches rule the world """ import os BLOG_URL = 'witchesruletheworld.tumblr.com' TUMBLR = [ os.environ['WITCHES_CONSUMER_KEY'], os.environ['WITCHES_CONSUMER_SECRET'], os.environ['WITCHES_OAUTH_TOKEN'], os.environ['WITCHES_OAUTH_SECRET'] ] TAGS = [ 'witch', 'moody', 'noir', 'nu goth', 'gothgoth' ] BLACKLIST = set([ "31witches", "ass", "blood", "bloodplay", "bloody", "boy", "boyswillbegirls", "breasts", "cocksucking", "comic", "crossdresser", "crossdressing", "cuts", "cutting", "daddy", "dick", "dickgirl", "do not reblog", "do not reblog pls", "do not reblog plz", "don't reblog", "don't reblog pls", "don't reblog plz", "dont reblog", "dont reblog pls", "dont reblog plz", "fanart", "femboi", "fembot", "ftm", "ftmtransgender", "ftm hrt", "ftm transgender", "ftm transman", "futa", "futanari", "gay boy", "gay boys", "gay man", "gay men", "gender hc", "genderbend", "genderbender", "genderswap", "he/him", "headcanon", "herm", "hermaphrodite", "hot transgender", "inktober", "inktober2017", "ladyboy", "ladyboys", "lewd", "masturbate", "masturbating", "menlikeus", "mindless", "naked", "no reblog", "no reblog plz", "no reblog plz", "no reblogs", "no reblogs plz", "no reblogs plz", "nsfw", "nude", "nudity", "pre op ftm", "play thing", "porn", "self harm", "self-harm", "sex", "sexual", "shamala", "shamale", "shamela", "shamele", "shemala", "shemale", "shemela", "shemele", "shim", "shirtless", "siss", "sissifaction", "sissy", "t girl", "t-girl", "tgirl", "topless", "toptrans", "tranny", "trannywomen", "trans boy", "trans boys", "trans guy", "trans hc", "trans headcanon", "trans masc", "trans masculine", "trans positivity", "transboy", "transboys", "transexuals", "transguy", "transmasc", "transmasculine", "transsexual", "transvesti", "transvestite", "trap", "traps", "truscum", "webcomic", "witchtober", "xdresser" ]) SAFE_EMOJIS = [ ':innocent:', ':grinning:', ':smiley:', ':wink:', ':blush:', ':relaxed:', ':yum:', ':innocent:', ':heart_eyes:', ':kissing_heart:', ':heartpulse:', ':astonished:', ':smile_cat:', ':heart_eyes_cat:', ':ok_woman:', ':bow:', ':raised_hands:', ':eyes:', ':thumbsup:', ':ok_hand:', ':tulip:', ':rose:', ':bouquet:', ':herb:', ':leaves:', ':rabbit:', ':hatching_chick:', ':hatched_chick:', ':dog:', ':gift_heart:', ':monkey_face:', ':see_no_evil:', ':hear_no_evil:', ':whale:', ':frog:', ':octopus:', ':sunny:', ':star2:', ':star:', ':stars:', ':sunrise:', ':tada:', ':confetti_ball:', ':sparkles:', ':two_hearts:', ':cupid:', ':+1:', ':smiley_cat:' ':sparkling_heart:', ':heart:', ':purple_heart:', ':yellow_heart:', ':green_heart:', ':blue_heart:', ':dancer:', ':snowboarder:', ':bangbang:', ':exclamation:', ':100:' ]
[]
[]
[ "WITCHES_OAUTH_TOKEN", "WITCHES_OAUTH_SECRET", "WITCHES_CONSUMER_SECRET", "WITCHES_CONSUMER_KEY" ]
[]
["WITCHES_OAUTH_TOKEN", "WITCHES_OAUTH_SECRET", "WITCHES_CONSUMER_SECRET", "WITCHES_CONSUMER_KEY"]
python
4
0
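
settings.py reads its four Tumblr credentials with os.environ[...], so a missing variable raises KeyError as soon as the module is imported. Below is a Go sketch of the same fail-fast behaviour, using an illustrative mustEnv helper.

package main

import (
    "fmt"
    "log"
    "os"
)

// mustEnv returns the value of key or aborts the program if it is unset.
func mustEnv(key string) string {
    v, ok := os.LookupEnv(key)
    if !ok {
        log.Fatalf("required environment variable %s is not set", key)
    }
    return v
}

func main() {
    creds := []string{
        mustEnv("WITCHES_CONSUMER_KEY"),
        mustEnv("WITCHES_CONSUMER_SECRET"),
        mustEnv("WITCHES_OAUTH_TOKEN"),
        mustEnv("WITCHES_OAUTH_SECRET"),
    }
    fmt.Println("loaded", len(creds), "credentials")
}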
fedml_experiments/distributed/fedavg/main_fedavg.py
import argparse import logging import os import random import socket import sys import traceback import numpy as np import psutil import setproctitle import torch import wandb from mpi4py import MPI logging.basicConfig( level=logging.INFO, format='%(process)d %(asctime)s.%(msecs)03d - {%(filename)s.py (%(lineno)d)} - %(funcName)s(): %(message)s', datefmt='%Y-%m-%d,%H:%M:%S') # add the FedML root directory to the python path sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "./../../../../"))) sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "./../../../"))) sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), ""))) from fedml_api.distributed.utils.gpu_mapping import mapping_processes_to_gpu_device_from_yaml_file from fedml_api.data_preprocessing.FederatedEMNIST.data_loader import load_partition_data_federated_emnist from fedml_api.data_preprocessing.fed_cifar100.data_loader import load_partition_data_federated_cifar100 from fedml_api.data_preprocessing.fed_shakespeare.data_loader import load_partition_data_federated_shakespeare from fedml_api.data_preprocessing.shakespeare.data_loader import load_partition_data_shakespeare from fedml_api.data_preprocessing.stackoverflow_lr.data_loader import load_partition_data_federated_stackoverflow_lr from fedml_api.data_preprocessing.stackoverflow_nwp.data_loader import load_partition_data_federated_stackoverflow_nwp from fedml_api.data_preprocessing.MNIST.data_loader import load_partition_data_mnist from fedml_api.data_preprocessing.ImageNet.data_loader import load_partition_data_ImageNet from fedml_api.data_preprocessing.Landmarks.data_loader import load_partition_data_landmarks from fedml_api.data_preprocessing.TILES.data_loader import load_partition_data_tiles from fedml_api.data_preprocessing.cifar10.data_loader import load_partition_data_cifar10 from fedml_api.data_preprocessing.cifar100.data_loader import load_partition_data_cifar100 from fedml_api.data_preprocessing.cinic10.data_loader import load_partition_data_cinic10 from fedml_api.model.cv.cnn import CNN_DropOut from fedml_api.model.cv.resnet_gn import resnet18 from fedml_api.model.cv.mobilenet import mobilenet from fedml_api.model.cv.resnet import resnet56 from fedml_api.model.nlp.rnn import RNN_OriginalFedAvg, RNN_StackOverFlow from fedml_api.model.linear.lr import LogisticRegression from fedml_api.model.cv.mobilenet_v3 import MobileNetV3 from fedml_api.model.cv.efficientnet import EfficientNet from fedml_api.model.TILES.baseline_models import OneDCnnLstm from fedml_api.distributed.fedavg.FedAvgAPI import FedML_init, FedML_FedAvg_distributed def add_args(parser): """ parser : argparse.ArgumentParser return a parser added with args required by fit """ # Training settings parser.add_argument('--model', type=str, default='mobilenet', metavar='N', help='neural network used in training') parser.add_argument('--dataset', type=str, default='cifar10', metavar='N', help='dataset used for training') parser.add_argument('--data_dir', type=str, default='./../../../data/cifar10', help='data directory') parser.add_argument('--partition_method', type=str, default='hetero', metavar='N', help='how to partition the dataset on local workers') parser.add_argument('--partition_alpha', type=float, default=0.5, metavar='PA', help='partition alpha (default: 0.5)') parser.add_argument('--client_num_in_total', type=int, default=1000, metavar='NN', help='number of workers in a distributed cluster') parser.add_argument('--client_num_per_round', type=int, default=4, metavar='NN', 
help='number of workers') parser.add_argument('--batch_size', type=int, default=64, metavar='N', help='input batch size for training (default: 64)') parser.add_argument('--client_optimizer', type=str, default='adam', help='SGD with momentum; adam') parser.add_argument('--backend', type=str, default="MPI", help='Backend for Server and Client') parser.add_argument('--lr', type=float, default=0.001, metavar='LR', help='learning rate (default: 0.001)') parser.add_argument('--wd', help='weight decay parameter;', type=float, default=0.0001) parser.add_argument('--epochs', type=int, default=5, metavar='EP', help='how many epochs will be trained locally') parser.add_argument('--comm_round', type=int, default=10, help='how many round of communications we shoud use') parser.add_argument('--is_mobile', type=int, default=1, help='whether the program is running on the FedML-Mobile server side') parser.add_argument('--frequency_of_the_test', type=int, default=1, help='the frequency of the algorithms') parser.add_argument('--gpu_server_num', type=int, default=1, help='gpu_server_num') parser.add_argument('--gpu_num_per_server', type=int, default=4, help='gpu_num_per_server') parser.add_argument('--gpu_mapping_file', type=str, default="gpu_mapping.yaml", help='the gpu utilization file for servers and clients. If there is no \ gpu_util_file, gpu will not be used.') parser.add_argument('--gpu_mapping_key', type=str, default="mapping_default", help='the key in gpu utilization file') parser.add_argument('--grpc_ipconfig_path', type=str, default="grpc_ipconfig.csv", help='config table containing ipv4 address of grpc server') parser.add_argument('--ci', type=int, default=0, help='CI') parser.add_argument('--output_dir', type=str, default='/data/rash/tiles-motif/expts/') args = parser.parse_args() return args def load_data(args, dataset_name): if dataset_name == "mnist": logging.info("load_data. dataset_name = %s" % dataset_name) client_num, train_data_num, test_data_num, train_data_global, test_data_global, \ train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \ class_num = load_partition_data_mnist(args.batch_size) """ For shallow NN or linear models, we uniformly sample a fraction of clients each round (as the original FedAvg paper) """ args.client_num_in_total = client_num elif dataset_name == "femnist": logging.info("load_data. dataset_name = %s" % dataset_name) client_num, train_data_num, test_data_num, train_data_global, test_data_global, \ train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \ class_num = load_partition_data_federated_emnist(args.dataset, args.data_dir) args.client_num_in_total = client_num elif dataset_name == "shakespeare": logging.info("load_data. dataset_name = %s" % dataset_name) client_num, train_data_num, test_data_num, train_data_global, test_data_global, \ train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \ class_num = load_partition_data_shakespeare(args.batch_size) args.client_num_in_total = client_num elif dataset_name == "fed_shakespeare": logging.info("load_data. dataset_name = %s" % dataset_name) client_num, train_data_num, test_data_num, train_data_global, test_data_global, \ train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \ class_num = load_partition_data_federated_shakespeare(args.dataset, args.data_dir) args.client_num_in_total = client_num elif dataset_name == "fed_cifar100": logging.info("load_data. 
dataset_name = %s" % dataset_name) client_num, train_data_num, test_data_num, train_data_global, test_data_global, \ train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \ class_num = load_partition_data_federated_cifar100(args.dataset, args.data_dir) args.client_num_in_total = client_num elif dataset_name == "stackoverflow_lr": logging.info("load_data. dataset_name = %s" % dataset_name) client_num, train_data_num, test_data_num, train_data_global, test_data_global, \ train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \ class_num = load_partition_data_federated_stackoverflow_lr(args.dataset, args.data_dir) args.client_num_in_total = client_num elif dataset_name == "stackoverflow_nwp": logging.info("load_data. dataset_name = %s" % dataset_name) client_num, train_data_num, test_data_num, train_data_global, test_data_global, \ train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \ class_num = load_partition_data_federated_stackoverflow_nwp(args.dataset, args.data_dir) args.client_num_in_total = client_num elif dataset_name == "ILSVRC2012": logging.info("load_data. dataset_name = %s" % dataset_name) train_data_num, test_data_num, train_data_global, test_data_global, \ train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \ class_num = load_partition_data_ImageNet(dataset=dataset_name, data_dir=args.data_dir, partition_method=None, partition_alpha=None, client_number=args.client_num_in_total, batch_size=args.batch_size) elif dataset_name == "gld23k": logging.info("load_data. dataset_name = %s" % dataset_name) args.client_num_in_total = 233 fed_train_map_file = os.path.join(args.data_dir, 'mini_gld_train_split.csv') fed_test_map_file = os.path.join(args.data_dir, 'mini_gld_test.csv') args.data_dir = os.path.join(args.data_dir, 'images') train_data_num, test_data_num, train_data_global, test_data_global, \ train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \ class_num = load_partition_data_landmarks(dataset=dataset_name, data_dir=args.data_dir, fed_train_map_file=fed_train_map_file, fed_test_map_file=fed_test_map_file, partition_method=None, partition_alpha=None, client_number=args.client_num_in_total, batch_size=args.batch_size) elif dataset_name == "gld160k": logging.info("load_data. dataset_name = %s" % dataset_name) args.client_num_in_total = 1262 fed_train_map_file = os.path.join(args.data_dir, 'federated_train.csv') fed_test_map_file = os.path.join(args.data_dir, 'test.csv') args.data_dir = os.path.join(args.data_dir, 'images') train_data_num, test_data_num, train_data_global, test_data_global, \ train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \ class_num = load_partition_data_landmarks(dataset=dataset_name, data_dir=args.data_dir, fed_train_map_file=fed_train_map_file, fed_test_map_file=fed_test_map_file, partition_method=None, partition_alpha=None, client_number=args.client_num_in_total, batch_size=args.batch_size) elif dataset_name == "tiles": logging.info("load_data. 
dataset_name = %s" % dataset_name) train_data_num, test_data_num, train_data_global, test_data_global, \ train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \ class_num = load_partition_data_tiles(batch_size=args.batch_size) else: if dataset_name == "cifar10": data_loader = load_partition_data_cifar10 elif dataset_name == "cifar100": data_loader = load_partition_data_cifar100 elif dataset_name == "cinic10": data_loader = load_partition_data_cinic10 else: data_loader = load_partition_data_cifar10 train_data_num, test_data_num, train_data_global, test_data_global, \ train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \ class_num = data_loader(args.dataset, args.data_dir, args.partition_method, args.partition_alpha, args.client_num_in_total, args.batch_size) dataset = [train_data_num, test_data_num, train_data_global, test_data_global, train_data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num] return dataset def create_model(args, model_name, output_dim): logging.info("create_model. model_name = %s, output_dim = %s" % (model_name, output_dim)) model = None if model_name == "lr" and args.dataset == "mnist": logging.info("LogisticRegression + MNIST") model = LogisticRegression(28 * 28, output_dim) elif model_name == "rnn" and args.dataset == "shakespeare": logging.info("RNN + shakespeare") model = RNN_OriginalFedAvg() elif model_name == "cnn" and args.dataset == "femnist": logging.info("CNN + FederatedEMNIST") model = CNN_DropOut(False) elif model_name == "resnet18_gn" and args.dataset == "fed_cifar100": logging.info("ResNet18_GN + Federated_CIFAR100") model = resnet18() elif model_name == "rnn" and args.dataset == "fed_shakespeare": logging.info("RNN + fed_shakespeare") model = RNN_OriginalFedAvg() elif model_name == "lr" and args.dataset == "stackoverflow_lr": logging.info("lr + stackoverflow_lr") model = LogisticRegression(10004, output_dim) elif model_name == "rnn" and args.dataset == "stackoverflow_nwp": logging.info("CNN + stackoverflow_nwp") model = RNN_StackOverFlow() elif model_name == "resnet56": model = resnet56(class_num=output_dim) elif model_name == "mobilenet": model = mobilenet(class_num=output_dim) # TODO elif model_name == 'mobilenet_v3': '''model_mode \in {LARGE: 5.15M, SMALL: 2.94M}''' model = MobileNetV3(model_mode='LARGE') elif model_name == 'efficientnet': model = EfficientNet() elif model_name == 'onedcnnlstm': model = OneDCnnLstm(input_channel=1, num_pred=4) return model if __name__ == "__main__": # quick fix for issue in MacOS environment: https://github.com/openai/spinningup/issues/16 if sys.platform == 'darwin': os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' # logging.basicConfig( # level=logging.INFO, # format='%(process)d %(asctime)s.%(msecs)03d - {%(filename)s.py (%(lineno)d)} - %(funcName)s(): %(message)s', # datefmt='%Y-%m-%d,%H:%M:%S') # initialize distributed computing (MPI) comm, process_id, worker_number = FedML_init() # parse python script input parameters parser = argparse.ArgumentParser() args = add_args(parser) logging.info(args) # customize the process name str_process_name = "FedAvg (distributed):" + str(process_id) setproctitle.setproctitle(str_process_name) # customize the log format # logging.basicConfig(level=logging.INFO, # logging.basicConfig(level=logging.INFO, # format=str( # process_id) + ' - %(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', # datefmt='%a, %d %b %Y %H:%M:%S') hostname = socket.gethostname() logging.info("#############process ID = " + 
str(process_id) + ", host name = " + hostname + "########" + ", process ID = " + str(os.getpid()) + ", process Name = " + str(psutil.Process(os.getpid()))) # initialize the wandb machine learning experimental tracking platform (https://www.wandb.com/). if process_id == 0: wandb.init( # project="federated_nas", project="tiles_HAD", name="FedAVG(d)" + str(args.partition_method) + "r" + str(args.comm_round) + "-e" + str( args.epochs) + "-lr" + str( args.lr), config=args ) # Set the random seed. The np.random seed determines the dataset partition. # The torch_manual_seed determines the initial weight. # We fix these two, so that we can reproduce the result. random.seed(0) np.random.seed(0) torch.manual_seed(0) torch.cuda.manual_seed_all(0) # Please check "GPU_MAPPING.md" to see how to define the topology logging.info("process_id = %d, size = %d" % (process_id, worker_number)) device = mapping_processes_to_gpu_device_from_yaml_file(process_id, worker_number, args.gpu_mapping_file, args.gpu_mapping_key) # load data dataset = load_data(args, args.dataset) [train_data_num, test_data_num, train_data_global, test_data_global, train_data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num] = dataset # create model. # Note if the model is DNN (e.g., ResNet), the training will be very slow. # In this case, please use our FedML distributed version (./fedml_experiments/distributed_fedavg) model = create_model(args, model_name=args.model, output_dim=dataset[7]) try: # start "federated averaging (FedAvg)" FedML_FedAvg_distributed(process_id, worker_number, device, comm, model, train_data_num, train_data_global, test_data_global, train_data_local_num_dict, train_data_local_dict, test_data_local_dict, args) except Exception as e: print(e) logging.info('traceback.format_exc():\n%s' % traceback.format_exc()) MPI.COMM_WORLD.Abort()
[]
[]
[ "KMP_DUPLICATE_LIB_OK" ]
[]
["KMP_DUPLICATE_LIB_OK"]
python
1
0
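
The only environment access in this training script is the macOS workaround that sets KMP_DUPLICATE_LIB_OK=True before the OpenMP-linked libraries load. A Go sketch of the equivalent idea follows, injecting the variable into a child process's environment only on darwin; the python command and script name are placeholders.

package main

import (
    "log"
    "os"
    "os/exec"
    "runtime"
)

func main() {
    cmd := exec.Command("python", "main_fedavg.py") // placeholder command
    cmd.Env = os.Environ()
    if runtime.GOOS == "darwin" {
        // Work around duplicate-OpenMP aborts on macOS, mirroring the Python guard.
        cmd.Env = append(cmd.Env, "KMP_DUPLICATE_LIB_OK=True")
    }
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    if err := cmd.Run(); err != nil {
        log.Fatal(err)
    }
}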
telemetry/telemetry/internal/backends/chrome/desktop_browser_backend.py
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from __future__ import print_function from __future__ import absolute_import import datetime import hashlib import logging import os import os.path import random import re import shutil import signal import subprocess as subprocess import sys import tempfile import py_utils from py_utils import cloud_storage from py_utils import exc_util from telemetry.core import exceptions from telemetry.internal.backends.chrome import chrome_browser_backend from telemetry.internal.backends.chrome import minidump_finder from telemetry.internal.backends.chrome import desktop_minidump_symbolizer from telemetry.internal.util import format_for_logging DEVTOOLS_ACTIVE_PORT_FILE = 'DevToolsActivePort' UI_DEVTOOLS_ACTIVE_PORT_FILE = 'UIDevToolsActivePort' class DesktopBrowserBackend(chrome_browser_backend.ChromeBrowserBackend): """The backend for controlling a locally-executed browser instance, on Linux, Mac or Windows. """ def __init__(self, desktop_platform_backend, browser_options, browser_directory, profile_directory, executable, flash_path, is_content_shell, build_dir=None): super(DesktopBrowserBackend, self).__init__( desktop_platform_backend, browser_options=browser_options, browser_directory=browser_directory, profile_directory=profile_directory, supports_extensions=not is_content_shell, supports_tab_control=not is_content_shell, build_dir=build_dir) self._executable = executable self._flash_path = flash_path self._is_content_shell = is_content_shell # Initialize fields so that an explosion during init doesn't break in Close. self._proc = None self._tmp_output_file = None # pylint: disable=invalid-name self._minidump_path_crashpad_retrieval = {} # pylint: enable=invalid-name if not self._executable: raise Exception('Cannot create browser, no executable found!') if self._flash_path and not os.path.exists(self._flash_path): raise RuntimeError('Flash path does not exist: %s' % self._flash_path) if self.is_logging_enabled: self._log_file_path = os.path.join(tempfile.mkdtemp(), 'chrome.log') else: self._log_file_path = None @property def is_logging_enabled(self): return self.browser_options.logging_verbosity in [ self.browser_options.NON_VERBOSE_LOGGING, self.browser_options.VERBOSE_LOGGING, self.browser_options.SUPER_VERBOSE_LOGGING] @property def log_file_path(self): return self._log_file_path @property def supports_uploading_logs(self): return (self.browser_options.logs_cloud_bucket and self.log_file_path and os.path.isfile(self.log_file_path)) def _GetDevToolsActivePortPath(self): return os.path.join(self.profile_directory, DEVTOOLS_ACTIVE_PORT_FILE) def _FindDevToolsPortAndTarget(self): devtools_file_path = self._GetDevToolsActivePortPath() if not os.path.isfile(devtools_file_path): raise EnvironmentError('DevTools file doest not exist yet') # Attempt to avoid reading the file until it's populated. # Both stat and open may raise IOError if not ready, the caller will retry. 
lines = None if os.stat(devtools_file_path).st_size > 0: with open(devtools_file_path) as f: lines = [line.rstrip() for line in f] if not lines: raise EnvironmentError('DevTools file empty') devtools_port = int(lines[0]) browser_target = lines[1] if len(lines) >= 2 else None return devtools_port, browser_target def _FindUIDevtoolsPort(self): devtools_file_path = os.path.join(self.profile_directory, UI_DEVTOOLS_ACTIVE_PORT_FILE) if not os.path.isfile(devtools_file_path): raise EnvironmentError('UIDevTools file does not exist yet') lines = None if os.stat(devtools_file_path).st_size > 0: with open(devtools_file_path) as f: lines = [line.rstrip() for line in f] if not lines: raise EnvironmentError('UIDevTools file empty') devtools_port = int(lines[0]) return devtools_port def Start(self, startup_args): assert not self._proc, 'Must call Close() before Start()' self._dump_finder = minidump_finder.MinidumpFinder( self.browser.platform.GetOSName(), self.browser.platform.GetArchName()) # macOS displays a blocking crash resume dialog that we need to suppress. if self.browser.platform.GetOSName() == 'mac': # Default write expects either the application name or the # path to the application. self._executable has the path to the app # with a few other bits tagged on after .app. Thus, we shorten the path # to end with .app. If this is ineffective on your mac, please delete # the saved state of the browser you are testing on here: # /Users/.../Library/Saved\ Application State/... # http://stackoverflow.com/questions/20226802 dialog_path = re.sub(r'\.app\/.*', '.app', self._executable) subprocess.check_call([ 'defaults', 'write', '-app', dialog_path, 'NSQuitAlwaysKeepsWindows', '-bool', 'false' ]) cmd = [self._executable] if self.browser.platform.GetOSName() == 'mac': cmd.append('--use-mock-keychain') # crbug.com/865247 cmd.extend(startup_args) cmd.append('about:blank') env = os.environ.copy() env['CHROME_HEADLESS'] = '1' # Don't upload minidumps. env['BREAKPAD_DUMP_LOCATION'] = self._tmp_minidump_dir if self.is_logging_enabled: sys.stderr.write( 'Chrome log file will be saved in %s\n' % self.log_file_path) env['CHROME_LOG_FILE'] = self.log_file_path # Make sure we have predictable language settings that don't differ from the # recording. for name in ('LC_ALL', 'LC_MESSAGES', 'LANG'): encoding = 'en_US.UTF-8' if env.get(name, encoding) != encoding: logging.warn('Overriding env[%s]=="%s" with default value "%s"', name, env[name], encoding) env[name] = 'en_US.UTF-8' self.LogStartCommand(cmd, env) if not self.browser_options.show_stdout: self._tmp_output_file = tempfile.NamedTemporaryFile('w') self._proc = subprocess.Popen( cmd, stdout=self._tmp_output_file, stderr=subprocess.STDOUT, env=env) else: self._proc = subprocess.Popen(cmd, env=env) self.BindDevToolsClient() # browser is foregrounded by default on Windows and Linux, but not Mac. if self.browser.platform.GetOSName() == 'mac': subprocess.Popen([ 'osascript', '-e', ('tell application "%s" to activate' % self._executable) ]) if self._supports_extensions: self._WaitForExtensionsToLoad() def LogStartCommand(self, command, env): """Log the command used to start Chrome. In order to keep the length of logs down (see crbug.com/943650), we sometimes trim the start command depending on browser_options. The command may change between runs, but usually in innocuous ways like --user-data-dir changes to a new temporary directory. Some benchmarks do use different startup arguments for different stories, but this is discouraged. 
This method could be changed to print arguments that are different since the last run if need be. """ formatted_command = format_for_logging.ShellFormat( command, trim=self.browser_options.trim_logs) logging.info('Starting Chrome: %s\n', formatted_command) if not self.browser_options.trim_logs: logging.info('Chrome Env: %s', env) def BindDevToolsClient(self): # In addition to the work performed by the base class, quickly check if # the browser process is still alive. if not self.IsBrowserRunning(): raise exceptions.ProcessGoneException( 'Return code: %d' % self._proc.returncode) super(DesktopBrowserBackend, self).BindDevToolsClient() def GetPid(self): if self._proc: return self._proc.pid return None def IsBrowserRunning(self): return self._proc and self._proc.poll() is None def GetStandardOutput(self): if not self._tmp_output_file: if self.browser_options.show_stdout: # This can happen in the case that loading the Chrome binary fails. # We print rather than using logging here, because that makes a # recursive call to this function. print("Can't get standard output with --show-stdout", file=sys.stderr) return '' self._tmp_output_file.flush() try: with open(self._tmp_output_file.name) as f: return f.read() except IOError: return '' def _IsExecutableStripped(self): if self.browser.platform.GetOSName() == 'mac': try: symbols = subprocess.check_output(['/usr/bin/nm', self._executable]) except subprocess.CalledProcessError as err: logging.warning( 'Error when checking whether executable is stripped: %s', err.output) # Just assume that binary is stripped to skip breakpad symbol generation # if this check failed. return True num_symbols = len(symbols.splitlines()) # We assume that if there are more than 10 symbols the executable is not # stripped. return num_symbols < 10 else: return False def _GetStackFromMinidump(self, minidump): # Create an executable-specific directory if necessary to store symbols # for re-use. We purposefully don't clean this up so that future # tests can continue to use the same symbols that are unique to the # executable. symbols_dir = self._CreateExecutableUniqueDirectory('chrome_symbols_') dump_symbolizer = desktop_minidump_symbolizer.DesktopMinidumpSymbolizer( self.browser.platform.GetOSName(), self.browser.platform.GetArchName(), self._dump_finder, self.build_dir, symbols_dir=symbols_dir) return dump_symbolizer.SymbolizeMinidump(minidump) def _CreateExecutableUniqueDirectory(self, prefix): """Creates a semi-permanent directory unique to the browser executable. This directory will persist between different tests, and potentially be available between different test suites, but is liable to be cleaned up by the OS at any point outside of a test suite's run. Args: prefix: A string to include before the unique identifier in the directory name. Returns: A string containing an absolute path to the created directory. """ hashfunc = hashlib.sha1() with open(self._executable, 'rb') as infile: hashfunc.update(infile.read()) symbols_dirname = prefix + hashfunc.hexdigest() # We can't use mkdtemp() directly since that will result in the directory # being different, and thus not shared. So, create an unused directory # and use the same parent directory. 
unused_dir = tempfile.mkdtemp().rstrip(os.path.sep) symbols_dir = os.path.join(os.path.dirname(unused_dir), symbols_dirname) if not os.path.exists(symbols_dir) or not os.path.isdir(symbols_dir): os.makedirs(symbols_dir) shutil.rmtree(unused_dir) return symbols_dir def _UploadMinidumpToCloudStorage(self, minidump_path): """ Upload minidump_path to cloud storage and return the cloud storage url. """ remote_path = ('minidump-%s-%i.dmp' % (datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'), random.randint(0, 1000000))) try: return cloud_storage.Insert(cloud_storage.TELEMETRY_OUTPUT, remote_path, minidump_path) except cloud_storage.CloudStorageError as err: logging.error('Cloud storage error while trying to upload dump: %s', repr(err)) return '<Missing link>' def SymbolizeMinidump(self, minidump_path): return self._InternalSymbolizeMinidump(minidump_path) def _InternalSymbolizeMinidump(self, minidump_path): cloud_storage_link = self._UploadMinidumpToCloudStorage(minidump_path) stack = self._GetStackFromMinidump(minidump_path) if not stack: error_message = ('Failed to symbolize minidump. Raw stack is uploaded to' ' cloud storage: %s.' % cloud_storage_link) return (False, error_message) self._symbolized_minidump_paths.add(minidump_path) return (True, stack) def _TryCooperativeShutdown(self): if self.browser.platform.IsCooperativeShutdownSupported(): # Ideally there would be a portable, cooperative shutdown # mechanism for the browser. This seems difficult to do # correctly for all embedders of the content API. The only known # problem with unclean shutdown of the browser process is on # Windows, where suspended child processes frequently leak. For # now, just solve this particular problem. See Issue 424024. if self.browser.platform.CooperativelyShutdown(self._proc, "chrome"): try: # Use a long timeout to handle slow Windows debug # (see crbug.com/815004) # Allow specifying a custom shutdown timeout via the # 'CHROME_SHUTDOWN_TIMEOUT' environment variable. # TODO(sebmarchand): Remove this now that there's an option to shut # down Chrome via Devtools. py_utils.WaitFor(lambda: not self.IsBrowserRunning(), timeout=int(os.getenv('CHROME_SHUTDOWN_TIMEOUT', 15)) ) logging.info('Successfully shut down browser cooperatively') except py_utils.TimeoutException as e: logging.warning('Failed to cooperatively shutdown. ' + 'Proceeding to terminate: ' + str(e)) def Background(self): raise NotImplementedError @exc_util.BestEffort def Close(self): super(DesktopBrowserBackend, self).Close() # First, try to cooperatively shutdown. if self.IsBrowserRunning(): self._TryCooperativeShutdown() # Second, try to politely shutdown with SIGINT. Use SIGINT instead of # SIGTERM (or terminate()) here since the browser treats SIGTERM as a more # urgent shutdown signal and may not free all resources. if self.IsBrowserRunning() and self.browser.platform.GetOSName() != 'win': self._proc.send_signal(signal.SIGINT) try: py_utils.WaitFor(lambda: not self.IsBrowserRunning(), timeout=int(os.getenv('CHROME_SHUTDOWN_TIMEOUT', 5)) ) self._proc = None except py_utils.TimeoutException: logging.warning('Failed to gracefully shutdown.') # Shutdown aggressively if all above failed. if self.IsBrowserRunning(): logging.warning('Proceed to kill the browser.') self._proc.kill() self._proc = None if self._tmp_output_file: self._tmp_output_file.close() self._tmp_output_file = None if self._tmp_minidump_dir: shutil.rmtree(self._tmp_minidump_dir, ignore_errors=True) self._tmp_minidump_dir = None
[]
[]
[ "CHROME_SHUTDOWN_TIMEOUT" ]
[]
["CHROME_SHUTDOWN_TIMEOUT"]
python
1
0
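
The browser backend above reads CHROME_SHUTDOWN_TIMEOUT with os.getenv and a numeric default before waiting for the process to exit. A small Go sketch of that integer-from-environment-with-default pattern follows; envInt is an illustrative helper and, unlike the Python int() call, it silently falls back when the value is malformed.

package main

import (
    "fmt"
    "os"
    "strconv"
    "time"
)

// envInt parses key as an integer number of seconds, returning def when the
// variable is unset or malformed.
func envInt(key string, def int) int {
    if v := os.Getenv(key); v != "" {
        if n, err := strconv.Atoi(v); err == nil {
            return n
        }
    }
    return def
}

func main() {
    timeout := time.Duration(envInt("CHROME_SHUTDOWN_TIMEOUT", 15)) * time.Second
    fmt.Println("waiting up to", timeout, "for the browser to shut down")
}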
core/config/config.go
/* Copyright Greg Haskins <[email protected]> 2017, All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package config import ( "fmt" "os" "path/filepath" "github.com/spf13/viper" ) func dirExists(path string) bool { _, err := os.Stat(path) return err == nil } func addConfigPath(v *viper.Viper, p string) { if v != nil { v.AddConfigPath(p) } else { viper.AddConfigPath(p) } } //---------------------------------------------------------------------------------- // GetDevConfigDir() //---------------------------------------------------------------------------------- // Returns the path to the default configuration that is maintained with the source // tree. Only valid to call from a test/development context. //---------------------------------------------------------------------------------- func GetDevConfigDir() (string, error) { gopath := os.Getenv("GOPATH") if gopath == "" { return "", fmt.Errorf("GOPATH not set") } for _, p := range filepath.SplitList(gopath) { devPath := filepath.Join(p, "src/github.com/hyperledger/fabric/sampleconfig") if !dirExists(devPath) { continue } return devPath, nil } return "", fmt.Errorf("DevConfigDir not found in %s", gopath) } //---------------------------------------------------------------------------------- // GetDevMspDir() //---------------------------------------------------------------------------------- // Builds upon GetDevConfigDir to return the path to our sampleconfig/msp that is // maintained with the source tree. Only valid to call from a test/development // context. Runtime environment should use configuration elements such as // // GetPath("peer.mspConfigDir") //---------------------------------------------------------------------------------- func GetDevMspDir() (string, error) { devDir, err := GetDevConfigDir() if err != nil { return "", fmt.Errorf("Error obtaining DevConfigDir: %s", devDir) } return filepath.Join(devDir, "msp"), nil } //---------------------------------------------------------------------------------- // TranslatePath() //---------------------------------------------------------------------------------- // Translates a relative path into a fully qualified path relative to the config // file that specified it. Absolute paths are passed unscathed. //---------------------------------------------------------------------------------- func TranslatePath(base, p string) string { if filepath.IsAbs(p) { return p } return filepath.Join(base, p) } //---------------------------------------------------------------------------------- // TranslatePathInPlace() //---------------------------------------------------------------------------------- // Translates a relative path into a fully qualified path in-place (updating the // pointer) relative to the config file that specified it. Absolute paths are // passed unscathed. 
//---------------------------------------------------------------------------------- func TranslatePathInPlace(base string, p *string) { *p = TranslatePath(base, *p) } //---------------------------------------------------------------------------------- // GetPath() //---------------------------------------------------------------------------------- // GetPath allows configuration strings that specify a (config-file) relative path // // For example: Assume our config is located in /etc/hyperledger/fabric/core.yaml with // a key "msp.configPath" = "msp/config.yaml". // // This function will return: // GetPath("msp.configPath") -> /etc/hyperledger/fabric/msp/config.yaml // //---------------------------------------------------------------------------------- func GetPath(key string) string { p := viper.GetString(key) if p == "" { return "" } return TranslatePath(filepath.Dir(viper.ConfigFileUsed()), p) } const OfficialPath = "/etc/hyperledger/fabric" //---------------------------------------------------------------------------------- // InitViper() //---------------------------------------------------------------------------------- // Performs basic initialization of our viper-based configuration layer. // Primary thrust is to establish the paths that should be consulted to find // the configuration we need. If v == nil, we will initialize the global // Viper instance //---------------------------------------------------------------------------------- func InitViper(v *viper.Viper, configName string) error { var altPath = os.Getenv("FABRIC_CFG_PATH") if altPath != "" { // If the user has overridden the path with an envvar, its the only path // we will consider addConfigPath(v, altPath) } else { // If we get here, we should use the default paths in priority order: // // *) CWD // *) The $GOPATH based development tree // *) /etc/hyperledger/fabric // // CWD addConfigPath(v, "./") // DevConfigPath err := AddDevConfigPath(v) if err != nil { return err } // And finally, the official path if dirExists(OfficialPath) { addConfigPath(v, OfficialPath) } } // Now set the configuration file. if v != nil { v.SetConfigName(configName) } else { viper.SetConfigName(configName) } return nil } //---------------------------------------------------------------------------------- // AddDevConfigPath() //---------------------------------------------------------------------------------- // Helper utility that automatically adds our DevConfigDir to the viper path //---------------------------------------------------------------------------------- func AddDevConfigPath(v *viper.Viper) error { devPath, err := GetDevConfigDir() if err != nil { return err } addConfigPath(v, devPath) return nil }
[ "\"GOPATH\"", "\"FABRIC_CFG_PATH\"" ]
[]
[ "GOPATH", "FABRIC_CFG_PATH" ]
[]
["GOPATH", "FABRIC_CFG_PATH"]
go
2
0
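
InitViper above treats FABRIC_CFG_PATH as an exclusive override and otherwise searches the working directory, the GOPATH development tree, and /etc/hyperledger/fabric in that order. The sketch below restates that search-order logic with placeholder directory and variable names; it is not Fabric's actual layout.

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

func findConfigDir() (string, bool) {
    // An explicit override is the only path considered when set.
    if override := os.Getenv("EXAMPLE_CFG_PATH"); override != "" {
        return override, true
    }
    candidates := []string{"."}
    if gopath := os.Getenv("GOPATH"); gopath != "" {
        for _, p := range filepath.SplitList(gopath) {
            candidates = append(candidates, filepath.Join(p, "src/example.com/project/sampleconfig"))
        }
    }
    candidates = append(candidates, "/etc/example")
    for _, dir := range candidates {
        if info, err := os.Stat(dir); err == nil && info.IsDir() {
            return dir, true
        }
    }
    return "", false
}

func main() {
    if dir, ok := findConfigDir(); ok {
        fmt.Println("config dir:", dir)
    } else {
        fmt.Println("no config dir found")
    }
}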
integration/e2e/util.go
package e2e import ( "io/ioutil" "math" "math/rand" "net/http" "os" "os/exec" "path/filepath" "time" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/prompb" ) func RunCommandAndGetOutput(name string, args ...string) ([]byte, error) { cmd := exec.Command(name, args...) return cmd.CombinedOutput() } func EmptyFlags() map[string]string { return map[string]string{} } func MergeFlags(inputs ...map[string]string) map[string]string { output := MergeFlagsWithoutRemovingEmpty(inputs...) for k, v := range output { if v == "" { delete(output, k) } } return output } func MergeFlagsWithoutRemovingEmpty(inputs ...map[string]string) map[string]string { output := map[string]string{} for _, input := range inputs { for name, value := range input { output[name] = value } } return output } func BuildArgs(flags map[string]string) []string { args := make([]string, 0, len(flags)) for name, value := range flags { if value != "" { args = append(args, name+"="+value) } else { args = append(args, name) } } return args } func GetRequest(url string) (*http.Response, error) { const timeout = 1 * time.Second client := &http.Client{Timeout: timeout} return client.Get(url) } // timeToMilliseconds returns the input time as milliseconds, using the same // formula used by Prometheus in order to get the same timestamp when asserting // on query results. func TimeToMilliseconds(t time.Time) int64 { // The millisecond is rounded to the nearest return int64(math.Round(float64(t.UnixNano()) / 1000000)) } func GenerateSeries(name string, ts time.Time, additionalLabels ...prompb.Label) (series []prompb.TimeSeries, vector model.Vector) { tsMillis := TimeToMilliseconds(ts) value := rand.Float64() lbls := append( []prompb.Label{ {Name: labels.MetricName, Value: name}, }, additionalLabels..., ) // Generate the series series = append(series, prompb.TimeSeries{ Labels: lbls, Samples: []prompb.Sample{ {Value: value, Timestamp: tsMillis}, }, }) // Generate the expected vector when querying it metric := model.Metric{} metric[labels.MetricName] = model.LabelValue(name) for _, lbl := range additionalLabels { metric[model.LabelName(lbl.Name)] = model.LabelValue(lbl.Value) } vector = append(vector, &model.Sample{ Metric: metric, Value: model.SampleValue(value), Timestamp: model.Time(tsMillis), }) return } // GetTempDirectory creates a temporary directory for shared integration // test files, either in the working directory or a directory referenced by // the E2E_TEMP_DIR environment variable func GetTempDirectory() (string, error) { var ( dir string err error ) // If a temp dir is referenced, return that if os.Getenv("E2E_TEMP_DIR") != "" { dir = os.Getenv("E2E_TEMP_DIR") } else { dir, err = os.Getwd() if err != nil { return "", err } } tmpDir, err := ioutil.TempDir(dir, "e2e_integration_test") if err != nil { return "", err } absDir, err := filepath.Abs(tmpDir) if err != nil { _ = os.RemoveAll(tmpDir) return "", err } return absDir, nil }
[ "\"E2E_TEMP_DIR\"", "\"E2E_TEMP_DIR\"" ]
[]
[ "E2E_TEMP_DIR" ]
[]
["E2E_TEMP_DIR"]
go
1
0
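
MergeFlags above layers maps so that later inputs override earlier ones and an empty value deletes the flag entirely, which is easy to miss when composing test configurations. The sketch below restates that behaviour as a self-contained program with invented flag names.

package main

import "fmt"

// mergeFlags mirrors the MergeFlags semantics: later maps win, empty values remove the flag.
func mergeFlags(inputs ...map[string]string) map[string]string {
    out := map[string]string{}
    for _, in := range inputs {
        for k, v := range in {
            out[k] = v
        }
    }
    for k, v := range out {
        if v == "" {
            delete(out, k)
        }
    }
    return out
}

func main() {
    defaults := map[string]string{"-log.level": "info", "-limits.enabled": "true"}
    overrides := map[string]string{"-log.level": "debug", "-limits.enabled": ""} // empty value removes the flag
    fmt.Println(mergeFlags(defaults, overrides))                                 // map[-log.level:debug]
}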
scripts/aws/destroy-kubernetes-cluster.go
package main import ( "log" "os" "sync" "time" ) import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/eks" "github.com/aws/aws-sdk-go/service/iam" ) var deferError = false func checkDeferError(err error) bool { if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { case "NoSuchEntity": case "ResourceNotFoundException": case "ValidationError": default: log.Printf("Error (%s): %s\n", aerr.Code(), aerr.Message()) deferError = true return true } log.Printf("Warning (%s): %s\n", aerr.Code(), aerr.Message()) } else { log.Printf("Error: %s\n", err.Error()) deferError = true } return true } return false } func DeleteEksRole(iamClient *iam.IAM, eksRoleName *string) { log.Printf("Deleting EKS service role \"%s\"...\n", *eksRoleName) policyArn := "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" _, err := iamClient.DetachRolePolicy(&iam.DetachRolePolicyInput{ RoleName: eksRoleName, PolicyArn: &policyArn, }) checkDeferError(err) policyArn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy" _, err = iamClient.DetachRolePolicy(&iam.DetachRolePolicyInput{ RoleName: eksRoleName, PolicyArn: &policyArn, }) checkDeferError(err) _, err = iamClient.DeleteRole(&iam.DeleteRoleInput{ RoleName: eksRoleName, }) if checkDeferError(err) { return } log.Printf("Role \"%s\" successfully deleted!\n" , *eksRoleName) } func DeleteEksClusterVpc(cfClient *cloudformation.CloudFormation, clusterStackName *string) { log.Printf("Deleting Amazon EKS Cluster VPC \"%s\"...\n", *clusterStackName) resp, err := cfClient.DescribeStacks(&cloudformation.DescribeStacksInput{ StackName: clusterStackName, }) if checkDeferError(err) { return } stackId := resp.Stacks[0].StackId _, err = cfClient.DeleteStack(&cloudformation.DeleteStackInput{ StackName: clusterStackName, }) if checkDeferError(err) { return } for { resp, err := cfClient.DescribeStacks(&cloudformation.DescribeStacksInput{ StackName: stackId, }) if checkDeferError(err) { if err.(awserr.Error).Code() == "ValidationError" { log.Printf("Cluster VPC \"%s\" successfully deleted!\n", *clusterStackName) } return } switch *resp.Stacks[0].StackStatus { case "DELETE_COMPLETE": log.Printf("Cluster VPC \"%s\" successfully deleted!\n", *clusterStackName) return case "DELETE_IN_PROGRESS": time.Sleep(time.Second) default: log.Printf("Error: Unexpected stack status: %s\n", *resp.Stacks[0].StackStatus) deferError = true return } } } func DeleteEksCluster(eksClient *eks.EKS, clusterName *string) { log.Printf("Deleting Amazon EKS Cluster \"%s\"...\n", *clusterName) _, err := eksClient.DeleteCluster(&eks.DeleteClusterInput{ Name: clusterName, }) if checkDeferError(err) { return } for { resp, err := eksClient.DescribeCluster(&eks.DescribeClusterInput{ Name: clusterName, }) if err != nil && err.(awserr.Error).Code() == "ResourceNotFoundException" { log.Printf("EKS Cluster \"%s\" successfully Deleted!\n", *clusterName) return } if checkDeferError(err) { return } switch *resp.Cluster.Status { case "DELETING": time.Sleep(time.Second) default: log.Printf("Error: Unexpected cluster status: %s\n", *resp.Cluster.Status) deferError = true return } } } func DeleteEksEc2KeyPair(ec2Client *ec2.EC2, keyPairName *string) { log.Printf("Deleting Amazon EC2 key pair \"%s\"...\n", *keyPairName) _, err := ec2Client.DeleteKeyPair(&ec2.DeleteKeyPairInput{ KeyName: keyPairName, }) if checkDeferError(err) { return } log.Printf("Amazon EC2 key 
pair \"%s\" successfully Deleted!\n", *keyPairName) } func DeleteEksWorkerNodes(cfClient *cloudformation.CloudFormation, nodesStackName *string) { log.Printf("Deleting Amazon EKS Worker Nodes...\n") resp, err := cfClient.DescribeStacks(&cloudformation.DescribeStacksInput{ StackName: nodesStackName, }) if checkDeferError(err) { return } stackId := resp.Stacks[0].StackId _, err = cfClient.DeleteStack(&cloudformation.DeleteStackInput{ StackName: nodesStackName, }) if checkDeferError(err) { return } for { resp, err := cfClient.DescribeStacks(&cloudformation.DescribeStacksInput{ StackName: stackId, }) if checkDeferError(err) { return } switch *resp.Stacks[0].StackStatus { case "DELETE_COMPLETE": log.Printf("EKS Worker Nodes \"%s\" successfully deleted!\n", *nodesStackName) return case "DELETE_IN_PROGRESS": time.Sleep(time.Second) default: log.Printf("Error: Unexpected stack status: %s\n", *resp.Stacks[0].StackStatus) deferError = true return } } } func deleteAWSKubernetesCluster() { sess := session.Must(session.NewSession()) iamClient := iam.New(sess) eksClient := eks.New(sess) cfClient := cloudformation.New(sess) ec2Client := ec2.New(sess) serviceSuffix := os.Getenv("NSM_AWS_SERVICE_SUFFIX") var wg sync.WaitGroup wg.Add(3) // Deleting Amazon EKS Worker Nodes nodesStackName := "nsm-nodes" + serviceSuffix go func() { defer wg.Done() DeleteEksWorkerNodes(cfClient, &nodesStackName) }() // Deleting Amazon EKS Cluster clusterName := "nsm" + serviceSuffix go func() { defer wg.Done() DeleteEksCluster(eksClient, &clusterName) }() // Deleting Amazon EKS Cluster VPC clusterStackName := "nsm-srv" + serviceSuffix go func() { defer wg.Done() DeleteEksClusterVpc(cfClient, &clusterStackName) }() wg.Wait() // Deleting Amazon Roles and Keys eksRoleName := "nsm-role" + serviceSuffix DeleteEksRole(iamClient, &eksRoleName) keyPairName := "nsm-key-pair" + serviceSuffix DeleteEksEc2KeyPair(ec2Client, &keyPairName) if deferError { os.Exit(1) } }
[ "\"NSM_AWS_SERVICE_SUFFIX\"" ]
[]
[ "NSM_AWS_SERVICE_SUFFIX" ]
[]
["NSM_AWS_SERVICE_SUFFIX"]
go
1
0
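
The teardown script derives every resource name from NSM_AWS_SERVICE_SUFFIX and runs its three slow stack deletions concurrently under one sync.WaitGroup. A minimal sketch of that naming-and-fan-out structure follows; deleteStack is a stand-in that only logs and makes no AWS calls.

package main

import (
    "log"
    "os"
    "sync"
)

func deleteStack(name string) {
    log.Printf("would delete stack %q here", name)
}

func main() {
    suffix := os.Getenv("NSM_AWS_SERVICE_SUFFIX")
    stacks := []string{"nsm-nodes" + suffix, "nsm" + suffix, "nsm-srv" + suffix}

    var wg sync.WaitGroup
    for _, name := range stacks {
        wg.Add(1)
        go func(n string) {
            defer wg.Done()
            deleteStack(n)
        }(name)
    }
    wg.Wait()
    log.Println("all teardown steps finished")
}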
test/functional/test_framework/test_node.py
#!/usr/bin/env python3 # Copyright (c) 2017-2021 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Class for blinkhashd node under test""" import contextlib import decimal import errno from enum import Enum import http.client import json import logging import os import re import subprocess import tempfile import time import urllib.parse import collections import shlex import sys from .authproxy import JSONRPCException from .descriptors import descsum_create from .p2p import P2P_SUBVERSION from .util import ( MAX_NODES, assert_equal, append_config, delete_cookie_file, get_auth_cookie, get_rpc_proxy, rpc_url, wait_until_helper, p2p_port, EncodeDecimal, ) BLINKHASHD_PROC_WAIT_TIMEOUT = 60 class FailedToStartError(Exception): """Raised when a node fails to start correctly.""" class ErrorMatch(Enum): FULL_TEXT = 1 FULL_REGEX = 2 PARTIAL_REGEX = 3 class TestNode(): """A class for representing a blinkhashd node under test. This class contains: - state about the node (whether it's running, etc) - a Python subprocess.Popen object representing the running process - an RPC connection to the node - one or more P2P connections to the node To make things easier for the test writer, any unrecognised messages will be dispatched to the RPC connection.""" def __init__(self, i, datadir, *, chain, rpchost, timewait, timeout_factor, blinkhashd, blinkhash_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None, descriptors=False): """ Kwargs: start_perf (bool): If True, begin profiling the node with `perf` as soon as the node starts. """ self.index = i self.p2p_conn_index = 1 self.datadir = datadir self.blinkhashconf = os.path.join(self.datadir, "blinkhash.conf") self.stdout_dir = os.path.join(self.datadir, "stdout") self.stderr_dir = os.path.join(self.datadir, "stderr") self.chain = chain self.rpchost = rpchost self.rpc_timeout = timewait self.binary = blinkhashd self.coverage_dir = coverage_dir self.cwd = cwd self.descriptors = descriptors if extra_conf is not None: append_config(datadir, extra_conf) # Most callers will just need to add extra args to the standard list below. # For those callers that need more flexibility, they can just set the args property directly. # Note that common args are set in the config file (see initialize_datadir) self.extra_args = extra_args self.version = version # Configuration for logging is set as command-line args rather than in the blinkhash.conf file. # This means that starting a blinkhashd using the temp dir to debug a failed test won't # spam debug.log. 
self.args = [ self.binary, "-datadir=" + self.datadir, "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-uacomment=testnode%d" % i, ] if use_valgrind: default_suppressions_file = os.path.join( os.path.dirname(os.path.realpath(__file__)), "..", "..", "..", "contrib", "valgrind.supp") suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE", default_suppressions_file) self.args = ["valgrind", "--suppressions={}".format(suppressions_file), "--gen-suppressions=all", "--exit-on-first-error=yes", "--error-exitcode=1", "--quiet"] + self.args if self.version_is_at_least(190000): self.args.append("-logthreadnames") if self.version_is_at_least(219900): self.args.append("-logsourcelocations") self.cli = TestNodeCLI(blinkhash_cli, self.datadir) self.use_cli = use_cli self.start_perf = start_perf self.running = False self.process = None self.rpc_connected = False self.rpc = None self.url = None self.log = logging.getLogger('TestFramework.node%d' % i) self.cleanup_on_exit = True # Whether to kill the node when this object goes away # Cache perf subprocesses here by their data output filename. self.perf_subprocesses = {} self.p2ps = [] self.timeout_factor = timeout_factor AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key']) PRIV_KEYS = [ # address , privkey AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'), AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'), AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'), AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'), AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'), AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'), AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'), AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'), AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'), AddressKeyPair('mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'), AddressKeyPair('mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'), AddressKeyPair('mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'), ] def get_deterministic_priv_key(self): """Return a deterministic priv key in base58, that only depends on the node's index""" assert len(self.PRIV_KEYS) == MAX_NODES return self.PRIV_KEYS[self.index] def _node_msg(self, msg: str) -> str: """Return a modified msg that identifies this node by its index as a debugging aid.""" return "[node %d] %s" % (self.index, msg) def _raise_assertion_error(self, msg: str): """Raise an AssertionError with msg modified to identify this node.""" raise AssertionError(self._node_msg(msg)) def __del__(self): # Ensure that we don't leave any blinkhashd processes lying around after # the test ends if self.process and self.cleanup_on_exit: # Should only happen on test failure # Avoid using logger, as that may have already been shutdown when # this destructor is called. 
print(self._node_msg("Cleaning up leftover process")) self.process.kill() def __getattr__(self, name): """Dispatches any unrecognised messages to the RPC connection or a CLI instance.""" if self.use_cli: return getattr(RPCOverloadWrapper(self.cli, True, self.descriptors), name) else: assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection") return getattr(RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name) def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs): """Start the node.""" if extra_args is None: extra_args = self.extra_args # Add a new stdout and stderr file each time blinkhashd is started if stderr is None: stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) if stdout is None: stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) self.stderr = stderr self.stdout = stdout if cwd is None: cwd = self.cwd # Delete any existing cookie file -- if such a file exists (eg due to # unclean shutdown), it will get overwritten anyway by blinkhashd, and # potentially interfere with our attempt to authenticate delete_cookie_file(self.datadir, self.chain) # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1") self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs) self.running = True self.log.debug("blinkhashd started, waiting for RPC to come up") if self.start_perf: self._start_perf() def wait_for_rpc_connection(self): """Sets up an RPC connection to the blinkhashd process. Returns False if unable to connect.""" # Poll at a rate of four times per second poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): if self.process.poll() is not None: raise FailedToStartError(self._node_msg( 'blinkhashd exited with status {} during initialization'.format(self.process.returncode))) try: rpc = get_rpc_proxy( rpc_url(self.datadir, self.index, self.chain, self.rpchost), self.index, timeout=self.rpc_timeout // 2, # Shorter timeout to allow for one retry in case of ETIMEDOUT coveragedir=self.coverage_dir, ) rpc.getblockcount() # If the call to getblockcount() succeeds then the RPC connection is up if self.version_is_at_least(190000): # getmempoolinfo.loaded is available since commit # bb8ae2c (version 0.19.0) wait_until_helper(lambda: rpc.getmempoolinfo()['loaded'], timeout_factor=self.timeout_factor) # Wait for the node to finish reindex, block import, and # loading the mempool. Usually importing happens fast or # even "immediate" when the node is started. However, there # is no guarantee and sometimes ThreadImport might finish # later. This is going to cause intermittent test failures, # because generally the tests assume the node is fully # ready after being started. # # For example, the node will reject block messages from p2p # when it is still importing with the error "Unexpected # block message received" # # The wait is done here to make tests as robust as possible # and prevent racy tests and intermittent failures as much # as possible. Some tests might not need this, but the # overhead is trivial, and the added guarantees are worth # the minimal performance cost. 
self.log.debug("RPC successfully started") if self.use_cli: return self.rpc = rpc self.rpc_connected = True self.url = self.rpc.rpc_url return except JSONRPCException as e: # Initialization phase # -28 RPC in warmup # -342 Service unavailable, RPC server started but is shutting down due to error if e.error['code'] != -28 and e.error['code'] != -342: raise # unknown JSON RPC exception except ConnectionResetError: # This might happen when the RPC server is in warmup, but shut down before the call to getblockcount # succeeds. Try again to properly raise the FailedToStartError pass except OSError as e: if e.errno == errno.ETIMEDOUT: pass # Treat identical to ConnectionResetError elif e.errno == errno.ECONNREFUSED: pass # Port not yet open? else: raise # unknown OS error except ValueError as e: # cookie file not found and no rpcuser or rpcpassword; blinkhashd is still starting if "No RPC credentials" not in str(e): raise time.sleep(1.0 / poll_per_s) self._raise_assertion_error("Unable to connect to blinkhashd after {}s".format(self.rpc_timeout)) def wait_for_cookie_credentials(self): """Ensures auth cookie credentials can be read, e.g. for testing CLI with -rpcwait before RPC connection is up.""" self.log.debug("Waiting for cookie credentials") # Poll at a rate of four times per second. poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): try: get_auth_cookie(self.datadir, self.chain) self.log.debug("Cookie credentials successfully retrieved") return except ValueError: # cookie file not found and no rpcuser or rpcpassword; blinkhashd is still starting pass # so we continue polling until RPC credentials are retrieved time.sleep(1.0 / poll_per_s) self._raise_assertion_error("Unable to retrieve cookie credentials after {}s".format(self.rpc_timeout)) def generate(self, nblocks, maxtries=1000000, **kwargs): self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`") return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries, **kwargs) def generateblock(self, *args, invalid_call, **kwargs): assert not invalid_call return self.__getattr__('generateblock')(*args, **kwargs) def generatetoaddress(self, *args, invalid_call, **kwargs): assert not invalid_call return self.__getattr__('generatetoaddress')(*args, **kwargs) def generatetodescriptor(self, *args, invalid_call, **kwargs): assert not invalid_call return self.__getattr__('generatetodescriptor')(*args, **kwargs) def get_wallet_rpc(self, wallet_name): if self.use_cli: return RPCOverloadWrapper(self.cli("-rpcwallet={}".format(wallet_name)), True, self.descriptors) else: assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected") wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name)) return RPCOverloadWrapper(self.rpc / wallet_path, descriptors=self.descriptors) def version_is_at_least(self, ver): return self.version is None or self.version >= ver def stop_node(self, expected_stderr='', *, wait=0, wait_until_stopped=True): """Stop the node.""" if not self.running: return self.log.debug("Stopping node") try: # Do not use wait argument when testing older nodes, e.g. in feature_backwards_compatibility.py if self.version_is_at_least(180000): self.stop(wait=wait) else: self.stop() except http.client.CannotSendRequest: self.log.exception("Unable to stop node.") # If there are any running perf processes, stop them. 
for profile_name in tuple(self.perf_subprocesses.keys()): self._stop_perf(profile_name) # Check that stderr is as expected self.stderr.seek(0) stderr = self.stderr.read().decode('utf-8').strip() if stderr != expected_stderr: raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr)) self.stdout.close() self.stderr.close() del self.p2ps[:] if wait_until_stopped: self.wait_until_stopped() def is_node_stopped(self): """Checks whether the node has stopped. Returns True if the node has stopped. False otherwise. This method is responsible for freeing resources (self.process).""" if not self.running: return True return_code = self.process.poll() if return_code is None: return False # process has stopped. Assert that it didn't return an error code. assert return_code == 0, self._node_msg( "Node returned non-zero exit code (%d) when stopping" % return_code) self.running = False self.process = None self.rpc_connected = False self.rpc = None self.log.debug("Node stopped") return True def wait_until_stopped(self, timeout=BLINKHASHD_PROC_WAIT_TIMEOUT): wait_until_helper(self.is_node_stopped, timeout=timeout, timeout_factor=self.timeout_factor) @contextlib.contextmanager def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2): if unexpected_msgs is None: unexpected_msgs = [] time_end = time.time() + timeout * self.timeout_factor debug_log = os.path.join(self.datadir, self.chain, 'debug.log') with open(debug_log, encoding='utf-8') as dl: dl.seek(0, 2) prev_size = dl.tell() yield while True: found = True with open(debug_log, encoding='utf-8') as dl: dl.seek(prev_size) log = dl.read() print_log = " - " + "\n - ".join(log.splitlines()) for unexpected_msg in unexpected_msgs: if re.search(re.escape(unexpected_msg), log, flags=re.MULTILINE): self._raise_assertion_error('Unexpected message "{}" partially matches log:\n\n{}\n\n'.format(unexpected_msg, print_log)) for expected_msg in expected_msgs: if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None: found = False if found: return if time.time() >= time_end: break time.sleep(0.05) self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log)) @contextlib.contextmanager def profile_with_perf(self, profile_name: str): """ Context manager that allows easy profiling of node activity using `perf`. See `test/functional/README.md` for details on perf usage. Args: profile_name: This string will be appended to the profile data filename generated by perf. """ subp = self._start_perf(profile_name) yield if subp: self._stop_perf(profile_name) def _start_perf(self, profile_name=None): """Start a perf process to profile this node. 
Returns the subprocess running perf.""" subp = None def test_success(cmd): return subprocess.call( # shell=True required for pipe use below cmd, shell=True, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0 if not sys.platform.startswith('linux'): self.log.warning("Can't profile with perf; only available on Linux platforms") return None if not test_success('which perf'): self.log.warning("Can't profile with perf; must install perf-tools") return None if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))): self.log.warning( "perf output won't be very useful without debug symbols compiled into blinkhashd") output_path = tempfile.NamedTemporaryFile( dir=self.datadir, prefix="{}.perf.data.".format(profile_name or 'test'), delete=False, ).name cmd = [ 'perf', 'record', '-g', # Record the callgraph. '--call-graph', 'dwarf', # Compatibility for gcc's --fomit-frame-pointer. '-F', '101', # Sampling frequency in Hz. '-p', str(self.process.pid), '-o', output_path, ] subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.perf_subprocesses[profile_name] = subp return subp def _stop_perf(self, profile_name): """Stop (and pop) a perf subprocess.""" subp = self.perf_subprocesses.pop(profile_name) output_path = subp.args[subp.args.index('-o') + 1] subp.terminate() subp.wait(timeout=10) stderr = subp.stderr.read().decode() if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr: self.log.warning( "perf couldn't collect data! Try " "'sudo sysctl -w kernel.perf_event_paranoid=-1'") else: report_cmd = "perf report -i {}".format(output_path) self.log.info("See perf output by running '{}'".format(report_cmd)) def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs): """Attempt to start the node and expect it to raise an error. extra_args: extra arguments to pass through to blinkhashd expected_msg: regex that stderr should match when blinkhashd fails Will throw if blinkhashd starts without an error. 
Will throw if an expected_msg is provided and it does not match blinkhashd's stdout.""" with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \ tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout: try: self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs) ret = self.process.wait(timeout=self.rpc_timeout) self.log.debug(self._node_msg(f'blinkhashd exited with status {ret} during initialization')) assert ret != 0 # Exit code must indicate failure self.running = False self.process = None # Check stderr for expected message if expected_msg is not None: log_stderr.seek(0) stderr = log_stderr.read().decode('utf-8').strip() if match == ErrorMatch.PARTIAL_REGEX: if re.search(expected_msg, stderr, flags=re.MULTILINE) is None: self._raise_assertion_error( 'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr)) elif match == ErrorMatch.FULL_REGEX: if re.fullmatch(expected_msg, stderr) is None: self._raise_assertion_error( 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr)) elif match == ErrorMatch.FULL_TEXT: if expected_msg != stderr: self._raise_assertion_error( 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr)) except subprocess.TimeoutExpired: self.process.kill() self.running = False self.process = None assert_msg = f'blinkhashd should have exited within {self.rpc_timeout}s ' if expected_msg is None: assert_msg += "with an error" else: assert_msg += "with expected error " + expected_msg self._raise_assertion_error(assert_msg) def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs): """Add an inbound p2p connection to the node. This method adds the p2p connection to the self.p2ps list and also returns the connection to the caller.""" if 'dstport' not in kwargs: kwargs['dstport'] = p2p_port(self.index) if 'dstaddr' not in kwargs: kwargs['dstaddr'] = '127.0.0.1' p2p_conn.peer_connect(**kwargs, net=self.chain, timeout_factor=self.timeout_factor)() self.p2ps.append(p2p_conn) p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False) if wait_for_verack: # Wait for the node to send us the version and verack p2p_conn.wait_for_verack() # At this point we have sent our version message and received the version and verack, however the full node # has not yet received the verack from us (in reply to their version). So, the connection is not yet fully # established (fSuccessfullyConnected). # # This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the # message we send. However, it might lead to races where we are expecting to receive a message. E.g. a # transaction that will be added to the mempool as soon as we return here. # # So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds) # in comparison to the upside of making tests less fragile and unexpected intermittent errors less likely. p2p_conn.sync_with_ping() # Consistency check that the Blinkhash Core has received our user agent string. This checks the # node's newest peer. It could be racy if another Blinkhash Core node has connected since we opened # our connection, but we don't expect that to happen. assert_equal(self.getpeerinfo()[-1]['subver'], P2P_SUBVERSION) return p2p_conn def add_outbound_p2p_connection(self, p2p_conn, *, p2p_idx, connection_type="outbound-full-relay", **kwargs): """Add an outbound p2p connection from node. 
Must be an "outbound-full-relay", "block-relay-only" or "addr-fetch" connection. This method adds the p2p connection to the self.p2ps list and returns the connection to the caller. """ def addconnection_callback(address, port): self.log.debug("Connecting to %s:%d %s" % (address, port, connection_type)) self.addconnection('%s:%d' % (address, port), connection_type) p2p_conn.peer_accept_connection(connect_cb=addconnection_callback, connect_id=p2p_idx + 1, net=self.chain, timeout_factor=self.timeout_factor, **kwargs)() p2p_conn.wait_for_connect() self.p2ps.append(p2p_conn) p2p_conn.wait_for_verack() p2p_conn.sync_with_ping() return p2p_conn def num_test_p2p_connections(self): """Return number of test framework p2p connections to the node.""" return len([peer for peer in self.getpeerinfo() if peer['subver'] == P2P_SUBVERSION]) def disconnect_p2ps(self): """Close all p2p connections to the node.""" for p in self.p2ps: p.peer_disconnect() del self.p2ps[:] wait_until_helper(lambda: self.num_test_p2p_connections() == 0, timeout_factor=self.timeout_factor) class TestNodeCLIAttr: def __init__(self, cli, command): self.cli = cli self.command = command def __call__(self, *args, **kwargs): return self.cli.send_cli(self.command, *args, **kwargs) def get_request(self, *args, **kwargs): return lambda: self(*args, **kwargs) def arg_to_cli(arg): if isinstance(arg, bool): return str(arg).lower() elif arg is None: return 'null' elif isinstance(arg, dict) or isinstance(arg, list): return json.dumps(arg, default=EncodeDecimal) else: return str(arg) class TestNodeCLI(): """Interface to blinkhash-cli for an individual node""" def __init__(self, binary, datadir): self.options = [] self.binary = binary self.datadir = datadir self.input = None self.log = logging.getLogger('TestFramework.blinkhashcli') def __call__(self, *options, input=None): # TestNodeCLI is callable with blinkhash-cli command-line options cli = TestNodeCLI(self.binary, self.datadir) cli.options = [str(o) for o in options] cli.input = input return cli def __getattr__(self, command): return TestNodeCLIAttr(self, command) def batch(self, requests): results = [] for request in requests: try: results.append(dict(result=request())) except JSONRPCException as e: results.append(dict(error=e)) return results def send_cli(self, command=None, *args, **kwargs): """Run blinkhash-cli command. 
Deserializes returned string as python object.""" pos_args = [arg_to_cli(arg) for arg in args] named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()] assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same blinkhash-cli call" p_args = [self.binary, "-datadir=" + self.datadir] + self.options if named_args: p_args += ["-named"] if command is not None: p_args += [command] p_args += pos_args + named_args self.log.debug("Running blinkhash-cli {}".format(p_args[2:])) process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) cli_stdout, cli_stderr = process.communicate(input=self.input) returncode = process.poll() if returncode: match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr) if match: code, message = match.groups() raise JSONRPCException(dict(code=int(code), message=message)) # Ignore cli_stdout, raise with cli_stderr raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr) try: return json.loads(cli_stdout, parse_float=decimal.Decimal) except (json.JSONDecodeError, decimal.InvalidOperation): return cli_stdout.rstrip("\n") class RPCOverloadWrapper(): def __init__(self, rpc, cli=False, descriptors=False): self.rpc = rpc self.is_cli = cli self.descriptors = descriptors def __getattr__(self, name): return getattr(self.rpc, name) def createwallet(self, wallet_name, disable_private_keys=None, blank=None, passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None, external_signer=None): if descriptors is None: descriptors = self.descriptors return self.__getattr__('createwallet')(wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors, load_on_startup, external_signer) def importprivkey(self, privkey, label=None, rescan=None): wallet_info = self.getwalletinfo() if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']): return self.__getattr__('importprivkey')(privkey, label, rescan) desc = descsum_create('combo(' + privkey + ')') req = [{ 'desc': desc, 'timestamp': 0 if rescan else 'now', 'label': label if label else '' }] import_res = self.importdescriptors(req) if not import_res[0]['success']: raise JSONRPCException(import_res[0]['error']) def addmultisigaddress(self, nrequired, keys, label=None, address_type=None): wallet_info = self.getwalletinfo() if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']): return self.__getattr__('addmultisigaddress')(nrequired, keys, label, address_type) cms = self.createmultisig(nrequired, keys, address_type) req = [{ 'desc': cms['descriptor'], 'timestamp': 0, 'label': label if label else '' }] import_res = self.importdescriptors(req) if not import_res[0]['success']: raise JSONRPCException(import_res[0]['error']) return cms def importpubkey(self, pubkey, label=None, rescan=None): wallet_info = self.getwalletinfo() if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']): return self.__getattr__('importpubkey')(pubkey, label, rescan) desc = descsum_create('combo(' + pubkey + ')') req = [{ 'desc': desc, 'timestamp': 0 if rescan else 'now', 'label': label if label else '' }] import_res = self.importdescriptors(req) if not import_res[0]['success']: raise JSONRPCException(import_res[0]['error']) def importaddress(self, address, label=None, rescan=None, p2sh=None): wallet_info = 
self.getwalletinfo() if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']): return self.__getattr__('importaddress')(address, label, rescan, p2sh) is_hex = False try: int(address ,16) is_hex = True desc = descsum_create('raw(' + address + ')') except: desc = descsum_create('addr(' + address + ')') reqs = [{ 'desc': desc, 'timestamp': 0 if rescan else 'now', 'label': label if label else '' }] if is_hex and p2sh: reqs.append({ 'desc': descsum_create('p2sh(raw(' + address + '))'), 'timestamp': 0 if rescan else 'now', 'label': label if label else '' }) import_res = self.importdescriptors(reqs) for res in import_res: if not res['success']: raise JSONRPCException(res['error'])
[]
[]
[ "VALGRIND_SUPPRESSIONS_FILE" ]
[]
["VALGRIND_SUPPRESSIONS_FILE"]
python
1
0
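A note on the single constarg above: it comes from the os.getenv("VALGRIND_SUPPRESSIONS_FILE", default_suppressions_file) call in TestNode.__init__, i.e. a constant key with a computed fallback path. A minimal Go rendering of that lookup shape, for comparison with the Go rows in this dataset (the fallback path below is illustrative only, not the repository's actual default):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// suppressionsFile mirrors the Python pattern: use the environment value when
// the variable is set, otherwise fall back to a computed default path.
func suppressionsFile() string {
	if v, ok := os.LookupEnv("VALGRIND_SUPPRESSIONS_FILE"); ok {
		return v
	}
	return filepath.Join("contrib", "valgrind.supp") // illustrative fallback
}

func main() {
	fmt.Println("using suppressions file:", suppressionsFile())
}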
test/e2e/daemonset.go
package e2e import ( goctx "context" "encoding/json" "fmt" "io/ioutil" "net/http" "testing" "time" "github.com/jaegertracing/jaeger-operator/pkg/apis/io/v1alpha1" framework "github.com/operator-framework/operator-sdk/pkg/test" "github.com/operator-framework/operator-sdk/pkg/test/e2eutil" "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" ) // DaemonSet runs a test with the agent as DaemonSet func DaemonSet(t *testing.T) { ctx := prepare(t) defer ctx.Cleanup() if err := daemonsetTest(t, framework.Global, ctx); err != nil { t.Fatal(err) } } func daemonsetTest(t *testing.T, f *framework.Framework, ctx *framework.TestCtx) error { cleanupOptions := &framework.CleanupOptions{TestContext: ctx, Timeout: timeout, RetryInterval: retryInterval} namespace, err := ctx.GetNamespace() if err != nil { return fmt.Errorf("could not get namespace: %v", err) } j := &v1alpha1.Jaeger{ TypeMeta: metav1.TypeMeta{ Kind: "Jaeger", APIVersion: "io.jaegertracing/v1alpha1", }, ObjectMeta: metav1.ObjectMeta{ Name: "agent-as-daemonset", Namespace: namespace, }, Spec: v1alpha1.JaegerSpec{ Strategy: "allInOne", AllInOne: v1alpha1.JaegerAllInOneSpec{}, Agent: v1alpha1.JaegerAgentSpec{ Strategy: "DaemonSet", Options: v1alpha1.NewOptions(map[string]interface{}{ "log-level": "debug", }), }, }, } logrus.Infof("passing %v", j) err = f.Client.Create(goctx.TODO(), j, cleanupOptions) if err != nil { return err } err = WaitForDaemonSet(t, f.KubeClient, namespace, "agent-as-daemonset-agent-daemonset", retryInterval, timeout) if err != nil { return err } selector := map[string]string{"app": "vertx-create-span"} dep := &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{ APIVersion: "apps/v1", Kind: "Deployment", }, ObjectMeta: metav1.ObjectMeta{ Name: "vertx-create-span", Namespace: namespace, }, Spec: appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: selector, }, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: selector, }, Spec: v1.PodSpec{ Containers: []v1.Container{{ Image: "jaegertracing/vertx-create-span:operator-e2e-tests", Name: "vertx-create-span", Env: []v1.EnvVar{ v1.EnvVar{ Name: "JAEGER_AGENT_HOST", ValueFrom: &v1.EnvVarSource{ FieldRef: &v1.ObjectFieldSelector{ FieldPath: "status.hostIP", }, }, }, }, Ports: []v1.ContainerPort{ { ContainerPort: 8080, }, }, ReadinessProbe: &v1.Probe{ Handler: v1.Handler{ HTTPGet: &v1.HTTPGetAction{ Path: "/", Port: intstr.FromInt(8080), }, }, InitialDelaySeconds: 1, }, LivenessProbe: &v1.Probe{ Handler: v1.Handler{ HTTPGet: &v1.HTTPGetAction{ Path: "/", Port: intstr.FromInt(8080), }, }, InitialDelaySeconds: 1, }, }}, }, }, }, } err = f.Client.Create(goctx.TODO(), dep, cleanupOptions) if err != nil { return err } err = e2eutil.WaitForDeployment(t, f.KubeClient, namespace, "vertx-create-span", 1, retryInterval, timeout) if err != nil { return err } err = WaitForIngress(t, f.KubeClient, namespace, "agent-as-daemonset-query", retryInterval, timeout) if err != nil { return err } i, err := f.KubeClient.ExtensionsV1beta1().Ingresses(namespace).Get("agent-as-daemonset-query", metav1.GetOptions{}) if err != nil { return err } if len(i.Status.LoadBalancer.Ingress) != 1 { return fmt.Errorf("Wrong number of ingresses. 
Expected 1, was %v", len(i.Status.LoadBalancer.Ingress)) } address := i.Status.LoadBalancer.Ingress[0].IP url := fmt.Sprintf("http://%s/api/traces?service=order", address) c := http.Client{Timeout: time.Second} req, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { return err } return wait.Poll(retryInterval, timeout, func() (done bool, err error) { res, err := c.Do(req) if err != nil { return false, err } body, err := ioutil.ReadAll(res.Body) if err != nil { return false, err } resp := &resp{} err = json.Unmarshal(body, &resp) if err != nil { return false, err } return len(resp.Data) > 0, nil }) }
[]
[]
[]
[]
[]
go
null
null
null
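This row has empty environment/constarg lists because daemonsetTest only sets an environment variable on the deployed workload rather than reading one: JAEGER_AGENT_HOST is injected from the pod's status.hostIP via a fieldRef. A small self-contained sketch of just that EnvVar, reusing the k8s.io/api/core/v1 types the file already imports:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// agentHostEnv reproduces the EnvVar from the Deployment spec above: the agent
// host is not read from the test process environment but resolved at runtime
// from the node's host IP, which is why the extraction columns stay empty.
func agentHostEnv() v1.EnvVar {
	return v1.EnvVar{
		Name: "JAEGER_AGENT_HOST",
		ValueFrom: &v1.EnvVarSource{
			FieldRef: &v1.ObjectFieldSelector{
				FieldPath: "status.hostIP",
			},
		},
	}
}

func main() {
	fmt.Printf("%+v\n", agentHostEnv())
}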
src/python/tensorflow_cloud/tuner/tests/integration/distributing_cloudtuner_integration_test.py
# Lint as: python3 # Copyright 2020 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Integration tests for Distributing Cloud Tuner.""" import contextlib import io import os import re import keras_tuner import tensorflow as tf from tensorflow import keras from tensorflow_cloud.tuner import vizier_client from tensorflow_cloud.tuner.tuner import DistributingCloudTuner # If input dataset is created outside tuner.search(), # it requires eager execution even in TF 1.x. if tf.version.VERSION.split(".")[0] == "1": tf.compat.v1.enable_eager_execution() # The project id to use to run tests. _PROJECT_ID = os.environ["PROJECT_ID"] # The GCP region in which the end-to-end test is run. _REGION = os.environ["REGION"] # Study ID for testing _STUDY_ID_BASE = "dct_{}".format((os.environ["BUILD_ID"]).replace("-", "_")) # The base docker image to use for the remote environment. _DOCKER_IMAGE = os.environ["DOCKER_IMAGE"] # The staging bucket to use to copy the model and data for the remote run. _REMOTE_DIR = os.path.join("gs://", os.environ["TEST_BUCKET"], _STUDY_ID_BASE) # The search space for hyperparameters _HPS = keras_tuner.HyperParameters() _HPS.Float("learning_rate", min_value=1e-4, max_value=1e-2, sampling="log") _HPS.Int("num_layers", 2, 10) def _load_data(dir_path=None): """Loads and prepares data.""" mnist_file_path = None if dir_path: mnist_file_path = os.path.join(dir_path, "mnist.npz") (x, y), (val_x, val_y) = keras.datasets.mnist.load_data(mnist_file_path) x = x.astype("float32") / 255.0 val_x = val_x.astype("float32") / 255.0 return ((x[:10000], y[:10000]), (val_x, val_y)) def _build_model(hparams): # Note that CloudTuner does not support adding hyperparameters in # the model building function. Instead, the search space is configured # by passing a hyperparameters argument when instantiating (constructing) # the tuner. 
model = keras.Sequential() model.add(keras.layers.Flatten(input_shape=(28, 28))) # Build the model with number of layers from the hyperparameters for _ in range(hparams.get("num_layers")): model.add(keras.layers.Dense(units=64, activation="relu")) model.add(keras.layers.Dense(10, activation="softmax")) # Compile the model with learning rate from the hyperparameters model.compile( optimizer=keras.optimizers.Adam(lr=hparams.get("learning_rate")), loss="sparse_categorical_crossentropy", metrics=["acc"], ) return model class _DistributingCloudTunerIntegrationTestBase(tf.test.TestCase): def setUp(self): super(_DistributingCloudTunerIntegrationTestBase, self).setUp() self._study_id = None def _assert_output(self, fn, regex_str): stdout = io.StringIO() with contextlib.redirect_stdout(stdout): fn() output = stdout.getvalue() self.assertRegex(output, re.compile(regex_str, re.DOTALL)) def _assert_results_summary(self, fn): self._assert_output( fn, ".*Results summary.*Trial summary.*Hyperparameters.*") def _delete_dir(self, path) -> None: """Deletes a directory if exists.""" if tf.io.gfile.isdir(path): tf.io.gfile.rmtree(path) def tearDown(self): super(_DistributingCloudTunerIntegrationTestBase, self).tearDown() # Delete the study used in the test, if present if self._study_id: service = vizier_client.create_or_load_study( _PROJECT_ID, _REGION, self._study_id, None) service.delete_study() tf.keras.backend.clear_session() # Delete log files, saved_models and other training assets self._delete_dir(_REMOTE_DIR) class DistributingCloudTunerIntegrationTest( _DistributingCloudTunerIntegrationTestBase): def setUp(self): super(DistributingCloudTunerIntegrationTest, self).setUp() (self._x, self._y), (self._val_x, self._val_y) = _load_data( self.get_temp_dir()) def testCloudTunerHyperparameters(self): """Test case to configure Distributing Tuner with HyperParameters.""" study_id = "{}_hyperparameters".format(_STUDY_ID_BASE) self._study_id = study_id tuner = DistributingCloudTuner( _build_model, project_id=_PROJECT_ID, region=_REGION, objective="acc", hyperparameters=_HPS, max_trials=2, study_id=study_id, directory=_REMOTE_DIR, container_uri=_DOCKER_IMAGE ) tuner.search( x=self._x, y=self._y, epochs=2, validation_data=(self._val_x, self._val_y), ) self._assert_results_summary(tuner.results_summary) if __name__ == "__main__": tf.test.main()
[]
[]
[ "TEST_BUCKET", "PROJECT_ID", "DOCKER_IMAGE", "REGION", "BUILD_ID" ]
[]
["TEST_BUCKET", "PROJECT_ID", "DOCKER_IMAGE", "REGION", "BUILD_ID"]
python
5
0
exaroton_test.go
package exaroton

import (
	"os"
	"testing"
)

var (
	envToken  = os.Getenv("EXAROTON_TOKEN")
	envServer = os.Getenv("EXAROTON_SERVER")
)

func TestAccount(t *testing.T) {
	client := New(envToken)
	acc, err := client.Account()
	if err != nil {
		t.Error(err)
		return
	}
	t.Log(acc.Name)
}

func TestServers(t *testing.T) {
	client := New(envToken)
	servers, err := client.Servers()
	if err != nil {
		t.Error(err)
		return
	}
	for _, s := range servers {
		t.Log(s.Name + ": " + s.ID)
	}
}

func TestStatus(t *testing.T) {
	client := New(envToken)
	server, err := client.Server(envServer)
	if err != nil {
		t.Error(err)
		return
	}
	t.Log(server.GetStatus())
}

func TestStart(t *testing.T) {
	client := New(envToken)
	server, err := client.Server(envServer)
	if err != nil {
		t.Error(err)
		return
	}
	err = server.Stop(client)
	if err != nil {
		t.Error(err)
		return
	}
}

func TestExecute(t *testing.T) {
	client := New(envToken)
	server, error := client.Server(envServer)
	if error != nil {
		t.Error(error)
		return
	}
	err := server.ExecuteCommand(client, "say hello")
	if err != nil {
		t.Error(err)
		return
	}
}

func TestLogs(t *testing.T) {
	client := New(envToken)
	server, error := client.Server(envServer)
	if error != nil {
		t.Error(error)
		return
	}
	logs, err := server.GetLogs(client)
	if err != nil {
		t.Error(err)
		return
	}
	t.Log(logs)
}

func TestShareLog(t *testing.T) {
	client := New(envToken)
	server, error := client.Server(envServer)
	if error != nil {
		t.Error(error)
		return
	}
	logs, err := server.ShareLogs(client)
	if err != nil {
		t.Error(err)
		return
	}
	t.Log(logs.ID)
	t.Log(logs.Raw)
	t.Log(logs.URL)
}

func TestSetRam(t *testing.T) {
	client := New(envToken)
	server, err := client.Server(envServer)
	if err != nil {
		t.Error(err)
		return
	}
	err = server.SetRam(client, 2)
	if err != nil {
		t.Error(err)
		return
	}
}

func TestGetPlayerList(t *testing.T) {
	client := New(envToken)
	server, err := client.Server(envServer)
	if err != nil {
		t.Error(err)
		return
	}
	playerList, err := server.GetPlayerList(client)
	if err != nil {
		t.Error(err)
		return
	}
	t.Log(playerList.List)
}

func TestGetPlayerListType(t *testing.T) {
	client := New(envToken)
	server, err := client.Server(envServer)
	if err != nil {
		t.Error(err)
		return
	}
	playerList, err := server.GetPlayerList(client, "whitelist")
	if err != nil {
		t.Error(err)
		return
	}
	t.Log(playerList.List)
}

func TestRemovePlayerList(t *testing.T) {
	client := New(envToken)
	server, err := client.Server(envServer)
	if err != nil {
		t.Error(err)
		return
	}
	playerList, err := server.GetPlayerList(client, "whitelist")
	if err != nil {
		t.Error(err)
		return
	}
	username := []string{"a", "b", "c", "d"}
	playerList.RemoveEntry(client, username)
}

func TestMotd(t *testing.T) {
	client := New(envToken)
	server, err := client.Server(envServer)
	if err != nil {
		t.Error(err)
		return
	}
	t.Log(server.MOTD)
	err = server.SetMOTD(client, "Hello!")
	if err != nil {
		t.Error(err)
		return
	}
	t.Log(server.MOTD)
}
[ "\"EXAROTON_TOKEN\"", "\"EXAROTON_SERVER\"" ]
[]
[ "EXAROTON_TOKEN", "EXAROTON_SERVER" ]
[]
["EXAROTON_TOKEN", "EXAROTON_SERVER"]
go
2
0
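The two constarg entries for this row come from package-level os.Getenv calls with literal key names, which is exactly what constargcount=2 / variableargcount=0 records. A minimal standalone sketch of that pattern (the variable names and the check in main are illustrative, not part of the exaroton client):

package main

import (
	"fmt"
	"os"
)

// Package-level reads with constant keys: the dataset's constarg column lists
// the literal names passed to os.Getenv, and variablearg stays empty because
// no key is computed at runtime.
var (
	token  = os.Getenv("EXAROTON_TOKEN")  // API credential used by the tests
	server = os.Getenv("EXAROTON_SERVER") // target server ID
)

func main() {
	if token == "" || server == "" {
		fmt.Println("EXAROTON_TOKEN and EXAROTON_SERVER must be set to run the tests")
		return
	}
	fmt.Println("configured server:", server)
}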
vendor/github.com/xenolf/lego/challenge/dns01/dns_challenge.go
package dns01 import ( "crypto/sha256" "encoding/base64" "fmt" "os" "strconv" "time" "github.com/miekg/dns" "github.com/xenolf/lego/acme" "github.com/xenolf/lego/acme/api" "github.com/xenolf/lego/challenge" "github.com/xenolf/lego/log" "github.com/xenolf/lego/platform/wait" ) const ( // DefaultPropagationTimeout default propagation timeout DefaultPropagationTimeout = 60 * time.Second // DefaultPollingInterval default polling interval DefaultPollingInterval = 2 * time.Second // DefaultTTL default TTL DefaultTTL = 120 ) type ValidateFunc func(core *api.Core, domain string, chlng acme.Challenge) error type ChallengeOption func(*Challenge) error // CondOption Conditional challenge option. func CondOption(condition bool, opt ChallengeOption) ChallengeOption { if !condition { // NoOp options return func(*Challenge) error { return nil } } return opt } // Challenge implements the dns-01 challenge type Challenge struct { core *api.Core validate ValidateFunc provider challenge.Provider preCheck preCheck dnsTimeout time.Duration } func NewChallenge(core *api.Core, validate ValidateFunc, provider challenge.Provider, opts ...ChallengeOption) *Challenge { chlg := &Challenge{ core: core, validate: validate, provider: provider, preCheck: newPreCheck(), dnsTimeout: 10 * time.Second, } for _, opt := range opts { err := opt(chlg) if err != nil { log.Infof("challenge option error: %v", err) } } return chlg } // PreSolve just submits the txt record to the dns provider. // It does not validate record propagation, or do anything at all with the acme server. func (c *Challenge) PreSolve(authz acme.Authorization) error { domain := challenge.GetTargetedDomain(authz) log.Infof("[%s] acme: Preparing to solve DNS-01", domain) chlng, err := challenge.FindChallenge(challenge.DNS01, authz) if err != nil { return err } if c.provider == nil { return fmt.Errorf("[%s] acme: no DNS Provider configured", domain) } // Generate the Key Authorization for the challenge keyAuth, err := c.core.GetKeyAuthorization(chlng.Token) if err != nil { return err } err = c.provider.Present(authz.Identifier.Value, chlng.Token, keyAuth) if err != nil { return fmt.Errorf("[%s] acme: error presenting token: %s", domain, err) } return nil } func (c *Challenge) Solve(authz acme.Authorization) error { domain := challenge.GetTargetedDomain(authz) log.Infof("[%s] acme: Trying to solve DNS-01", domain) chlng, err := challenge.FindChallenge(challenge.DNS01, authz) if err != nil { return err } // Generate the Key Authorization for the challenge keyAuth, err := c.core.GetKeyAuthorization(chlng.Token) if err != nil { return err } fqdn, value := GetRecord(authz.Identifier.Value, keyAuth) var timeout, interval time.Duration switch provider := c.provider.(type) { case challenge.ProviderTimeout: timeout, interval = provider.Timeout() default: timeout, interval = DefaultPropagationTimeout, DefaultPollingInterval } log.Infof("[%s] acme: Checking DNS record propagation using %+v", domain, recursiveNameservers) err = wait.For("propagation", timeout, interval, func() (bool, error) { stop, errP := c.preCheck.call(domain, fqdn, value) if !stop || errP != nil { log.Infof("[%s] acme: Waiting for DNS record propagation.", domain) } return stop, errP }) if err != nil { return err } chlng.KeyAuthorization = keyAuth return c.validate(c.core, domain, chlng) } // CleanUp cleans the challenge. 
func (c *Challenge) CleanUp(authz acme.Authorization) error { log.Infof("[%s] acme: Cleaning DNS-01 challenge", challenge.GetTargetedDomain(authz)) chlng, err := challenge.FindChallenge(challenge.DNS01, authz) if err != nil { return err } keyAuth, err := c.core.GetKeyAuthorization(chlng.Token) if err != nil { return err } return c.provider.CleanUp(authz.Identifier.Value, chlng.Token, keyAuth) } func (c *Challenge) Sequential() (bool, time.Duration) { if p, ok := c.provider.(sequential); ok { return ok, p.Sequential() } return false, 0 } type sequential interface { Sequential() time.Duration } // GetRecord returns a DNS record which will fulfill the `dns-01` challenge func GetRecord(domain, keyAuth string) (fqdn string, value string) { keyAuthShaBytes := sha256.Sum256([]byte(keyAuth)) // base64URL encoding without padding value = base64.RawURLEncoding.EncodeToString(keyAuthShaBytes[:sha256.Size]) fqdn = fmt.Sprintf("_acme-challenge.%s.", domain) if ok, _ := strconv.ParseBool(os.Getenv("LEGO_EXPERIMENTAL_CNAME_SUPPORT")); ok { r, err := dnsQuery(fqdn, dns.TypeCNAME, recursiveNameservers, true) // Check if the domain has CNAME then return that if err == nil && r.Rcode == dns.RcodeSuccess { fqdn = updateDomainWithCName(r, fqdn) } } return }
[ "\"LEGO_EXPERIMENTAL_CNAME_SUPPORT\"" ]
[]
[ "LEGO_EXPERIMENTAL_CNAME_SUPPORT" ]
[]
["LEGO_EXPERIMENTAL_CNAME_SUPPORT"]
go
1
0
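The single constarg here is the experimental CNAME flag read inside GetRecord, where the value is parsed with strconv.ParseBool and a parse failure simply leaves the feature disabled. A minimal standalone sketch of that boolean feature-flag read (the helper name is illustrative):

package main

import (
	"fmt"
	"os"
	"strconv"
)

// cnameSupportEnabled mirrors the check in GetRecord: an unset, empty, or
// unparsable value yields false, so the experimental behaviour stays off
// unless the flag is explicitly set to a truthy value.
func cnameSupportEnabled() bool {
	ok, _ := strconv.ParseBool(os.Getenv("LEGO_EXPERIMENTAL_CNAME_SUPPORT"))
	return ok
}

func main() {
	fmt.Println("CNAME support:", cnameSupportEnabled())
}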
app/request_shoutout/domain/emails/template_builders.py
import os
from datetime import timedelta

from request_shoutout.domain.models import Order, SP_TZ

from .templates import (
    MailTemplate,
    NotifyTalentAboutNewOrderMailData,
    OrderMailData,
    ResetPasswordMailData,
    ShoutoutRequestFulfilledMailData,
    TalentEnrollmentMailData,
)


def customer_order_detail_template_builder(order, talent):
    expiration_datetime = order.expiration_datetime - timedelta(hours=SP_TZ)
    data = OrderMailData(
        customer_name=(order.is_from if order.video_is_for == Order.SOMEONE_ELSE else order.is_to),
        order_created_at=order.created_at.date().strftime('%d/%m/%Y'),
        talent_url=talent.profile_url,
        talent_name=talent.user.get_full_name(),
        order_instruction=order.instruction,
        charge_amout_paid=float(order.charge.amount_paid),
        order_expiration_datetime=expiration_datetime.strftime('%d/%m/%Y - %Hh'),
    )
    subject = f'Seu pedido foi enviado para {talent.user.get_full_name()}'
    return MailTemplate(name='order-detail', subject=subject, data=data)


def notify_talent_about_new_order_template_builder(order, talent):
    expiration_datetime = order.expiration_datetime - timedelta(hours=SP_TZ)
    data = NotifyTalentAboutNewOrderMailData(
        talent_name=talent.user.get_full_name(),
        order_created_at=order.created_at.date().strftime('%d/%m/%Y'),
        customer_name=order.is_from,
        order_instruction=order.instruction,
        charge_amout_paid=float(order.charge.amount_paid),
        order_expiration_datetime=expiration_datetime.strftime('%d/%m/%Y - %Hh'),
        order_is_to=order.is_to,
        dashboard_url=os.environ['SITE_URL'] + 'dashboard/',
    )
    subject = 'Você tem um novo pedido'
    return MailTemplate(name='notify-talent-about-new-order', subject=subject, data=data)


def notify_customer_about_shoutout_request_fulfilled_template_builder(order, talent, shoutout):
    data = ShoutoutRequestFulfilledMailData(
        customer_name=order.is_from,
        order_is_to=order.is_to,
        talent_name=talent.user.get_full_name(),
        shoutout_absolute_url=shoutout.get_absolute_url(),
    )
    subject = f'Seu viggio está pronto'
    if order.video_is_for == Order.SOMEONE_ELSE:
        subject = f'Seu viggio para {order.is_to} está pronto'
    return MailTemplate(name='notify-customer-that-his-viggio-is-ready', subject=subject, data=data)


def enroll_talent_template_builder(data):
    mail_data = TalentEnrollmentMailData(
        email=data['email'],
        first_name=data['first_name'],
        last_name=data['last_name'],
        phone_number=data['phone_number'],
        area_code=data['area_code'],
        main_social_media=data['main_social_media'],
        social_media_username=data['social_media_username'],
        number_of_followers=data['number_of_followers'],
    )
    subject = 'Pedido de inscrição de talento'
    return MailTemplate('notify-staff-about-new-talent-enrollment', subject, mail_data)


def user_reset_password_template_builder(new_password, first_name, last_name):
    mail_data = ResetPasswordMailData(
        first_name=first_name, last_name=last_name, new_password=new_password
    )
    subject = 'Pedido de nova senha'
    return MailTemplate('user-new-password', subject, mail_data)
[]
[]
[ "SITE_URL" ]
[]
["SITE_URL"]
python
1
0
cmd/goa/gen.go
package main import ( "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "runtime" "strings" "goa.design/goa/codegen" "goa.design/goa/pkg" ) // Generator is the code generation management data structure. type Generator struct { // Command is the name of the command to run. Command string // DesignPath is the Go import path to the design package. DesignPath string // Output is the absolute path to the output directory. Output string // bin is the filename of the generated generator. bin string // tmpDir is the temporary directory used to compile the generator. tmpDir string } // NewGenerator creates a Generator. func NewGenerator(cmd string, path, output string) *Generator { bin := "goa" if runtime.GOOS == "windows" { bin += ".exe" } return &Generator{ Command: cmd, DesignPath: path, Output: output, bin: bin, } } // Write writes the main file. func (g *Generator) Write(debug bool) error { var tmpDir string { wd := "." if cwd, err := os.Getwd(); err != nil { wd = cwd } tmp, err := ioutil.TempDir(wd, "goa") if err != nil { return err } tmpDir = tmp } g.tmpDir = tmpDir var sections []*codegen.SectionTemplate { data := map[string]interface{}{ "Command": g.Command, "CleanupDirs": cleanupDirs(g.Command, g.Output), } imports := []*codegen.ImportSpec{ codegen.SimpleImport("flag"), codegen.SimpleImport("fmt"), codegen.SimpleImport("os"), codegen.SimpleImport("path/filepath"), codegen.SimpleImport("sort"), codegen.SimpleImport("strings"), codegen.SimpleImport("goa.design/goa/codegen/generator"), codegen.SimpleImport("goa.design/goa/eval"), codegen.SimpleImport("goa.design/goa/pkg"), codegen.NewImport("_", g.DesignPath), } sections = []*codegen.SectionTemplate{ codegen.Header("Code Generator", "main", imports), &codegen.SectionTemplate{ Name: "main", Source: mainT, Data: data, }, } } f := &codegen.File{Path: "main.go", SectionTemplates: sections} _, err := f.Render(tmpDir) return err } // Compile compiles the generator. func (g *Generator) Compile() error { gobin, err := exec.LookPath("go") if err != nil { return fmt.Errorf(`failed to find a go compiler, looked in "%s"`, os.Getenv("PATH")) } c := exec.Cmd{ Path: gobin, Args: []string{gobin, "build", "-o", g.bin}, Dir: g.tmpDir, } out, err := c.CombinedOutput() if err != nil { if len(out) > 0 { return fmt.Errorf(string(out)) } return fmt.Errorf("failed to compile generator: %s", err) } return nil } // Run runs the compiled binary and return the output lines. func (g *Generator) Run() ([]string, error) { var cmdl string { args := make([]string, len(os.Args)-1) gopaths := filepath.SplitList(os.Getenv("GOPATH")) for i, a := range os.Args[1:] { for _, p := range gopaths { if strings.Contains(a, p) { args[i] = strings.Replace(a, p, "$(GOPATH)", -1) break } } if args[i] == "" { args[i] = a } } cmdl = " " + strings.Join(args, " ") rawcmd := filepath.Base(os.Args[0]) // Remove .exe suffix to avoid different output on Windows. rawcmd = strings.TrimSuffix(rawcmd, ".exe") cmdl = fmt.Sprintf("$ %s%s", rawcmd, cmdl) } args := []string{"--version=" + pkg.Version(), "--output=" + g.Output, "--cmd=" + cmdl} cmd := exec.Command(filepath.Join(g.tmpDir, g.bin), args...) out, err := cmd.CombinedOutput() if err != nil { return nil, fmt.Errorf("%s\n%s", err, string(out)) } res := strings.Split(string(out), "\n") for (len(res) > 0) && (res[len(res)-1] == "") { res = res[:len(res)-1] } return res, nil } // Remove deletes the package files. 
func (g *Generator) Remove() { if g.tmpDir != "" { os.RemoveAll(g.tmpDir) g.tmpDir = "" } } // cleanupDirs returns the names of the directories to delete before generating // code. func cleanupDirs(cmd, output string) []string { if cmd == "gen" { return []string{filepath.Join(output, codegen.Gendir)} } return nil } // mainT is the template for the generator main. const mainT = `func main() { var ( out = flag.String("output", "", "") version = flag.String("version", "", "") cmdl = flag.String("cmd", "", "") ) { flag.Parse() if *out == "" { fail("missing output flag") } if *version == "" { fail("missing version flag") } if *cmdl == "" { fail("missing cmd flag") } } if *version != pkg.Version() { fail("cannot run generator produced by goa version %s and compiled with goa version %s\n", *version, pkg.Version()) } if err := eval.Context.Errors; err != nil { fail(err.Error()) } if err := eval.RunDSL(); err != nil { fail(err.Error()) } {{- range .CleanupDirs }} if err := os.RemoveAll({{ printf "%q" . }}); err != nil { fail(err.Error()) } {{- end }} outputs, err := generator.Generate(*out, {{ printf "%q" .Command }}) if err != nil { fail(err.Error()) } fmt.Println(strings.Join(outputs, "\n")) } func fail(msg string, vals ...interface{}) { fmt.Fprintf(os.Stderr, msg, vals...) os.Exit(1) } `
[ "\"PATH\"", "\"GOPATH\"" ]
[]
[ "GOPATH", "PATH" ]
[]
["GOPATH", "PATH"]
go
2
0
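Both constarg entries for this row are read in error/reporting paths: PATH only appears in the compiler-lookup error message, and GOPATH is split into its entries so they can be masked in the reconstructed command line in Run. A small sketch of just the GOPATH handling (the helper name is illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// maskGopath isolates the lookup used by Run above: GOPATH may contain several
// directories, so it is split on the OS-specific list separator and any entry
// found in the argument is replaced with the $(GOPATH) placeholder.
func maskGopath(arg string) string {
	for _, p := range filepath.SplitList(os.Getenv("GOPATH")) {
		if p != "" && strings.Contains(arg, p) {
			return strings.Replace(arg, p, "$(GOPATH)", -1)
		}
	}
	return arg
}

func main() {
	fmt.Println(maskGopath(os.Args[0]))
}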
config/pgoconfig.go
package config /* Copyright 2018 - 2020 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ import ( "errors" "fmt" "io/ioutil" "os" "strconv" "strings" "text/template" crv1 "github.com/crunchydata/postgres-operator/apis/cr/v1" "github.com/crunchydata/postgres-operator/kubeapi" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/client-go/kubernetes" ) const CustomConfigMapName = "pgo-config" const DefaultConfigsPath = "/default-pgo-config/" const CustomConfigsPath = "/pgo-config/" var PgoDefaultServiceAccountTemplate *template.Template const PGODefaultServiceAccountPath = "pgo-default-sa.json" var PgoTargetRoleBindingTemplate *template.Template const PGOTargetRoleBindingPath = "pgo-target-role-binding.json" var PgoBackrestServiceAccountTemplate *template.Template const PGOBackrestServiceAccountPath = "pgo-backrest-sa.json" var PgoTargetServiceAccountTemplate *template.Template const PGOTargetServiceAccountPath = "pgo-target-sa.json" var PgoBackrestRoleTemplate *template.Template const PGOBackrestRolePath = "pgo-backrest-role.json" var PgoBackrestRoleBindingTemplate *template.Template const PGOBackrestRoleBindingPath = "pgo-backrest-role-binding.json" var PgoTargetRoleTemplate *template.Template const PGOTargetRolePath = "pgo-target-role.json" var PgoPgServiceAccountTemplate *template.Template const PGOPgServiceAccountPath = "pgo-pg-sa.json" var PgoPgRoleTemplate *template.Template const PGOPgRolePath = "pgo-pg-role.json" var PgoPgRoleBindingTemplate *template.Template const PGOPgRoleBindingPath = "pgo-pg-role-binding.json" var PolicyJobTemplate *template.Template const policyJobTemplatePath = "pgo.sqlrunner-template.json" var PVCTemplate *template.Template const pvcPath = "pvc.json" var ContainerResourcesTemplate *template.Template const containerResourcesTemplatePath = "container-resources.json" var LoadTemplate *template.Template const loadTemplatePath = "pgo.load-template.json" var AffinityTemplate *template.Template const affinityTemplatePath = "affinity.json" var PodAntiAffinityTemplate *template.Template const podAntiAffinityTemplatePath = "pod-anti-affinity.json" var PgoBackrestRepoServiceTemplate *template.Template const pgoBackrestRepoServiceTemplatePath = "pgo-backrest-repo-service-template.json" var PgoBackrestRepoTemplate *template.Template const pgoBackrestRepoTemplatePath = "pgo-backrest-repo-template.json" var PgmonitorEnvVarsTemplate *template.Template const pgmonitorEnvVarsPath = "pgmonitor-env-vars.json" var PgbackrestEnvVarsTemplate *template.Template const pgbackrestEnvVarsPath = "pgbackrest-env-vars.json" var PgbackrestS3EnvVarsTemplate *template.Template const pgbackrestS3EnvVarsPath = "pgbackrest-s3-env-vars.json" var JobTemplate *template.Template const jobPath = "backup-job.json" var PgBasebackupRestoreJobTemplate *template.Template const pgBasebackupRestoreJobTemplatePath = "pgbasebackup-restore-job.json" var PgbouncerTemplate *template.Template const pgbouncerTemplatePath = "pgbouncer-template.json" 
var PgbouncerConfTemplate *template.Template const pgbouncerConfTemplatePath = "pgbouncer.ini" var PgbouncerUsersTemplate *template.Template const pgbouncerUsersTemplatePath = "users.txt" var PgbouncerHBATemplate *template.Template const pgbouncerHBATemplatePath = "pgbouncer_hba.conf" var ServiceTemplate *template.Template const serviceTemplatePath = "cluster-service.json" var RmdatajobTemplate *template.Template const rmdatajobPath = "rmdata-job.json" var BackrestjobTemplate *template.Template const backrestjobPath = "backrest-job.json" var BackrestRestorejobTemplate *template.Template const backrestRestorejobPath = "backrest-restore-job.json" var PgDumpBackupJobTemplate *template.Template const pgDumpBackupJobPath = "pgdump-job.json" var PgRestoreJobTemplate *template.Template const pgRestoreJobPath = "pgrestore-job.json" var PVCMatchLabelsTemplate *template.Template const pvcMatchLabelsPath = "pvc-matchlabels.json" var PVCStorageClassTemplate *template.Template const pvcSCPath = "pvc-storageclass.json" var CollectTemplate *template.Template const collectTemplatePath = "collect.json" var BadgerTemplate *template.Template const badgerTemplatePath = "pgbadger.json" var DeploymentTemplate *template.Template const deploymentTemplatePath = "cluster-deployment.json" type ClusterStruct struct { CCPImagePrefix string `yaml:"CCPImagePrefix"` CCPImageTag string `yaml:"CCPImageTag"` PrimaryNodeLabel string `yaml:"PrimaryNodeLabel"` ReplicaNodeLabel string `yaml:"ReplicaNodeLabel"` Policies string `yaml:"Policies"` Metrics bool `yaml:"Metrics"` Badger bool `yaml:"Badger"` Port string `yaml:"Port"` PGBadgerPort string `yaml:"PGBadgerPort"` ExporterPort string `yaml:"ExporterPort"` User string `yaml:"User"` Database string `yaml:"Database"` PasswordAgeDays string `yaml:"PasswordAgeDays"` PasswordLength string `yaml:"PasswordLength"` Strategy string `yaml:"Strategy"` Replicas string `yaml:"Replicas"` ServiceType string `yaml:"ServiceType"` BackrestPort int `yaml:"BackrestPort"` Backrest bool `yaml:"Backrest"` BackrestS3Bucket string `yaml:"BackrestS3Bucket"` BackrestS3Endpoint string `yaml:"BackrestS3Endpoint"` BackrestS3Region string `yaml:"BackrestS3Region"` DisableAutofail bool `yaml:"DisableAutofail"` PgmonitorPassword string `yaml:"PgmonitorPassword"` EnableCrunchyadm bool `yaml:"EnableCrunchyadm"` DisableReplicaStartFailReinit bool `yaml:"DisableReplicaStartFailReinit"` PodAntiAffinity string `yaml:"PodAntiAffinity"` SyncReplication bool `yaml:"SyncReplication"` } type StorageStruct struct { AccessMode string `yaml:"AccessMode"` Size string `yaml:"Size"` StorageType string `yaml:"StorageType"` StorageClass string `yaml:"StorageClass"` Fsgroup string `yaml:"Fsgroup"` SupplementalGroups string `yaml:"SupplementalGroups"` MatchLabels string `yaml:"MatchLabels"` } type ContainerResourcesStruct struct { RequestsMemory string `yaml:"RequestsMemory"` RequestsCPU string `yaml:"RequestsCPU"` LimitsMemory string `yaml:"LimitsMemory"` LimitsCPU string `yaml:"LimitsCPU"` } type PgoStruct struct { PreferredFailoverNode string `yaml:"PreferredFailoverNode"` Audit bool `yaml:"Audit"` PGOImagePrefix string `yaml:"PGOImagePrefix"` PGOImageTag string `yaml:"PGOImageTag"` } type PgoConfig struct { BasicAuth string `yaml:"BasicAuth"` Cluster ClusterStruct `yaml:"Cluster"` Pgo PgoStruct `yaml:"Pgo"` ContainerResources map[string]ContainerResourcesStruct `yaml:"ContainerResources"` PrimaryStorage string `yaml:"PrimaryStorage"` BackupStorage string `yaml:"BackupStorage"` ReplicaStorage string `yaml:"ReplicaStorage"` 
BackrestStorage string `yaml:"BackrestStorage"` Storage map[string]StorageStruct `yaml:"Storage"` DefaultContainerResources string `yaml:"DefaultContainerResources"` DefaultLoadResources string `yaml:"DefaultLoadResources"` DefaultRmdataResources string `yaml:"DefaultRmdataResources"` DefaultBackupResources string `yaml:"DefaultBackupResources"` DefaultBadgerResources string `yaml:"DefaultBadgerResources"` DefaultPgbouncerResources string `yaml:"DefaultPgbouncerResources"` } const DEFAULT_SERVICE_TYPE = "ClusterIP" const LOAD_BALANCER_SERVICE_TYPE = "LoadBalancer" const NODEPORT_SERVICE_TYPE = "NodePort" const CONFIG_PATH = "pgo.yaml" var log_statement_values = []string{"ddl", "none", "mod", "all"} const DEFAULT_BACKREST_PORT = 2022 const DEFAULT_PGBADGER_PORT = "10000" const DEFAULT_EXPORTER_PORT = "9187" const DEFAULT_POSTGRES_PORT = "5432" const DEFAULT_PATRONI_PORT = "8009" func (c *PgoConfig) Validate() error { var err error errPrefix := "Error in pgoconfig: check pgo.yaml: " if c.Cluster.BackrestPort == 0 { c.Cluster.BackrestPort = DEFAULT_BACKREST_PORT log.Infof("setting BackrestPort to default %d", c.Cluster.BackrestPort) } if c.Cluster.PGBadgerPort == "" { c.Cluster.PGBadgerPort = DEFAULT_PGBADGER_PORT log.Infof("setting PGBadgerPort to default %s", c.Cluster.PGBadgerPort) } else { if _, err := strconv.Atoi(c.Cluster.PGBadgerPort); err != nil { return errors.New(errPrefix + "Invalid PGBadgerPort: " + err.Error()) } } if c.Cluster.ExporterPort == "" { c.Cluster.ExporterPort = DEFAULT_EXPORTER_PORT log.Infof("setting ExporterPort to default %s", c.Cluster.ExporterPort) } else { if _, err := strconv.Atoi(c.Cluster.ExporterPort); err != nil { return errors.New(errPrefix + "Invalid ExporterPort: " + err.Error()) } } if c.Cluster.Port == "" { c.Cluster.Port = DEFAULT_POSTGRES_PORT log.Infof("setting Postgres Port to default %s", c.Cluster.Port) } else { if _, err := strconv.Atoi(c.Cluster.Port); err != nil { return errors.New(errPrefix + "Invalid Port: " + err.Error()) } } if c.Cluster.PrimaryNodeLabel != "" { parts := strings.Split(c.Cluster.PrimaryNodeLabel, "=") if len(parts) != 2 { return errors.New(errPrefix + "Cluster.PrimaryNodeLabel does not follow key=value format") } } if c.Cluster.ReplicaNodeLabel != "" { parts := strings.Split(c.Cluster.ReplicaNodeLabel, "=") if len(parts) != 2 { return errors.New(errPrefix + "Cluster.ReplicaNodeLabel does not follow key=value format") } } log.Infof("pgo.yaml Cluster.Backrest is %v", c.Cluster.Backrest) _, ok := c.Storage[c.PrimaryStorage] if !ok { return errors.New(errPrefix + "PrimaryStorage setting required") } _, ok = c.Storage[c.BackupStorage] if !ok { return errors.New(errPrefix + "BackupStorage setting required") } _, ok = c.Storage[c.BackrestStorage] if !ok { log.Warning("BackrestStorage setting not set, will use PrimaryStorage setting") c.Storage[c.BackrestStorage] = c.Storage[c.PrimaryStorage] } _, ok = c.Storage[c.ReplicaStorage] if !ok { return errors.New(errPrefix + "ReplicaStorage setting required") } for k, _ := range c.Storage { _, err = c.GetStorageSpec(k) if err != nil { return err } } if c.Pgo.PGOImagePrefix == "" { return errors.New(errPrefix + "Pgo.PGOImagePrefix is required") } if c.Pgo.PGOImageTag == "" { return errors.New(errPrefix + "Pgo.PGOImageTag is required") } if c.DefaultContainerResources != "" { _, ok = c.ContainerResources[c.DefaultContainerResources] if !ok { return errors.New(errPrefix + "DefaultContainerResources setting invalid") } } if c.DefaultLoadResources != "" { _, ok = 
c.ContainerResources[c.DefaultLoadResources] if !ok { return errors.New(errPrefix + "DefaultLoadResources setting invalid") } } if c.DefaultRmdataResources != "" { _, ok = c.ContainerResources[c.DefaultRmdataResources] if !ok { return errors.New(errPrefix + "DefaultRmdataResources setting invalid") } } if c.DefaultBackupResources != "" { _, ok = c.ContainerResources[c.DefaultBackupResources] if !ok { return errors.New(errPrefix + "DefaultBackupResources setting invalid") } } if c.DefaultBadgerResources != "" { _, ok = c.ContainerResources[c.DefaultBadgerResources] if !ok { return errors.New(errPrefix + "DefaultBadgerResources setting invalid") } } if c.DefaultPgbouncerResources != "" { _, ok = c.ContainerResources[c.DefaultPgbouncerResources] if !ok { return errors.New(errPrefix + "DefaultPgbouncerResources setting invalid") } } if c.Cluster.ServiceType == "" { log.Warn("Cluster.ServiceType not set, using default, ClusterIP ") c.Cluster.ServiceType = DEFAULT_SERVICE_TYPE } else { if c.Cluster.ServiceType != DEFAULT_SERVICE_TYPE && c.Cluster.ServiceType != LOAD_BALANCER_SERVICE_TYPE && c.Cluster.ServiceType != NODEPORT_SERVICE_TYPE { return errors.New(errPrefix + "Cluster.ServiceType is required to be either ClusterIP, NodePort, or LoadBalancer") } } if c.Cluster.CCPImagePrefix == "" { return errors.New(errPrefix + "Cluster.CCPImagePrefix is required") } if c.Cluster.CCPImageTag == "" { return errors.New(errPrefix + "Cluster.CCPImageTag is required") } if c.Cluster.User == "" { return errors.New(errPrefix + "Cluster.User is required") } else { // validates that username can be used as the kubernetes secret name // Must consist of lower case alphanumeric characters, // '-' or '.', and must start and end with an alphanumeric character errs := validation.IsDNS1123Subdomain(c.Cluster.User) if len(errs) > 0 { var msg string for i := range errs { msg = msg + errs[i] } return errors.New(errPrefix + msg) } } // if provided, ensure that the type of pod anti-affinity specified is valid podAntiAffinityType := crv1.PodAntiAffinityType(c.Cluster.PodAntiAffinity) if err := podAntiAffinityType.Validate(); podAntiAffinityType != "" && err != nil { return errors.New(errPrefix + "Invalid value provided for Cluster.PodAntiAffinityType") } return err } func (c *PgoConfig) GetConf() *PgoConfig { yamlFile, err := ioutil.ReadFile(CONFIG_PATH) if err != nil { log.Printf("yamlFile.Get err #%v ", err) } err = yaml.Unmarshal(yamlFile, c) if err != nil { log.Fatalf("Unmarshal: %v", err) } return c } func (c *PgoConfig) GetStorageSpec(name string) (crv1.PgStorageSpec, error) { var err error storage := crv1.PgStorageSpec{} s, ok := c.Storage[name] if !ok { err = errors.New("invalid Storage name " + name) log.Error(err) return storage, err } storage.StorageClass = s.StorageClass storage.AccessMode = s.AccessMode storage.Size = s.Size storage.StorageType = s.StorageType storage.Fsgroup = s.Fsgroup storage.MatchLabels = s.MatchLabels storage.SupplementalGroups = s.SupplementalGroups if s.Fsgroup != "" && s.SupplementalGroups != "" { err = errors.New("invalid Storage config " + name + " can not have both fsgroup and supplementalGroups specified in the same config, choose one.") log.Error(err) return storage, err } if storage.MatchLabels != "" { test := strings.Split(storage.MatchLabels, "=") if len(test) != 2 { err = errors.New("invalid Storage config " + name + " MatchLabels needs to be in key=value format.") log.Error(err) return storage, err } } return storage, err } func (c *PgoConfig) GetContainerResource(name string) 
(crv1.PgContainerResources, error) { var err error r := crv1.PgContainerResources{} s, ok := c.ContainerResources[name] if !ok { err = errors.New("invalid Container Resources name " + name) log.Error(err) return r, err } r.RequestsMemory = s.RequestsMemory r.RequestsCPU = s.RequestsCPU r.LimitsMemory = s.LimitsMemory r.LimitsCPU = s.LimitsCPU return r, err } func (c *PgoConfig) GetConfig(clientset *kubernetes.Clientset, namespace string) error { cMap, rootPath := getRootPath(clientset, namespace) var yamlFile []byte var err error //get the pgo.yaml config file if cMap != nil { str := cMap.Data[CONFIG_PATH] if str == "" { errMsg := fmt.Sprintf("could not get %s from ConfigMap", CONFIG_PATH) return errors.New(errMsg) } yamlFile = []byte(str) } else { yamlFile, err = ioutil.ReadFile(rootPath + CONFIG_PATH) if err != nil { log.Errorf("yamlFile.Get err #%v ", err) return err } } err = yaml.Unmarshal(yamlFile, c) if err != nil { log.Errorf("Unmarshal: %v", err) return err } //validate the pgo.yaml config file err = c.Validate() if err != nil { log.Error(err) return err } //determine the default storage class if necessary if cMap == nil { err = c.SetDefaultStorageClass(clientset) if err != nil { return err } } c.CheckEnv() //load up all the templates PgoDefaultServiceAccountTemplate, err = c.LoadTemplate(cMap, rootPath, PGODefaultServiceAccountPath) if err != nil { return err } PgoBackrestServiceAccountTemplate, err = c.LoadTemplate(cMap, rootPath, PGOBackrestServiceAccountPath) if err != nil { return err } PgoTargetServiceAccountTemplate, err = c.LoadTemplate(cMap, rootPath, PGOTargetServiceAccountPath) if err != nil { return err } PgoTargetRoleBindingTemplate, err = c.LoadTemplate(cMap, rootPath, PGOTargetRoleBindingPath) if err != nil { return err } PgoBackrestRoleTemplate, err = c.LoadTemplate(cMap, rootPath, PGOBackrestRolePath) if err != nil { return err } PgoBackrestRoleBindingTemplate, err = c.LoadTemplate(cMap, rootPath, PGOBackrestRoleBindingPath) if err != nil { return err } PgoTargetRoleTemplate, err = c.LoadTemplate(cMap, rootPath, PGOTargetRolePath) if err != nil { return err } PgoPgServiceAccountTemplate, err = c.LoadTemplate(cMap, rootPath, PGOPgServiceAccountPath) if err != nil { return err } PgoPgRoleTemplate, err = c.LoadTemplate(cMap, rootPath, PGOPgRolePath) if err != nil { return err } PgoPgRoleBindingTemplate, err = c.LoadTemplate(cMap, rootPath, PGOPgRoleBindingPath) if err != nil { return err } PVCTemplate, err = c.LoadTemplate(cMap, rootPath, pvcPath) if err != nil { return err } PolicyJobTemplate, err = c.LoadTemplate(cMap, rootPath, policyJobTemplatePath) if err != nil { return err } ContainerResourcesTemplate, err = c.LoadTemplate(cMap, rootPath, containerResourcesTemplatePath) if err != nil { return err } LoadTemplate, err = c.LoadTemplate(cMap, rootPath, loadTemplatePath) if err != nil { return err } JobTemplate, err = c.LoadTemplate(cMap, rootPath, jobPath) if err != nil { return err } PgoBackrestRepoServiceTemplate, err = c.LoadTemplate(cMap, rootPath, pgoBackrestRepoServiceTemplatePath) if err != nil { return err } PgoBackrestRepoTemplate, err = c.LoadTemplate(cMap, rootPath, pgoBackrestRepoTemplatePath) if err != nil { return err } PgmonitorEnvVarsTemplate, err = c.LoadTemplate(cMap, rootPath, pgmonitorEnvVarsPath) if err != nil { return err } PgbackrestEnvVarsTemplate, err = c.LoadTemplate(cMap, rootPath, pgbackrestEnvVarsPath) if err != nil { return err } PgbackrestS3EnvVarsTemplate, err = c.LoadTemplate(cMap, rootPath, pgbackrestS3EnvVarsPath) if err != nil { 
return err } JobTemplate, err = c.LoadTemplate(cMap, rootPath, jobPath) if err != nil { return err } PgBasebackupRestoreJobTemplate, err = c.LoadTemplate(cMap, rootPath, pgBasebackupRestoreJobTemplatePath) if err != nil { return err } PgbouncerTemplate, err = c.LoadTemplate(cMap, rootPath, pgbouncerTemplatePath) if err != nil { return err } PgbouncerConfTemplate, err = c.LoadTemplate(cMap, rootPath, pgbouncerConfTemplatePath) if err != nil { return err } PgbouncerUsersTemplate, err = c.LoadTemplate(cMap, rootPath, pgbouncerUsersTemplatePath) if err != nil { return err } PgbouncerHBATemplate, err = c.LoadTemplate(cMap, rootPath, pgbouncerHBATemplatePath) if err != nil { return err } ServiceTemplate, err = c.LoadTemplate(cMap, rootPath, serviceTemplatePath) if err != nil { return err } RmdatajobTemplate, err = c.LoadTemplate(cMap, rootPath, rmdatajobPath) if err != nil { return err } BackrestjobTemplate, err = c.LoadTemplate(cMap, rootPath, backrestjobPath) if err != nil { return err } BackrestRestorejobTemplate, err = c.LoadTemplate(cMap, rootPath, backrestRestorejobPath) if err != nil { return err } PgDumpBackupJobTemplate, err = c.LoadTemplate(cMap, rootPath, pgDumpBackupJobPath) if err != nil { return err } PgRestoreJobTemplate, err = c.LoadTemplate(cMap, rootPath, pgRestoreJobPath) if err != nil { return err } PVCMatchLabelsTemplate, err = c.LoadTemplate(cMap, rootPath, pvcMatchLabelsPath) if err != nil { return err } PVCStorageClassTemplate, err = c.LoadTemplate(cMap, rootPath, pvcSCPath) if err != nil { return err } AffinityTemplate, err = c.LoadTemplate(cMap, rootPath, affinityTemplatePath) if err != nil { return err } PodAntiAffinityTemplate, err = c.LoadTemplate(cMap, rootPath, podAntiAffinityTemplatePath) if err != nil { return err } CollectTemplate, err = c.LoadTemplate(cMap, rootPath, collectTemplatePath) if err != nil { return err } BadgerTemplate, err = c.LoadTemplate(cMap, rootPath, badgerTemplatePath) if err != nil { return err } DeploymentTemplate, err = c.LoadTemplate(cMap, rootPath, deploymentTemplatePath) if err != nil { return err } return nil } func getRootPath(clientset *kubernetes.Clientset, namespace string) (*v1.ConfigMap, string) { cMap, found := kubeapi.GetConfigMap(clientset, CustomConfigMapName, namespace) if found { log.Infof("Config: %s ConfigMap found, using config files from the configmap", CustomConfigMapName) return cMap, "" } log.Infof("Config: %s ConfigMap NOT found, using default baked-in config files from %s", CustomConfigMapName, DefaultConfigsPath) return nil, DefaultConfigsPath } // LoadTemplate will load a JSON template from a path func (c *PgoConfig) LoadTemplate(cMap *v1.ConfigMap, rootPath, path string) (*template.Template, error) { var value string var err error // Determine if there exists a configmap entry for the template file. 
if cMap != nil { // Get the data that is stored in the configmap value = cMap.Data[path] } // if the configmap does not exist, or there is no data in the configmap for // this particular configuration template, attempt to load the template from // the default configuration if cMap == nil || value == "" { value, err = c.DefaultTemplate(path) if err != nil { return nil, err } } // if we have a value for the templated file, return return template.Must(template.New(path).Parse(value)), nil } // DefaultTemplate attempts to load a default configuration template file func (c *PgoConfig) DefaultTemplate(path string) (string, error) { // set the lookup value for the file path based on the default configuration // path and the template file requested to be loaded fullPath := DefaultConfigsPath + path log.Debugf("No entry in cmap loading default path [%s]", fullPath) // read in the file from the default path buf, err := ioutil.ReadFile(fullPath) if err != nil { log.Errorf("error: could not read %s", fullPath) log.Error(err) return "", err } // extract the value of the default configuration file and return value := string(buf) return value, nil } func (c *PgoConfig) SetDefaultStorageClass(clientset *kubernetes.Clientset) error { selector := LABEL_PGO_DEFAULT_SC + "=true" scList, err := kubeapi.GetStorageClasses(clientset, selector) if err != nil { return err } if len(scList.Items) == 0 { //no pgo default sc was found, so we will use 1st sc we find scList, err = kubeapi.GetAllStorageClasses(clientset) if err != nil { return err } if len(scList.Items) == 0 { return errors.New("no storage classes were found on this Kube system") } //configure with the 1st SC on the system } else { //configure with the default pgo sc } log.Infof("setting pgo-default-sc to %s", scList.Items[0].Name) //add the storage class into the config c.Storage[LABEL_PGO_DEFAULT_SC] = StorageStruct{ AccessMode: "ReadWriteOnce", Size: "1G", StorageType: "dynamic", StorageClass: scList.Items[0].Name, Fsgroup: "26", SupplementalGroups: "", MatchLabels: "", } //set the default storage configs to this new one c.PrimaryStorage = LABEL_PGO_DEFAULT_SC c.BackupStorage = LABEL_PGO_DEFAULT_SC c.ReplicaStorage = LABEL_PGO_DEFAULT_SC c.BackrestStorage = LABEL_PGO_DEFAULT_SC return nil } // CheckEnv is mostly used for the OLM deployment use case // when someone wants to deploy with OLM, use the baked-in // configuration, but use a different set of images, by // setting these env vars in the OLM CSV, users can override // the baked in images func (c *PgoConfig) CheckEnv() { pgoImageTag := os.Getenv("PGO_IMAGE_TAG") if pgoImageTag != "" { c.Pgo.PGOImageTag = pgoImageTag log.Infof("CheckEnv: using PGO_IMAGE_TAG env var: %s", pgoImageTag) } pgoImagePrefix := os.Getenv("PGO_IMAGE_PREFIX") if pgoImagePrefix != "" { c.Pgo.PGOImagePrefix = pgoImagePrefix log.Infof("CheckEnv: using PGO_IMAGE_PREFIX env var: %s", pgoImagePrefix) } ccpImageTag := os.Getenv("CCP_IMAGE_TAG") if ccpImageTag != "" { c.Cluster.CCPImageTag = ccpImageTag log.Infof("CheckEnv: using CCP_IMAGE_TAG env var: %s", ccpImageTag) } ccpImagePrefix := os.Getenv("CCP_IMAGE_PREFIX") if ccpImagePrefix != "" { c.Cluster.CCPImagePrefix = ccpImagePrefix log.Infof("CheckEnv: using CCP_IMAGE_PREFIX env var: %s", ccpImagePrefix) } }
[ "\"PGO_IMAGE_TAG\"", "\"PGO_IMAGE_PREFIX\"", "\"CCP_IMAGE_TAG\"", "\"CCP_IMAGE_PREFIX\"" ]
[]
[ "PGO_IMAGE_PREFIX", "CCP_IMAGE_PREFIX", "PGO_IMAGE_TAG", "CCP_IMAGE_TAG" ]
[]
["PGO_IMAGE_PREFIX", "CCP_IMAGE_PREFIX", "PGO_IMAGE_TAG", "CCP_IMAGE_TAG"]
go
4
0
run/logging-manual/main.py
# Copyright 2019 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os

from flask import Flask, request

app = Flask(__name__)


@app.route("/", methods=["GET"])
def index():
    PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
    # [START cloudrun_manual_logging]
    # [START run_manual_logging]
    # Uncomment and populate this variable in your code:
    # PROJECT = 'The project ID of your Cloud Run service';

    # Build structured log messages as an object.
    global_log_fields = {}

    # Add log correlation to nest all log messages.
    trace_header = request.headers.get("X-Cloud-Trace-Context")

    if trace_header and PROJECT:
        trace = trace_header.split("/")
        global_log_fields[
            "logging.googleapis.com/trace"
        ] = f"projects/{PROJECT}/traces/{trace[0]}"

    # Complete a structured log entry.
    entry = dict(
        severity="NOTICE",
        message="This is the default display field.",
        # Log viewer accesses 'component' as jsonPayload.component'.
        component="arbitrary-property",
        **global_log_fields,
    )

    print(json.dumps(entry))
    # [END run_manual_logging]
    # [END cloudrun_manual_logging]

    return "Hello Logger!"


if __name__ == "__main__":
    PORT = int(os.getenv("PORT")) if os.getenv("PORT") else 8080

    # This is used when running locally. Gunicorn is used to run the
    # application on Cloud Run. See entrypoint in Dockerfile.
    app.run(host="127.0.0.1", port=PORT, debug=True)
[]
[]
[ "PORT", "GOOGLE_CLOUD_PROJECT" ]
[]
["PORT", "GOOGLE_CLOUD_PROJECT"]
python
2
0
backends/windows/userspace/netsh.go
package userspace

import (
	"fmt"
	"net"
	"os"
	"strings"
	"time"

	"k8s.io/klog/v2"
	utilexec "k8s.io/utils/exec"
)

// Interface is an injectable interface for running netsh commands. Implementations must be goroutine-safe.
type Interface interface {
	// EnsurePortProxyRule checks if the specified redirect exists, if not creates it
	EnsurePortProxyRule(args []string) (bool, error)
	// DeletePortProxyRule deletes the specified portproxy rule. If the rule did not exist, return error.
	DeletePortProxyRule(args []string) error
	// EnsureIPAddress checks if the specified IP Address is added to vEthernet (HNSTransparent) interface, if not, add it. If the address existed, return true.
	EnsureIPAddress(args []string, ip net.IP) (bool, error)
	// DeleteIPAddress checks if the specified IP address is present and, if so, deletes it.
	DeleteIPAddress(args []string) error
	// Restore runs `netsh exec` to restore portproxy or addresses using a file.
	// TODO Check if this is required, most likely not
	Restore(args []string) error
	// GetInterfaceToAddIP returns the interface name where Service IP needs to be added
	// IP Address needs to be added for netsh portproxy to redirect traffic
	// Reads Environment variable INTERFACE_TO_ADD_SERVICE_IP, if it is not defined then "vEthernet (HNSTransparent)" is returned
	GetInterfaceToAddIP() string
}

const (
	cmdNetsh string = "netsh"
)

// runner implements Interface in terms of exec("netsh").
type runner struct {
	exec utilexec.Interface
}

// New returns a new Interface which will exec netsh.
func New(exec utilexec.Interface) Interface {
	runner := &runner{
		exec: exec,
	}
	return runner
}

// EnsurePortProxyRule checks if the specified redirect exists, if not creates it.
func (runner *runner) EnsurePortProxyRule(args []string) (bool, error) {
	klog.V(4).InfoS("Running netsh interface portproxy add v4tov4", "arguments", args)
	out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput()

	if err == nil {
		return true, nil
	}
	if ee, ok := err.(utilexec.ExitError); ok {
		// netsh uses exit(0) to indicate a success of the operation,
		// as compared to a malformed commandline, for example.
		if ee.Exited() && ee.ExitStatus() != 0 {
			return false, nil
		}
	}
	return false, fmt.Errorf("error checking portproxy rule: %v: %s", err, out)
}

// DeletePortProxyRule deletes the specified portproxy rule. If the rule did not exist, return error.
func (runner *runner) DeletePortProxyRule(args []string) error {
	klog.V(4).InfoS("Running netsh interface portproxy delete v4tov4", "arguments", args)
	out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput()

	if err == nil {
		return nil
	}
	if ee, ok := err.(utilexec.ExitError); ok {
		// netsh uses exit(0) to indicate a success of the operation,
		// as compared to a malformed commandline, for example.
		if ee.Exited() && ee.ExitStatus() == 0 {
			return nil
		}
	}
	return fmt.Errorf("error deleting portproxy rule: %v: %s", err, out)
}

// EnsureIPAddress checks if the specified IP Address is added to interface identified by Environment variable
// INTERFACE_TO_ADD_SERVICE_IP, if not, add it. If the address existed, return true.
func (runner *runner) EnsureIPAddress(args []string, ip net.IP) (bool, error) {
	// Check if the ip address exists
	intName := runner.GetInterfaceToAddIP()
	argsShowAddress := []string{
		"interface", "ipv4", "show", "address",
		"name=" + intName,
	}

	ipToCheck := ip.String()
	exists, _ := checkIPExists(ipToCheck, argsShowAddress, runner)
	if exists == true {
		klog.V(4).InfoS("Not adding IP address, as it already exists", "IP", ipToCheck)
		return true, nil
	}

	// IP Address is not already added, add it now
	klog.V(4).InfoS("Running netsh interface IPv4 add address", "IP", args)
	out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput()

	if err == nil {
		// Once the IP Address is added, it takes a bit to initialize and show up when querying for it
		// Query all the IP addresses and see if the one we added is present
		// PS: We are using netsh interface IPv4 show address here to query all the IP addresses, instead of
		// querying net.InterfaceAddrs() as it returns the IP address as soon as it is added even though it is uninitialized
		klog.V(3).InfoS("Waiting until IP is added to the network adapter", "IP", ipToCheck)
		for {
			if exists, _ := checkIPExists(ipToCheck, argsShowAddress, runner); exists {
				return true, nil
			}
			time.Sleep(500 * time.Millisecond)
		}
	}
	if ee, ok := err.(utilexec.ExitError); ok {
		// netsh uses exit(0) to indicate a success of the operation,
		// as compared to a malformed commandline, for example.
		if ee.Exited() && ee.ExitStatus() != 0 {
			return false, nil
		}
	}
	return false, fmt.Errorf("error adding IPv4 address: %v: %s", err, out)
}

// DeleteIPAddress checks if the specified IP address is present and, if so, deletes it.
func (runner *runner) DeleteIPAddress(args []string) error {
	klog.V(4).InfoS("Running netsh interface IPv4 delete address", "IP", args)
	out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput()

	if err == nil {
		return nil
	}
	if ee, ok := err.(utilexec.ExitError); ok {
		// netsh uses exit(0) to indicate a success of the operation,
		// as compared to a malformed commandline, for example.
		if ee.Exited() && ee.ExitStatus() == 0 {
			return nil
		}
	}
	return fmt.Errorf("error deleting IPv4 address: %v: %s", err, out)
}

// GetInterfaceToAddIP returns the interface name where Service IP needs to be added
// IP Address needs to be added for netsh portproxy to redirect traffic
// Reads Environment variable INTERFACE_TO_ADD_SERVICE_IP, if it is not defined then "vEthernet (HNS Internal NIC)" is returned
func (runner *runner) GetInterfaceToAddIP() string {
	if iface := os.Getenv("INTERFACE_TO_ADD_SERVICE_IP"); len(iface) > 0 {
		return iface
	}
	return "vEthernet (HNS Internal NIC)"
}

// Restore is part of Interface.
func (runner *runner) Restore(args []string) error {
	return nil
}

// checkIPExists checks if an IP address exists in 'netsh interface IPv4 show address' output
func checkIPExists(ipToCheck string, args []string, runner *runner) (bool, error) {
	ipAddress, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput()
	if err != nil {
		return false, err
	}
	ipAddressString := string(ipAddress[:])
	klog.V(3).InfoS("Searching for IP in IP dump", "IP", ipToCheck, "IPDump", ipAddressString)
	showAddressArray := strings.Split(ipAddressString, "\n")
	for _, showAddress := range showAddressArray {
		if strings.Contains(showAddress, "IP") {
			ipFromNetsh := getIP(showAddress)
			if ipFromNetsh == ipToCheck {
				return true, nil
			}
		}
	}

	return false, nil
}

// getIP gets ip from showAddress (e.g. "IP Address: 10.96.0.4").
func getIP(showAddress string) string {
	list := strings.SplitN(showAddress, ":", 2)
	if len(list) != 2 {
		return ""
	}
	return strings.TrimSpace(list[1])
}
[ "\"INTERFACE_TO_ADD_SERVICE_IP\"" ]
[]
[ "INTERFACE_TO_ADD_SERVICE_IP" ]
[]
["INTERFACE_TO_ADD_SERVICE_IP"]
go
1
0
cmd/main.go
package main

import (
	"flag"
	"fmt"
	"os"
	// "github.com/MYOB-Technology/ops-repo-toolbox/pkg/collaborator-remover"
)

var org = flag.String("owner", "MYOB-Technology", "name of the repo owner")

func die(s string, err int) {
	fmt.Fprintf(os.Stderr, s+"\n")
	os.Exit(err)
}

func usage() {
	String := `clrt-rm is a Github tool

Usage:
	%s [flags]
`
	fmt.Fprintf(os.Stderr, String, os.Args[0])
	flag.PrintDefaults()
}

func main() {
	token := os.Getenv("GITHUB_TOKEN")
	if token == "" {
		die("GITHUB_TOKEN not set...", 1)
	}

	flag.Usage = usage
	flag.Parse()
	if flag.NArg() < 1 {
		usage()
		die("Please give me some arguments, check usage", 1)
	}
}
[ "\"GITHUB_TOKEN\"" ]
[]
[ "GITHUB_TOKEN" ]
[]
["GITHUB_TOKEN"]
go
1
0
release/ray_release/template.py
import copy
import datetime
import os
import re
from typing import Optional, Dict

import jinja2
import yaml

from ray_release.config import (
    Test,
    RELEASE_PACKAGE_DIR,
    parse_python_version,
    DEFAULT_PYTHON_VERSION,
    get_test_cloud_id,
)
from ray_release.exception import ReleaseTestConfigError
from ray_release.util import python_version_str

DEFAULT_ENV = {
    "DATESTAMP": str(datetime.datetime.now().strftime("%Y%m%d")),
    "TIMESTAMP": str(int(datetime.datetime.now().timestamp())),
    "EXPIRATION_1D": str(
        (datetime.datetime.now() + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
    ),
    "EXPIRATION_2D": str(
        (datetime.datetime.now() + datetime.timedelta(days=2)).strftime("%Y-%m-%d")
    ),
    "EXPIRATION_3D": str(
        (datetime.datetime.now() + datetime.timedelta(days=3)).strftime("%Y-%m-%d")
    ),
}


class TestEnvironment(dict):
    pass


_test_env = None


def get_test_environment():
    global _test_env
    if _test_env:
        return _test_env

    _test_env = TestEnvironment(**DEFAULT_ENV)
    return _test_env


def set_test_env_var(key: str, value: str):
    test_env = get_test_environment()
    test_env[key] = value


def get_test_env_var(key: str, default: Optional[str] = None):
    test_env = get_test_environment()
    return test_env.get(key, default)


def get_wheels_sanity_check(commit: Optional[str] = None):
    if not commit:
        cmd = (
            "python -c 'import ray; print("
            '"No commit sanity check available, but this is the '
            "Ray wheel commit:\", ray.__commit__)'"
        )
    else:
        cmd = (
            f"python -c 'import ray; "
            f'assert ray.__commit__ == "{commit}", ray.__commit__\''
        )
    return cmd


def load_and_render_yaml_template(
    template_path: str, env: Optional[Dict] = None
) -> Optional[Dict]:
    if not template_path:
        return None

    if not os.path.exists(template_path):
        raise ReleaseTestConfigError(
            f"Cannot load yaml template from {template_path}: Path not found."
        )

    with open(template_path, "rt") as f:
        content = f.read()

    return render_yaml_template(template=content, env=env)


def render_yaml_template(template: str, env: Optional[Dict] = None):
    render_env = copy.deepcopy(os.environ)
    if env:
        render_env.update(env)

    try:
        content = jinja2.Template(template).render(env=render_env)
        return yaml.safe_load(content)
    except Exception as e:
        raise ReleaseTestConfigError(
            f"Error rendering/loading yaml template: {e}"
        ) from e


def load_test_cluster_env(test: Test, ray_wheels_url: str) -> Optional[Dict]:
    cluster_env_file = test["cluster"]["cluster_env"]
    cluster_env_path = os.path.join(
        RELEASE_PACKAGE_DIR, test.get("working_dir", ""), cluster_env_file
    )
    env = populate_cluster_env_variables(test, ray_wheels_url=ray_wheels_url)

    return load_and_render_yaml_template(cluster_env_path, env=env)


def populate_cluster_env_variables(test: Test, ray_wheels_url: str) -> Dict:
    env = get_test_environment()

    commit = env.get("RAY_COMMIT", None)
    if not commit:
        match = re.search(r"/([a-f0-9]{40})/", ray_wheels_url)
        if match:
            commit = match.group(1)

    env["RAY_WHEELS_SANITY_CHECK"] = get_wheels_sanity_check(commit)
    env["RAY_WHEELS"] = ray_wheels_url

    if "python" in test:
        python_version = parse_python_version(test["python"])
    else:
        python_version = DEFAULT_PYTHON_VERSION

    env[
        "RAY_IMAGE_NIGHTLY_CPU"
    ] = f"anyscale/ray:nightly-py{python_version_str(python_version)}"
    env[
        "RAY_IMAGE_ML_NIGHTLY_GPU"
    ] = f"anyscale/ray-ml:nightly-py{python_version_str(python_version)}-gpu"

    return env


def load_test_cluster_compute(test: Test) -> Optional[Dict]:
    cluster_compute_file = test["cluster"]["cluster_compute"]
    cluster_compute_path = os.path.join(
        RELEASE_PACKAGE_DIR, test.get("working_dir", ""), cluster_compute_file
    )
    env = populate_cluster_compute_variables(test)

    return load_and_render_yaml_template(cluster_compute_path, env=env)


def populate_cluster_compute_variables(test: Test) -> Dict:
    env = get_test_environment()

    cloud_id = get_test_cloud_id(test)
    env["ANYSCALE_CLOUD_ID"] = cloud_id

    return env
[]
[]
[]
[]
[]
python
0
0
scripts/close_gh_issues.py
#!/usr/bin/env python3

import argparse
import json
import os
import subprocess
from pathlib import Path
from typing import Union
from urllib.parse import quote

import call_wrapper

PathLike = Union[str, Path]


def close(repo_dir: PathLike):
    if not repo_dir:
        repo_dir = Path(os.getcwd()).absolute()

    label_to_close = "fixed in dev build"

    cmd_list_issues = ' '.join([
        'gh',
        'api',
        'repos/:owner/:repo/issues',
        '--method=GET',
        '-F',
        f'labels="{label_to_close}"',
        '-F',
        'state=open'])
    print('> ' + cmd_list_issues)
    json_str = subprocess.check_output(cmd_list_issues, text=True, env=os.environ, cwd=repo_dir)
    issues = json.loads(json_str)
    if not issues:
        print('No relevant issues were found')
        return

    def get_cmd_remove_label(issue_number):
        return [
            'gh',
            'api',
            f'/repos/:owner/:repo/issues/{str(issue_number)}/labels/{quote(label_to_close)}',
            '--method=DELETE',
            '--silent']

    def get_cmd_close_issue(issue_number):
        return [
            'gh',
            'api',
            f'/repos/:owner/:repo/issues/{str(issue_number)}',
            '--method=PATCH',
            '-Fstate=close',
            '--silent']

    for i in issues:
        cmd_close_issue = get_cmd_close_issue(i['number'])
        cmd_remove_label = get_cmd_remove_label(i['number'])

        print('> ' + ' '.join(cmd_close_issue))
        subprocess.check_call(cmd_close_issue, env=os.environ, cwd=repo_dir)

        print('> ' + ' '.join(cmd_remove_label))
        subprocess.check_call(cmd_remove_label, env=os.environ, cwd=repo_dir)

    print('All issues were closed')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Close all fixed issues')
    parser.add_argument('--repo_dir', default=Path(os.getcwd()).absolute())

    args = parser.parse_args()

    call_wrapper.final_call_decorator(
        "Closing issues",
        "Issues were successfully closed!",
        "Failed to close all issues!"
    )(
        close
    )(
        args.repo_dir
    )
[]
[]
[]
[]
[]
python
0
0
daemon/graphdriver/lcow/lcow.go
// +build windows // Locale: en-gb // About: Graph-driver for Linux Containers On Windows (LCOW) // // This graphdriver runs in two modes. Yet to be determined which one will // be the shipping mode. The global mode is where a single utility VM // is used for all service VM tool operations. This isn't safe security-wise // as it's attaching a sandbox of multiple containers to it, containing // untrusted data. This may be fine for client devops scenarios. In // safe mode, a unique utility VM is instantiated for all service VM tool // operations. The downside of safe-mode is that operations are slower as // a new service utility VM has to be started and torn-down when needed. // // Options: // // The following options are read by the graphdriver itself: // // * lcow.globalmode - Enables global service VM Mode // -- Possible values: true/false // -- Default if omitted: false // // * lcow.sandboxsize - Specifies a custom sandbox size in GB for starting a container // -- Possible values: >= default sandbox size (opengcs defined, currently 20) // -- Default if omitted: 20 // // The following options are read by opengcs: // // * lcow.kirdpath - Specifies a custom path to a kernel/initrd pair // -- Possible values: Any local path that is not a mapped drive // -- Default if omitted: %ProgramFiles%\Linux Containers // // * lcow.bootparameters - Specifies additional boot parameters for booting in kernel+initrd mode // -- Possible values: Any valid linux kernel boot options // -- Default if omitted: <nil> // // * lcow.timeout - Specifies a timeout for utility VM operations in seconds // -- Possible values: >=0 // -- Default if omitted: 300 // TODO: Grab logs from SVM at terminate or errors package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow" import ( "bytes" "encoding/json" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" "sync" "syscall" "time" "github.com/Microsoft/go-winio/pkg/security" "github.com/Microsoft/hcsshim" "github.com/Microsoft/hcsshim/ext4/tar2ext4" "github.com/Microsoft/opengcs/client" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/reexec" "github.com/sirupsen/logrus" ) // noreexec controls reexec functionality. Off by default, on for debugging purposes. var noreexec = false // init registers this driver to the register. It gets initialised by the // function passed in the second parameter, implemented in this file. func init() { graphdriver.Register("lcow", InitDriver) // DOCKER_LCOW_NOREEXEC allows for inline processing which makes // debugging issues in the re-exec codepath significantly easier. if os.Getenv("DOCKER_LCOW_NOREEXEC") != "" { logrus.Warnf("LCOW Graphdriver is set to not re-exec. This is intended for debugging purposes only.") noreexec = true } else { reexec.Register("docker-lcow-tar2ext4", tar2ext4Reexec) } } const ( // sandboxFilename is the name of the file containing a layer's sandbox (read-write layer). sandboxFilename = "sandbox.vhdx" // scratchFilename is the name of the scratch-space used by an SVM to avoid running out of memory. scratchFilename = "scratch.vhdx" // layerFilename is the name of the file containing a layer's read-only contents. // Note this really is VHD format, not VHDX. 
layerFilename = "layer.vhd" // toolsScratchPath is a location in a service utility VM that the tools can use as a // scratch space to avoid running out of memory. toolsScratchPath = "/tmp/scratch" // svmGlobalID is the ID used in the serviceVMs map for the global service VM when running in "global" mode. svmGlobalID = "_lcow_global_svm_" // cacheDirectory is the sub-folder under the driver's data-root used to cache blank sandbox and scratch VHDs. cacheDirectory = "cache" // scratchDirectory is the sub-folder under the driver's data-root used for scratch VHDs in service VMs scratchDirectory = "scratch" // errOperationPending is the HRESULT returned by the HCS when the VM termination operation is still pending. errOperationPending syscall.Errno = 0xc0370103 ) // Driver represents an LCOW graph driver. type Driver struct { dataRoot string // Root path on the host where we are storing everything. cachedSandboxFile string // Location of the local default-sized cached sandbox. cachedSandboxMutex sync.Mutex // Protects race conditions from multiple threads creating the cached sandbox. cachedScratchFile string // Location of the local cached empty scratch space. cachedScratchMutex sync.Mutex // Protects race conditions from multiple threads creating the cached scratch. options []string // Graphdriver options we are initialised with. globalMode bool // Indicates if running in an unsafe/global service VM mode. defaultSandboxSize uint64 // The default sandbox size to use if one is not specified // NOTE: It is OK to use a cache here because Windows does not support // restoring containers when the daemon dies. serviceVms *serviceVMMap // Map of the configs representing the service VM(s) we are running. } // layerDetails is the structure returned by a helper function `getLayerDetails` // for getting information about a layer folder type layerDetails struct { filename string // \path\to\sandbox.vhdx or \path\to\layer.vhd size int64 // size of the above file isSandbox bool // true if sandbox.vhdx } // deletefiles is a helper function for initialisation where we delete any // left-over scratch files in case we were previously forcibly terminated. func deletefiles(path string, f os.FileInfo, err error) error { if strings.HasSuffix(f.Name(), ".vhdx") { logrus.Warnf("lcowdriver: init: deleting stale scratch file %s", path) return os.Remove(path) } return nil } // InitDriver returns a new LCOW storage driver. 
func InitDriver(dataRoot string, options []string, _, _ []idtools.IDMap) (graphdriver.Driver, error) { title := "lcowdriver: init:" cd := filepath.Join(dataRoot, cacheDirectory) sd := filepath.Join(dataRoot, scratchDirectory) d := &Driver{ dataRoot: dataRoot, options: options, cachedSandboxFile: filepath.Join(cd, sandboxFilename), cachedScratchFile: filepath.Join(cd, scratchFilename), serviceVms: &serviceVMMap{ svms: make(map[string]*serviceVMMapItem), }, globalMode: false, defaultSandboxSize: client.DefaultVhdxSizeGB, } // Looks for relevant options for _, v := range options { opt := strings.SplitN(v, "=", 2) if len(opt) == 2 { switch strings.ToLower(opt[0]) { case "lcow.globalmode": var err error d.globalMode, err = strconv.ParseBool(opt[1]) if err != nil { return nil, fmt.Errorf("%s failed to parse value for 'lcow.globalmode' - must be 'true' or 'false'", title) } break case "lcow.sandboxsize": var err error d.defaultSandboxSize, err = strconv.ParseUint(opt[1], 10, 32) if err != nil { return nil, fmt.Errorf("%s failed to parse value '%s' for 'lcow.sandboxsize'", title, v) } if d.defaultSandboxSize < client.DefaultVhdxSizeGB { return nil, fmt.Errorf("%s 'lcow.sandboxsize' option cannot be less than %d", title, client.DefaultVhdxSizeGB) } break } } } // Make sure the dataRoot directory is created if err := idtools.MkdirAllAndChown(dataRoot, 0700, idtools.Identity{UID: 0, GID: 0}); err != nil { return nil, fmt.Errorf("%s failed to create '%s': %v", title, dataRoot, err) } // Make sure the cache directory is created under dataRoot if err := idtools.MkdirAllAndChown(cd, 0700, idtools.Identity{UID: 0, GID: 0}); err != nil { return nil, fmt.Errorf("%s failed to create '%s': %v", title, cd, err) } // Make sure the scratch directory is created under dataRoot if err := idtools.MkdirAllAndChown(sd, 0700, idtools.Identity{UID: 0, GID: 0}); err != nil { return nil, fmt.Errorf("%s failed to create '%s': %v", title, sd, err) } // Delete any items in the scratch directory filepath.Walk(sd, deletefiles) logrus.Infof("%s dataRoot: %s globalMode: %t", title, dataRoot, d.globalMode) return d, nil } func (d *Driver) getVMID(id string) string { if d.globalMode { return svmGlobalID } return id } // remapLongToShortContainerPath does the mapping of a long container path for a // SCSI attached disk, to a short container path where it's actually mounted. func remapLongToShortContainerPath(longContainerPath string, attachCounter uint64, svmName string) string { shortContainerPath := longContainerPath if shortContainerPath != "" && shortContainerPath != toolsScratchPath { shortContainerPath = fmt.Sprintf("/tmp/d%d", attachCounter) logrus.Debugf("lcowdriver: UVM %s: remapping %s --> %s", svmName, longContainerPath, shortContainerPath) } return shortContainerPath } // startServiceVMIfNotRunning starts a service utility VM if it is not currently running. // It can optionally be started with a mapped virtual disk. Returns a opengcs config structure // representing the VM. func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd []hcsshim.MappedVirtualDisk, context string) (_ *serviceVM, err error) { // Use the global ID if in global mode id = d.getVMID(id) title := "lcowdriver: startServiceVMIfNotRunning " + id // Attempt to add ID to the service vm map logrus.Debugf("%s: adding entry to service vm map", title) svm, exists, err := d.serviceVms.add(id) if err != nil && err == errVMisTerminating { // VM is in the process of terminating. 
Wait until it's done and then try again logrus.Debugf("%s: VM with current ID still in the process of terminating", title) if err := svm.getStopError(); err != nil { logrus.Debugf("%s: VM did not stop successfully: %s", title, err) return nil, err } return d.startServiceVMIfNotRunning(id, mvdToAdd, context) } else if err != nil { logrus.Debugf("%s: failed to add service vm to map: %s", title, err) return nil, fmt.Errorf("%s: failed to add to service vm map: %s", title, err) } if exists { // Service VM is already up and running. In this case, just hot add the vhds. // Note that hotAddVHDs will remap long to short container paths, so no need // for us to that here. logrus.Debugf("%s: service vm already exists. Just hot adding: %+v", title, mvdToAdd) if err := svm.hotAddVHDs(mvdToAdd...); err != nil { logrus.Debugf("%s: failed to hot add vhds on service vm creation: %s", title, err) return nil, fmt.Errorf("%s: failed to hot add vhds on service vm: %s", title, err) } return svm, nil } // We are the first service for this id, so we need to start it logrus.Debugf("%s: service vm doesn't exist. Now starting it up", title) defer func() { // Signal that start has finished, passing in the error if any. svm.signalStartFinished(err) if err != nil { // We added a ref to the VM, since we failed, we should delete the ref. d.terminateServiceVM(id, "error path on startServiceVMIfNotRunning", false) } }() // Generate a default configuration if err := svm.config.GenerateDefault(d.options); err != nil { return nil, fmt.Errorf("%s: failed to generate default gogcs configuration for global svm (%s): %s", title, context, err) } // For the name, we deliberately suffix if safe-mode to ensure that it doesn't // clash with another utility VM which may be running for the container itself. // This also makes it easier to correlate through Get-ComputeProcess. if id == svmGlobalID { svm.config.Name = svmGlobalID } else { svm.config.Name = fmt.Sprintf("%s_svm", id) } // Ensure we take the cached scratch mutex around the check to ensure the file is complete // and not in the process of being created by another thread. scratchTargetFile := filepath.Join(d.dataRoot, scratchDirectory, fmt.Sprintf("%s.vhdx", id)) logrus.Debugf("%s: locking cachedScratchMutex", title) d.cachedScratchMutex.Lock() if _, err := os.Stat(d.cachedScratchFile); err == nil { // Make a copy of cached scratch to the scratch directory logrus.Debugf("%s: (%s) cloning cached scratch for mvd", title, context) if err := client.CopyFile(d.cachedScratchFile, scratchTargetFile, true); err != nil { logrus.Debugf("%s: releasing cachedScratchMutex on err: %s", title, err) d.cachedScratchMutex.Unlock() return nil, err } // Add the cached clone as a mapped virtual disk logrus.Debugf("%s: (%s) adding cloned scratch as mvd", title, context) mvd := hcsshim.MappedVirtualDisk{ HostPath: scratchTargetFile, ContainerPath: toolsScratchPath, CreateInUtilityVM: true, } svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvd) svm.scratchAttached = true } logrus.Debugf("%s: releasing cachedScratchMutex", title) d.cachedScratchMutex.Unlock() // Add mapped virtual disks. First those that are already in the configuration. Generally, // the only one that will be here is the service VMs scratch. The exception is when invoked // via the graphdrivers DiffGetter implementation. 
for i, mvd := range svm.config.MappedVirtualDisks { svm.attachCounter++ svm.attachedVHDs[mvd.HostPath] = &attachedVHD{refCount: 1, attachCounter: svm.attachCounter} // No-op for the service VMs scratch disk. Only applicable in the DiffGetter interface invocation. svm.config.MappedVirtualDisks[i].ContainerPath = remapLongToShortContainerPath(mvd.ContainerPath, svm.attachCounter, svm.config.Name) } // Then the remaining ones to add, and adding them to the startup configuration. for _, mvd := range mvdToAdd { svm.attachCounter++ svm.attachedVHDs[mvd.HostPath] = &attachedVHD{refCount: 1, attachCounter: svm.attachCounter} mvd.ContainerPath = remapLongToShortContainerPath(mvd.ContainerPath, svm.attachCounter, svm.config.Name) svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvd) } // Start it. logrus.Debugf("%s: (%s) starting %s", title, context, svm.config.Name) if err := svm.config.StartUtilityVM(); err != nil { return nil, fmt.Errorf("failed to start service utility VM (%s): %s", context, err) } // defer function to terminate the VM if the next steps fail defer func() { if err != nil { waitTerminate(svm, fmt.Sprintf("%s: (%s)", title, context)) } }() // Now we have a running service VM, we can create the cached scratch file if it doesn't exist. logrus.Debugf("%s: locking cachedScratchMutex", title) d.cachedScratchMutex.Lock() if _, err := os.Stat(d.cachedScratchFile); err != nil { logrus.Debugf("%s: (%s) creating an SVM scratch", title, context) // Don't use svm.CreateExt4Vhdx since that only works when the service vm is setup, // but we're still in that process right now. if err := svm.config.CreateExt4Vhdx(scratchTargetFile, client.DefaultVhdxSizeGB, d.cachedScratchFile); err != nil { logrus.Debugf("%s: (%s) releasing cachedScratchMutex on error path", title, context) d.cachedScratchMutex.Unlock() logrus.Debugf("%s: failed to create vm scratch %s: %s", title, scratchTargetFile, err) return nil, fmt.Errorf("failed to create SVM scratch VHDX (%s): %s", context, err) } } logrus.Debugf("%s: (%s) releasing cachedScratchMutex", title, context) d.cachedScratchMutex.Unlock() // Hot-add the scratch-space if not already attached if !svm.scratchAttached { logrus.Debugf("%s: (%s) hot-adding scratch %s", title, context, scratchTargetFile) if err := svm.hotAddVHDsAtStart(hcsshim.MappedVirtualDisk{ HostPath: scratchTargetFile, ContainerPath: toolsScratchPath, CreateInUtilityVM: true, }); err != nil { logrus.Debugf("%s: failed to hot-add scratch %s: %s", title, scratchTargetFile, err) return nil, fmt.Errorf("failed to hot-add %s failed: %s", scratchTargetFile, err) } svm.scratchAttached = true // Don't need to ref-count here as it will be done via hotAddVHDsAtStart() call above. } logrus.Debugf("%s: (%s) success", title, context) return svm, nil } // terminateServiceVM terminates a service utility VM if its running if it's, // not being used by any goroutine, but does nothing when in global mode as it's // lifetime is limited to that of the daemon. If the force flag is set, then // the VM will be killed regardless of the ref count or if it's global. func (d *Driver) terminateServiceVM(id, context string, force bool) (err error) { // We don't do anything in safe mode unless the force flag has been passed, which // is only the case for cleanup at driver termination. 
if d.globalMode && !force { logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - doing nothing as in global mode", id, context) return nil } id = d.getVMID(id) var svm *serviceVM var lastRef bool if !force { // In the not force case, we ref count svm, lastRef, err = d.serviceVms.decrementRefCount(id) } else { // In the force case, we ignore the ref count and just set it to 0 svm, err = d.serviceVms.setRefCountZero(id) lastRef = true } if err == errVMUnknown { return nil } else if err == errVMisTerminating { return svm.getStopError() } else if !lastRef { return nil } // We run the deletion of the scratch as a deferred function to at least attempt // clean-up in case of errors. defer func() { if svm.scratchAttached { scratchTargetFile := filepath.Join(d.dataRoot, scratchDirectory, fmt.Sprintf("%s.vhdx", id)) logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - deleting scratch %s", id, context, scratchTargetFile) if errRemove := os.Remove(scratchTargetFile); errRemove != nil { logrus.Warnf("failed to remove scratch file %s (%s): %s", scratchTargetFile, context, errRemove) err = errRemove } } // This function shouldn't actually return error unless there is a bug if errDelete := d.serviceVms.deleteID(id); errDelete != nil { logrus.Warnf("failed to service vm from svm map %s (%s): %s", id, context, errDelete) } // Signal that this VM has stopped svm.signalStopFinished(err) }() // Now it's possible that the service VM failed to start and now we are trying to terminate it. // In this case, we will relay the error to the goroutines waiting for this vm to stop. if err := svm.getStartError(); err != nil { logrus.Debugf("lcowdriver: terminateservicevm: %s had failed to start up: %s", id, err) return err } if err := waitTerminate(svm, fmt.Sprintf("terminateservicevm: %s (%s)", id, context)); err != nil { return err } logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - success", id, context) return nil } func waitTerminate(svm *serviceVM, context string) error { if svm.config == nil { return fmt.Errorf("lcowdriver: waitTermiante: Nil utility VM. %s", context) } logrus.Debugf("lcowdriver: waitTerminate: Calling terminate: %s", context) if err := svm.config.Uvm.Terminate(); err != nil { // We might get operation still pending from the HCS. In that case, we shouldn't return // an error since we call wait right after. underlyingError := err if conterr, ok := err.(*hcsshim.ContainerError); ok { underlyingError = conterr.Err } if syscallErr, ok := underlyingError.(syscall.Errno); ok { underlyingError = syscallErr } if underlyingError != errOperationPending { return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err) } logrus.Debugf("lcowdriver: waitTerminate: uvm.Terminate() returned operation pending (%s)", context) } logrus.Debugf("lcowdriver: waitTerminate: (%s) - waiting for utility VM to terminate", context) if err := svm.config.Uvm.WaitTimeout(time.Duration(svm.config.UvmTimeoutSeconds) * time.Second); err != nil { return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err) } return nil } // String returns the string representation of a driver. This should match // the name the graph driver has been registered with. func (d *Driver) String() string { return "lcow" } // Status returns the status of the driver. func (d *Driver) Status() [][2]string { return [][2]string{ {"LCOW", ""}, // TODO: Add some more info here - mode, home, .... } } // Exists returns true if the given id is registered with this driver. 
func (d *Driver) Exists(id string) bool { _, err := os.Lstat(d.dir(id)) logrus.Debugf("lcowdriver: exists: id %s %t", id, err == nil) return err == nil } // CreateReadWrite creates a layer that is writable for use as a container // file system. That equates to creating a sandbox. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { title := fmt.Sprintf("lcowdriver: createreadwrite: id %s", id) logrus.Debugf(title) // First we need to create the folder if err := d.Create(id, parent, opts); err != nil { return err } // Look for an explicit sandbox size option. sandboxSize := d.defaultSandboxSize for k, v := range opts.StorageOpt { switch strings.ToLower(k) { case "lcow.sandboxsize": var err error sandboxSize, err = strconv.ParseUint(v, 10, 32) if err != nil { return fmt.Errorf("%s failed to parse value '%s' for 'lcow.sandboxsize'", title, v) } if sandboxSize < client.DefaultVhdxSizeGB { return fmt.Errorf("%s 'lcow.sandboxsize' option cannot be less than %d", title, client.DefaultVhdxSizeGB) } break } } // Massive perf optimisation here. If we know that the RW layer is the default size, // and that the cached sandbox already exists, and we are running in safe mode, we // can just do a simple copy into the layers sandbox file without needing to start a // unique service VM. For a global service VM, it doesn't really matter. Of course, // this is only the case where the sandbox is the default size. // // Make sure we have the sandbox mutex taken while we are examining it. if sandboxSize == client.DefaultVhdxSizeGB { logrus.Debugf("%s: locking cachedSandboxMutex", title) d.cachedSandboxMutex.Lock() _, err := os.Stat(d.cachedSandboxFile) logrus.Debugf("%s: releasing cachedSandboxMutex", title) d.cachedSandboxMutex.Unlock() if err == nil { logrus.Debugf("%s: using cached sandbox to populate", title) if err := client.CopyFile(d.cachedSandboxFile, filepath.Join(d.dir(id), sandboxFilename), true); err != nil { return err } return nil } } logrus.Debugf("%s: creating SVM to create sandbox", title) svm, err := d.startServiceVMIfNotRunning(id, nil, "createreadwrite") if err != nil { return err } defer d.terminateServiceVM(id, "createreadwrite", false) // So the sandbox needs creating. If default size ensure we are the only thread populating the cache. // Non-default size we don't store, just create them one-off so no need to lock the cachedSandboxMutex. if sandboxSize == client.DefaultVhdxSizeGB { logrus.Debugf("%s: locking cachedSandboxMutex for creation", title) d.cachedSandboxMutex.Lock() defer func() { logrus.Debugf("%s: releasing cachedSandboxMutex for creation", title) d.cachedSandboxMutex.Unlock() }() } // Make sure we don't write to our local cached copy if this is for a non-default size request. targetCacheFile := d.cachedSandboxFile if sandboxSize != client.DefaultVhdxSizeGB { targetCacheFile = "" } // Create the ext4 vhdx logrus.Debugf("%s: creating sandbox ext4 vhdx", title) if err := svm.createExt4VHDX(filepath.Join(d.dir(id), sandboxFilename), uint32(sandboxSize), targetCacheFile); err != nil { logrus.Debugf("%s: failed to create sandbox vhdx for %s: %s", title, id, err) return err } return nil } // Create creates the folder for the layer with the given id, and // adds it to the layer chain. 
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { logrus.Debugf("lcowdriver: create: id %s parent: %s", id, parent) parentChain, err := d.getLayerChain(parent) if err != nil { return err } var layerChain []string if parent != "" { if !d.Exists(parent) { return fmt.Errorf("lcowdriver: cannot create layer folder with missing parent %s", parent) } layerChain = []string{d.dir(parent)} } layerChain = append(layerChain, parentChain...) layerPath := d.dir(id) logrus.Debugf("lcowdriver: create: id %s: creating %s", id, layerPath) // Standard mkdir here, not with SDDL as the dataroot was created with // inheritance to just local system and administrators. if err := os.MkdirAll(layerPath, 0700); err != nil { return err } if err := d.setLayerChain(id, layerChain); err != nil { if err2 := os.RemoveAll(layerPath); err2 != nil { logrus.Warnf("failed to remove layer %s: %s", layerPath, err2) } return err } logrus.Debugf("lcowdriver: create: id %s: success", id) return nil } // Remove unmounts and removes the dir information. func (d *Driver) Remove(id string) error { logrus.Debugf("lcowdriver: remove: id %s", id) tmpID := fmt.Sprintf("%s-removing", id) tmpLayerPath := d.dir(tmpID) layerPath := d.dir(id) logrus.Debugf("lcowdriver: remove: id %s: layerPath %s", id, layerPath) // Unmount all the layers err := d.Put(id) if err != nil { logrus.Debugf("lcowdriver: remove id %s: failed to unmount: %s", id, err) return err } // for non-global case just kill the vm if !d.globalMode { if err := d.terminateServiceVM(id, fmt.Sprintf("Remove %s", id), true); err != nil { return err } } if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { return err } if err := os.RemoveAll(tmpLayerPath); err != nil { return err } logrus.Debugf("lcowdriver: remove: id %s: layerPath %s succeeded", id, layerPath) return nil } // Get returns the rootfs path for the id. It is reference counted and // effectively can be thought of as a "mount the layer into the utility // vm if it isn't already". The contract from the caller of this is that // all Gets and Puts are matched. It -should- be the case that on cleanup, // nothing is mounted. // // For optimisation, we don't actually mount the filesystem (which in our // case means [hot-]adding it to a service VM. But we track that and defer // the actual adding to the point we need to access it. func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { title := fmt.Sprintf("lcowdriver: get: %s", id) logrus.Debugf(title) // Generate the mounts needed for the deferred operation. disks, err := d.getAllMounts(id) if err != nil { logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err) return nil, fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err) } logrus.Debugf("%s: got layer mounts: %+v", title, disks) return &lcowfs{ root: unionMountName(disks), d: d, mappedDisks: disks, vmID: d.getVMID(id), }, nil } // Put does the reverse of get. If there are no more references to // the layer, it unmounts it from the utility VM. 
func (d *Driver) Put(id string) error { title := fmt.Sprintf("lcowdriver: put: %s", id) // Get the service VM that we need to remove from svm, err := d.serviceVms.get(d.getVMID(id)) if err == errVMUnknown { return nil } else if err == errVMisTerminating { return svm.getStopError() } // Generate the mounts that Get() might have mounted disks, err := d.getAllMounts(id) if err != nil { logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err) return fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err) } // Now, we want to perform the unmounts, hot-remove and stop the service vm. // We want to go though all the steps even if we have an error to clean up properly err = svm.deleteUnionMount(unionMountName(disks), disks...) if err != nil { logrus.Debugf("%s failed to delete union mount %s: %s", title, id, err) } err1 := svm.hotRemoveVHDs(disks...) if err1 != nil { logrus.Debugf("%s failed to hot remove vhds %s: %s", title, id, err) if err == nil { err = err1 } } err1 = d.terminateServiceVM(id, fmt.Sprintf("Put %s", id), false) if err1 != nil { logrus.Debugf("%s failed to terminate service vm %s: %s", title, id, err1) if err == nil { err = err1 } } logrus.Debugf("Put succeeded on id %s", id) return err } // Cleanup ensures the information the driver stores is properly removed. // We use this opportunity to cleanup any -removing folders which may be // still left if the daemon was killed while it was removing a layer. func (d *Driver) Cleanup() error { title := "lcowdriver: cleanup" items, err := ioutil.ReadDir(d.dataRoot) if err != nil { if os.IsNotExist(err) { return nil } return err } // Note we don't return an error below - it's possible the files // are locked. However, next time around after the daemon exits, // we likely will be able to cleanup successfully. Instead we log // warnings if there are errors. for _, item := range items { if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") { if err := os.RemoveAll(filepath.Join(d.dataRoot, item.Name())); err != nil { logrus.Warnf("%s failed to cleanup %s: %s", title, item.Name(), err) } else { logrus.Infof("%s cleaned up %s", title, item.Name()) } } } // Cleanup any service VMs we have running, along with their scratch spaces. // We don't take the lock for this as it's taken in terminateServiceVm. for k, v := range d.serviceVms.svms { logrus.Debugf("%s svm entry: %s: %+v", title, k, v) d.terminateServiceVM(k, "cleanup", true) } return nil } // Diff takes a layer (and it's parent layer which may be null, but // is ignored by this implementation below) and returns a reader for // a tarstream representing the layers contents. The id could be // a read-only "layer.vhd" or a read-write "sandbox.vhdx". The semantics // of this function dictate that the layer is already mounted. // However, as we do lazy mounting as a performance optimisation, // this will likely not be the case. func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { title := fmt.Sprintf("lcowdriver: diff: %s", id) // Get VHDX info ld, err := getLayerDetails(d.dir(id)) if err != nil { logrus.Debugf("%s: failed to get vhdx information of %s: %s", title, d.dir(id), err) return nil, err } // Start the SVM with a mapped virtual disk. Note that if the SVM is // already running and we are in global mode, this will be // hot-added. 
mvd := hcsshim.MappedVirtualDisk{ HostPath: ld.filename, ContainerPath: hostToGuest(ld.filename), CreateInUtilityVM: true, ReadOnly: true, } logrus.Debugf("%s: starting service VM", title) svm, err := d.startServiceVMIfNotRunning(id, []hcsshim.MappedVirtualDisk{mvd}, fmt.Sprintf("diff %s", id)) if err != nil { return nil, err } logrus.Debugf("lcowdriver: diff: waiting for svm to finish booting") err = svm.getStartError() if err != nil { d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false) return nil, fmt.Errorf("lcowdriver: diff: svm failed to boot: %s", err) } // Obtain the tar stream for it // The actual container path will have be remapped to a short name, so use that. actualContainerPath := svm.getShortContainerPath(&mvd) if actualContainerPath == "" { return nil, fmt.Errorf("failed to get short container path for %+v in SVM %s", mvd, svm.config.Name) } logrus.Debugf("%s: %s %s, size %d, ReadOnly %t", title, ld.filename, actualContainerPath, ld.size, ld.isSandbox) tarReadCloser, err := svm.config.VhdToTar(mvd.HostPath, actualContainerPath, ld.isSandbox, ld.size) if err != nil { svm.hotRemoveVHDs(mvd) d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false) return nil, fmt.Errorf("%s failed to export layer to tar stream for id: %s, parent: %s : %s", title, id, parent, err) } logrus.Debugf("%s id %s parent %s completed successfully", title, id, parent) // In safe/non-global mode, we can't tear down the service VM until things have been read. return ioutils.NewReadCloserWrapper(tarReadCloser, func() error { tarReadCloser.Close() svm.hotRemoveVHDs(mvd) d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false) return nil }), nil } // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. The layer should not be mounted when calling // this function. Another way of describing this is that ApplyDiff writes // to a new layer (a VHD in LCOW) the contents of a tarstream it's given. func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { logrus.Debugf("lcowdriver: applydiff: id %s", id) // Log failures here as it's undiagnosable sometimes, due to a possible panic. // See https://github.com/moby/moby/issues/37955 for more information. dest := filepath.Join(d.dataRoot, id, layerFilename) if !noreexec { cmd := reexec.Command([]string{"docker-lcow-tar2ext4", dest}...) stdout := bytes.NewBuffer(nil) stderr := bytes.NewBuffer(nil) cmd.Stdin = diff cmd.Stdout = stdout cmd.Stderr = stderr if err := cmd.Start(); err != nil { logrus.Warnf("lcowdriver: applydiff: id %s failed to start re-exec: %s", id, err) return 0, err } if err := cmd.Wait(); err != nil { logrus.Warnf("lcowdriver: applydiff: id %s failed %s", id, err) return 0, fmt.Errorf("re-exec error: %v: stderr: %s", err, stderr) } size, err := strconv.ParseInt(stdout.String(), 10, 64) if err != nil { logrus.Warnf("lcowdriver: applydiff: id %s failed to parse output %s", id, err) return 0, fmt.Errorf("re-exec error: %v: stdout: %s", err, stdout) } return applySID(id, size, dest) } // The inline case size, err := tar2ext4Actual(dest, diff) if err != nil { logrus.Warnf("lcowdriver: applydiff: id %s failed %s", id, err) } return applySID(id, size, dest) } // applySID adds the VM Group SID read-only access. 
func applySID(id string, size int64, dest string) (int64, error) { if err := security.GrantVmGroupAccess(dest); err != nil { logrus.Warnf("lcowdriver: applySIDs: id %s failed %s", id, err) return 0, err } return size, nil } // tar2ext4Reexec is the re-exec entry point for writing a layer from a tar file func tar2ext4Reexec() { size, err := tar2ext4Actual(os.Args[1], os.Stdin) if err != nil { fmt.Fprint(os.Stderr, err) os.Exit(1) } fmt.Fprint(os.Stdout, size) } // tar2ext4Actual is the implementation of tar2ext to write a layer from a tar file. // It can be called through re-exec (default), or inline for debugging. func tar2ext4Actual(dest string, diff io.Reader) (int64, error) { // maxDiskSize is not relating to the sandbox size - this is the // maximum possible size a layer VHD generated can be from an EXT4 // layout perspective. const maxDiskSize = 128 * 1024 * 1024 * 1024 // 128GB out, err := os.Create(dest) if err != nil { return 0, err } defer out.Close() if err := tar2ext4.Convert( diff, out, tar2ext4.AppendVhdFooter, tar2ext4.ConvertWhiteout, tar2ext4.MaximumDiskSize(maxDiskSize)); err != nil { return 0, err } fi, err := os.Stat(dest) if err != nil { return 0, err } return fi.Size(), nil } // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. // The layer should not be mounted when calling this function. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { logrus.Debugf("lcowdriver: changes: id %s parent %s", id, parent) // TODO @gupta-ak. Needs implementation with assistance from service VM return nil, nil } // DiffSize calculates the changes between the specified layer // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { logrus.Debugf("lcowdriver: diffsize: id %s", id) // TODO @gupta-ak. Needs implementation with assistance from service VM return 0, nil } // GetMetadata returns custom driver information. func (d *Driver) GetMetadata(id string) (map[string]string, error) { logrus.Debugf("lcowdriver: getmetadata: id %s", id) m := make(map[string]string) m["dir"] = d.dir(id) return m, nil } // GetLayerPath gets the layer path on host (path to VHD/VHDX) func (d *Driver) GetLayerPath(id string) (string, error) { return d.dir(id), nil } // dir returns the absolute path to the layer. func (d *Driver) dir(id string) string { return filepath.Join(d.dataRoot, filepath.Base(id)) } // getLayerChain returns the layer chain information. func (d *Driver) getLayerChain(id string) ([]string, error) { jPath := filepath.Join(d.dir(id), "layerchain.json") logrus.Debugf("lcowdriver: getlayerchain: id %s json %s", id, jPath) content, err := ioutil.ReadFile(jPath) if os.IsNotExist(err) { return nil, nil } else if err != nil { return nil, fmt.Errorf("lcowdriver: getlayerchain: %s unable to read layerchain file %s: %s", id, jPath, err) } var layerChain []string err = json.Unmarshal(content, &layerChain) if err != nil { return nil, fmt.Errorf("lcowdriver: getlayerchain: %s failed to unmarshall layerchain file %s: %s", id, jPath, err) } return layerChain, nil } // setLayerChain stores the layer chain information on disk. 
func (d *Driver) setLayerChain(id string, chain []string) error { content, err := json.Marshal(&chain) if err != nil { return fmt.Errorf("lcowdriver: setlayerchain: %s failed to marshall layerchain json: %s", id, err) } jPath := filepath.Join(d.dir(id), "layerchain.json") logrus.Debugf("lcowdriver: setlayerchain: id %s json %s", id, jPath) err = ioutil.WriteFile(jPath, content, 0600) if err != nil { return fmt.Errorf("lcowdriver: setlayerchain: %s failed to write layerchain file: %s", id, err) } return nil } // getLayerDetails is a utility for getting a file name, size and indication of // sandbox for a VHD(x) in a folder. A read-only layer will be layer.vhd. A // read-write layer will be sandbox.vhdx. func getLayerDetails(folder string) (*layerDetails, error) { var fileInfo os.FileInfo ld := &layerDetails{ isSandbox: false, filename: filepath.Join(folder, layerFilename), } fileInfo, err := os.Stat(ld.filename) if err != nil { ld.filename = filepath.Join(folder, sandboxFilename) if fileInfo, err = os.Stat(ld.filename); err != nil { return nil, fmt.Errorf("failed to locate layer or sandbox in %s", folder) } ld.isSandbox = true } ld.size = fileInfo.Size() return ld, nil } func (d *Driver) getAllMounts(id string) ([]hcsshim.MappedVirtualDisk, error) { layerChain, err := d.getLayerChain(id) if err != nil { return nil, err } layerChain = append([]string{d.dir(id)}, layerChain...) logrus.Debugf("getting all layers: %v", layerChain) disks := make([]hcsshim.MappedVirtualDisk, len(layerChain), len(layerChain)) for i := range layerChain { ld, err := getLayerDetails(layerChain[i]) if err != nil { logrus.Debugf("Failed to get LayerVhdDetails from %s: %s", layerChain[i], err) return nil, err } disks[i].HostPath = ld.filename disks[i].ContainerPath = hostToGuest(ld.filename) disks[i].CreateInUtilityVM = true disks[i].ReadOnly = !ld.isSandbox } return disks, nil } func hostToGuest(hostpath string) string { // This is the "long" container path. At the point of which we are // calculating this, we don't know which service VM we're going to be // using, so we can't translate this to a short path yet, instead // deferring until the point of which it's added to an SVM. We don't // use long container paths in SVMs for SCSI disks, otherwise it can cause // command line operations that we invoke to fail due to being over ~4200 // characters when there are ~47 layers involved. An example of this is // the mount call to create the overlay across multiple SCSI-attached disks. // It doesn't affect VPMem attached layers during container creation as // these get mapped by openGCS to /tmp/N/M where N is a container instance // number, and M is a layer number. 
return fmt.Sprintf("/tmp/%s", filepath.Base(filepath.Dir(hostpath))) } func unionMountName(disks []hcsshim.MappedVirtualDisk) string { return fmt.Sprintf("%s-mount", disks[0].ContainerPath) } type nopCloser struct { io.Reader } func (nopCloser) Close() error { return nil } type fileGetCloserFromSVM struct { id string svm *serviceVM mvd *hcsshim.MappedVirtualDisk d *Driver } func (fgc *fileGetCloserFromSVM) Close() error { if fgc.svm != nil { if fgc.mvd != nil { if err := fgc.svm.hotRemoveVHDs(*fgc.mvd); err != nil { // We just log this as we're going to tear down the SVM imminently unless in global mode logrus.Errorf("failed to remove mvd %s: %s", fgc.mvd.ContainerPath, err) } } } if fgc.d != nil && fgc.svm != nil && fgc.id != "" { if err := fgc.d.terminateServiceVM(fgc.id, fmt.Sprintf("diffgetter %s", fgc.id), false); err != nil { return err } } return nil } func (fgc *fileGetCloserFromSVM) Get(filename string) (io.ReadCloser, error) { errOut := &bytes.Buffer{} outOut := &bytes.Buffer{} // Must map to the actual "short" container path where the SCSI disk was mounted actualContainerPath := fgc.svm.getShortContainerPath(fgc.mvd) if actualContainerPath == "" { return nil, fmt.Errorf("inconsistency detected: couldn't get short container path for %+v in utility VM %s", fgc.mvd, fgc.svm.config.Name) } file := path.Join(actualContainerPath, filename) // Ugly fix for MSFT internal bug VSO#19696554 // If a file name contains a space, pushing an image fails. // Using solution from https://groups.google.com/forum/#!topic/Golang-Nuts/DpldsmrhPio to escape for shell execution file = "'" + strings.Join(strings.Split(file, "'"), `'"'"'`) + "'" if err := fgc.svm.runProcess(fmt.Sprintf("cat %s", file), nil, outOut, errOut); err != nil { logrus.Debugf("cat %s failed: %s", file, errOut.String()) return nil, err } return nopCloser{bytes.NewReader(outOut.Bytes())}, nil } // DiffGetter returns a FileGetCloser that can read files from the directory that // contains files for the layer differences. Used for direct access for tar-split. func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { title := fmt.Sprintf("lcowdriver: diffgetter: %s", id) logrus.Debugf(title) ld, err := getLayerDetails(d.dir(id)) if err != nil { logrus.Debugf("%s: failed to get vhdx information of %s: %s", title, d.dir(id), err) return nil, err } // Start the SVM with a mapped virtual disk. Note that if the SVM is // already running and we are in global mode, this will be hot-added. mvd := hcsshim.MappedVirtualDisk{ HostPath: ld.filename, ContainerPath: hostToGuest(ld.filename), CreateInUtilityVM: true, ReadOnly: true, } logrus.Debugf("%s: starting service VM", title) svm, err := d.startServiceVMIfNotRunning(id, []hcsshim.MappedVirtualDisk{mvd}, fmt.Sprintf("diffgetter %s", id)) if err != nil { return nil, err } logrus.Debugf("%s: waiting for svm to finish booting", title) err = svm.getStartError() if err != nil { d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false) return nil, fmt.Errorf("%s: svm failed to boot: %s", title, err) } return &fileGetCloserFromSVM{ id: id, svm: svm, mvd: &mvd, d: d}, nil }
[ "\"DOCKER_LCOW_NOREEXEC\"" ]
[]
[ "DOCKER_LCOW_NOREEXEC" ]
[]
["DOCKER_LCOW_NOREEXEC"]
go
1
0
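Editor's note on the record above: the lcow driver's ApplyDiff chooses between a re-exec'd "docker-lcow-tar2ext4" helper and an inline tar2ext4 conversion based on a noreexec flag, and the record's environment list names DOCKER_LCOW_NOREEXEC, but the code that wires that variable to the flag is not part of this excerpt. The following is a minimal, hypothetical sketch of that opt-out pattern only — it is not the driver's actual initialization code, and the "any non-empty value disables re-exec" behaviour is an assumption.

package main

import (
	"fmt"
	"os"
)

// noReexec reports whether the inline (debug) conversion path should be used.
// Assumption: any non-empty value of DOCKER_LCOW_NOREEXEC opts out of re-exec.
func noReexec() bool {
	return os.Getenv("DOCKER_LCOW_NOREEXEC") != ""
}

func main() {
	if noReexec() {
		fmt.Println("tar2ext4: converting inline, in-process")
		return
	}
	fmt.Println("tar2ext4: converting via a re-exec'd helper process")
}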
go/mysql/server_test.go
/* Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreedto in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package mysql import ( "fmt" "io/ioutil" "net" "os" "os/exec" "path" "strings" "testing" "time" "golang.org/x/net/context" "vitess.io/vitess/go/sqltypes" vtenv "vitess.io/vitess/go/vt/env" "vitess.io/vitess/go/vt/tlstest" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttls" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) var selectRowsResult = &sqltypes.Result{ Fields: []*querypb.Field{ { Name: "id", Type: querypb.Type_INT32, }, { Name: "name", Type: querypb.Type_VARCHAR, }, }, Rows: [][]sqltypes.Value{ { sqltypes.MakeTrusted(querypb.Type_INT32, []byte("10")), sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("nice name")), }, { sqltypes.MakeTrusted(querypb.Type_INT32, []byte("20")), sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("nicer name")), }, }, RowsAffected: 2, } type testHandler struct { lastConn *Conn result *sqltypes.Result err error warnings uint16 } func (th *testHandler) NewConnection(c *Conn) { th.lastConn = c } func (th *testHandler) ConnectionClosed(c *Conn) { } func (th *testHandler) ComQuery(c *Conn, query string, callback func(*sqltypes.Result) error) error { if th.result != nil { callback(th.result) return nil } switch query { case "error": return th.err case "panic": panic("test panic attack!") case "select rows": callback(selectRowsResult) case "error after send": callback(selectRowsResult) return th.err case "insert": callback(&sqltypes.Result{ RowsAffected: 123, InsertID: 123456789, }) case "schema echo": callback(&sqltypes.Result{ Fields: []*querypb.Field{ { Name: "schema_name", Type: querypb.Type_VARCHAR, }, }, Rows: [][]sqltypes.Value{ { sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(c.SchemaName)), }, }, }) case "ssl echo": value := "OFF" if c.Capabilities&CapabilityClientSSL > 0 { value = "ON" } callback(&sqltypes.Result{ Fields: []*querypb.Field{ { Name: "ssl_flag", Type: querypb.Type_VARCHAR, }, }, Rows: [][]sqltypes.Value{ { sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(value)), }, }, }) case "userData echo": callback(&sqltypes.Result{ Fields: []*querypb.Field{ { Name: "user", Type: querypb.Type_VARCHAR, }, { Name: "user_data", Type: querypb.Type_VARCHAR, }, }, Rows: [][]sqltypes.Value{ { sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(c.User)), sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(c.UserData.Get().Username)), }, }, }) default: if strings.HasPrefix(query, benchmarkQueryPrefix) { callback(&sqltypes.Result{ Fields: []*querypb.Field{ { Name: "result", Type: querypb.Type_VARCHAR, }, }, Rows: [][]sqltypes.Value{ { sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(query)), }, }, }) } callback(&sqltypes.Result{}) } return nil } func (th *testHandler) ComPrepare(c *Conn, query string) ([]*querypb.Field, error) { return nil, nil } func (th *testHandler) ComStmtExecute(c *Conn, prepare *PrepareData, callback func(*sqltypes.Result) error) error { return nil } func (th *testHandler) ComResetConnection(c *Conn) { } func (th *testHandler) WarningCount(c 
*Conn) uint16 { return th.warnings } func getHostPort(t *testing.T, a net.Addr) (string, int) { // For the host name, we resolve 'localhost' into an address. // This works around a few travis issues where IPv6 is not 100% enabled. hosts, err := net.LookupHost("localhost") if err != nil { t.Fatalf("LookupHost(localhost) failed: %v", err) } host := hosts[0] port := a.(*net.TCPAddr).Port t.Logf("listening on address '%v' port %v", host, port) return host, port } func TestConnectionFromListener(t *testing.T) { th := &testHandler{} authServer := NewAuthServerStatic() authServer.Entries["user1"] = []*AuthServerStaticEntry{{ Password: "password1", UserData: "userData1", }} // Make sure we can create our own net.Listener for use with the mysql // listener listener, err := net.Listen("tcp", ":0") if err != nil { t.Fatalf("net.Listener failed: %v", err) } l, err := NewFromListener(listener, authServer, th, 0, 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } defer l.Close() go l.Accept() host, port := getHostPort(t, l.Addr()) // Setup the right parameters. params := &ConnParams{ Host: host, Port: port, Uname: "user1", Pass: "password1", } c, err := Connect(context.Background(), params) if err != nil { t.Errorf("Should be able to connect to server but found error: %v", err) } c.Close() } func TestConnectionWithoutSourceHost(t *testing.T) { th := &testHandler{} authServer := NewAuthServerStatic() authServer.Entries["user1"] = []*AuthServerStaticEntry{{ Password: "password1", UserData: "userData1", }} l, err := NewListener("tcp", ":0", authServer, th, 0, 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } defer l.Close() go l.Accept() host, port := getHostPort(t, l.Addr()) // Setup the right parameters. params := &ConnParams{ Host: host, Port: port, Uname: "user1", Pass: "password1", } c, err := Connect(context.Background(), params) if err != nil { t.Errorf("Should be able to connect to server but found error: %v", err) } c.Close() } func TestConnectionWithSourceHost(t *testing.T) { th := &testHandler{} authServer := NewAuthServerStatic() authServer.Entries["user1"] = []*AuthServerStaticEntry{ { Password: "password1", UserData: "userData1", SourceHost: "localhost", }, } l, err := NewListener("tcp", ":0", authServer, th, 0, 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } defer l.Close() go l.Accept() host, port := getHostPort(t, l.Addr()) // Setup the right parameters. params := &ConnParams{ Host: host, Port: port, Uname: "user1", Pass: "password1", } _, err = Connect(context.Background(), params) // target is localhost, should not work from tcp connection if err == nil { t.Errorf("Should be able to connect to server but found error: %v", err) } } func TestConnectionUseMysqlNativePasswordWithSourceHost(t *testing.T) { th := &testHandler{} authServer := NewAuthServerStatic() authServer.Entries["user1"] = []*AuthServerStaticEntry{ { MysqlNativePassword: "*9E128DA0C64A6FCCCDCFBDD0FC0A2C967C6DB36F", UserData: "userData1", SourceHost: "localhost", }, } l, err := NewListener("tcp", ":0", authServer, th, 0, 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } defer l.Close() go l.Accept() host, port := getHostPort(t, l.Addr()) // Setup the right parameters. 
params := &ConnParams{ Host: host, Port: port, Uname: "user1", Pass: "mysql_password", } _, err = Connect(context.Background(), params) // target is localhost, should not work from tcp connection if err == nil { t.Errorf("Should be able to connect to server but found error: %v", err) } } func TestConnectionUnixSocket(t *testing.T) { th := &testHandler{} authServer := NewAuthServerStatic() authServer.Entries["user1"] = []*AuthServerStaticEntry{ { Password: "password1", UserData: "userData1", SourceHost: "localhost", }, } unixSocket, err := ioutil.TempFile("", "mysql_vitess_test.sock") if err != nil { t.Fatalf("Failed to create temp file") } os.Remove(unixSocket.Name()) l, err := NewListener("unix", unixSocket.Name(), authServer, th, 0, 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } defer l.Close() go l.Accept() // Setup the right parameters. params := &ConnParams{ UnixSocket: unixSocket.Name(), Uname: "user1", Pass: "password1", } c, err := Connect(context.Background(), params) if err != nil { t.Errorf("Should be able to connect to server but found error: %v", err) } c.Close() } func TestClientFoundRows(t *testing.T) { th := &testHandler{} authServer := NewAuthServerStatic() authServer.Entries["user1"] = []*AuthServerStaticEntry{{ Password: "password1", UserData: "userData1", }} l, err := NewListener("tcp", ":0", authServer, th, 0, 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } defer l.Close() go l.Accept() host, port := getHostPort(t, l.Addr()) // Setup the right parameters. params := &ConnParams{ Host: host, Port: port, Uname: "user1", Pass: "password1", } // Test without flag. c, err := Connect(context.Background(), params) if err != nil { t.Fatal(err) } foundRows := th.lastConn.Capabilities & CapabilityClientFoundRows if foundRows != 0 { t.Errorf("FoundRows flag: %x, second bit must be 0", th.lastConn.Capabilities) } c.Close() if !c.IsClosed() { t.Errorf("IsClosed returned true on Close-d connection.") } // Test with flag. params.Flags |= CapabilityClientFoundRows c, err = Connect(context.Background(), params) if err != nil { t.Fatal(err) } foundRows = th.lastConn.Capabilities & CapabilityClientFoundRows if foundRows == 0 { t.Errorf("FoundRows flag: %x, second bit must be set", th.lastConn.Capabilities) } c.Close() } func TestConnCounts(t *testing.T) { th := &testHandler{} initialNumUsers := len(connCountPerUser.Counts()) user := "anotherNotYetConnectedUser1" passwd := "password1" authServer := NewAuthServerStatic() authServer.Entries[user] = []*AuthServerStaticEntry{{ Password: passwd, UserData: "userData1", }} l, err := NewListener("tcp", ":0", authServer, th, 0, 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } defer l.Close() go l.Accept() host, port := getHostPort(t, l.Addr()) // Test with one new connection. params := &ConnParams{ Host: host, Port: port, Uname: user, Pass: passwd, } c, err := Connect(context.Background(), params) if err != nil { t.Fatal(err) } connCounts := connCountPerUser.Counts() if l := len(connCounts); l-initialNumUsers != 1 { t.Errorf("Expected 1 new user, got %d", l) } checkCountsForUser(t, user, 1) // Test with a second new connection. c2, err := Connect(context.Background(), params) if err != nil { t.Fatal(err) } connCounts = connCountPerUser.Counts() // There is still only one new user. if l2 := len(connCounts); l2-initialNumUsers != 1 { t.Errorf("Expected 1 new user, got %d", l2) } checkCountsForUser(t, user, 2) // Test after closing connections. time.Sleep lets it work, but seems flakey. 
c.Close() //time.Sleep(10 * time.Millisecond) //checkCountsForUser(t, user, 1) c2.Close() //time.Sleep(10 * time.Millisecond) //checkCountsForUser(t, user, 0) } func checkCountsForUser(t *testing.T, user string, expected int64) { connCounts := connCountPerUser.Counts() userCount, ok := connCounts[user] if ok { if userCount != expected { t.Errorf("Expected connection count for user to be %d, got %d", expected, userCount) } } else { t.Errorf("No count found for user %s", user) } } func TestServer(t *testing.T) { th := &testHandler{} authServer := NewAuthServerStatic() authServer.Entries["user1"] = []*AuthServerStaticEntry{{ Password: "password1", UserData: "userData1", }} l, err := NewListener("tcp", ":0", authServer, th, 0, 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } defer l.Close() go l.Accept() host, port := getHostPort(t, l.Addr()) // Setup the right parameters. params := &ConnParams{ Host: host, Port: port, Uname: "user1", Pass: "password1", } initialTimingCounts := timings.Counts() initialConnAccept := connAccept.Get() initialConnSlow := connSlow.Get() l.SlowConnectWarnThreshold = time.Duration(time.Nanosecond * 1) // Run an 'error' command. th.err = NewSQLError(ERUnknownComError, SSUnknownComError, "forced query error") output, ok := runMysql(t, params, "error") if ok { t.Fatalf("mysql should have failed: %v", output) } if !strings.Contains(output, "ERROR 1047 (08S01)") || !strings.Contains(output, "forced query error") { t.Errorf("Unexpected output for 'error': %v", output) } if connCount.Get() != 0 { t.Errorf("Expected ConnCount=0, got %d", connCount.Get()) } if connAccept.Get()-initialConnAccept != 1 { t.Errorf("Expected ConnAccept delta=1, got %d", connAccept.Get()-initialConnAccept) } if connSlow.Get()-initialConnSlow != 1 { t.Errorf("Expected ConnSlow delta=1, got %d", connSlow.Get()-initialConnSlow) } expectedTimingDeltas := map[string]int64{ "All": 2, connectTimingKey: 1, queryTimingKey: 1, } gotTimingCounts := timings.Counts() for key, got := range gotTimingCounts { expected := expectedTimingDeltas[key] delta := got - initialTimingCounts[key] if delta < expected { t.Errorf("Expected Timing count delta %s should be >= %d, got %d", key, expected, delta) } } // Set the slow connect threshold to something high that we don't expect to trigger l.SlowConnectWarnThreshold = time.Duration(time.Second * 1) // Run a 'panic' command, other side should panic, recover and // close the connection. output, ok = runMysql(t, params, "panic") if ok { t.Fatalf("mysql should have failed: %v", output) } if !strings.Contains(output, "ERROR 2013 (HY000)") || !strings.Contains(output, "Lost connection to MySQL server during query") { t.Errorf("Unexpected output for 'panic'") } if connCount.Get() != 0 { t.Errorf("Expected ConnCount=0, got %d", connCount.Get()) } if connAccept.Get()-initialConnAccept != 2 { t.Errorf("Expected ConnAccept delta=2, got %d", connAccept.Get()-initialConnAccept) } if connSlow.Get()-initialConnSlow != 1 { t.Errorf("Expected ConnSlow delta=1, got %d", connSlow.Get()-initialConnSlow) } // Run a 'select rows' command with results. 
output, ok = runMysql(t, params, "select rows") if !ok { t.Fatalf("mysql failed: %v", output) } if !strings.Contains(output, "nice name") || !strings.Contains(output, "nicer name") || !strings.Contains(output, "2 rows in set") { t.Errorf("Unexpected output for 'select rows'") } if strings.Contains(output, "warnings") { t.Errorf("Unexpected warnings in 'select rows'") } // Run a 'select rows' command with warnings th.warnings = 13 output, ok = runMysql(t, params, "select rows") if !ok { t.Fatalf("mysql failed: %v", output) } if !strings.Contains(output, "nice name") || !strings.Contains(output, "nicer name") || !strings.Contains(output, "2 rows in set") || !strings.Contains(output, "13 warnings") { t.Errorf("Unexpected output for 'select rows': %v", output) } th.warnings = 0 // If there's an error after streaming has started, // we should get a 2013 th.err = NewSQLError(ERUnknownComError, SSUnknownComError, "forced error after send") output, ok = runMysql(t, params, "error after send") if ok { t.Fatalf("mysql should have failed: %v", output) } if !strings.Contains(output, "ERROR 2013 (HY000)") || !strings.Contains(output, "Lost connection to MySQL server during query") { t.Errorf("Unexpected output for 'panic'") } // Run an 'insert' command, no rows, but rows affected. output, ok = runMysql(t, params, "insert") if !ok { t.Fatalf("mysql failed: %v", output) } if !strings.Contains(output, "Query OK, 123 rows affected") { t.Errorf("Unexpected output for 'insert'") } // Run a 'schema echo' command, to make sure db name is right. params.DbName = "XXXfancyXXX" output, ok = runMysql(t, params, "schema echo") if !ok { t.Fatalf("mysql failed: %v", output) } if !strings.Contains(output, params.DbName) { t.Errorf("Unexpected output for 'schema echo'") } // Sanity check: make sure this didn't go through SSL output, ok = runMysql(t, params, "ssl echo") if !ok { t.Fatalf("mysql failed: %v", output) } if !strings.Contains(output, "ssl_flag") || !strings.Contains(output, "OFF") || !strings.Contains(output, "1 row in set") { t.Errorf("Unexpected output for 'ssl echo': %v", output) } // UserData check: checks the server user data is correct. output, ok = runMysql(t, params, "userData echo") if !ok { t.Fatalf("mysql failed: %v", output) } if !strings.Contains(output, "user1") || !strings.Contains(output, "user_data") || !strings.Contains(output, "userData1") { t.Errorf("Unexpected output for 'userData echo': %v", output) } // Permissions check: check a bad password is rejected. params.Pass = "bad" output, ok = runMysql(t, params, "select rows") if ok { t.Fatalf("mysql should have failed: %v", output) } if !strings.Contains(output, "1045") || !strings.Contains(output, "28000") || !strings.Contains(output, "Access denied") { t.Errorf("Unexpected output for invalid password: %v", output) } // Permissions check: check an unknown user is rejected. params.Pass = "password1" params.Uname = "user2" output, ok = runMysql(t, params, "select rows") if ok { t.Fatalf("mysql should have failed: %v", output) } if !strings.Contains(output, "1045") || !strings.Contains(output, "28000") || !strings.Contains(output, "Access denied") { t.Errorf("Unexpected output for invalid password: %v", output) } // Uncomment to leave setup up for a while, to run tests manually. // fmt.Printf("Listening to server on host '%v' port '%v'.\n", host, port) // time.Sleep(60 * time.Minute) } // TestClearTextServer creates a Server that needs clear text // passwords from the client. 
func TestClearTextServer(t *testing.T) { // If the database we're using is MariaDB, the client // is also the MariaDB client, that does support // clear text by default. isMariaDB := os.Getenv("MYSQL_FLAVOR") == "MariaDB" th := &testHandler{} authServer := NewAuthServerStatic() authServer.Entries["user1"] = []*AuthServerStaticEntry{{ Password: "password1", UserData: "userData1", }} authServer.Method = MysqlClearPassword l, err := NewListener("tcp", ":0", authServer, th, 0, 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } defer l.Close() go l.Accept() host, port := getHostPort(t, l.Addr()) // Setup the right parameters. params := &ConnParams{ Host: host, Port: port, Uname: "user1", Pass: "password1", } // Run a 'select rows' command with results. This should fail // as clear text is not enabled by default on the client // (except MariaDB). l.AllowClearTextWithoutTLS = true sql := "select rows" output, ok := runMysql(t, params, sql) if ok { if isMariaDB { t.Logf("mysql should have failed but returned: %v\nbut letting it go on MariaDB", output) } else { t.Fatalf("mysql should have failed but returned: %v", output) } } else { if strings.Contains(output, "No such file or directory") { t.Logf("skipping mysql clear text tests, as the clear text plugin cannot be loaded: %v", err) return } if !strings.Contains(output, "plugin not enabled") { t.Errorf("Unexpected output for 'select rows': %v", output) } } // Now enable clear text plugin in client, but server requires SSL. l.AllowClearTextWithoutTLS = false if !isMariaDB { sql = enableCleartextPluginPrefix + sql } output, ok = runMysql(t, params, sql) if ok { t.Fatalf("mysql should have failed but returned: %v", output) } if !strings.Contains(output, "Cannot use clear text authentication over non-SSL connections") { t.Errorf("Unexpected output for 'select rows': %v", output) } // Now enable clear text plugin, it should now work. l.AllowClearTextWithoutTLS = true output, ok = runMysql(t, params, sql) if !ok { t.Fatalf("mysql failed: %v", output) } if !strings.Contains(output, "nice name") || !strings.Contains(output, "nicer name") || !strings.Contains(output, "2 rows in set") { t.Errorf("Unexpected output for 'select rows'") } // Change password, make sure server rejects us. params.Pass = "bad" output, ok = runMysql(t, params, sql) if ok { t.Fatalf("mysql should have failed but returned: %v", output) } if !strings.Contains(output, "Access denied for user 'user1'") { t.Errorf("Unexpected output for 'select rows': %v", output) } } // TestDialogServer creates a Server that uses the dialog plugin on the client. func TestDialogServer(t *testing.T) { th := &testHandler{} authServer := NewAuthServerStatic() authServer.Entries["user1"] = []*AuthServerStaticEntry{{ Password: "password1", UserData: "userData1", }} authServer.Method = MysqlDialog l, err := NewListener("tcp", ":0", authServer, th, 0, 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } l.AllowClearTextWithoutTLS = true defer l.Close() go l.Accept() host, port := getHostPort(t, l.Addr()) // Setup the right parameters. 
params := &ConnParams{ Host: host, Port: port, Uname: "user1", Pass: "password1", } sql := "select rows" output, ok := runMysql(t, params, sql) if strings.Contains(output, "No such file or directory") { t.Logf("skipping dialog plugin tests, as the dialog plugin cannot be loaded: %v", err) return } if !ok { t.Fatalf("mysql failed: %v", output) } if !strings.Contains(output, "nice name") || !strings.Contains(output, "nicer name") || !strings.Contains(output, "2 rows in set") { t.Errorf("Unexpected output for 'select rows': %v", output) } } // TestTLSServer creates a Server with TLS support, then uses mysql // client to connect to it. func TestTLSServer(t *testing.T) { th := &testHandler{} authServer := NewAuthServerStatic() authServer.Entries["user1"] = []*AuthServerStaticEntry{{ Password: "password1", }} // Create the listener, so we can get its host. // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. l, err := NewListener("tcp", ":0", authServer, th, 0, 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } defer l.Close() // Make sure hostname is added as an entry to /etc/hosts, otherwise ssl handshake will fail host, err := os.Hostname() if err != nil { t.Fatalf("Failed to get os Hostname: %v", err) } port := l.Addr().(*net.TCPAddr).Port // Create the certs. root, err := ioutil.TempDir("", "TestTLSServer") if err != nil { t.Fatalf("TempDir failed: %v", err) } defer os.RemoveAll(root) tlstest.CreateCA(root) tlstest.CreateSignedCert(root, tlstest.CA, "01", "server", host) tlstest.CreateSignedCert(root, tlstest.CA, "02", "client", "Client Cert") // Create the server with TLS config. serverConfig, err := vttls.ServerConfig( path.Join(root, "server-cert.pem"), path.Join(root, "server-key.pem"), path.Join(root, "ca-cert.pem")) if err != nil { t.Fatalf("TLSServerConfig failed: %v", err) } l.TLSConfig = serverConfig go l.Accept() // Setup the right parameters. params := &ConnParams{ Host: host, Port: port, Uname: "user1", Pass: "password1", // SSL flags. Flags: CapabilityClientSSL, SslCa: path.Join(root, "ca-cert.pem"), SslCert: path.Join(root, "client-cert.pem"), SslKey: path.Join(root, "client-key.pem"), } // Run a 'select rows' command with results. 
conn, err := Connect(context.Background(), params) //output, ok := runMysql(t, params, "select rows") if err != nil { t.Fatalf("mysql failed: %v", err) } results, err := conn.ExecuteFetch("select rows", 1000, true) if err != nil { t.Fatalf("mysql fetch failed: %v", err) } output := "" for _, row := range results.Rows { r := make([]string, 0) for _, col := range row { r = append(r, col.String()) } output = output + strings.Join(r, ",") + "\n" } if results.Rows[0][1].ToString() != "nice name" || results.Rows[1][1].ToString() != "nicer name" || len(results.Rows) != 2 { t.Errorf("Unexpected output for 'select rows': %v", output) } // make sure this went through SSL results, err = conn.ExecuteFetch("ssl echo", 1000, true) if err != nil { t.Fatalf("mysql fetch failed: %v", err) } if results.Rows[0][0].ToString() != "ON" { t.Errorf("Unexpected output for 'ssl echo': %v", results) } checkCountForTLSVer(t, versionTLS12, 1) checkCountForTLSVer(t, versionNoTLS, 0) conn.Close() } // TestTLSRequired creates a Server with TLS required, then tests that an insecure mysql // client is rejected func TestTLSRequired(t *testing.T) { th := &testHandler{} authServer := NewAuthServerStatic() authServer.Entries["user1"] = []*AuthServerStaticEntry{{ Password: "password1", }} // Create the listener, so we can get its host. // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. l, err := NewListener("tcp", ":0", authServer, th, 0, 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } defer l.Close() // Make sure hostname is added as an entry to /etc/hosts, otherwise ssl handshake will fail host, err := os.Hostname() if err != nil { t.Fatalf("Failed to get os Hostname: %v", err) } port := l.Addr().(*net.TCPAddr).Port // Create the certs. root, err := ioutil.TempDir("", "TestTLSRequired") if err != nil { t.Fatalf("TempDir failed: %v", err) } defer os.RemoveAll(root) tlstest.CreateCA(root) tlstest.CreateSignedCert(root, tlstest.CA, "01", "server", host) // Create the server with TLS config. serverConfig, err := vttls.ServerConfig( path.Join(root, "server-cert.pem"), path.Join(root, "server-key.pem"), path.Join(root, "ca-cert.pem")) if err != nil { t.Fatalf("TLSServerConfig failed: %v", err) } l.TLSConfig = serverConfig l.RequireSecureTransport = true go l.Accept() // Setup conn params without SSL. 
params := &ConnParams{ Host: host, Port: port, Uname: "user1", Pass: "password1", } conn, err := Connect(context.Background(), params) if err == nil { t.Fatal("mysql should have failed") } if conn != nil { conn.Close() } // setup conn params with TLS tlstest.CreateSignedCert(root, tlstest.CA, "02", "client", "Client Cert") params.Flags = CapabilityClientSSL params.SslCa = path.Join(root, "ca-cert.pem") params.SslCert = path.Join(root, "client-cert.pem") params.SslKey = path.Join(root, "client-key.pem") conn, err = Connect(context.Background(), params) if err != nil { t.Fatalf("mysql failed: %v", err) } if conn != nil { conn.Close() } } func checkCountForTLSVer(t *testing.T, version string, expected int64) { connCounts := connCountByTLSVer.Counts() count, ok := connCounts[version] if ok { if count != expected { t.Errorf("Expected connection count for version %s to be %d, got %d", version, expected, count) } } else { t.Errorf("No count found for version %s", version) } } func TestErrorCodes(t *testing.T) { th := &testHandler{} authServer := NewAuthServerStatic() authServer.Entries["user1"] = []*AuthServerStaticEntry{{ Password: "password1", UserData: "userData1", }} l, err := NewListener("tcp", ":0", authServer, th, 0, 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } defer l.Close() go l.Accept() host, port := getHostPort(t, l.Addr()) // Setup the right parameters. params := &ConnParams{ Host: host, Port: port, Uname: "user1", Pass: "password1", } ctx := context.Background() client, err := Connect(ctx, params) if err != nil { t.Fatalf("error in connect: %v", err) } // Test that the right mysql errno/sqlstate are returned for various // internal vitess errors tests := []struct { err error code int sqlState string text string }{ { err: vterrors.Errorf( vtrpcpb.Code_INVALID_ARGUMENT, "invalid argument"), code: ERUnknownError, sqlState: SSUnknownSQLState, text: "invalid argument", }, { err: vterrors.Errorf( vtrpcpb.Code_INVALID_ARGUMENT, "(errno %v) (sqlstate %v) invalid argument with errno", ERDupEntry, SSDupKey), code: ERDupEntry, sqlState: SSDupKey, text: "invalid argument with errno", }, { err: vterrors.Errorf( vtrpcpb.Code_DEADLINE_EXCEEDED, "connection deadline exceeded"), code: ERQueryInterrupted, sqlState: SSUnknownSQLState, text: "deadline exceeded", }, { err: vterrors.Errorf( vtrpcpb.Code_RESOURCE_EXHAUSTED, "query pool timeout"), code: ERTooManyUserConnections, sqlState: SSUnknownSQLState, text: "resource exhausted", }, { err: vterrors.Wrap(NewSQLError(ERVitessMaxRowsExceeded, SSUnknownSQLState, "Row count exceeded 10000"), "wrapped"), code: ERVitessMaxRowsExceeded, sqlState: SSUnknownSQLState, text: "resource exhausted", }, } for _, test := range tests { th.err = NewSQLErrorFromError(test.err) result, err := client.ExecuteFetch("error", 100, false) if err == nil { t.Fatalf("mysql should have failed but returned: %v", result) } serr, ok := err.(*SQLError) if !ok { t.Fatalf("mysql should have returned a SQLError") } if serr.Number() != test.code { t.Errorf("error in %s: want code %v got %v", test.text, test.code, serr.Number()) } if serr.SQLState() != test.sqlState { t.Errorf("error in %s: want sqlState %v got %v", test.text, test.sqlState, serr.SQLState()) } if !strings.Contains(serr.Error(), test.err.Error()) { t.Errorf("error in %s: want err %v got %v", test.text, test.err.Error(), serr.Error()) } } } const enableCleartextPluginPrefix = "enable-cleartext-plugin: " // runMysql forks a mysql command line process connecting to the provided server. 
func runMysql(t *testing.T, params *ConnParams, command string) (string, bool) { dir, err := vtenv.VtMysqlRoot() if err != nil { t.Fatalf("vtenv.VtMysqlRoot failed: %v", err) } name, err := binaryPath(dir, "mysql") if err != nil { t.Fatalf("binaryPath failed: %v", err) } // The args contain '-v' 3 times, to switch to very verbose output. // In particular, it has the message: // Query OK, 1 row affected (0.00 sec) args := []string{ "-v", "-v", "-v", } if strings.HasPrefix(command, enableCleartextPluginPrefix) { command = command[len(enableCleartextPluginPrefix):] args = append(args, "--enable-cleartext-plugin") } args = append(args, "-e", command) if params.UnixSocket != "" { args = append(args, "-S", params.UnixSocket) } else { args = append(args, "-h", params.Host, "-P", fmt.Sprintf("%v", params.Port)) } if params.Uname != "" { args = append(args, "-u", params.Uname) } if params.Pass != "" { args = append(args, "-p"+params.Pass) } if params.DbName != "" { args = append(args, "-D", params.DbName) } if params.Flags&CapabilityClientSSL > 0 { args = append(args, "--ssl", "--ssl-ca", params.SslCa, "--ssl-cert", params.SslCert, "--ssl-key", params.SslKey, "--ssl-verify-server-cert") } env := []string{ "LD_LIBRARY_PATH=" + path.Join(dir, "lib/mysql"), } t.Logf("Running mysql command: %v %v", name, args) cmd := exec.Command(name, args...) cmd.Env = env cmd.Dir = dir out, err := cmd.CombinedOutput() output := string(out) if err != nil { return output, false } return output, true } // binaryPath does a limited path lookup for a command, // searching only within sbin and bin in the given root. // // FIXME(alainjobart) move this to vt/env, and use it from // go/vt/mysqlctl too. func binaryPath(root, binary string) (string, error) { subdirs := []string{"sbin", "bin"} for _, subdir := range subdirs { binPath := path.Join(root, subdir, binary) if _, err := os.Stat(binPath); err == nil { return binPath, nil } } return "", fmt.Errorf("%s not found in any of %s/{%s}", binary, root, strings.Join(subdirs, ",")) } func TestListenerShutdown(t *testing.T) { th := &testHandler{} authServer := NewAuthServerStatic() authServer.Entries["user1"] = []*AuthServerStaticEntry{{ Password: "password1", UserData: "userData1", }} l, err := NewListener("tcp", ":0", authServer, th, 0, 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } defer l.Close() go l.Accept() host, port := getHostPort(t, l.Addr()) // Setup the right parameters. 
params := &ConnParams{ Host: host, Port: port, Uname: "user1", Pass: "password1", } ctx, cancel := context.WithCancel(context.Background()) defer cancel() conn, err := Connect(ctx, params) if err != nil { t.Fatalf("Can't connect to listener: %v", err) } if err := conn.Ping(); err != nil { t.Fatalf("Ping failed: %v", err) } l.Shutdown() if err := conn.Ping(); err != nil { sqlErr, ok := err.(*SQLError) if !ok { t.Fatalf("Wrong error type: %T", err) } if sqlErr.Number() != ERServerShutdown { t.Fatalf("Unexpected sql error code: %d", sqlErr.Number()) } if sqlErr.SQLState() != SSServerShutdown { t.Fatalf("Unexpected error sql state: %s", sqlErr.SQLState()) } if sqlErr.Message != "Server shutdown in progress" { t.Fatalf("Unexpected error message: %s", sqlErr.Message) } } else { t.Fatalf("Ping should fail after shutdown") } } func TestParseConnAttrs(t *testing.T) { expected := map[string]string{ "_client_version": "8.0.11", "program_name": "mysql", "_pid": "22850", "_platform": "x86_64", "_os": "linux-glibc2.12", "_client_name": "libmysql", } data := []byte{0x70, 0x04, 0x5f, 0x70, 0x69, 0x64, 0x05, 0x32, 0x32, 0x38, 0x35, 0x30, 0x09, 0x5f, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x06, 0x78, 0x38, 0x36, 0x5f, 0x36, 0x34, 0x03, 0x5f, 0x6f, 0x73, 0x0f, 0x6c, 0x69, 0x6e, 0x75, 0x78, 0x2d, 0x67, 0x6c, 0x69, 0x62, 0x63, 0x32, 0x2e, 0x31, 0x32, 0x0c, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x08, 0x6c, 0x69, 0x62, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x0f, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x06, 0x38, 0x2e, 0x30, 0x2e, 0x31, 0x31, 0x0c, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x05, 0x6d, 0x79, 0x73, 0x71, 0x6c} attrs, pos, err := parseConnAttrs(data, 0) if err != nil { t.Fatalf("Failed to read connection attributes: %v", err) } if pos != 113 { t.Fatalf("Unexpeded pos after reading connection attributes: %d instead of 113", pos) } for k, v := range expected { if val, ok := attrs[k]; ok { if val != v { t.Fatalf("Unexpected value found in attrs for key %s: got %s expected %s", k, val, v) } } else { t.Fatalf("Error reading key %s from connection attributes: attrs: %-v", k, attrs) } } }
[ "\"MYSQL_FLAVOR\"" ]
[]
[ "MYSQL_FLAVOR" ]
[]
["MYSQL_FLAVOR"]
go
1
0
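Editor's note on the record above: TestClearTextServer branches on MYSQL_FLAVOR because the MariaDB command-line client ships with the cleartext authentication plugin enabled, while the stock MySQL client requires it to be turned on explicitly. This is a small, self-contained sketch of that same check in isolation; the printed messages are illustrative only.

package main

import (
	"fmt"
	"os"
)

func main() {
	// Same flavor check the cleartext test performs before deciding whether
	// a cleartext login attempt is expected to succeed or fail.
	if os.Getenv("MYSQL_FLAVOR") == "MariaDB" {
		fmt.Println("MariaDB client: cleartext authentication allowed by default")
	} else {
		fmt.Println("MySQL client: cleartext plugin must be enabled explicitly")
	}
}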
main.go
// Copyright (c) 2019, Daniel Martí <[email protected]> // See LICENSE for licensing information package main import ( "context" "crypto/aes" "crypto/cipher" "crypto/hmac" "crypto/sha256" "encoding/json" "flag" "fmt" "io" "io/ioutil" "os" "os/signal" "path/filepath" "strings" "text/tabwriter" "time" "github.com/google/uuid" "github.com/knq/ini" "golang.org/x/crypto/hkdf" "golang.org/x/crypto/pbkdf2" "golang.org/x/crypto/ssh/terminal" ) var flagSet = flag.NewFlagSet("bitw", flag.ContinueOnError) func init() { flagSet.Usage = usage } func usage() { fmt.Fprintf(os.Stderr, ` Usage of bitw: bitw [command] Commands: help show a command's help text sync fetch the latest data from the server login force a new login, even if not necessary dump list all the stored login secrets serve start the org.freedesktop.secrets D-Bus service config print the current configuration `[1:]) flagSet.PrintDefaults() } func main() { os.Exit(main1(os.Stderr)) } func main1(stderr io.Writer) int { if err := flagSet.Parse(os.Args[1:]); err != nil { return 2 } args := flagSet.Args() if err := run(args...); err != nil { switch err { case context.Canceled: return 0 case flag.ErrHelp: return 2 } fmt.Fprintln(stderr, "error:", err) return 1 } return 0 } const ( deviceName = "firefox" deviceType = "3" // bitwarden's device type for FireFox loginScope = "api offline_access" ) // These can be overriden by the config. var ( apiURL = "https://api.bitwarden.com" idtURL = "https://identity.bitwarden.com" email = os.Getenv("EMAIL") // TODO: make these more secure password []byte key, macKey []byte ) func ensurePassword() error { if len(password) > 0 { return nil } if s := os.Getenv("PASSWORD"); s != "" { password = []byte(s) return nil } var err error password, err = prompt("Password") return err } // readLine is similar to terminal.ReadPassword, but it doesn't use key codes. func readLine(r io.Reader) ([]byte, error) { var buf [1]byte var line []byte for { n, err := r.Read(buf[:]) if n > 0 { switch buf[0] { case '\n', '\r': return line, nil default: line = append(line, buf[0]) } } else if err != nil { if err == io.EOF && len(line) > 0 { return line, nil } return nil, err } } } func prompt(line string) ([]byte, error) { // TODO: Support cancellation with ^C. Currently not possible in any // simple way. Closing os.Stdin on cancel doesn't seem to do the trick // either. Simply doing an os.Exit keeps the terminal broken because of // ReadPassword. 
fd := int(os.Stdin.Fd()) switch { case terminal.IsTerminal(fd): fmt.Printf("%s: ", line) password, err := terminal.ReadPassword(fd) fmt.Println() if err == nil && len(password) == 0 { err = io.ErrUnexpectedEOF } return password, err case os.Getenv("FORCE_STDIN_PROMPTS") == "true": return readLine(os.Stdin) default: return nil, fmt.Errorf("need a terminal to prompt for a password") } } var ( config *ini.File data dataFile saveData bool ) type dataFile struct { path string DeviceID string AccessToken string RefreshToken string TokenExpiry time.Time KDF int KDFIterations int LastSync time.Time Sync SyncData } func loadDataFile(path string) error { data.path = path f, err := os.Open(path) if os.IsNotExist(err) { return nil } else if err != nil { return err } defer f.Close() if err := json.NewDecoder(f).Decode(&data); err != nil { return err } return nil } func (f *dataFile) Save() error { bs, err := json.MarshalIndent(f, "", "\t") if err != nil { return err } bs = append(bs, '\n') if err := os.MkdirAll(filepath.Dir(f.path), 0755); err != nil { return err } return ioutil.WriteFile(f.path, bs, 0600) } func run(args ...string) (err error) { if len(args) == 0 { flagSet.Usage() return flag.ErrHelp } switch args[0] { case "help": // TODO: per-command help flagSet.Usage() return flag.ErrHelp } dir := os.Getenv("CONFIG_DIR") if dir == "" { if dir, err = os.UserConfigDir(); err != nil { return err } dir = filepath.Join(dir, "bitw") } config, err = ini.LoadFile(filepath.Join(dir, "config")) if err != nil { return err } for _, section := range config.AllSections() { if section.Name() != "" { return fmt.Errorf("sections are not used in config files yet") } for _, key := range section.Keys() { // note that these are lowercased switch key { case "email": email = section.Get(key) case "apiurl": apiURL = section.Get(key) case "identityurl": idtURL = section.Get(key) default: return fmt.Errorf("unknown config key: %q", key) } } } if err := loadDataFile(filepath.Join(dir, "data.json")); err != nil { return err } if args[0] == "config" { fmt.Printf("email = %q\n", email) fmt.Printf("apiURL = %q\n", apiURL) fmt.Printf("identityURL = %q\n", idtURL) return nil } defer func() { if !saveData { return } if err1 := data.Save(); err == nil { err = err1 } }() if data.DeviceID == "" { data.DeviceID = uuid.New().String() saveData = true } ctx := context.Background() ctx, cancel := context.WithCancel(ctx) c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) go func() { <-c cancel() }() ctx = context.WithValue(ctx, authToken{}, data.AccessToken) switch args[0] { case "login": if err := login(ctx); err != nil { return err } case "sync": if err := ensureToken(ctx); err != nil { return err } if err := sync(ctx); err != nil { return err } case "dump": w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', tabwriter.AlignRight) fmt.Fprintln(w, "name\turi\tusername\tpassword\t") for _, cipher := range data.Sync.Ciphers { for _, cipherStr := range [...]CipherString{ cipher.Name, cipher.Login.URI, cipher.Login.Username, cipher.Login.Password, } { s, err := decrypt(cipherStr) if err != nil { return err } fmt.Fprintf(w, "%s\t", s) } fmt.Fprintln(w) } w.Flush() case "serve": if err := serveDBus(ctx); err != nil { return err } default: fmt.Fprintf(os.Stderr, "unknown command: %q\n", args[0]) flagSet.Usage() return flag.ErrHelp } return nil } func ensureToken(ctx context.Context) error { if data.RefreshToken == "" { if err := login(ctx); err != nil { return err } } else if time.Now().After(data.TokenExpiry) { if err := 
refreshToken(ctx); err != nil { return err } } return nil } func ensureDecryptKey() error { if len(key) > 0 { return nil } if email == "" { // If the user specified $EMAIL just for the login, grab it from // the data file now. email = data.Sync.Profile.Email } if err := ensurePassword(); err != nil { return err } masterKey := pbkdf2.Key(password, []byte(strings.ToLower(email)), data.KDFIterations, 32, sha256.New) // We decrypt the decryption key from the synced data, using the key // resulting from stretching masterKey. The keys will be overwritten // once we decrypt the final ones. key, macKey = stretchKey(masterKey) s, err := decrypt(data.Sync.Profile.Key) if err != nil { return err } key, macKey = s[:32], s[32:64] return nil } func stretchKey(orig []byte) (key, macKey []byte) { key = make([]byte, 32) macKey = make([]byte, 32) var r io.Reader r = hkdf.Expand(sha256.New, orig, []byte("enc")) r.Read(key) r = hkdf.Expand(sha256.New, orig, []byte("mac")) r.Read(macKey) return key, macKey } func decryptStr(s CipherString) (string, error) { dec, err := decrypt(s) if err != nil { return "", err } return string(dec), nil } // TODO: turn this into a method func decrypt(s CipherString) ([]byte, error) { if s.Type == 0 { return nil, nil } if err := ensureDecryptKey(); err != nil { return nil, err } c, err := aes.NewCipher(key) if err != nil { return nil, err } switch s.Type { case 2: // AES-CBC-256, HMAC-SHA256, base-64; continues below default: return nil, fmt.Errorf("unsupported cipher type %q", s.Type) } if macKey != nil { var msg []byte msg = append(msg, s.IV...) msg = append(msg, s.CT...) if !validMAC(msg, s.MAC, macKey) { return nil, fmt.Errorf("MAC mismatch") } } decrypter := cipher.NewCBCDecrypter(c, s.IV) dst := make([]byte, len(s.CT)) decrypter.CryptBlocks(dst, s.CT) dst = unpad(dst) return dst, nil } func unpad(src []byte) []byte { n := src[len(src)-1] return src[:len(src)-int(n)] } func validMAC(message, messageMAC, key []byte) bool { mac := hmac.New(sha256.New, key) mac.Write(message) expectedMAC := mac.Sum(nil) return hmac.Equal(messageMAC, expectedMAC) }
[ "\"EMAIL\"", "\"PASSWORD\"", "\"FORCE_STDIN_PROMPTS\"", "\"CONFIG_DIR\"" ]
[]
[ "FORCE_STDIN_PROMPTS", "EMAIL", "PASSWORD", "CONFIG_DIR" ]
[]
["FORCE_STDIN_PROMPTS", "EMAIL", "PASSWORD", "CONFIG_DIR"]
go
4
0
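Editor's note on the record above: bitw's ensurePassword resolves the master password with a fixed precedence — a value already held in memory, then the PASSWORD environment variable, then an interactive terminal prompt (with FORCE_STDIN_PROMPTS=true allowing a plain stdin read). The sketch below condenses only that precedence; it deliberately omits the terminal handling, so the prompt step is represented by an error rather than a real prompt.

package main

import (
	"errors"
	"fmt"
	"os"
)

// resolvePassword mirrors the precedence used by ensurePassword above: an
// in-memory value wins, then the PASSWORD environment variable, and only
// then would an interactive prompt be attempted (stubbed out here).
func resolvePassword(current []byte) ([]byte, error) {
	if len(current) > 0 {
		return current, nil
	}
	if s := os.Getenv("PASSWORD"); s != "" {
		return []byte(s), nil
	}
	return nil, errors.New("no password in memory or environment; a terminal prompt would be needed")
}

func main() {
	pw, err := resolvePassword(nil)
	fmt.Printf("password bytes: %d, err: %v\n", len(pw), err)
}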
src/lambda_codebase/organization/main.py
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 """ The Organization main that is called when ADF is installed to create the organization if required """ from typing import Mapping, Any, Tuple, cast from dataclasses import dataclass, asdict import logging import os import json import boto3 from cfn_custom_resource import ( # pylint: disable=unused-import lambda_handler, create, update, delete, ) # Type aliases: Data = Mapping[str, str] PhysicalResourceId = str Created = bool OrganizationId = str OrganizationRootId = str CloudFormationResponse = Tuple[PhysicalResourceId, Data] # Globals: ORGANIZATION_CLIENT = boto3.client("organizations") LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.INFO) class InvalidPhysicalResourceId(Exception): pass @dataclass class PhysicalResource: organization_id: str created: bool organization_root_id: str @classmethod def from_json(cls, json_string: PhysicalResourceId) -> "PhysicalResource": try: return cls(**json.loads(json_string)) except json.JSONDecodeError as err: raise InvalidPhysicalResourceId from err def as_cfn_response(self) -> Tuple[PhysicalResourceId, Data]: physical_resource_id = json.dumps(asdict(self)) data = { "OrganizationId": self.organization_id, "OrganizationCreated": json.dumps(self.created), "OrganizationRootId": self.organization_root_id, } return physical_resource_id, data @create() def create_(_event: Mapping[str, Any], _context: Any) -> CloudFormationResponse: approved_regions = [ 'us-east-1', 'us-gov-west-1' ] region = os.getenv('AWS_REGION') if region not in approved_regions: raise Exception( "Deployment of ADF is only available via the us-east-1 " "and us-gov-west-1 regions." ) organization_id, created = ensure_organization() organization_root_id = get_organization_root_id() return PhysicalResource( organization_id, created, organization_root_id ).as_cfn_response() @update() def update_(_event: Mapping[str, Any], _context: Any) -> CloudFormationResponse: organization_id, created = ensure_organization() organization_root_id = get_organization_root_id() return PhysicalResource( organization_id, created, organization_root_id ).as_cfn_response() @delete() def delete_(event, _context): try: physical_resource = PhysicalResource.from_json(event["PhysicalResourceId"]) except InvalidPhysicalResourceId: raw_physical_resource = event["PhysicalResourceId"] LOGGER.info( "Unrecognized physical resource: %s. 
Assuming no delete necessary", raw_physical_resource ) return if physical_resource.created: try: ORGANIZATION_CLIENT.delete_organization() LOGGER.info("Deleted Organization") except ORGANIZATION_CLIENT.exceptions.OrganizationNotEmptyException: LOGGER.info("Organization not empty –– skipping delete") except ORGANIZATION_CLIENT.exceptions.AWSOrganizationsNotInUseException: LOGGER.info("Organization does not exist –– skipping delete") def ensure_organization() -> Tuple[OrganizationId, Created]: try: describe_organization = ORGANIZATION_CLIENT.describe_organization() except ORGANIZATION_CLIENT.exceptions.AWSOrganizationsNotInUseException: create_organization = ORGANIZATION_CLIENT.create_organization(FeatureSet="ALL") organization_id = create_organization["Organization"]["Id"] LOGGER.info("Organization created: %s", organization_id) return organization_id, True if describe_organization["Organization"]["FeatureSet"] != "ALL": raise Exception( "Existing organization is only set up for CONSOLIDATED_BILLING, " "but ADF needs ALL features" ) organization_id = describe_organization["Organization"]["Id"] LOGGER.info( "Organization exists (id: %s) and enabled for ALL features", organization_id ) return organization_id, False def get_organization_root_id() -> str: LOGGER.info("Determining ORG root id ...") params: dict = {} while True: roots = ORGANIZATION_CLIENT.list_roots(**params) if "Roots" in roots and roots["Roots"]: organization_root_id = roots["Roots"][0]["Id"] LOGGER.info("ORG root id is: %s", organization_root_id) return cast(str, organization_root_id) if not "NextToken" in roots: raise Exception("Unable to find ORG root id") params["next_token"] = roots["NextToken"]
[]
[]
[ "AWS_REGION" ]
[]
["AWS_REGION"]
python
1
0
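The custom resource above round-trips its state (organization id, whether it created the organization, and the root id) through the CloudFormation physical resource id as a JSON string. A minimal standalone sketch of that round trip, using only the standard library; the dataclass is restated here for illustration and the id values ("o-example123", "r-example") are placeholders, not real AWS identifiers:

import json
from dataclasses import dataclass, asdict

# Restated from the record above so the demo is self-contained.
@dataclass
class PhysicalResource:
    organization_id: str
    created: bool
    organization_root_id: str

# Serialize the resource state into the physical resource id, as as_cfn_response does.
state = PhysicalResource("o-example123", True, "r-example")
physical_resource_id = json.dumps(asdict(state))

# A later Delete event hands the same string back; from_json recovers the state.
recovered = PhysicalResource(**json.loads(physical_resource_id))
assert recovered == state
print(physical_resource_id)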
tracer.go
// Licensed to Elasticsearch B.V. under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. Elasticsearch B.V. licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package apm import ( "bytes" "compress/zlib" "context" "io" "log" "math/rand" "strings" "sync" "sync/atomic" "time" "github.com/Beeketing/apm-agent-go/apmconfig" "github.com/Beeketing/apm-agent-go/internal/apmlog" "github.com/Beeketing/apm-agent-go/internal/configutil" "github.com/Beeketing/apm-agent-go/internal/iochan" "github.com/Beeketing/apm-agent-go/internal/ringbuffer" "github.com/Beeketing/apm-agent-go/internal/wildcard" "github.com/Beeketing/apm-agent-go/model" "github.com/Beeketing/apm-agent-go/stacktrace" "github.com/Beeketing/apm-agent-go/transport" "go.elastic.co/fastjson" ) const ( defaultPreContext = 3 defaultPostContext = 3 gracePeriodJitter = 0.1 // +/- 10% tracerEventChannelCap = 1000 ) var ( // DefaultTracer is the default global Tracer, set at package // initialization time, configured via environment variables. // // This will always be initialized to a non-nil value. If any // of the environment variables are invalid, the corresponding // errors will be logged to stderr and the default values will // be used instead. DefaultTracer *Tracer ) func init() { var opts TracerOptions opts.initDefaults(true) DefaultTracer = newTracer(opts) } // TracerOptions holds initial tracer options, for passing to NewTracerOptions. type TracerOptions struct { // ServiceName holds the service name. // // If ServiceName is empty, the service name will be defined using the // ELASTIC_APM_SERVICE_NAME environment variable, or if that is not set, // the executable name. ServiceName string // ServiceVersion holds the service version. // // If ServiceVersion is empty, the service version will be defined using // the ELASTIC_APM_SERVICE_VERSION environment variable. ServiceVersion string // ServiceEnvironment holds the service environment. // // If ServiceEnvironment is empty, the service environment will be defined // using the ELASTIC_APM_ENVIRONMENT environment variable. ServiceEnvironment string // Transport holds the transport to use for sending events. // // If Transport is nil, transport.Default will be used. // // If Transport implements apmconfig.Watcher, the tracer will begin watching // for remote changes immediately. This behaviour can be disabled by setting // the environment variable ELASTIC_APM_CENTRAL_CONFIG=false. Transport transport.Transport requestDuration time.Duration metricsInterval time.Duration maxSpans int requestSize int bufferSize int metricsBufferSize int sampler Sampler sanitizedFieldNames wildcard.Matchers disabledMetrics wildcard.Matchers captureHeaders bool captureBody CaptureBodyMode spanFramesMinDuration time.Duration stackTraceLimit int active bool configWatcher apmconfig.Watcher breakdownMetrics bool } // initDefaults updates opts with default values. 
func (opts *TracerOptions) initDefaults(continueOnError bool) error { var errs []error failed := func(err error) bool { if err == nil { return false } errs = append(errs, err) return true } requestDuration, err := initialRequestDuration() if failed(err) { requestDuration = defaultAPIRequestTime } metricsInterval, err := initialMetricsInterval() if err != nil { metricsInterval = defaultMetricsInterval errs = append(errs, err) } requestSize, err := initialAPIRequestSize() if err != nil { requestSize = int(defaultAPIRequestSize) errs = append(errs, err) } bufferSize, err := initialAPIBufferSize() if err != nil { bufferSize = int(defaultAPIBufferSize) errs = append(errs, err) } metricsBufferSize, err := initialMetricsBufferSize() if err != nil { metricsBufferSize = int(defaultMetricsBufferSize) errs = append(errs, err) } maxSpans, err := initialMaxSpans() if failed(err) { maxSpans = defaultMaxSpans } sampler, err := initialSampler() if failed(err) { sampler = nil } captureHeaders, err := initialCaptureHeaders() if failed(err) { captureHeaders = defaultCaptureHeaders } captureBody, err := initialCaptureBody() if failed(err) { captureBody = CaptureBodyOff } spanFramesMinDuration, err := initialSpanFramesMinDuration() if failed(err) { spanFramesMinDuration = defaultSpanFramesMinDuration } stackTraceLimit, err := initialStackTraceLimit() if failed(err) { stackTraceLimit = defaultStackTraceLimit } active, err := initialActive() if failed(err) { active = true } centralConfigEnabled, err := initialCentralConfigEnabled() if failed(err) { centralConfigEnabled = true } breakdownMetricsEnabled, err := initialBreakdownMetricsEnabled() if failed(err) { breakdownMetricsEnabled = true } if opts.ServiceName != "" { err := validateServiceName(opts.ServiceName) if failed(err) { opts.ServiceName = "" } } if len(errs) != 0 && !continueOnError { return errs[0] } for _, err := range errs { log.Printf("[apm]: %s", err) } opts.requestDuration = requestDuration opts.metricsInterval = metricsInterval opts.requestSize = requestSize opts.bufferSize = bufferSize opts.metricsBufferSize = metricsBufferSize opts.maxSpans = maxSpans opts.sampler = sampler opts.sanitizedFieldNames = initialSanitizedFieldNames() opts.disabledMetrics = initialDisabledMetrics() opts.breakdownMetrics = breakdownMetricsEnabled opts.captureHeaders = captureHeaders opts.captureBody = captureBody opts.spanFramesMinDuration = spanFramesMinDuration opts.stackTraceLimit = stackTraceLimit opts.active = active if opts.Transport == nil { opts.Transport = transport.Default } if centralConfigEnabled { if cw, ok := opts.Transport.(apmconfig.Watcher); ok { opts.configWatcher = cw } } serviceName, serviceVersion, serviceEnvironment := initialService() if opts.ServiceName == "" { opts.ServiceName = serviceName } if opts.ServiceVersion == "" { opts.ServiceVersion = serviceVersion } if opts.ServiceEnvironment == "" { opts.ServiceEnvironment = serviceEnvironment } return nil } // Tracer manages the sampling and sending of transactions to // Elastic APM. // // Transactions are buffered until they are flushed (forcibly // with a Flush call, or when the flush timer expires), or when // the maximum transaction queue size is reached. Failure to // send will be periodically retried. Once the queue limit has // been reached, new transactions will replace older ones in // the queue. // // Errors are sent as soon as possible, but will buffered and // later sent in bulk if the tracer is busy, or otherwise cannot // send to the server, e.g. due to network failure. 
There is // a limit to the number of errors that will be buffered, and // once that limit has been reached, new errors will be dropped // until the queue is drained. // // The exported fields be altered or replaced any time up until // any Tracer methods have been invoked. type Tracer struct { Transport transport.Transport Service struct { Name string Version string Environment string } process *model.Process system *model.System active int32 bufferSize int metricsBufferSize int closing chan struct{} closed chan struct{} forceFlush chan chan<- struct{} forceSendMetrics chan chan<- struct{} configCommands chan tracerConfigCommand configWatcher chan apmconfig.Watcher events chan tracerEvent breakdownMetrics *breakdownMetrics statsMu sync.Mutex stats TracerStats maxSpansMu sync.RWMutex maxSpans int spanFramesMinDurationMu sync.RWMutex spanFramesMinDuration time.Duration stackTraceLimitMu sync.RWMutex stackTraceLimit int samplerMu sync.RWMutex sampler Sampler // localSampler holds the most recently locally-configured Sampler, // which is what sampler will be set to when a centrally defined // sampler is removed. localSampler Sampler // remoteSampler records whether sampler is defined by the remote // config watcher. remoteSampler bool captureHeadersMu sync.RWMutex captureHeaders bool captureBodyMu sync.RWMutex captureBody CaptureBodyMode errorDataPool sync.Pool spanDataPool sync.Pool transactionDataPool sync.Pool } // NewTracer returns a new Tracer, using the default transport, // and with the specified service name and version if specified. // This is equivalent to calling NewTracerOptions with a // TracerOptions having ServiceName and ServiceVersion set to // the provided arguments. func NewTracer(serviceName, serviceVersion string) (*Tracer, error) { return NewTracerOptions(TracerOptions{ ServiceName: serviceName, ServiceVersion: serviceVersion, }) } // NewTracerOptions returns a new Tracer using the provided options. // See TracerOptions for details on the options, and their default // values. 
func NewTracerOptions(opts TracerOptions) (*Tracer, error) { if err := opts.initDefaults(false); err != nil { return nil, err } return newTracer(opts), nil } func newTracer(opts TracerOptions) *Tracer { t := &Tracer{ Transport: opts.Transport, process: &currentProcess, system: &localSystem, closing: make(chan struct{}), closed: make(chan struct{}), forceFlush: make(chan chan<- struct{}), forceSendMetrics: make(chan chan<- struct{}), configCommands: make(chan tracerConfigCommand), configWatcher: make(chan apmconfig.Watcher), events: make(chan tracerEvent, tracerEventChannelCap), active: 1, breakdownMetrics: newBreakdownMetrics(), maxSpans: opts.maxSpans, sampler: opts.sampler, localSampler: opts.sampler, captureHeaders: opts.captureHeaders, captureBody: opts.captureBody, spanFramesMinDuration: opts.spanFramesMinDuration, stackTraceLimit: opts.stackTraceLimit, bufferSize: opts.bufferSize, metricsBufferSize: opts.metricsBufferSize, } t.Service.Name = opts.ServiceName t.Service.Version = opts.ServiceVersion t.Service.Environment = opts.ServiceEnvironment t.breakdownMetrics.enabled = opts.breakdownMetrics if !opts.active { t.active = 0 close(t.closed) return t } go t.loop() t.configCommands <- func(cfg *tracerConfig) { cfg.metricsInterval = opts.metricsInterval cfg.requestDuration = opts.requestDuration cfg.requestSize = opts.requestSize cfg.sanitizedFieldNames = opts.sanitizedFieldNames cfg.disabledMetrics = opts.disabledMetrics cfg.preContext = defaultPreContext cfg.postContext = defaultPostContext cfg.metricsGatherers = []MetricsGatherer{newBuiltinMetricsGatherer(t)} if apmlog.DefaultLogger != nil { cfg.logger = apmlog.DefaultLogger } } if opts.configWatcher != nil { t.configWatcher <- opts.configWatcher } return t } // tracerConfig holds the tracer's runtime configuration, which may be modified // by sending a tracerConfigCommand to the tracer's configCommands channel. type tracerConfig struct { requestSize int requestDuration time.Duration metricsInterval time.Duration logger WarningLogger metricsGatherers []MetricsGatherer contextSetter stacktrace.ContextSetter preContext, postContext int sanitizedFieldNames wildcard.Matchers disabledMetrics wildcard.Matchers } type tracerConfigCommand func(*tracerConfig) // Close closes the Tracer, preventing transactions from being // sent to the APM server. func (t *Tracer) Close() { select { case <-t.closing: default: close(t.closing) } <-t.closed } // Flush waits for the Tracer to flush any transactions and errors it currently // has queued to the APM server, the tracer is stopped, or the abort channel // is signaled. func (t *Tracer) Flush(abort <-chan struct{}) { flushed := make(chan struct{}, 1) select { case t.forceFlush <- flushed: select { case <-abort: case <-flushed: case <-t.closed: } case <-t.closed: } } // Active reports whether the tracer is active. If the tracer is inactive, // no transactions or errors will be sent to the Elastic APM server. func (t *Tracer) Active() bool { return atomic.LoadInt32(&t.active) == 1 } // SetRequestDuration sets the maximum amount of time to keep a request open // to the APM server for streaming data before closing the stream and starting // a new request. func (t *Tracer) SetRequestDuration(d time.Duration) { t.sendConfigCommand(func(cfg *tracerConfig) { cfg.requestDuration = d }) } // SetMetricsInterval sets the metrics interval -- the amount of time in // between metrics samples being gathered. 
func (t *Tracer) SetMetricsInterval(d time.Duration) { t.sendConfigCommand(func(cfg *tracerConfig) { cfg.metricsInterval = d }) } // SetContextSetter sets the stacktrace.ContextSetter to be used for // setting stacktrace source context. If nil (which is the initial // value), no context will be set. func (t *Tracer) SetContextSetter(setter stacktrace.ContextSetter) { t.sendConfigCommand(func(cfg *tracerConfig) { cfg.contextSetter = setter }) } // SetLogger sets the Logger to be used for logging the operation of // the tracer. // // If logger implements WarningLogger, its Warningf method will be used // for logging warnings. Otherwise, warnings will logged using Debugf. // // The tracer is initialized with a default logger configured with the // environment variables ELASTIC_APM_LOG_FILE and ELASTIC_APM_LOG_LEVEL. // Calling SetLogger will replace the default logger. func (t *Tracer) SetLogger(logger Logger) { t.sendConfigCommand(func(cfg *tracerConfig) { cfg.logger = makeWarningLogger(logger) }) } // SetSanitizedFieldNames sets the wildcard patterns that will be used to // match cookie and form field names for sanitization. Fields matching any // of the the supplied patterns will have their values redacted. If // SetSanitizedFieldNames is called with no arguments, then no fields // will be redacted. func (t *Tracer) SetSanitizedFieldNames(patterns ...string) error { var matchers wildcard.Matchers if len(patterns) != 0 { matchers = make(wildcard.Matchers, len(patterns)) for i, p := range patterns { matchers[i] = configutil.ParseWildcardPattern(p) } } t.sendConfigCommand(func(cfg *tracerConfig) { cfg.sanitizedFieldNames = matchers }) return nil } // RegisterMetricsGatherer registers g for periodic (or forced) metrics // gathering by t. // // RegisterMetricsGatherer returns a function which will deregister g. // It may safely be called multiple times. func (t *Tracer) RegisterMetricsGatherer(g MetricsGatherer) func() { // Wrap g in a pointer-to-struct, so we can safely compare. wrapped := &struct{ MetricsGatherer }{MetricsGatherer: g} t.sendConfigCommand(func(cfg *tracerConfig) { cfg.metricsGatherers = append(cfg.metricsGatherers, wrapped) }) deregister := func(cfg *tracerConfig) { for i, g := range cfg.metricsGatherers { if g != wrapped { continue } cfg.metricsGatherers = append(cfg.metricsGatherers[:i], cfg.metricsGatherers[i+1:]...) } } var once sync.Once return func() { once.Do(func() { t.sendConfigCommand(deregister) }) } } // SetConfigWatcher sets w as the config watcher. // // By default, the tracer will be configured to use the transport for // watching config, if the transport implements apmconfig.Watcher. This // can be overridden by calling SetConfigWatcher. // // If w is nil, config watching will be stopped. // // Calling SetConfigWatcher will discard any previously observed remote // config, reverting to local config until a config change from w is // observed. func (t *Tracer) SetConfigWatcher(w apmconfig.Watcher) { select { case t.configWatcher <- w: case <-t.closing: case <-t.closed: } } func (t *Tracer) sendConfigCommand(cmd tracerConfigCommand) { select { case t.configCommands <- cmd: case <-t.closing: case <-t.closed: } } // SetSampler sets the sampler the tracer. // // It is valid to pass nil, in which case all transactions will be sampled. // // Configuration via Kibana takes precedence over local configuration, so // if sampling has been configured via Kibana, this call will not have any // effect until/unless that configuration has been removed. 
func (t *Tracer) SetSampler(s Sampler) { t.samplerMu.Lock() t.localSampler = s if !t.remoteSampler { t.sampler = s } t.samplerMu.Unlock() } // SetMaxSpans sets the maximum number of spans that will be added // to a transaction before dropping spans. If set to a non-positive // value, the number of spans is unlimited. func (t *Tracer) SetMaxSpans(n int) { t.maxSpansMu.Lock() t.maxSpans = n t.maxSpansMu.Unlock() } // SetSpanFramesMinDuration sets the minimum duration for a span after which // we will capture its stack frames. func (t *Tracer) SetSpanFramesMinDuration(d time.Duration) { t.spanFramesMinDurationMu.Lock() t.spanFramesMinDuration = d t.spanFramesMinDurationMu.Unlock() } // SetStackTraceLimit sets the the maximum number of stack frames to collect // for each stack trace. If limit is negative, then all frames will be collected. func (t *Tracer) SetStackTraceLimit(limit int) { t.stackTraceLimitMu.Lock() t.stackTraceLimit = limit t.stackTraceLimitMu.Unlock() } // SetCaptureHeaders enables or disables capturing of HTTP headers. func (t *Tracer) SetCaptureHeaders(capture bool) { t.captureHeadersMu.Lock() t.captureHeaders = capture t.captureHeadersMu.Unlock() } // SetCaptureBody sets the HTTP request body capture mode. func (t *Tracer) SetCaptureBody(mode CaptureBodyMode) { t.captureBodyMu.Lock() t.captureBody = mode t.captureBodyMu.Unlock() } // SendMetrics forces the tracer to gather and send metrics immediately, // blocking until the metrics have been sent or the abort channel is // signalled. func (t *Tracer) SendMetrics(abort <-chan struct{}) { sent := make(chan struct{}, 1) select { case t.forceSendMetrics <- sent: select { case <-abort: case <-sent: case <-t.closed: } case <-t.closed: } } // Stats returns the current TracerStats. This will return the most // recent values even after the tracer has been closed. func (t *Tracer) Stats() TracerStats { t.statsMu.Lock() stats := t.stats t.statsMu.Unlock() return stats } func (t *Tracer) loop() { ctx, cancelContext := context.WithCancel(context.Background()) defer cancelContext() defer close(t.closed) defer atomic.StoreInt32(&t.active, 0) var req iochan.ReadRequest var requestBuf bytes.Buffer var metadata []byte var gracePeriod time.Duration = -1 var flushed chan<- struct{} var requestBufTransactions, requestBufSpans, requestBufErrors, requestBufMetricsets uint64 zlibWriter, _ := zlib.NewWriterLevel(&requestBuf, zlib.BestSpeed) zlibFlushed := true zlibClosed := false iochanReader := iochan.NewReader() requestBytesRead := 0 requestActive := false closeRequest := false flushRequest := false requestResult := make(chan error, 1) requestTimer := time.NewTimer(0) requestTimerActive := false if !requestTimer.Stop() { <-requestTimer.C } // Run another goroutine to perform the blocking requests, // communicating with the tracer loop to obtain stream data. 
sendStreamRequest := make(chan time.Duration) defer close(sendStreamRequest) go func() { jitterRand := rand.New(rand.NewSource(time.Now().UnixNano())) for gracePeriod := range sendStreamRequest { if gracePeriod > 0 { select { case <-time.After(jitterDuration(gracePeriod, jitterRand, gracePeriodJitter)): case <-ctx.Done(): } } requestResult <- t.Transport.SendStream(ctx, iochanReader) } }() var breakdownMetricsLimitWarningLogged bool var stats TracerStats var metrics Metrics var sentMetrics chan<- struct{} var gatheringMetrics bool var metricsTimerStart time.Time metricsBuffer := ringbuffer.New(t.metricsBufferSize) gatheredMetrics := make(chan struct{}, 1) metricsTimer := time.NewTimer(0) if !metricsTimer.Stop() { <-metricsTimer.C } var lastConfigChange map[string]string var configChanges <-chan apmconfig.Change var stopConfigWatcher func() defer func() { if stopConfigWatcher != nil { stopConfigWatcher() } }() var cfg tracerConfig buffer := ringbuffer.New(t.bufferSize) buffer.Evicted = func(h ringbuffer.BlockHeader) { switch h.Tag { case errorBlockTag: stats.ErrorsDropped++ case spanBlockTag: stats.SpansDropped++ case transactionBlockTag: stats.TransactionsDropped++ } } modelWriter := modelWriter{ buffer: buffer, metricsBuffer: metricsBuffer, cfg: &cfg, stats: &stats, } for { var gatherMetrics bool select { case <-t.closing: cancelContext() // informs transport that EOF is expected iochanReader.CloseRead(io.EOF) return case cmd := <-t.configCommands: oldMetricsInterval := cfg.metricsInterval cmd(&cfg) if !gatheringMetrics && cfg.metricsInterval != oldMetricsInterval { if metricsTimerStart.IsZero() { if cfg.metricsInterval > 0 { metricsTimer.Reset(cfg.metricsInterval) metricsTimerStart = time.Now() } } else { if cfg.metricsInterval <= 0 { metricsTimerStart = time.Time{} if !metricsTimer.Stop() { <-metricsTimer.C } } else { alreadyPassed := time.Since(metricsTimerStart) if alreadyPassed >= cfg.metricsInterval { metricsTimer.Reset(0) } else { metricsTimer.Reset(cfg.metricsInterval - alreadyPassed) } } } } continue case cw := <-t.configWatcher: if configChanges != nil { stopConfigWatcher() t.updateConfig(&cfg, lastConfigChange, nil) lastConfigChange = nil configChanges = nil } if cw == nil { continue } var configWatcherContext context.Context var watchParams apmconfig.WatchParams watchParams.Service.Name = t.Service.Name watchParams.Service.Environment = t.Service.Environment configWatcherContext, stopConfigWatcher = context.WithCancel(ctx) configChanges = cw.WatchConfig(configWatcherContext, watchParams) // Silence go vet's "possible context leak" false positive. // We call a previous stopConfigWatcher before reassigning // the variable, and we have a defer at the top level of the // loop method that will call the final stopConfigWatcher // value on method exit. 
_ = stopConfigWatcher continue case change, ok := <-configChanges: if !ok { configChanges = nil continue } if change.Err != nil { if cfg.logger != nil { cfg.logger.Errorf("config request failed: %s", change.Err) } } else { t.updateConfig(&cfg, lastConfigChange, change.Attrs) lastConfigChange = change.Attrs } continue case event := <-t.events: switch event.eventType { case transactionEvent: if !t.breakdownMetrics.recordTransaction(event.tx.TransactionData) { if !breakdownMetricsLimitWarningLogged && cfg.logger != nil { cfg.logger.Warningf("%s", breakdownMetricsLimitWarning) breakdownMetricsLimitWarningLogged = true } } modelWriter.writeTransaction(event.tx.Transaction, event.tx.TransactionData) case spanEvent: modelWriter.writeSpan(event.span.Span, event.span.SpanData) case errorEvent: modelWriter.writeError(event.err) // Flush the buffer to transmit the error immediately. flushRequest = true } case <-requestTimer.C: requestTimerActive = false closeRequest = true case <-metricsTimer.C: metricsTimerStart = time.Time{} gatherMetrics = !gatheringMetrics case sentMetrics = <-t.forceSendMetrics: if !metricsTimerStart.IsZero() { if !metricsTimer.Stop() { <-metricsTimer.C } metricsTimerStart = time.Time{} } gatherMetrics = !gatheringMetrics case <-gatheredMetrics: modelWriter.writeMetrics(&metrics) gatheringMetrics = false flushRequest = true if cfg.metricsInterval > 0 { metricsTimerStart = time.Now() metricsTimer.Reset(cfg.metricsInterval) } case flushed = <-t.forceFlush: // Drain any objects buffered in the channels. for n := len(t.events); n > 0; n-- { event := <-t.events switch event.eventType { case transactionEvent: if !t.breakdownMetrics.recordTransaction(event.tx.TransactionData) { if !breakdownMetricsLimitWarningLogged && cfg.logger != nil { cfg.logger.Warningf("%s", breakdownMetricsLimitWarning) breakdownMetricsLimitWarningLogged = true } } modelWriter.writeTransaction(event.tx.Transaction, event.tx.TransactionData) case spanEvent: modelWriter.writeSpan(event.span.Span, event.span.SpanData) case errorEvent: modelWriter.writeError(event.err) } } if !requestActive && buffer.Len() == 0 && metricsBuffer.Len() == 0 { flushed <- struct{}{} continue } closeRequest = true case req = <-iochanReader.C: case err := <-requestResult: if err != nil { stats.Errors.SendStream++ gracePeriod = nextGracePeriod(gracePeriod) if cfg.logger != nil { logf := cfg.logger.Debugf if err, ok := err.(*transport.HTTPError); ok && err.Response.StatusCode == 404 { // 404 typically means the server is too old, meaning // the error is due to a misconfigured environment. logf = cfg.logger.Errorf } logf("request failed: %s (next request in ~%s)", err, gracePeriod) } } else { gracePeriod = -1 // Reset grace period after success. 
stats.TransactionsSent += requestBufTransactions stats.SpansSent += requestBufSpans stats.ErrorsSent += requestBufErrors if cfg.logger != nil { s := func(n uint64) string { if n != 1 { return "s" } return "" } cfg.logger.Debugf( "sent request with %d transaction%s, %d span%s, %d error%s, %d metricset%s", requestBufTransactions, s(requestBufTransactions), requestBufSpans, s(requestBufSpans), requestBufErrors, s(requestBufErrors), requestBufMetricsets, s(requestBufMetricsets), ) } } if !stats.isZero() { t.statsMu.Lock() t.stats.accumulate(stats) t.statsMu.Unlock() stats = TracerStats{} } if sentMetrics != nil && requestBufMetricsets > 0 { sentMetrics <- struct{}{} sentMetrics = nil } if flushed != nil { flushed <- struct{}{} flushed = nil } if req.Buf != nil { // req will be canceled by CloseRead below. req.Buf = nil } iochanReader.CloseRead(io.EOF) iochanReader = iochan.NewReader() flushRequest = false closeRequest = false requestActive = false requestBytesRead = 0 requestBuf.Reset() requestBufTransactions = 0 requestBufSpans = 0 requestBufErrors = 0 requestBufMetricsets = 0 if requestTimerActive { if !requestTimer.Stop() { <-requestTimer.C } requestTimerActive = false } } if !stats.isZero() { t.statsMu.Lock() t.stats.accumulate(stats) t.statsMu.Unlock() stats = TracerStats{} } if gatherMetrics { gatheringMetrics = true metrics.disabled = cfg.disabledMetrics t.gatherMetrics(ctx, cfg.metricsGatherers, &metrics, cfg.logger, gatheredMetrics) if cfg.logger != nil { cfg.logger.Debugf("gathering metrics") } } if !requestActive { if buffer.Len() == 0 && metricsBuffer.Len() == 0 { continue } sendStreamRequest <- gracePeriod if metadata == nil { metadata = t.jsonRequestMetadata() } zlibWriter.Reset(&requestBuf) zlibWriter.Write(metadata) zlibFlushed = false zlibClosed = false requestActive = true requestTimer.Reset(cfg.requestDuration) requestTimerActive = true } if !closeRequest || !zlibClosed { for requestBytesRead+requestBuf.Len() < cfg.requestSize { if metricsBuffer.Len() > 0 { if _, _, err := metricsBuffer.WriteBlockTo(zlibWriter); err == nil { requestBufMetricsets++ zlibWriter.Write([]byte("\n")) zlibFlushed = false if sentMetrics != nil { // SendMetrics was called: close the request // off so we can inform the user when the // metrics have been processed. closeRequest = true } } continue } if buffer.Len() == 0 { break } if h, _, err := buffer.WriteBlockTo(zlibWriter); err == nil { switch h.Tag { case transactionBlockTag: requestBufTransactions++ case spanBlockTag: requestBufSpans++ case errorBlockTag: requestBufErrors++ } zlibWriter.Write([]byte("\n")) zlibFlushed = false } } if !closeRequest { closeRequest = requestBytesRead+requestBuf.Len() >= cfg.requestSize } } if closeRequest { if !zlibClosed { zlibWriter.Close() zlibClosed = true } } else if flushRequest && !zlibFlushed { zlibWriter.Flush() flushRequest = false zlibFlushed = true } if req.Buf == nil || requestBuf.Len() == 0 { continue } const zlibHeaderLen = 2 if requestBytesRead+requestBuf.Len() > zlibHeaderLen { n, err := requestBuf.Read(req.Buf) if closeRequest && err == nil && requestBuf.Len() == 0 { err = io.EOF } req.Respond(n, err) req.Buf = nil if n > 0 { requestBytesRead += n } } } } // jsonRequestMetadata returns a JSON-encoded metadata object that features // at the head of every request body. This is called exactly once, when the // first request is made. 
func (t *Tracer) jsonRequestMetadata() []byte { var json fastjson.Writer service := makeService(t.Service.Name, t.Service.Version, t.Service.Environment) json.RawString(`{"metadata":{`) json.RawString(`"system":`) t.system.MarshalFastJSON(&json) json.RawString(`,"process":`) t.process.MarshalFastJSON(&json) json.RawString(`,"service":`) service.MarshalFastJSON(&json) if len(globalLabels) > 0 { json.RawString(`,"labels":`) globalLabels.MarshalFastJSON(&json) } json.RawString("}}\n") return json.Bytes() } // gatherMetrics gathers metrics from each of the registered // metrics gatherers. Once all gatherers have returned, a value // will be sent on the "gathered" channel. func (t *Tracer) gatherMetrics(ctx context.Context, gatherers []MetricsGatherer, m *Metrics, l Logger, gathered chan<- struct{}) { timestamp := model.Time(time.Now().UTC()) var group sync.WaitGroup for _, g := range gatherers { group.Add(1) go func(g MetricsGatherer) { defer group.Done() gatherMetrics(ctx, g, m, l) }(g) } go func() { group.Wait() for _, m := range m.transactionGroupMetrics { m.Timestamp = timestamp } for _, m := range m.metrics { m.Timestamp = timestamp } gathered <- struct{}{} }() } // updateConfig updates t and cfg with changes held in "attrs", and reverts // to local config for config attributes that have been removed (exist in old // but not in attrs). // // On return from updateConfig, unapplied config will have been removed from attrs. func (t *Tracer) updateConfig(cfg *tracerConfig, old, attrs map[string]string) { warningf := func(string, ...interface{}) {} debugf := func(string, ...interface{}) {} errorf := func(string, ...interface{}) {} if cfg.logger != nil { warningf = cfg.logger.Warningf debugf = cfg.logger.Debugf errorf = cfg.logger.Errorf } envName := func(k string) string { return "ELASTIC_APM_" + strings.ToUpper(k) } for k, v := range attrs { if oldv, ok := old[k]; ok && oldv == v { continue } switch envName(k) { case envTransactionSampleRate: sampler, err := parseSampleRate(k, v) if err != nil { errorf("central config failure: %s", err) delete(attrs, k) continue } else { t.samplerMu.Lock() t.sampler = sampler t.remoteSampler = true t.samplerMu.Unlock() } default: warningf("central config failure: unsupported config: %s", k) delete(attrs, k) continue } debugf("central config update: updated %s to %s", k, v) } for k := range old { if _, ok := attrs[k]; ok { continue } switch envName(k) { case envTransactionSampleRate: t.samplerMu.Lock() t.sampler = t.localSampler t.remoteSampler = false t.samplerMu.Unlock() default: continue } debugf("central config update: reverted %s to local config", k) } } type tracerEventType int const ( transactionEvent tracerEventType = iota spanEvent errorEvent ) type tracerEvent struct { eventType tracerEventType // err is set only if eventType == errorEvent. err *ErrorData // tx is set only if eventType == transactionEvent. tx struct { *Transaction // Transaction.TransactionData is nil at the // point tracerEvent is created (to signify // that the transaction is ended), so we pass // it along side. *TransactionData } // span is set only if eventType == spanEvent. span struct { *Span // Span.SpanData is nil at the point tracerEvent // is created (to signify that the span is ended), // so we pass it along side. *SpanData } }
[]
[]
[]
[]
[]
go
null
null
null
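The tracer above exposes a small construction and lifecycle API (NewTracer/NewTracerOptions, the runtime setters, Flush, Close). A minimal usage sketch using only calls that appear in the record; the service name, version, and max-spans value are arbitrary placeholders, and the root import path is assumed from the subpackage paths the file imports:

package main

import (
    apm "github.com/Beeketing/apm-agent-go"
)

func main() {
    // Construct a tracer for a named service; anything not set here falls
    // back to the ELASTIC_APM_* environment variables read by initDefaults.
    tracer, err := apm.NewTracer("example-service", "1.0.0")
    if err != nil {
        panic(err)
    }
    defer tracer.Close()

    // Runtime knobs shown in the record can be adjusted after construction.
    tracer.SetMaxSpans(100)

    // ... instrument application code here ...

    // Block until buffered events have been sent, or the tracer is closed.
    tracer.Flush(nil)
}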
prysmgrpc/targetaggregatorspercommittee_test.go
// Copyright © 2020 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prysmgrpc_test

import (
    "context"
    "os"
    "testing"

    "github.com/attestantio/go-eth2-client/prysmgrpc"
    "github.com/stretchr/testify/require"
)

func TestTargetAggregatorsPerCommittee(t *testing.T) {
    tests := []struct {
        name string
    }{
        {
            name: "Good",
        },
    }

    service, err := prysmgrpc.New(context.Background(),
        prysmgrpc.WithAddress(os.Getenv("PRYSMGRPC_ADDRESS")),
        prysmgrpc.WithTimeout(timeout),
    )
    require.NoError(t, err)

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            targetAggregatorsPerCommittee, err := service.TargetAggregatorsPerCommittee(context.Background())
            require.NoError(t, err)
            require.NotNil(t, targetAggregatorsPerCommittee)
        })
    }
}
[ "\"PRYSMGRPC_ADDRESS\"" ]
[]
[ "PRYSMGRPC_ADDRESS" ]
[]
["PRYSMGRPC_ADDRESS"]
go
1
0
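Outside the test harness, the same client can be constructed and queried directly. A sketch using only the calls exercised in the test above; the node address is a placeholder, and WithTimeout is assumed to take a time.Duration (the test passes a package-level timeout value that is not shown here):

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/attestantio/go-eth2-client/prysmgrpc"
)

func main() {
    ctx := context.Background()

    // Connect to a Prysm node over gRPC; the address is a placeholder.
    service, err := prysmgrpc.New(ctx,
        prysmgrpc.WithAddress("localhost:4000"),
        prysmgrpc.WithTimeout(2*time.Minute),
    )
    if err != nil {
        panic(err)
    }

    // Fetch the spec value the test above asserts on.
    targetAggregators, err := service.TargetAggregatorsPerCommittee(ctx)
    if err != nil {
        panic(err)
    }
    fmt.Println(targetAggregators)
}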
vendor/github.com/glycerine/zygomys/zygo/system.go
package zygo

import (
    "fmt"
    "os"
    "os/exec"
    "runtime"
    "strings"
)

var ShellCmd string = "/bin/bash"

func init() {
    SetShellCmd()
}

// set ShellCmd as used by SystemFunction
func SetShellCmd() {
    if runtime.GOOS == "windows" {
        ShellCmd = os.Getenv("COMSPEC")
        return
    }
    try := []string{"/usr/bin/bash"}
    if !FileExists(ShellCmd) {
        for i := range try {
            b := try[i]
            if FileExists(b) {
                ShellCmd = b
                return
            }
        }
    }
}

// sys is a builder. shell out, return the combined output.
func SystemBuilder(env *Zlisp, name string, args []Sexp) (Sexp, error) {
    //P("SystemBuilder called with args='%#v'", args)
    return SystemFunction(env, name, args)
}

func SystemFunction(env *Zlisp, name string, args []Sexp) (Sexp, error) {
    if len(args) == 0 {
        return SexpNull, WrongNargs
    }

    flat, err := flattenToWordsHelper(args)
    if err != nil {
        return SexpNull, fmt.Errorf("flatten on '%#v' failed with error '%s'", args, err)
    }
    if len(flat) == 0 {
        return SexpNull, WrongNargs
    }
    joined := strings.Join(flat, " ")

    cmd := ShellCmd
    var out []byte
    if runtime.GOOS == "windows" {
        out, err = exec.Command(cmd, "/c", joined).CombinedOutput()
    } else {
        out, err = exec.Command(cmd, "-c", joined).CombinedOutput()
    }
    if err != nil {
        return SexpNull, fmt.Errorf("error from command: '%s'. Output:'%s'", err, string(Chomp(out)))
    }

    return &SexpStr{S: string(Chomp(out))}, nil
}

// given strings/lists of strings with possible whitespace
// flatten out to a array of SexpStr with no internal whitespace,
// suitable for passing along to (system) / exec.Command()
func FlattenToWordsFunction(env *Zlisp, name string, args []Sexp) (Sexp, error) {
    if len(args) == 0 {
        return SexpNull, WrongNargs
    }
    stringArgs, err := flattenToWordsHelper(args)
    if err != nil {
        return SexpNull, err
    }

    // Now convert to []Sexp{SexpStr}
    res := make([]Sexp, len(stringArgs))
    for i := range stringArgs {
        res[i] = &SexpStr{S: stringArgs[i]}
    }
    return env.NewSexpArray(res), nil
}

func flattenToWordsHelper(args []Sexp) ([]string, error) {
    stringArgs := []string{}
    for i := range args {
        switch c := args[i].(type) {
        case *SexpStr:
            many := strings.Split(c.S, " ")
            stringArgs = append(stringArgs, many...)
        case *SexpSymbol:
            stringArgs = append(stringArgs, c.name)
        case *SexpPair:
            carry, err := ListToArray(c)
            if err != nil {
                return []string{}, fmt.Errorf("tried to convert list of strings to array but failed with error '%s'. Input was type %T / val = '%#v'", err, c, c)
            }
            moreWords, err := flattenToWordsHelper(carry)
            if err != nil {
                return []string{}, err
            }
            stringArgs = append(stringArgs, moreWords...)
        default:
            return []string{}, fmt.Errorf("arguments to system must be strings; instead we have %T / val = '%#v'", c, c)
        }
    } // end i over args

    // INVAR: stringArgs has our flattened list.
    return stringArgs, nil
}

func Chomp(by []byte) []byte {
    if len(by) > 0 {
        n := len(by)
        if by[n-1] == '\n' {
            return by[:n-1]
        }
    }
    return by
}
[ "\"COMSPEC\"" ]
[]
[ "COMSPEC" ]
[]
["COMSPEC"]
go
1
0
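The (system) builtin above joins its flattened word arguments into one command string, hands it to the shell with -c, and trims a single trailing newline from the combined output. A standalone sketch of that shell-out pattern using only the standard library; the /bin/sh path and the echo command are placeholders rather than anything taken from zygomys:

package main

import (
    "fmt"
    "os/exec"
    "strings"
)

func main() {
    // Words as they would come out of flattenToWordsHelper.
    words := []string{"echo", "hello", "world"}
    joined := strings.Join(words, " ")

    // Run the joined command through the shell, as SystemFunction does.
    out, err := exec.Command("/bin/sh", "-c", joined).CombinedOutput()
    if err != nil {
        panic(err)
    }

    // Trim one trailing newline, mirroring Chomp.
    fmt.Printf("%q\n", strings.TrimSuffix(string(out), "\n"))
}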
replaceme/cmds/init_funcs.py
import os import shutil from pathlib import Path from typing import Any, Dict, List, Optional, Tuple import yaml from replaceme import __version__ from replaceme.consensus.coinbase import create_puzzlehash_for_pk from replaceme.ssl.create_ssl import ( ensure_ssl_dirs, generate_ca_signed_cert, get_replaceme_ca_crt_key, make_ca_cert, write_ssl_cert_and_key, ) from replaceme.util.bech32m import encode_puzzle_hash from replaceme.util.config import ( create_default_replaceme_config, initial_config_file, load_config, save_config, unflatten_properties, ) from replaceme.util.ints import uint32 from replaceme.util.keychain import Keychain from replaceme.util.path import mkdir from replaceme.util.ssl_check import ( DEFAULT_PERMISSIONS_CERT_FILE, DEFAULT_PERMISSIONS_KEY_FILE, RESTRICT_MASK_CERT_FILE, RESTRICT_MASK_KEY_FILE, check_and_fix_permissions_for_ssl_file, fix_ssl, ) from replaceme.wallet.derive_keys import master_sk_to_pool_sk, master_sk_to_wallet_sk from replaceme.cmds.configure import configure private_node_names = {"full_node", "wallet", "farmer", "harvester", "timelord", "daemon"} public_node_names = {"full_node", "wallet", "farmer", "introducer", "timelord"} def dict_add_new_default(updated: Dict, default: Dict, do_not_migrate_keys: Dict[str, Any]): for k in do_not_migrate_keys: if k in updated and do_not_migrate_keys[k] == "": updated.pop(k) for k, v in default.items(): ignore = False if k in do_not_migrate_keys: do_not_data = do_not_migrate_keys[k] if isinstance(do_not_data, dict): ignore = False else: ignore = True if isinstance(v, dict) and k in updated and ignore is False: # If there is an intermediate key with empty string value, do not migrate all descendants if do_not_migrate_keys.get(k, None) == "": do_not_migrate_keys[k] = v dict_add_new_default(updated[k], default[k], do_not_migrate_keys.get(k, {})) elif k not in updated or ignore is True: updated[k] = v def check_keys(new_root: Path, keychain: Optional[Keychain] = None) -> None: if keychain is None: keychain = Keychain() all_sks = keychain.get_all_private_keys() if len(all_sks) == 0: print("No keys are present in the keychain. Generate them with 'replaceme keys generate'") return None config: Dict = load_config(new_root, "config.yaml") pool_child_pubkeys = [master_sk_to_pool_sk(sk).get_g1() for sk, _ in all_sks] all_targets = [] stop_searching_for_farmer = "xch_target_address" not in config["farmer"] stop_searching_for_pool = "xch_target_address" not in config["pool"] number_of_ph_to_search = 500 selected = config["selected_network"] prefix = config["network_overrides"]["config"][selected]["address_prefix"] for i in range(number_of_ph_to_search): if stop_searching_for_farmer and stop_searching_for_pool and i > 0: break for sk, _ in all_sks: all_targets.append( encode_puzzle_hash(create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1()), prefix) ) if all_targets[-1] == config["farmer"].get("xch_target_address"): stop_searching_for_farmer = True if all_targets[-1] == config["pool"].get("xch_target_address"): stop_searching_for_pool = True # Set the destinations, if necessary updated_target: bool = False if "xch_target_address" not in config["farmer"]: print( f"Setting the xch destination for the farmer reward (1/8 plus fees, solo and pooling) to {all_targets[0]}" ) config["farmer"]["xch_target_address"] = all_targets[0] updated_target = True elif config["farmer"]["xch_target_address"] not in all_targets: print( f"WARNING: using a farmer address which we don't have the private" f" keys for. 
We searched the first {number_of_ph_to_search} addresses. Consider overriding " f"{config['farmer']['xch_target_address']} with {all_targets[0]}" ) if "pool" not in config: config["pool"] = {} if "xch_target_address" not in config["pool"]: print(f"Setting the xch destination address for pool reward (7/8 for solo only) to {all_targets[0]}") config["pool"]["xch_target_address"] = all_targets[0] updated_target = True elif config["pool"]["xch_target_address"] not in all_targets: print( f"WARNING: using a pool address which we don't have the private" f" keys for. We searched the first {number_of_ph_to_search} addresses. Consider overriding " f"{config['pool']['xch_target_address']} with {all_targets[0]}" ) if updated_target: print( f"To change the XCH destination addresses, edit the `xch_target_address` entries in" f" {(new_root / 'config' / 'config.yaml').absolute()}." ) # Set the pool pks in the farmer pool_pubkeys_hex = set(bytes(pk).hex() for pk in pool_child_pubkeys) if "pool_public_keys" in config["farmer"]: for pk_hex in config["farmer"]["pool_public_keys"]: # Add original ones in config pool_pubkeys_hex.add(pk_hex) config["farmer"]["pool_public_keys"] = pool_pubkeys_hex save_config(new_root, "config.yaml", config) def copy_files_rec(old_path: Path, new_path: Path): if old_path.is_file(): print(f"{new_path}") mkdir(new_path.parent) shutil.copy(old_path, new_path) elif old_path.is_dir(): for old_path_child in old_path.iterdir(): new_path_child = new_path / old_path_child.name copy_files_rec(old_path_child, new_path_child) def migrate_from( old_root: Path, new_root: Path, manifest: List[str], do_not_migrate_settings: List[str], ): """ Copy all the files in "manifest" to the new config directory. """ if old_root == new_root: print("same as new path, exiting") return 1 if not old_root.is_dir(): print(f"{old_root} not found - this is ok if you did not install this version") return 0 print(f"\n{old_root} found") print(f"Copying files from {old_root} to {new_root}\n") for f in manifest: old_path = old_root / f new_path = new_root / f copy_files_rec(old_path, new_path) # update config yaml with new keys config: Dict = load_config(new_root, "config.yaml") config_str: str = initial_config_file("config.yaml") default_config: Dict = yaml.safe_load(config_str) flattened_keys = unflatten_properties({k: "" for k in do_not_migrate_settings}) dict_add_new_default(config, default_config, flattened_keys) save_config(new_root, "config.yaml", config) create_all_ssl(new_root) return 1 def create_all_ssl(root_path: Path): # remove old key and crt config_dir = root_path / "config" old_key_path = config_dir / "trusted.key" old_crt_path = config_dir / "trusted.crt" if old_key_path.exists(): print(f"Old key not needed anymore, deleting {old_key_path}") os.remove(old_key_path) if old_crt_path.exists(): print(f"Old crt not needed anymore, deleting {old_crt_path}") os.remove(old_crt_path) ssl_dir = config_dir / "ssl" ca_dir = ssl_dir / "ca" ensure_ssl_dirs([ssl_dir, ca_dir]) private_ca_key_path = ca_dir / "private_ca.key" private_ca_crt_path = ca_dir / "private_ca.crt" replaceme_ca_crt, replaceme_ca_key = get_replaceme_ca_crt_key() replaceme_ca_crt_path = ca_dir / "replaceme_ca.crt" replaceme_ca_key_path = ca_dir / "replaceme_ca.key" write_ssl_cert_and_key(replaceme_ca_crt_path, replaceme_ca_crt, replaceme_ca_key_path, replaceme_ca_key) if not private_ca_key_path.exists() or not private_ca_crt_path.exists(): # Create private CA print(f"Can't find private CA, creating a new one in {root_path} to generate TLS 
certificates") make_ca_cert(private_ca_crt_path, private_ca_key_path) # Create private certs for each node ca_key = private_ca_key_path.read_bytes() ca_crt = private_ca_crt_path.read_bytes() generate_ssl_for_nodes(ssl_dir, ca_crt, ca_key, True) else: # This is entered when user copied over private CA print(f"Found private CA in {root_path}, using it to generate TLS certificates") ca_key = private_ca_key_path.read_bytes() ca_crt = private_ca_crt_path.read_bytes() generate_ssl_for_nodes(ssl_dir, ca_crt, ca_key, True) replaceme_ca_crt, replaceme_ca_key = get_replaceme_ca_crt_key() generate_ssl_for_nodes(ssl_dir, replaceme_ca_crt, replaceme_ca_key, False, overwrite=False) def generate_ssl_for_nodes(ssl_dir: Path, ca_crt: bytes, ca_key: bytes, private: bool, overwrite=True): if private: names = private_node_names else: names = public_node_names for node_name in names: node_dir = ssl_dir / node_name ensure_ssl_dirs([node_dir]) if private: prefix = "private" else: prefix = "public" key_path = node_dir / f"{prefix}_{node_name}.key" crt_path = node_dir / f"{prefix}_{node_name}.crt" if key_path.exists() and crt_path.exists() and overwrite is False: continue generate_ca_signed_cert(ca_crt, ca_key, crt_path, key_path) def copy_cert_files(cert_path: Path, new_path: Path): for old_path_child in cert_path.glob("*.crt"): new_path_child = new_path / old_path_child.name copy_files_rec(old_path_child, new_path_child) check_and_fix_permissions_for_ssl_file(new_path_child, RESTRICT_MASK_CERT_FILE, DEFAULT_PERMISSIONS_CERT_FILE) for old_path_child in cert_path.glob("*.key"): new_path_child = new_path / old_path_child.name copy_files_rec(old_path_child, new_path_child) check_and_fix_permissions_for_ssl_file(new_path_child, RESTRICT_MASK_KEY_FILE, DEFAULT_PERMISSIONS_KEY_FILE) def init(create_certs: Optional[Path], root_path: Path, fix_ssl_permissions: bool = False, testnet: bool = False): if create_certs is not None: if root_path.exists(): if os.path.isdir(create_certs): ca_dir: Path = root_path / "config/ssl/ca" if ca_dir.exists(): print(f"Deleting your OLD CA in {ca_dir}") shutil.rmtree(ca_dir) print(f"Copying your CA from {create_certs} to {ca_dir}") copy_cert_files(create_certs, ca_dir) create_all_ssl(root_path) else: print(f"** Directory {create_certs} does not exist **") else: print(f"** {root_path} does not exist. Executing core init **") # sanity check here to prevent infinite recursion if ( replaceme_init(root_path, fix_ssl_permissions=fix_ssl_permissions, testnet=testnet) == 0 and root_path.exists() ): return init(create_certs, root_path, fix_ssl_permissions) print(f"** {root_path} was not created. 
Exiting **") return -1 else: return replaceme_init(root_path, fix_ssl_permissions=fix_ssl_permissions, testnet=testnet) def replaceme_version_number() -> Tuple[str, str, str, str]: scm_full_version = __version__ left_full_version = scm_full_version.split("+") version = left_full_version[0].split(".") scm_major_version = version[0] scm_minor_version = version[1] if len(version) > 2: smc_patch_version = version[2] patch_release_number = smc_patch_version else: smc_patch_version = "" major_release_number = scm_major_version minor_release_number = scm_minor_version dev_release_number = "" # If this is a beta dev release - get which beta it is if "0b" in scm_minor_version: original_minor_ver_list = scm_minor_version.split("0b") major_release_number = str(1 - int(scm_major_version)) # decrement the major release for beta minor_release_number = scm_major_version patch_release_number = original_minor_ver_list[1] if smc_patch_version and "dev" in smc_patch_version: dev_release_number = "." + smc_patch_version elif "0rc" in version[1]: original_minor_ver_list = scm_minor_version.split("0rc") major_release_number = str(1 - int(scm_major_version)) # decrement the major release for release candidate minor_release_number = str(int(scm_major_version) + 1) # RC is 0.2.1 for RC 1 patch_release_number = original_minor_ver_list[1] if smc_patch_version and "dev" in smc_patch_version: dev_release_number = "." + smc_patch_version else: major_release_number = scm_major_version minor_release_number = scm_minor_version patch_release_number = smc_patch_version dev_release_number = "" install_release_number = major_release_number + "." + minor_release_number if len(patch_release_number) > 0: install_release_number += "." + patch_release_number if len(dev_release_number) > 0: install_release_number += dev_release_number return major_release_number, minor_release_number, patch_release_number, dev_release_number def replaceme_minor_release_number(): res = int(replaceme_version_number()[2]) print(f"Install release number: {res}") return res def replaceme_full_version_str() -> str: major, minor, patch, dev = replaceme_version_number() return f"{major}.{minor}.{patch}{dev}" def replaceme_init( root_path: Path, *, should_check_keys: bool = True, fix_ssl_permissions: bool = False, testnet: bool = False ): """ Standard first run initialization or migration steps. Handles config creation, generation of SSL certs, and setting target addresses (via check_keys). should_check_keys can be set to False to avoid blocking when accessing a passphrase protected Keychain. When launching the daemon from the GUI, we want the GUI to handle unlocking the keychain. """ if os.environ.get("REPLACEME_ROOT", None) is not None: print( f"warning, your REPLACEME_ROOT is set to {os.environ['REPLACEME_ROOT']}. " f"Please unset the environment variable and run replaceme init again\n" f"or manually migrate config.yaml" ) print(f"Replaceme directory {root_path}") if root_path.is_dir() and Path(root_path / "config" / "config.yaml").exists(): # This is reached if REPLACEME_ROOT is set, or if user has run replaceme init twice # before a new update. 
if testnet: configure(root_path, "", "", "", "", "", "", "", "", testnet="true", peer_connect_timeout="") if fix_ssl_permissions: fix_ssl(root_path) if should_check_keys: check_keys(root_path) print(f"{root_path} already exists, no migration action taken") return -1 create_default_replaceme_config(root_path) if testnet: configure(root_path, "", "", "", "", "", "", "", "", testnet="true", peer_connect_timeout="") create_all_ssl(root_path) if fix_ssl_permissions: fix_ssl(root_path) if should_check_keys: check_keys(root_path) print("") print("To see your keys, run 'replaceme keys show --show-mnemonic-seed'") return 0
[]
[]
[ "REPLACEME_ROOT" ]
[]
["REPLACEME_ROOT"]
python
1
0
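dict_add_new_default in the record above merges newly introduced default settings into an existing config while leaving user-set values alone; an empty-string entry in do_not_migrate_keys marks a key (or subtree) whose old value should be dropped in favour of the default. A small standalone illustration of that behaviour, with the function restated verbatim so the demo runs without the package; the key names and values are made up:

from typing import Any, Dict

# Restated from the record above so the demo is self-contained.
def dict_add_new_default(updated: Dict, default: Dict, do_not_migrate_keys: Dict[str, Any]):
    for k in do_not_migrate_keys:
        if k in updated and do_not_migrate_keys[k] == "":
            updated.pop(k)
    for k, v in default.items():
        ignore = False
        if k in do_not_migrate_keys:
            do_not_data = do_not_migrate_keys[k]
            if isinstance(do_not_data, dict):
                ignore = False
            else:
                ignore = True
        if isinstance(v, dict) and k in updated and ignore is False:
            if do_not_migrate_keys.get(k, None) == "":
                do_not_migrate_keys[k] = v
            dict_add_new_default(updated[k], default[k], do_not_migrate_keys.get(k, {}))
        elif k not in updated or ignore is True:
            updated[k] = v

# A user config that predates the new "logging" section and has a custom port.
user_config = {"port": 9999, "network": {"name": "oldnet"}}
defaults = {"port": 8444, "logging": {"level": "INFO"}, "network": {"name": "mainnet"}}

# "network" is marked with "" so the default subtree replaces the user's value;
# "port" is not listed, so the user's 9999 survives.
dict_add_new_default(user_config, defaults, {"network": ""})
print(user_config)  # {'port': 9999, 'logging': {'level': 'INFO'}, 'network': {'name': 'mainnet'}}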
tests/testing_utility_functions.py
import os
import shutil
import sys

sys.path.insert(0, "../shallow_backup")
from shallow_backup.config import safe_create_config


def setup_env_vars():
    os.environ["SHALLOW_BACKUP_TEST_BACKUP_DIR"] = os.path.abspath(BASE_TEST_DIR) + "/backup"
    os.environ["SHALLOW_BACKUP_TEST_HOME_DIR"] = os.path.abspath(BASE_TEST_DIR) + "/home"
    # This env var is referenced in shallow_backup/config.py
    os.environ["SHALLOW_BACKUP_TEST_CONFIG_PATH"] = os.path.abspath(BASE_TEST_DIR) + "/shallow-backup.conf"


def unset_env_vars():
    del os.environ["SHALLOW_BACKUP_TEST_BACKUP_DIR"]
    del os.environ["SHALLOW_BACKUP_TEST_HOME_DIR"]
    del os.environ["SHALLOW_BACKUP_TEST_CONFIG_PATH"]


def create_config_for_test():
    config_file = os.environ["SHALLOW_BACKUP_TEST_CONFIG_PATH"]
    if os.path.isfile(config_file):
        os.remove(config_file)
    safe_create_config()


def create_dir_overwrite(directory):
    if os.path.isdir(directory):
        shutil.rmtree(directory)
    os.makedirs(directory)


def setup_dirs_and_env_vars_and_create_config():
    setup_env_vars()
    create_config_for_test()
    for directory in DIRS:
        create_dir_overwrite(directory)


def clean_up_dirs_and_env_vars():
    shutil.rmtree(BASE_TEST_DIR)
    unset_env_vars()


# This global is required to be set for the setup_env_vars call to work properly.
BASE_TEST_DIR = os.path.expanduser("~") + "/SHALLOW-BACKUP-TEST-DIRECTORY"
setup_env_vars()
BACKUP_DEST_DIR = os.environ.get("SHALLOW_BACKUP_TEST_BACKUP_DIR")
FAKE_HOME_DIR = os.environ.get("SHALLOW_BACKUP_TEST_HOME_DIR")
DIRS = [BACKUP_DEST_DIR, FAKE_HOME_DIR]
DOTFILES = [
    os.path.join(FAKE_HOME_DIR, ".ssh/"),
    os.path.join(FAKE_HOME_DIR, ".config/git/"),
    os.path.join(FAKE_HOME_DIR, ".zshenv"),
    os.path.join(FAKE_HOME_DIR, ".pypirc"),
    os.path.join(FAKE_HOME_DIR, ".config/nvim/init.vim"),
    os.path.join(FAKE_HOME_DIR, ".config/zsh/.zshrc")
]
[]
[]
[ "SHALLOW_BACKUP_TEST_HOME_DIR", "SHALLOW_BACKUP_TEST_CONFIG_PATH", "SHALLOW_BACKUP_TEST_BACKUP_DIR" ]
[]
["SHALLOW_BACKUP_TEST_HOME_DIR", "SHALLOW_BACKUP_TEST_CONFIG_PATH", "SHALLOW_BACKUP_TEST_BACKUP_DIR"]
python
3
0
main.go
package main

import (
    "net/http"
    "os"
    "time"

    "github.com/leecalcote/pwk-twitter-auth/dao"
    "github.com/leecalcote/pwk-twitter-auth/handlers"
    "github.com/leecalcote/pwk-twitter-auth/queue"
    "github.com/sirupsen/logrus"
)

// main creates and starts a Server listening.
func main() {
    address := os.Getenv("HOST")
    port := os.Getenv("PORT")
    proto := os.Getenv("PROTO")

    dyno, err := dao.NewDynoDao()
    if err != nil {
        logrus.Fatalf("unable to create a DynamoDB dao: %v", err)
    }

    mmq := queue.NewMemQ(dyno)

    // read credentials from environment variables if available
    config := &handlers.Config{
        TwitterConsumerKey:    os.Getenv("TWITTER_CONSUMER_KEY"),
        TwitterConsumerSecret: os.Getenv("TWITTER_CONSUMER_SECRET"),
        CallbackURL:           proto + address + ":" + port + "/twitter/callback",
        Mmq:                   mmq,
    }

    if config.TwitterConsumerKey == "" {
        logrus.Fatal("Missing Twitter Consumer Key")
    }
    if config.TwitterConsumerSecret == "" {
        logrus.Fatal("Missing Twitter Consumer Secret")
    }

    config.Loc, err = time.LoadLocation(os.Getenv("LOCAL_TZ_NAME"))
    if err != nil {
        logrus.Fatalf("Time zone provided is not valid: %v", err)
    }

    logrus.Infof("Starting Server listening on %s:%s", address, port)
    err = http.ListenAndServe(":"+port, handlers.New(config))
    if err != nil {
        logrus.Fatalf("ListenAndServe error: %v", err)
    }
}
[ "\"HOST\"", "\"PORT\"", "\"PROTO\"", "\"TWITTER_CONSUMER_KEY\"", "\"TWITTER_CONSUMER_SECRET\"", "\"LOCAL_TZ_NAME\"" ]
[]
[ "PORT", "HOST", "TWITTER_CONSUMER_KEY", "TWITTER_CONSUMER_SECRET", "LOCAL_TZ_NAME", "PROTO" ]
[]
["PORT", "HOST", "TWITTER_CONSUMER_KEY", "TWITTER_CONSUMER_SECRET", "LOCAL_TZ_NAME", "PROTO"]
go
6
0
cmd/podman/common/create.go
package common import ( "os" "github.com/containers/common/pkg/auth" "github.com/containers/common/pkg/completion" commonFlag "github.com/containers/common/pkg/flag" "github.com/containers/podman/v4/cmd/podman/registry" "github.com/containers/podman/v4/libpod/define" "github.com/containers/podman/v4/pkg/domain/entities" "github.com/spf13/cobra" ) const sizeWithUnitFormat = "(format: `<number>[<unit>]`, where unit = b (bytes), k (kilobytes), m (megabytes), or g (gigabytes))" var containerConfig = registry.PodmanConfig() // ContainerToPodOptions takes the Container and Pod Create options, assigning the matching values back to podCreate for the purpose of the libpod API // For this function to succeed, the JSON tags in PodCreateOptions and ContainerCreateOptions need to match due to the Marshaling and Unmarshaling done. // The types of the options also need to match or else the unmarshaling will fail even if the tags match func ContainerToPodOptions(containerCreate *entities.ContainerCreateOptions, podCreate *entities.PodCreateOptions) error { contMarshal, err := json.Marshal(containerCreate) if err != nil { return err } return json.Unmarshal(contMarshal, podCreate) } // DefineCreateFlags declares and instantiates the container create flags func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions, isInfra bool, clone bool) { createFlags := cmd.Flags() if !isInfra && !clone { // regular create flags annotationFlagName := "annotation" createFlags.StringSliceVar( &cf.Annotation, annotationFlagName, []string{}, "Add annotations to container (key=value)", ) _ = cmd.RegisterFlagCompletionFunc(annotationFlagName, completion.AutocompleteNone) attachFlagName := "attach" createFlags.StringSliceVarP( &cf.Attach, attachFlagName, "a", []string{}, "Attach to STDIN, STDOUT or STDERR", ) _ = cmd.RegisterFlagCompletionFunc(attachFlagName, AutocompleteCreateAttach) authfileFlagName := "authfile" createFlags.StringVar( &cf.Authfile, authfileFlagName, auth.GetDefaultAuthFile(), "Path of the authentication file. 
Use REGISTRY_AUTH_FILE environment variable to override", ) _ = cmd.RegisterFlagCompletionFunc(authfileFlagName, completion.AutocompleteDefault) blkioWeightFlagName := "blkio-weight" createFlags.StringVar( &cf.BlkIOWeight, blkioWeightFlagName, "", "Block IO weight (relative weight) accepts a weight value between 10 and 1000.", ) _ = cmd.RegisterFlagCompletionFunc(blkioWeightFlagName, completion.AutocompleteNone) blkioWeightDeviceFlagName := "blkio-weight-device" createFlags.StringSliceVar( &cf.BlkIOWeightDevice, blkioWeightDeviceFlagName, []string{}, "Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`)", ) _ = cmd.RegisterFlagCompletionFunc(blkioWeightDeviceFlagName, completion.AutocompleteDefault) capAddFlagName := "cap-add" createFlags.StringSliceVar( &cf.CapAdd, capAddFlagName, []string{}, "Add capabilities to the container", ) _ = cmd.RegisterFlagCompletionFunc(capAddFlagName, completion.AutocompleteCapabilities) capDropFlagName := "cap-drop" createFlags.StringSliceVar( &cf.CapDrop, capDropFlagName, []string{}, "Drop capabilities from the container", ) _ = cmd.RegisterFlagCompletionFunc(capDropFlagName, completion.AutocompleteCapabilities) cgroupnsFlagName := "cgroupns" createFlags.String( cgroupnsFlagName, "", "cgroup namespace to use", ) _ = cmd.RegisterFlagCompletionFunc(cgroupnsFlagName, AutocompleteNamespace) cgroupsFlagName := "cgroups" createFlags.StringVar( &cf.CgroupsMode, cgroupsFlagName, cgroupConfig(), `control container cgroup configuration ("enabled"|"disabled"|"no-conmon"|"split")`, ) _ = cmd.RegisterFlagCompletionFunc(cgroupsFlagName, AutocompleteCgroupMode) cidfileFlagName := "cidfile" createFlags.StringVar( &cf.CIDFile, cidfileFlagName, "", "Write the container ID to the file", ) _ = cmd.RegisterFlagCompletionFunc(cidfileFlagName, completion.AutocompleteDefault) deviceCgroupRuleFlagName := "device-cgroup-rule" createFlags.StringSliceVar( &cf.DeviceCgroupRule, deviceCgroupRuleFlagName, []string{}, "Add a rule to the cgroup allowed devices list", ) _ = cmd.RegisterFlagCompletionFunc(deviceCgroupRuleFlagName, completion.AutocompleteNone) deviceReadIopsFlagName := "device-read-iops" createFlags.StringSliceVar( &cf.DeviceReadIOPs, deviceReadIopsFlagName, []string{}, "Limit read rate (IO per second) from a device (e.g. --device-read-iops=/dev/sda:1000)", ) _ = cmd.RegisterFlagCompletionFunc(deviceReadIopsFlagName, completion.AutocompleteDefault) deviceWriteBpsFlagName := "device-write-bps" createFlags.StringSliceVar( &cf.DeviceWriteBPs, deviceWriteBpsFlagName, []string{}, "Limit write rate (bytes per second) to a device (e.g. --device-write-bps=/dev/sda:1mb)", ) _ = cmd.RegisterFlagCompletionFunc(deviceWriteBpsFlagName, completion.AutocompleteDefault) deviceWriteIopsFlagName := "device-write-iops" createFlags.StringSliceVar( &cf.DeviceWriteIOPs, deviceWriteIopsFlagName, []string{}, "Limit write rate (IO per second) to a device (e.g. 
--device-write-iops=/dev/sda:1000)", ) _ = cmd.RegisterFlagCompletionFunc(deviceWriteIopsFlagName, completion.AutocompleteDefault) createFlags.Bool( "disable-content-trust", false, "This is a Docker specific option and is a NOOP", ) envFlagName := "env" createFlags.StringArrayP( envFlagName, "e", env(), "Set environment variables in container", ) _ = cmd.RegisterFlagCompletionFunc(envFlagName, completion.AutocompleteNone) unsetenvFlagName := "unsetenv" createFlags.StringArrayVar( &cf.UnsetEnv, unsetenvFlagName, []string{}, "Unset environment default variables in container", ) _ = cmd.RegisterFlagCompletionFunc(unsetenvFlagName, completion.AutocompleteNone) createFlags.BoolVar( &cf.UnsetEnvAll, "unsetenv-all", false, "Unset all default environment variables in container", ) if !registry.IsRemote() { createFlags.BoolVar( &cf.EnvHost, "env-host", false, "Use all current host environment variables in container", ) } envFileFlagName := "env-file" createFlags.StringSliceVar( &cf.EnvFile, envFileFlagName, []string{}, "Read in a file of environment variables", ) _ = cmd.RegisterFlagCompletionFunc(envFileFlagName, completion.AutocompleteDefault) exposeFlagName := "expose" createFlags.StringSliceVar( &cf.Expose, exposeFlagName, []string{}, "Expose a port or a range of ports", ) _ = cmd.RegisterFlagCompletionFunc(exposeFlagName, completion.AutocompleteNone) groupAddFlagName := "group-add" createFlags.StringSliceVar( &cf.GroupAdd, groupAddFlagName, []string{}, "Add additional groups to the primary container process. 'keep-groups' allows container processes to use supplementary groups.", ) _ = cmd.RegisterFlagCompletionFunc(groupAddFlagName, completion.AutocompleteNone) healthCmdFlagName := "health-cmd" createFlags.StringVar( &cf.HealthCmd, healthCmdFlagName, "", "set a healthcheck command for the container ('none' disables the existing healthcheck)", ) _ = cmd.RegisterFlagCompletionFunc(healthCmdFlagName, completion.AutocompleteNone) healthIntervalFlagName := "health-interval" createFlags.StringVar( &cf.HealthInterval, healthIntervalFlagName, define.DefaultHealthCheckInterval, "set an interval for the healthchecks (a value of disable results in no automatic timer setup)", ) _ = cmd.RegisterFlagCompletionFunc(healthIntervalFlagName, completion.AutocompleteNone) healthRetriesFlagName := "health-retries" createFlags.UintVar( &cf.HealthRetries, healthRetriesFlagName, define.DefaultHealthCheckRetries, "the number of retries allowed before a healthcheck is considered to be unhealthy", ) _ = cmd.RegisterFlagCompletionFunc(healthRetriesFlagName, completion.AutocompleteNone) healthStartPeriodFlagName := "health-start-period" createFlags.StringVar( &cf.HealthStartPeriod, healthStartPeriodFlagName, define.DefaultHealthCheckStartPeriod, "the initialization time needed for a container to bootstrap", ) _ = cmd.RegisterFlagCompletionFunc(healthStartPeriodFlagName, completion.AutocompleteNone) healthTimeoutFlagName := "health-timeout" createFlags.StringVar( &cf.HealthTimeout, healthTimeoutFlagName, define.DefaultHealthCheckTimeout, "the maximum time allowed to complete the healthcheck before an interval is considered failed", ) _ = cmd.RegisterFlagCompletionFunc(healthTimeoutFlagName, completion.AutocompleteNone) createFlags.BoolVar( &cf.HTTPProxy, "http-proxy", containerConfig.Containers.HTTPProxy, "Set proxy environment variables in the container based on the host proxy vars", ) hostUserFlagName := "hostuser" createFlags.StringSliceVar( &cf.HostUsers, hostUserFlagName, []string{}, "Host user account to add to 
/etc/passwd within container", ) _ = cmd.RegisterFlagCompletionFunc(hostUserFlagName, completion.AutocompleteNone) imageVolumeFlagName := "image-volume" createFlags.StringVar( &cf.ImageVolume, imageVolumeFlagName, DefaultImageVolume, `Tells podman how to handle the builtin image volumes ("bind"|"tmpfs"|"ignore")`, ) _ = cmd.RegisterFlagCompletionFunc(imageVolumeFlagName, AutocompleteImageVolume) createFlags.BoolVar( &cf.Init, "init", false, "Run an init binary inside the container that forwards signals and reaps processes", ) initPathFlagName := "init-path" createFlags.StringVar( &cf.InitPath, initPathFlagName, initPath(), // Do not use the Value field for setting the default value to determine user input (i.e., non-empty string) "Path to the container-init binary", ) _ = cmd.RegisterFlagCompletionFunc(initPathFlagName, completion.AutocompleteDefault) createFlags.BoolVarP( &cf.Interactive, "interactive", "i", false, "Keep STDIN open even if not attached", ) ipcFlagName := "ipc" createFlags.String( ipcFlagName, "", "IPC namespace to use", ) _ = cmd.RegisterFlagCompletionFunc(ipcFlagName, AutocompleteNamespace) createFlags.String( "kernel-memory", "", "DEPRECATED: Option is just hear for compatibility with Docker", ) // kernel-memory is deprecated in the runtime spec. _ = createFlags.MarkHidden("kernel-memory") logDriverFlagName := "log-driver" createFlags.StringVar( &cf.LogDriver, logDriverFlagName, logDriver(), "Logging driver for the container", ) _ = cmd.RegisterFlagCompletionFunc(logDriverFlagName, AutocompleteLogDriver) logOptFlagName := "log-opt" createFlags.StringSliceVar( &cf.LogOptions, logOptFlagName, []string{}, "Logging driver options", ) _ = cmd.RegisterFlagCompletionFunc(logOptFlagName, AutocompleteLogOpt) createFlags.BoolVar( &cf.NoHealthCheck, "no-healthcheck", false, "Disable healthchecks on container", ) createFlags.BoolVar( &cf.OOMKillDisable, "oom-kill-disable", false, "Disable OOM Killer", ) oomScoreAdjFlagName := "oom-score-adj" createFlags.Int( oomScoreAdjFlagName, 0, "Tune the host's OOM preferences (-1000 to 1000)", ) _ = cmd.RegisterFlagCompletionFunc(oomScoreAdjFlagName, completion.AutocompleteNone) archFlagName := "arch" createFlags.StringVar( &cf.Arch, archFlagName, "", "use `ARCH` instead of the architecture of the machine for choosing images", ) _ = cmd.RegisterFlagCompletionFunc(archFlagName, completion.AutocompleteArch) osFlagName := "os" createFlags.StringVar( &cf.OS, osFlagName, "", "use `OS` instead of the running OS for choosing images", ) _ = cmd.RegisterFlagCompletionFunc(osFlagName, completion.AutocompleteOS) variantFlagName := "variant" createFlags.StringVar( &cf.Variant, variantFlagName, "", "Use `VARIANT` instead of the running architecture variant for choosing images", ) _ = cmd.RegisterFlagCompletionFunc(variantFlagName, completion.AutocompleteNone) pidsLimitFlagName := "pids-limit" createFlags.Int64( pidsLimitFlagName, pidsLimit(), "Tune container pids limit (set -1 for unlimited)", ) _ = cmd.RegisterFlagCompletionFunc(pidsLimitFlagName, completion.AutocompleteNone) platformFlagName := "platform" createFlags.StringVar( &cf.Platform, platformFlagName, "", "Specify the platform for selecting the image. 
(Conflicts with --arch and --os)", ) _ = cmd.RegisterFlagCompletionFunc(platformFlagName, completion.AutocompleteNone) podIDFileFlagName := "pod-id-file" createFlags.StringVar( &cf.PodIDFile, podIDFileFlagName, "", "Read the pod ID from the file", ) _ = cmd.RegisterFlagCompletionFunc(podIDFileFlagName, completion.AutocompleteDefault) createFlags.BoolVar( &cf.Privileged, "privileged", false, "Give extended privileges to container", ) createFlags.BoolVarP( &cf.PublishAll, "publish-all", "P", false, "Publish all exposed ports to random ports on the host interface", ) pullFlagName := "pull" createFlags.StringVar( &cf.Pull, pullFlagName, policy(), `Pull image before creating ("always"|"missing"|"never")`, ) _ = cmd.RegisterFlagCompletionFunc(pullFlagName, AutocompletePullOption) createFlags.BoolVarP( &cf.Quiet, "quiet", "q", false, "Suppress output information when pulling images", ) createFlags.BoolVar( &cf.ReadOnly, "read-only", false, "Make containers root filesystem read-only", ) createFlags.BoolVar( &cf.ReadOnlyTmpFS, "read-only-tmpfs", true, "When running containers in read-only mode mount a read-write tmpfs on /run, /tmp and /var/tmp", ) requiresFlagName := "requires" createFlags.StringSliceVar( &cf.Requires, requiresFlagName, []string{}, "Add one or more requirement containers that must be started before this container will start", ) _ = cmd.RegisterFlagCompletionFunc(requiresFlagName, AutocompleteContainers) restartFlagName := "restart" createFlags.StringVar( &cf.Restart, restartFlagName, "", `Restart policy to apply when a container exits ("always"|"no"|"on-failure"|"unless-stopped")`, ) _ = cmd.RegisterFlagCompletionFunc(restartFlagName, AutocompleteRestartOption) createFlags.BoolVar( &cf.Rm, "rm", false, "Remove container (and pod if created) after exit", ) createFlags.BoolVar( &cf.RootFS, "rootfs", false, "The first argument is not an image but the rootfs to the exploded container", ) sdnotifyFlagName := "sdnotify" createFlags.StringVar( &cf.SdNotifyMode, sdnotifyFlagName, define.SdNotifyModeContainer, `control sd-notify behavior ("container"|"conmon"|"ignore")`, ) _ = cmd.RegisterFlagCompletionFunc(sdnotifyFlagName, AutocompleteSDNotify) secretFlagName := "secret" createFlags.StringArrayVar( &cf.Secrets, secretFlagName, []string{}, "Add secret to container", ) _ = cmd.RegisterFlagCompletionFunc(secretFlagName, AutocompleteSecrets) shmSizeFlagName := "shm-size" createFlags.String( shmSizeFlagName, shmSize(), "Size of /dev/shm "+sizeWithUnitFormat, ) _ = cmd.RegisterFlagCompletionFunc(shmSizeFlagName, completion.AutocompleteNone) stopSignalFlagName := "stop-signal" createFlags.StringVar( &cf.StopSignal, stopSignalFlagName, "", "Signal to stop a container. Default is SIGTERM", ) _ = cmd.RegisterFlagCompletionFunc(stopSignalFlagName, AutocompleteStopSignal) stopTimeoutFlagName := "stop-timeout" createFlags.UintVar( &cf.StopTimeout, stopTimeoutFlagName, containerConfig.Engine.StopTimeout, "Timeout (in seconds) that containers stopped by user command have to exit. 
If exceeded, the container will be forcibly stopped via SIGKILL.", ) _ = cmd.RegisterFlagCompletionFunc(stopTimeoutFlagName, completion.AutocompleteNone) systemdFlagName := "systemd" createFlags.StringVar( &cf.Systemd, systemdFlagName, "true", `Run container in systemd mode ("true"|"false"|"always")`, ) _ = cmd.RegisterFlagCompletionFunc(systemdFlagName, AutocompleteSystemdFlag) personalityFlagName := "personality" createFlags.StringVar( &cf.Personality, personalityFlagName, "", "Configure execution domain using personality (e.g., LINUX/LINUX32)", ) _ = cmd.RegisterFlagCompletionFunc(personalityFlagName, AutocompleteNamespace) timeoutFlagName := "timeout" createFlags.UintVar( &cf.Timeout, timeoutFlagName, 0, "Maximum length of time a container is allowed to run. The container will be killed automatically after the time expires.", ) _ = cmd.RegisterFlagCompletionFunc(timeoutFlagName, completion.AutocompleteNone) commonFlag.OptionalBoolFlag(createFlags, &cf.TLSVerify, "tls-verify", "Require HTTPS and verify certificates when contacting registries for pulling images", ) tmpfsFlagName := "tmpfs" createFlags.StringArrayVar( &cf.TmpFS, tmpfsFlagName, []string{}, "Mount a temporary filesystem (`tmpfs`) into a container", ) _ = cmd.RegisterFlagCompletionFunc(tmpfsFlagName, completion.AutocompleteDefault) createFlags.BoolVarP( &cf.TTY, "tty", "t", false, "Allocate a pseudo-TTY for container", ) timezoneFlagName := "tz" createFlags.StringVar( &cf.Timezone, timezoneFlagName, containerConfig.TZ(), "Set timezone in container", ) _ = cmd.RegisterFlagCompletionFunc(timezoneFlagName, completion.AutocompleteNone) //TODO: add timezone completion umaskFlagName := "umask" createFlags.StringVar( &cf.Umask, umaskFlagName, containerConfig.Umask(), "Set umask in container", ) _ = cmd.RegisterFlagCompletionFunc(umaskFlagName, completion.AutocompleteNone) ulimitFlagName := "ulimit" createFlags.StringSliceVar( &cf.Ulimit, ulimitFlagName, ulimits(), "Ulimit options", ) _ = cmd.RegisterFlagCompletionFunc(ulimitFlagName, completion.AutocompleteNone) userFlagName := "user" createFlags.StringVarP( &cf.User, userFlagName, "u", "", "Username or UID (format: <name|uid>[:<group|gid>])", ) _ = cmd.RegisterFlagCompletionFunc(userFlagName, AutocompleteUserFlag) utsFlagName := "uts" createFlags.String( utsFlagName, "", "UTS namespace to use", ) _ = cmd.RegisterFlagCompletionFunc(utsFlagName, AutocompleteNamespace) mountFlagName := "mount" createFlags.StringArrayVar( &cf.Mount, mountFlagName, []string{}, "Attach a filesystem mount to the container", ) _ = cmd.RegisterFlagCompletionFunc(mountFlagName, AutocompleteMountFlag) workdirFlagName := "workdir" createFlags.StringVarP( &cf.Workdir, workdirFlagName, "w", "", "Working directory inside the container", ) _ = cmd.RegisterFlagCompletionFunc(workdirFlagName, completion.AutocompleteDefault) seccompPolicyFlagName := "seccomp-policy" createFlags.StringVar( &cf.SeccompPolicy, seccompPolicyFlagName, "default", "Policy for selecting a seccomp profile (experimental)", ) _ = cmd.RegisterFlagCompletionFunc(seccompPolicyFlagName, completion.AutocompleteDefault) cgroupConfFlagName := "cgroup-conf" createFlags.StringSliceVar( &cf.CgroupConf, cgroupConfFlagName, []string{}, "Configure cgroup v2 (key=value)", ) _ = cmd.RegisterFlagCompletionFunc(cgroupConfFlagName, completion.AutocompleteNone) pidFileFlagName := "pidfile" createFlags.StringVar( &cf.PidFile, pidFileFlagName, "", "Write the container process ID to the file") _ = cmd.RegisterFlagCompletionFunc(pidFileFlagName, 
completion.AutocompleteDefault) chrootDirsFlagName := "chrootdirs" createFlags.StringSliceVar( &cf.ChrootDirs, chrootDirsFlagName, []string{}, "Chroot directories inside the container", ) _ = cmd.RegisterFlagCompletionFunc(chrootDirsFlagName, completion.AutocompleteDefault) passwdEntryName := "passwd-entry" createFlags.StringVar(&cf.PasswdEntry, passwdEntryName, "", "Entry to write to /etc/passwd") _ = cmd.RegisterFlagCompletionFunc(passwdEntryName, completion.AutocompleteNone) if registry.IsRemote() { _ = createFlags.MarkHidden("env-host") _ = createFlags.MarkHidden("http-proxy") } else { createFlags.StringVar( &cf.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)", ) _ = createFlags.MarkHidden("signature-policy") } createFlags.BoolVar( &cf.Replace, "replace", false, `If a container with the same name exists, replace it`, ) } if isInfra || (!clone && !isInfra) { // infra container flags, create should also pick these up sysctlFlagName := "sysctl" createFlags.StringSliceVar( &cf.Sysctl, sysctlFlagName, []string{}, "Sysctl options", ) //TODO: Add function for sysctl completion. _ = cmd.RegisterFlagCompletionFunc(sysctlFlagName, completion.AutocompleteNone) securityOptFlagName := "security-opt" createFlags.StringArrayVar( &cf.SecurityOpt, securityOptFlagName, []string{}, "Security Options", ) _ = cmd.RegisterFlagCompletionFunc(securityOptFlagName, AutocompleteSecurityOption) subgidnameFlagName := "subgidname" createFlags.StringVar( &cf.SubUIDName, subgidnameFlagName, "", "Name of range listed in /etc/subgid for use in user namespace", ) _ = cmd.RegisterFlagCompletionFunc(subgidnameFlagName, completion.AutocompleteSubgidName) subuidnameFlagName := "subuidname" createFlags.StringVar( &cf.SubGIDName, subuidnameFlagName, "", "Name of range listed in /etc/subuid for use in user namespace", ) _ = cmd.RegisterFlagCompletionFunc(subuidnameFlagName, completion.AutocompleteSubuidName) gidmapFlagName := "gidmap" createFlags.StringSliceVar( &cf.GIDMap, gidmapFlagName, []string{}, "GID map to use for the user namespace", ) _ = cmd.RegisterFlagCompletionFunc(gidmapFlagName, completion.AutocompleteNone) uidmapFlagName := "uidmap" createFlags.StringSliceVar( &cf.UIDMap, uidmapFlagName, []string{}, "UID map to use for the user namespace", ) _ = cmd.RegisterFlagCompletionFunc(uidmapFlagName, completion.AutocompleteNone) usernsFlagName := "userns" createFlags.String( usernsFlagName, os.Getenv("PODMAN_USERNS"), "User namespace to use", ) _ = cmd.RegisterFlagCompletionFunc(usernsFlagName, AutocompleteUserNamespace) cgroupParentFlagName := "cgroup-parent" createFlags.StringVar( &cf.CgroupParent, cgroupParentFlagName, "", "Optional parent cgroup for the container", ) _ = cmd.RegisterFlagCompletionFunc(cgroupParentFlagName, completion.AutocompleteDefault) var conmonPidfileFlagName string if !isInfra { conmonPidfileFlagName = "conmon-pidfile" } else { conmonPidfileFlagName = "infra-conmon-pidfile" } createFlags.StringVar( &cf.ConmonPIDFile, conmonPidfileFlagName, "", "Path to the file that will receive the PID of conmon", ) _ = cmd.RegisterFlagCompletionFunc(conmonPidfileFlagName, completion.AutocompleteDefault) var entrypointFlagName string if !isInfra { entrypointFlagName = "entrypoint" } else { entrypointFlagName = "infra-command" } createFlags.String(entrypointFlagName, "", "Overwrite the default ENTRYPOINT of the image", ) _ = cmd.RegisterFlagCompletionFunc(entrypointFlagName, completion.AutocompleteNone) hostnameFlagName := "hostname" createFlags.StringVarP( 
&cf.Hostname, hostnameFlagName, "h", "", "Set container hostname", ) _ = cmd.RegisterFlagCompletionFunc(hostnameFlagName, completion.AutocompleteNone) labelFlagName := "label" createFlags.StringArrayVarP( &cf.Label, labelFlagName, "l", []string{}, "Set metadata on container", ) _ = cmd.RegisterFlagCompletionFunc(labelFlagName, completion.AutocompleteNone) labelFileFlagName := "label-file" createFlags.StringSliceVar( &cf.LabelFile, labelFileFlagName, []string{}, "Read in a line delimited file of labels", ) _ = cmd.RegisterFlagCompletionFunc(labelFileFlagName, completion.AutocompleteDefault) if isInfra { nameFlagName := "infra-name" createFlags.StringVar( &cf.Name, nameFlagName, "", "Assign a name to the container", ) _ = cmd.RegisterFlagCompletionFunc(nameFlagName, completion.AutocompleteNone) } createFlags.Bool( "help", false, "", ) pidFlagName := "pid" createFlags.StringVar( &cf.PID, pidFlagName, "", "PID namespace to use", ) _ = cmd.RegisterFlagCompletionFunc(pidFlagName, AutocompleteNamespace) volumeDesciption := "Bind mount a volume into the container" if registry.IsRemote() { volumeDesciption = "Bind mount a volume into the container. Volume source will be on the server machine, not the client" } volumeFlagName := "volume" createFlags.StringArrayVarP( &cf.Volume, volumeFlagName, "v", volumes(), volumeDesciption, ) _ = cmd.RegisterFlagCompletionFunc(volumeFlagName, AutocompleteVolumeFlag) deviceFlagName := "device" createFlags.StringSliceVar( &cf.Devices, deviceFlagName, devices(), "Add a host device to the container", ) _ = cmd.RegisterFlagCompletionFunc(deviceFlagName, completion.AutocompleteDefault) deviceReadBpsFlagName := "device-read-bps" createFlags.StringSliceVar( &cf.DeviceReadBPs, deviceReadBpsFlagName, []string{}, "Limit read rate (bytes per second) from a device (e.g. 
--device-read-bps=/dev/sda:1mb)", ) _ = cmd.RegisterFlagCompletionFunc(deviceReadBpsFlagName, completion.AutocompleteDefault) volumesFromFlagName := "volumes-from" createFlags.StringArrayVar( &cf.VolumesFrom, volumesFromFlagName, []string{}, "Mount volumes from the specified container(s)", ) _ = cmd.RegisterFlagCompletionFunc(volumesFromFlagName, AutocompleteContainers) } if clone || !isInfra { // clone and create only flags, we need this level of separation so clone does not pick up all of the flags nameFlagName := "name" createFlags.StringVar( &cf.Name, nameFlagName, "", "Assign a name to the container", ) _ = cmd.RegisterFlagCompletionFunc(nameFlagName, completion.AutocompleteNone) podFlagName := "pod" createFlags.StringVar( &cf.Pod, podFlagName, "", "Run container in an existing pod", ) _ = cmd.RegisterFlagCompletionFunc(podFlagName, AutocompletePods) cpuPeriodFlagName := "cpu-period" createFlags.Uint64Var( &cf.CPUPeriod, cpuPeriodFlagName, 0, "Limit the CPU CFS (Completely Fair Scheduler) period", ) _ = cmd.RegisterFlagCompletionFunc(cpuPeriodFlagName, completion.AutocompleteNone) cpuQuotaFlagName := "cpu-quota" createFlags.Int64Var( &cf.CPUQuota, cpuQuotaFlagName, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota", ) _ = cmd.RegisterFlagCompletionFunc(cpuQuotaFlagName, completion.AutocompleteNone) cpuRtPeriodFlagName := "cpu-rt-period" createFlags.Uint64Var( &cf.CPURTPeriod, cpuRtPeriodFlagName, 0, "Limit the CPU real-time period in microseconds", ) _ = cmd.RegisterFlagCompletionFunc(cpuRtPeriodFlagName, completion.AutocompleteNone) cpuRtRuntimeFlagName := "cpu-rt-runtime" createFlags.Int64Var( &cf.CPURTRuntime, cpuRtRuntimeFlagName, 0, "Limit the CPU real-time runtime in microseconds", ) _ = cmd.RegisterFlagCompletionFunc(cpuRtRuntimeFlagName, completion.AutocompleteNone) cpuSharesFlagName := "cpu-shares" createFlags.Uint64Var( &cf.CPUShares, cpuSharesFlagName, 0, "CPU shares (relative weight)", ) _ = cmd.RegisterFlagCompletionFunc(cpuSharesFlagName, completion.AutocompleteNone) cpusetMemsFlagName := "cpuset-mems" createFlags.StringVar( &cf.CPUSetMems, cpusetMemsFlagName, "", "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.", ) _ = cmd.RegisterFlagCompletionFunc(cpusetMemsFlagName, completion.AutocompleteNone) memoryFlagName := "memory" createFlags.StringVarP( &cf.Memory, memoryFlagName, "m", "", "Memory limit "+sizeWithUnitFormat, ) _ = cmd.RegisterFlagCompletionFunc(memoryFlagName, completion.AutocompleteNone) memoryReservationFlagName := "memory-reservation" createFlags.StringVar( &cf.MemoryReservation, memoryReservationFlagName, "", "Memory soft limit "+sizeWithUnitFormat, ) _ = cmd.RegisterFlagCompletionFunc(memoryReservationFlagName, completion.AutocompleteNone) memorySwapFlagName := "memory-swap" createFlags.StringVar( &cf.MemorySwap, memorySwapFlagName, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap", ) _ = cmd.RegisterFlagCompletionFunc(memorySwapFlagName, completion.AutocompleteNone) memorySwappinessFlagName := "memory-swappiness" createFlags.Int64Var( &cf.MemorySwappiness, memorySwappinessFlagName, -1, "Tune container memory swappiness (0 to 100, or -1 for system default)", ) _ = cmd.RegisterFlagCompletionFunc(memorySwappinessFlagName, completion.AutocompleteNone) } // anyone can use these cpusFlagName := "cpus" createFlags.Float64Var( &cf.CPUS, cpusFlagName, 0, "Number of CPUs. 
The default is 0.000 which means no limit", ) _ = cmd.RegisterFlagCompletionFunc(cpusFlagName, completion.AutocompleteNone) cpusetCpusFlagName := "cpuset-cpus" createFlags.StringVar( &cf.CPUSetCPUs, cpusetCpusFlagName, "", "CPUs in which to allow execution (0-3, 0,1)", ) _ = cmd.RegisterFlagCompletionFunc(cpusetCpusFlagName, completion.AutocompleteNone) }
[ "\"PODMAN_USERNS\"" ]
[]
[ "PODMAN_USERNS" ]
[]
["PODMAN_USERNS"]
go
1
0
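
The record above uses PODMAN_USERNS only as a default: os.Getenv("PODMAN_USERNS") seeds the --userns flag, so an explicit command-line value still wins. A minimal sketch of that pattern, using the standard library flag package rather than the cobra/pflag flag set in the original:

package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	// PODMAN_USERNS provides the default; --userns on the command line overrides it.
	userns := flag.String("userns", os.Getenv("PODMAN_USERNS"), "User namespace to use")
	flag.Parse()
	fmt.Println("userns:", *userns)
}
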
wycli/src/main/java/wycli/Main.java
// Copyright 2011 The Whiley Project Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package wycli; import java.io.File; import java.io.IOException; import java.io.PrintStream; import java.util.*; import jbfs.core.Build; import jbfs.core.Content; import jbfs.util.ByteRepository; import jbfs.util.DirectoryRoot; import jbfs.util.Pair; import jbfs.util.Trie; import jbfs.util.ZipFile; import wycc.util.AbstractCompilationUnit.Value; import wycc.lang.SyntacticException; import wycc.util.Logger; import wycli.cfg.*; import wycli.cfg.Configuration.Schema; import wycli.commands.*; import wycli.lang.Command; import wycli.lang.Package; import wycli.lang.Plugin; import wycli.util.LocalPackageRepository; import wycli.util.RemotePackageRepository; import wycli.util.StdPackageResolver; import wycli.util.CommandParser; /** * Provides a command-line interface to the Whiley Compiler Collection. This is * responsible for various tasks, such as loading various configuration files * from disk, activating plugins, parsing command-line arguments and actually * activating the tool itself. * * @author David J. Pearce * */ public class Main implements Command.Environment { /** * Trie to the dependency repository within the global root. */ public static final Trie DEFAULT_REPOSITORY_PATH = Trie.fromString("repository"); public static final Command.Descriptor[] DEFAULT_COMMANDS = { HelpCmd.DESCRIPTOR, // BuildCmd.DESCRIPTOR, // CleanCmd.DESCRIPTOR }; // ======================================================================== // Instance Fields // ======================================================================== private Logger logger = Logger.NULL; private Build.Meter meter = Build.NULL_METER; /** * Package resolver is reponsible for resolving packages in remote repositories and caching them in the * global repository. */ private final Package.Resolver resolver; /** * Plugin environment provides access to information sourced from the plugins, such as available * content-types, commands, etc. */ private final Plugin.Environment env; /** * The main repository for storing build artifacts and source files which is properly versioned. */ private final Build.Repository repository; /** * The working directoring where build artifacts are projected, etc. 
*/ private final Content.Root workingRoot; /** * */ private final Schema localSchema; public Main(Plugin.Environment env, Iterable<Build.Artifact> entries, Content.Root workingRoot, Content.Root packageRepository) throws IOException { this.env = env; this.repository = new ByteRepository(env, entries); this.workingRoot = workingRoot; this.localSchema = constructSchema(); // Setup package resolver this.resolver = new StdPackageResolver(this, new RemotePackageRepository(this, packageRepository)); } @Override public List<Command.Descriptor> getCommandDescriptors() { return env.getCommandDescriptors(); } @Override public List<Command.Platform> getCommandPlatforms() { return env.getCommandPlatforms(); } @Override public Content.Root getWorkspaceRoot() { return workingRoot; } @Override public Package.Resolver getPackageResolver() { return resolver; } @Override public Content.Registry getContentRegistry() { return env; } @Override public Build.Repository getRepository() { return repository; } @Override public Configuration get(Trie path) { ArrayList<Configuration> files = new ArrayList<>(); // Pull out all configuration files upto the root while (path != null) { ConfigFile cf = repository.get(ConfigFile.ContentType, path.append("wy")); if (cf != null) { Configuration c = cf.toConfiguration(localSchema, false); files.add(c); } path = path.parent(); } // Construct the combinator return new ConfigurationCombinator(files.toArray(new Configuration[files.size()])); } @Override public Build.Meter getMeter() { return meter; } @Override public Logger getLogger() { return logger; } public void setLogger(Logger logger) { this.logger = logger; this.env.setLogger(logger); } public void setMeter(Build.Meter meter) { this.meter = meter; } private Schema constructSchema() { List<Command.Platform> buildPlatforms = getCommandPlatforms(); List<Command.Descriptor> cmdDescriptors = getCommandDescriptors(); Configuration.Schema[] schemas = new Configuration.Schema[buildPlatforms.size() + cmdDescriptors.size() + 1]; int index = 0; schemas[index++] = Schemas.PACKAGE; for (int i = 0; i != buildPlatforms.size(); ++i) { Command.Platform platform = buildPlatforms.get(i); schemas[index++] = platform.getConfigurationSchema(); } for (int i = 0; i != cmdDescriptors.size(); ++i) { Command.Descriptor cmd = cmdDescriptors.get(i); schemas[index++] = cmd.getConfigurationSchema(); } // Construct combined schema return Configuration.toCombinedSchema(schemas); } // ================================================================== // Main Method // ================================================================== public static void main(String[] args) throws Exception { Logger logger = BOOT_LOGGER; // Determine system-wide directory. This contains configuration relevant to the // entire ecosystem, such as the set of active plugins. 
DirectoryRoot SystemDir = determineSystemRoot(); // Read the system configuration file Configuration system = readConfigFile(SystemDir, Trie.fromString("wy"), logger, Schemas.SYSTEM_CONFIG_SCHEMA); // Construct plugin environment and activate plugins Plugin.Environment penv = activatePlugins(system, logger); // Register content type for configuration files penv.register(Content.Type.class, ConfigFile.ContentType); penv.register(Content.Type.class, ZipFile.ContentType); // Determine user-wide directory DirectoryRoot globalDir = determineGlobalRoot(logger, penv); // Identify repository Content.Root repositoryDir = globalDir.subroot(DEFAULT_REPOSITORY_PATH); // Determine top-level directory and relative path Pair<File, Trie> lrp = determineLocalRootDirectory(); File localDir = lrp.first(); Trie path = lrp.second(); // Construct build directory File buildDir = determineBuildDirectory(localDir, logger); // Construct workding directory DirectoryRoot workingDir = new DirectoryRoot(penv, localDir); // Extract build artifacts List<Build.Artifact> artifacts = new ArrayList<>(); for (Content content : workingDir) { if (content instanceof Build.Artifact) { artifacts.add((Build.Artifact) content); } } // Construct command environment! Main menv = new Main(penv, artifacts, workingDir, repositoryDir); // Execute the given command int exitCode = exec(menv, path, args); // Done System.exit(exitCode); } public static int exec(Main menv, Trie path, String[] args) { // Add default descriptors menv.getCommandDescriptors().addAll(Arrays.asList(DEFAULT_COMMANDS)); // Construct environment and execute arguments Command.Descriptor descriptor = wycli.commands.Root.DESCRIPTOR(menv.getCommandDescriptors()); // Parse the given command-line Command.Template template = new CommandParser(descriptor).parse(args); // Apply verbose setting boolean verbose = template.getOptions().get("verbose", Boolean.class); int profile = template.getOptions().get("profile", Integer.class); if(verbose || profile > 0) { // Configure environment menv.setLogger(BOOT_LOGGER); menv.setMeter(new Meter("Build", BOOT_LOGGER, profile)); } // Done try { // Create command instance Command instance = descriptor.initialise(menv); // Execute command boolean ec = instance.execute(path,template); // Done return ec ? 0 : 1; } catch(SyntacticException e) { e.outputSourceError(System.err, false); if (verbose) { printStackTrace(System.err, e); } return 1; } catch (Exception e) { System.err.println("Internal failure: " + e.getMessage()); if(verbose) { e.printStackTrace(); } return 2; } } // ================================================================== // Helpers // ================================================================== /** * Determine the system root. That is, the installation directory for the * compiler itself. * * @param tool * @return * @throws IOException */ private static DirectoryRoot determineSystemRoot() throws IOException { String whileyhome = System.getenv("WHILEYHOME"); if (whileyhome == null) { System.err.println("error: WHILEYHOME environment variable not set"); System.exit(-1); } return new DirectoryRoot(BOOT_REGISTRY, new File(whileyhome)); } /** * Determine the global root. That is, the hidden whiley directory in the user's * home directory (e.g. ~/.whiley). 
* * @param tool * @return * @throws IOException */ private static DirectoryRoot determineGlobalRoot(Logger logger, Content.Registry registry) throws IOException { String userhome = System.getProperty("user.home"); File whileydir = new File(userhome + File.separator + ".whiley"); if (!whileydir.exists()) { logger.logTimedMessage("mkdir " + whileydir.toString(), 0, 0); whileydir.mkdirs(); } return new DirectoryRoot(registry, whileydir); } /** * Determine the build root. That is, the hidden whiley directory at the top-level of the build system. * * @param dir root path of the build system we are in. * @return * @throws IOException */ private static File determineBuildDirectory(File dir, Logger logger) throws IOException { File whileydir = new File(dir + File.separator + ".whiley"); if (!whileydir.exists()) { logger.logTimedMessage("mkdir " + whileydir.toString(), 0, 0); whileydir.mkdirs(); } // NOTE: should not be a file repository! return whileydir; } /** * Determine where the root of this project is. This is the nearest enclosing * directory containing a "wy.toml" file. The point is that we may be operating * in some subdirectory of the project and want the tool to automatically search * out the real root for us. * * @return * @throws IOException */ private static Pair<File, Trie> determineLocalRootDirectory() throws IOException { // Search for inner configuration. File inner = findConfigFile(new File(".")); if (inner == null) { throw new IllegalArgumentException("unable to find build configuration (\"wy.toml\")"); } // Search for enclosing configuration (if applicable). File outer = findConfigFile(inner.getParentFile()); if (outer == null) { // No enclosing configuration found. return new Pair<>(inner, Trie.ROOT); } else { // Calculate relative path String path = inner.getPath().replace(outer.getPath(), "").replace(File.separatorChar, '/'); // Done return new Pair<>(outer, Trie.fromString(path)); } } /** * Activate the set of registed plugins which the tool uses. Currently this list * is statically determined, but eventually it will be possible to dynamically * add plugins to the system. * * @param verbose * @param locations * @return */ private static Plugin.Environment activatePlugins(Configuration global, Logger logger) { Plugin.Environment env = new Plugin.Environment(logger); // Determine the set of install plugins List<Trie> plugins = global.matchAll(Trie.fromString("plugins/*")); // start modules for (Trie id : plugins) { Value.UTF8 activator = global.get(Value.UTF8.class, id); // Only activate if enabled try { Class<?> c = Class.forName(activator.toString()); Plugin.Activator instance = (Plugin.Activator) c.newInstance(); env.activate(instance); } catch (ClassNotFoundException e) { e.printStackTrace(); } catch (InstantiationException e) { e.printStackTrace(); } catch (IllegalAccessException e) { e.printStackTrace(); } } // Done return env; } private static File findConfigFile(File dir) { // Traverse up the directory hierarchy while (dir != null && dir.exists() && dir.isDirectory()) { File wyf = new File(dir + File.separator + "wy.toml"); if (wyf.exists()) { return dir; } // Traverse back up the directory hierarchy looking for a suitable directory. dir = dir.getParentFile(); } // If we get here then it means we didn't find a root, therefore just use // current directory. return null; } /** * Used for reading the various configuration files prior to instantiating the * main tool itself. 
*/ public static Content.Registry BOOT_REGISTRY = new Content.DefaultRegistry() .register(ConfigFile.ContentType, "toml").register(ZipFile.ContentType, "zip"); /** * Simple default logger */ public static Logger BOOT_LOGGER = new Logger.Default(System.err); /** * Attempt to read a configuration file from a given root. * * @param name * @param root * @return * @throws IOException */ public static Configuration readConfigFile(DirectoryRoot root, Trie id, Logger logger, Configuration.Schema... schemas) throws IOException { // Combine schemas together Configuration.Schema schema = Configuration.toCombinedSchema(schemas); try { // Read the configuration file ConfigFile cf = root.get(ConfigFile.ContentType, id); // Log the event logger.logTimedMessage("Read " + root.getDirectory() + "/" + id + ".toml", 0, 0); // Construct configuration according to given schema return cf.toConfiguration(schema, false); } catch (SyntacticException e) { e.outputSourceError(System.out, false); System.exit(-1); return null; } } /** * Print a complete stack trace. This differs from Throwable.printStackTrace() * in that it always prints all of the trace. * * @param out * @param err */ private static void printStackTrace(PrintStream out, Throwable err) { out.println(err.getClass().getName() + ": " + err.getMessage()); for (StackTraceElement ste : err.getStackTrace()) { out.println("\tat " + ste.toString()); } if (err.getCause() != null) { out.print("Caused by: "); printStackTrace(out, err.getCause()); } } public static class Meter implements Build.Meter { private final String name; private final Logger logger; private final int depth; private Meter parent; private final long time; private final long memory; private final Map<String,Integer> counts; public Meter(String name, Logger logger, int depth) { this.name = name; this.logger = logger; this.depth = depth; this.parent = null; this.time = System.currentTimeMillis(); this.memory = Runtime.getRuntime().freeMemory(); this.counts = new HashMap<>(); } @Override public Build.Meter fork(String name) { if(depth > 0) { Meter r = new Meter(name,logger,depth-1); r.parent = this; return r; } else { return jbfs.core.Build.NULL_METER; } } @Override public void step(String tag) { Integer i = counts.get(tag); if (i == null) { i = 1; } else { i = i + 1; } counts.put(tag, i); } @Override public void done() { long t = System.currentTimeMillis(); long m = Runtime.getRuntime().freeMemory(); logger.logTimedMessage(name, t - time, m - memory); ArrayList<String> keys = new ArrayList<>(counts.keySet()); Collections.sort(keys); for(String key : keys) { logger.logTimedMessage(name + "@" + key + "(" + counts.get(key) + " steps)", 0, 0); } } } }
[ "\"WHILEYHOME\"" ]
[]
[ "WHILEYHOME" ]
[]
["WHILEYHOME"]
java
1
0
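
determineSystemRoot in the record above treats WHILEYHOME as mandatory: a missing value is reported on stderr and the process exits rather than guessing an installation directory. A rough Go rendering of that check (the original is Java and exits with -1):

package main

import (
	"fmt"
	"os"
)

// systemRoot returns the compiler installation directory or aborts,
// mirroring the WHILEYHOME check in determineSystemRoot.
func systemRoot() string {
	home := os.Getenv("WHILEYHOME")
	if home == "" {
		fmt.Fprintln(os.Stderr, "error: WHILEYHOME environment variable not set")
		os.Exit(1) // the Java original calls System.exit(-1)
	}
	return home
}

func main() {
	fmt.Println("system root:", systemRoot())
}
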
leaky/generator/generate.py
import os,stat,sys import subprocess import struct import tarfile from pprint import pprint from hashlib import sha256 from binascii import unhexlify from pool import ThreadPool from Crypto.Cipher import AES def run(count, seed): cmd = [ "python", "generate_part.py", "%d" % count, "%d" % seed ] p = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE) p.wait() return b"".join(p.stdout.readlines()) if __name__ == "__main__": seed = os.getenv("SEED", "0") salt = os.getenv("SALT", "Salting isn't really needed, just doing it on the off-chance seeds are leaked by accident") workerCount = int(os.getenv("WORKERS", "4")) count = int(os.getenv("COUNT", "100000")) h = sha256() h.update(salt.encode('utf-8')) h.update(seed.encode('utf-8')) key = h.digest()[:16] sys.stderr.write("Key Is: %s\n" %key.hex()) sys.stderr.flush() with open("./chal.exe", "rb") as f: data = f.read() data = data.replace(b"\xde\xad\xbe\xef"*4, key) fileOut = "/src/patched.exe" with open(fileOut, "wb") as f: f.write(data) st = os.stat(fileOut) os.chmod(fileOut, st.st_mode | stat.S_IEXEC) workers = ThreadPool(workerCount) tasks = 1024 roundingerror = count - int(count/tasks)*tasks workers.add_task(run, int(count/tasks) + roundingerror, 0) for ii in range(1,tasks): workers.add_task(run, int(count/tasks), ii) textName = "/tmp/test.txt" results = workers.get_results() with open(textName, "wb") as f: for r in results: f.write(r) print(textName) readmeFile = "/tmp/Readme.txt" flag = os.getenv("FLAG", "flag{place:holder}") length = 16 - (len(flag) % 16) plain = flag + chr(length)*length cipher = AES.new(key, AES.MODE_ECB) ctext = cipher.encrypt(plain) sys.stderr.write("Key is: %s\n" % key.hex()) sys.stderr.flush() with open(readmeFile, "w") as f: f.write(""" Hello, fellow space enthusiasts! I have been tracking a specific satellite and managed to intercept an interesting piece of data. Unfortunately, the data is encrypted using an AES-128 key with ECB-Mode. Encrypted Data: %s Using proprietary documentation, I have learned that the process of generating the AES key always produces the same first 6 bytes, while the remaining bytes are random: Key Bytes 0..5: %s The communication protocol hashes every message into a 128bit digest, which is encrypted with the satellite key, and sent back as an authenticated ACK. This process fortunately happens BEFORE the satellite attempts to decrypt and process my message, which it will immediately drop my message as I cannot encrypt it properly without the key. I have read about "side channel attacks" on crypto but don't really understand them, so I'm reaching out to you for help. I know timing data could be important so I've already used this vulnerability to collect a large data set of encryption times for various hash values. Please take a look! \r\n""" % (ctext.hex(), key[0:6].hex())) print(readmeFile)
[]
[]
[ "SALT", "COUNT", "SEED", "FLAG", "WORKERS" ]
[]
["SALT", "COUNT", "SEED", "FLAG", "WORKERS"]
python
5
0
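
generate.py reads SEED, SALT, WORKERS, COUNT and FLAG from the environment with fallback defaults, then derives its AES-128 key as the first 16 bytes of SHA-256(salt || seed). A small Go sketch of that key derivation; the defaults below are shortened placeholders, not the strings shipped with the generator:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"os"
)

// getenvDefault mirrors Python's os.getenv(name, default).
func getenvDefault(name, def string) string {
	if v, ok := os.LookupEnv(name); ok {
		return v
	}
	return def
}

func main() {
	seed := getenvDefault("SEED", "0")
	salt := getenvDefault("SALT", "placeholder salt") // the real default is a much longer sentence
	h := sha256.New()
	h.Write([]byte(salt))
	h.Write([]byte(seed))
	key := h.Sum(nil)[:16] // AES-128 key: first 16 bytes of the digest
	fmt.Println("key:", hex.EncodeToString(key))
}
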
manage.py
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Mtaa.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
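
manage.py sets DJANGO_SETTINGS_MODULE with os.environ.setdefault, so an already-exported value is left untouched. A minimal Go sketch of that set-only-if-absent pattern (the helper name is mine):

package main

import (
	"fmt"
	"os"
)

// setenvDefault behaves like Python's os.environ.setdefault: the value is
// written only when the variable is not already present in the environment.
func setenvDefault(name, value string) {
	if _, ok := os.LookupEnv(name); !ok {
		os.Setenv(name, value)
	}
}

func main() {
	setenvDefault("DJANGO_SETTINGS_MODULE", "Mtaa.settings")
	fmt.Println("settings module:", os.Getenv("DJANGO_SETTINGS_MODULE"))
}
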
src/net/http/server.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // HTTP server. See RFC 2616. package http import ( "bufio" "bytes" "crypto/tls" "errors" "fmt" "io" "io/ioutil" "log" "net" "net/textproto" "net/url" "os" "path" "runtime" "strconv" "strings" "sync" "sync/atomic" "time" ) // Errors introduced by the HTTP server. var ( ErrWriteAfterFlush = errors.New("Conn.Write called after Flush") ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body") ErrHijacked = errors.New("Conn has been hijacked") ErrContentLength = errors.New("Conn.Write wrote more than the declared Content-Length") ) // A Handler responds to an HTTP request. // // ServeHTTP should write reply headers and data to the ResponseWriter // and then return. Returning signals that the request is finished; it // is not valid to use the ResponseWriter or read from the // Request.Body after or concurrently with the completion of the // ServeHTTP call. // // Depending on the HTTP client software, HTTP protocol version, and // any intermediaries between the client and the Go server, it may not // be possible to read from the Request.Body after writing to the // ResponseWriter. Cautious handlers should read the Request.Body // first, and then reply. // // If ServeHTTP panics, the server (the caller of ServeHTTP) assumes // that the effect of the panic was isolated to the active request. // It recovers the panic, logs a stack trace to the server error log, // and hangs up the connection. type Handler interface { ServeHTTP(ResponseWriter, *Request) } // A ResponseWriter interface is used by an HTTP handler to // construct an HTTP response. // // A ResponseWriter may not be used after the Handler.ServeHTTP method // has returned. type ResponseWriter interface { // Header returns the header map that will be sent by // WriteHeader. Changing the header after a call to // WriteHeader (or Write) has no effect unless the modified // headers were declared as trailers by setting the // "Trailer" header before the call to WriteHeader (see example). // To suppress implicit response headers, set their value to nil. Header() Header // Write writes the data to the connection as part of an HTTP reply. // If WriteHeader has not yet been called, Write calls WriteHeader(http.StatusOK) // before writing the data. If the Header does not contain a // Content-Type line, Write adds a Content-Type set to the result of passing // the initial 512 bytes of written data to DetectContentType. Write([]byte) (int, error) // WriteHeader sends an HTTP response header with status code. // If WriteHeader is not called explicitly, the first call to Write // will trigger an implicit WriteHeader(http.StatusOK). // Thus explicit calls to WriteHeader are mainly used to // send error codes. WriteHeader(int) } // The Flusher interface is implemented by ResponseWriters that allow // an HTTP handler to flush buffered data to the client. // // Note that even for ResponseWriters that support Flush, // if the client is connected through an HTTP proxy, // the buffered data may not reach the client until the response // completes. type Flusher interface { // Flush sends any buffered data to the client. Flush() } // The Hijacker interface is implemented by ResponseWriters that allow // an HTTP handler to take over the connection. type Hijacker interface { // Hijack lets the caller take over the connection. 
// After a call to Hijack(), the HTTP server library // will not do anything else with the connection. // // It becomes the caller's responsibility to manage // and close the connection. // // The returned net.Conn may have read or write deadlines // already set, depending on the configuration of the // Server. It is the caller's responsibility to set // or clear those deadlines as needed. Hijack() (net.Conn, *bufio.ReadWriter, error) } // The CloseNotifier interface is implemented by ResponseWriters which // allow detecting when the underlying connection has gone away. // // This mechanism can be used to cancel long operations on the server // if the client has disconnected before the response is ready. type CloseNotifier interface { // CloseNotify returns a channel that receives at most a // single value (true) when the client connection has gone // away. // // CloseNotify may wait to notify until Request.Body has been // fully read. // // After the Handler has returned, there is no guarantee // that the channel receives a value. // // If the protocol is HTTP/1.1 and CloseNotify is called while // processing an idempotent request (such a GET) while // HTTP/1.1 pipelining is in use, the arrival of a subsequent // pipelined request may cause a value to be sent on the // returned channel. In practice HTTP/1.1 pipelining is not // enabled in browsers and not seen often in the wild. If this // is a problem, use HTTP/2 or only use CloseNotify on methods // such as POST. CloseNotify() <-chan bool } // A conn represents the server side of an HTTP connection. type conn struct { // server is the server on which the connection arrived. // Immutable; never nil. server *Server // rwc is the underlying network connection. // This is never wrapped by other types and is the value given out // to CloseNotifier callers. It is usually of type *net.TCPConn or // *tls.Conn. rwc net.Conn // remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously // inside the Listener's Accept goroutine, as some implementations block. // It is populated immediately inside the (*conn).serve goroutine. // This is the value of a Handler's (*Request).RemoteAddr. remoteAddr string // tlsState is the TLS connection state when using TLS. // nil means not TLS. tlsState *tls.ConnectionState // werr is set to the first write error to rwc. // It is set via checkConnErrorWriter{w}, where bufw writes. werr error // r is bufr's read source. It's a wrapper around rwc that provides // io.LimitedReader-style limiting (while reading request headers) // and functionality to support CloseNotifier. See *connReader docs. r *connReader // bufr reads from r. // Users of bufr must hold mu. bufr *bufio.Reader // bufw writes to checkConnErrorWriter{c}, which populates werr on error. bufw *bufio.Writer // lastMethod is the method of the most recent request // on this connection, if any. lastMethod string // mu guards hijackedv, use of bufr, (*response).closeNotifyCh. mu sync.Mutex // hijackedv is whether this connection has been hijacked // by a Handler with the Hijacker interface. // It is guarded by mu. hijackedv bool } func (c *conn) hijacked() bool { c.mu.Lock() defer c.mu.Unlock() return c.hijackedv } // c.mu must be held. 
func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) { if c.hijackedv { return nil, nil, ErrHijacked } c.hijackedv = true rwc = c.rwc buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc)) c.setState(rwc, StateHijacked) return } // This should be >= 512 bytes for DetectContentType, // but otherwise it's somewhat arbitrary. const bufferBeforeChunkingSize = 2048 // chunkWriter writes to a response's conn buffer, and is the writer // wrapped by the response.bufw buffered writer. // // chunkWriter also is responsible for finalizing the Header, including // conditionally setting the Content-Type and setting a Content-Length // in cases where the handler's final output is smaller than the buffer // size. It also conditionally adds chunk headers, when in chunking mode. // // See the comment above (*response).Write for the entire write flow. type chunkWriter struct { res *response // header is either nil or a deep clone of res.handlerHeader // at the time of res.WriteHeader, if res.WriteHeader is // called and extra buffering is being done to calculate // Content-Type and/or Content-Length. header Header // wroteHeader tells whether the header's been written to "the // wire" (or rather: w.conn.buf). this is unlike // (*response).wroteHeader, which tells only whether it was // logically written. wroteHeader bool // set by the writeHeader method: chunking bool // using chunked transfer encoding for reply body } var ( crlf = []byte("\r\n") colonSpace = []byte(": ") ) func (cw *chunkWriter) Write(p []byte) (n int, err error) { if !cw.wroteHeader { cw.writeHeader(p) } if cw.res.req.Method == "HEAD" { // Eat writes. return len(p), nil } if cw.chunking { _, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p)) if err != nil { cw.res.conn.rwc.Close() return } } n, err = cw.res.conn.bufw.Write(p) if cw.chunking && err == nil { _, err = cw.res.conn.bufw.Write(crlf) } if err != nil { cw.res.conn.rwc.Close() } return } func (cw *chunkWriter) flush() { if !cw.wroteHeader { cw.writeHeader(nil) } cw.res.conn.bufw.Flush() } func (cw *chunkWriter) close() { if !cw.wroteHeader { cw.writeHeader(nil) } if cw.chunking { bw := cw.res.conn.bufw // conn's bufio writer // zero chunk to mark EOF bw.WriteString("0\r\n") if len(cw.res.trailers) > 0 { trailers := make(Header) for _, h := range cw.res.trailers { if vv := cw.res.handlerHeader[h]; len(vv) > 0 { trailers[h] = vv } } trailers.Write(bw) // the writer handles noting errors } // final blank line after the trailers (whether // present or not) bw.WriteString("\r\n") } } // A response represents the server side of an HTTP response. type response struct { conn *conn req *Request // request for this response reqBody io.ReadCloser wroteHeader bool // reply header has been (logically) written wroteContinue bool // 100 Continue response was written w *bufio.Writer // buffers output in chunks to chunkWriter cw chunkWriter // handlerHeader is the Header that Handlers get access to, // which may be retained and mutated even after WriteHeader. // handlerHeader is copied into cw.header at WriteHeader // time, and privately mutated thereafter. handlerHeader Header calledHeader bool // handler accessed handlerHeader via Header written int64 // number of bytes written in body contentLength int64 // explicitly-declared Content-Length; or -1 status int // status code passed to WriteHeader // close connection after this reply. 
set on request and // updated after response from handler if there's a // "Connection: keep-alive" response header and a // Content-Length. closeAfterReply bool // requestBodyLimitHit is set by requestTooLarge when // maxBytesReader hits its max size. It is checked in // WriteHeader, to make sure we don't consume the // remaining request body to try to advance to the next HTTP // request. Instead, when this is set, we stop reading // subsequent requests on this connection and stop reading // input from it. requestBodyLimitHit bool // trailers are the headers to be sent after the handler // finishes writing the body. This field is initialized from // the Trailer response header when the response header is // written. trailers []string handlerDone atomicBool // set true when the handler exits // Buffers for Date and Content-Length dateBuf [len(TimeFormat)]byte clenBuf [10]byte // closeNotifyCh is non-nil once CloseNotify is called. // Guarded by conn.mu closeNotifyCh <-chan bool } type atomicBool int32 func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } // declareTrailer is called for each Trailer header when the // response header is written. It notes that a header will need to be // written in the trailers at the end of the response. func (w *response) declareTrailer(k string) { k = CanonicalHeaderKey(k) switch k { case "Transfer-Encoding", "Content-Length", "Trailer": // Forbidden by RFC 2616 14.40. return } w.trailers = append(w.trailers, k) } // requestTooLarge is called by maxBytesReader when too much input has // been read from the client. func (w *response) requestTooLarge() { w.closeAfterReply = true w.requestBodyLimitHit = true if !w.wroteHeader { w.Header().Set("Connection", "close") } } // needsSniff reports whether a Content-Type still needs to be sniffed. func (w *response) needsSniff() bool { _, haveType := w.handlerHeader["Content-Type"] return !w.cw.wroteHeader && !haveType && w.written < sniffLen } // writerOnly hides an io.Writer value's optional ReadFrom method // from io.Copy. type writerOnly struct { io.Writer } func srcIsRegularFile(src io.Reader) (isRegular bool, err error) { switch v := src.(type) { case *os.File: fi, err := v.Stat() if err != nil { return false, err } return fi.Mode().IsRegular(), nil case *io.LimitedReader: return srcIsRegularFile(v.R) default: return } } // ReadFrom is here to optimize copying from an *os.File regular file // to a *net.TCPConn with sendfile. func (w *response) ReadFrom(src io.Reader) (n int64, err error) { // Our underlying w.conn.rwc is usually a *TCPConn (with its // own ReadFrom method). If not, or if our src isn't a regular // file, just fall back to the normal copy method. rf, ok := w.conn.rwc.(io.ReaderFrom) regFile, err := srcIsRegularFile(src) if err != nil { return 0, err } if !ok || !regFile { bufp := copyBufPool.Get().(*[]byte) defer copyBufPool.Put(bufp) return io.CopyBuffer(writerOnly{w}, src, *bufp) } // sendfile path: if !w.wroteHeader { w.WriteHeader(StatusOK) } if w.needsSniff() { n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen)) n += n0 if err != nil { return n, err } } w.w.Flush() // get rid of any previous writes w.cw.flush() // make sure Header is written; flush data to rwc // Now that cw has been flushed, its chunking field is guaranteed initialized. 
if !w.cw.chunking && w.bodyAllowed() { n0, err := rf.ReadFrom(src) n += n0 w.written += n0 return n, err } n0, err := io.Copy(writerOnly{w}, src) n += n0 return n, err } // debugServerConnections controls whether all server connections are wrapped // with a verbose logging wrapper. const debugServerConnections = false // Create new connection from rwc. func (srv *Server) newConn(rwc net.Conn) *conn { c := &conn{ server: srv, rwc: rwc, } if debugServerConnections { c.rwc = newLoggingConn("server", c.rwc) } return c } type readResult struct { n int err error b byte // byte read, if n == 1 } // connReader is the io.Reader wrapper used by *conn. It combines a // selectively-activated io.LimitedReader (to bound request header // read sizes) with support for selectively keeping an io.Reader.Read // call blocked in a background goroutine to wait for activity and // trigger a CloseNotifier channel. type connReader struct { r io.Reader remain int64 // bytes remaining // ch is non-nil if a background read is in progress. // It is guarded by conn.mu. ch chan readResult } func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain } func (cr *connReader) setInfiniteReadLimit() { cr.remain = 1<<63 - 1 } func (cr *connReader) hitReadLimit() bool { return cr.remain <= 0 } func (cr *connReader) Read(p []byte) (n int, err error) { if cr.hitReadLimit() { return 0, io.EOF } if len(p) == 0 { return } if int64(len(p)) > cr.remain { p = p[:cr.remain] } // Is a background read (started by CloseNotifier) already in // flight? If so, wait for it and use its result. ch := cr.ch if ch != nil { cr.ch = nil res := <-ch if res.n == 1 { p[0] = res.b cr.remain -= 1 } return res.n, res.err } n, err = cr.r.Read(p) cr.remain -= int64(n) return } func (cr *connReader) startBackgroundRead(onReadComplete func()) { if cr.ch != nil { // Background read already started. return } cr.ch = make(chan readResult, 1) go cr.closeNotifyAwaitActivityRead(cr.ch, onReadComplete) } func (cr *connReader) closeNotifyAwaitActivityRead(ch chan<- readResult, onReadComplete func()) { var buf [1]byte n, err := cr.r.Read(buf[:1]) onReadComplete() ch <- readResult{n, err, buf[0]} } var ( bufioReaderPool sync.Pool bufioWriter2kPool sync.Pool bufioWriter4kPool sync.Pool ) var copyBufPool = sync.Pool{ New: func() interface{} { b := make([]byte, 32*1024) return &b }, } func bufioWriterPool(size int) *sync.Pool { switch size { case 2 << 10: return &bufioWriter2kPool case 4 << 10: return &bufioWriter4kPool } return nil } func newBufioReader(r io.Reader) *bufio.Reader { if v := bufioReaderPool.Get(); v != nil { br := v.(*bufio.Reader) br.Reset(r) return br } // Note: if this reader size is every changed, update // TestHandlerBodyClose's assumptions. return bufio.NewReader(r) } func putBufioReader(br *bufio.Reader) { br.Reset(nil) bufioReaderPool.Put(br) } func newBufioWriterSize(w io.Writer, size int) *bufio.Writer { pool := bufioWriterPool(size) if pool != nil { if v := pool.Get(); v != nil { bw := v.(*bufio.Writer) bw.Reset(w) return bw } } return bufio.NewWriterSize(w, size) } func putBufioWriter(bw *bufio.Writer) { bw.Reset(nil) if pool := bufioWriterPool(bw.Available()); pool != nil { pool.Put(bw) } } // DefaultMaxHeaderBytes is the maximum permitted size of the headers // in an HTTP request. // This can be overridden by setting Server.MaxHeaderBytes. 
const DefaultMaxHeaderBytes = 1 << 20 // 1 MB func (srv *Server) maxHeaderBytes() int { if srv.MaxHeaderBytes > 0 { return srv.MaxHeaderBytes } return DefaultMaxHeaderBytes } func (srv *Server) initialReadLimitSize() int64 { return int64(srv.maxHeaderBytes()) + 4096 // bufio slop } // wrapper around io.ReaderCloser which on first read, sends an // HTTP/1.1 100 Continue header type expectContinueReader struct { resp *response readCloser io.ReadCloser closed bool sawEOF bool } func (ecr *expectContinueReader) Read(p []byte) (n int, err error) { if ecr.closed { return 0, ErrBodyReadAfterClose } if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() { ecr.resp.wroteContinue = true ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n") ecr.resp.conn.bufw.Flush() } n, err = ecr.readCloser.Read(p) if err == io.EOF { ecr.sawEOF = true } return } func (ecr *expectContinueReader) Close() error { ecr.closed = true return ecr.readCloser.Close() } // TimeFormat is the time format to use when generating times in HTTP // headers. It is like time.RFC1123 but hard-codes GMT as the time // zone. The time being formatted must be in UTC for Format to // generate the correct format. // // For parsing this time format, see ParseTime. const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" // appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat)) func appendTime(b []byte, t time.Time) []byte { const days = "SunMonTueWedThuFriSat" const months = "JanFebMarAprMayJunJulAugSepOctNovDec" t = t.UTC() yy, mm, dd := t.Date() hh, mn, ss := t.Clock() day := days[3*t.Weekday():] mon := months[3*(mm-1):] return append(b, day[0], day[1], day[2], ',', ' ', byte('0'+dd/10), byte('0'+dd%10), ' ', mon[0], mon[1], mon[2], ' ', byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ', byte('0'+hh/10), byte('0'+hh%10), ':', byte('0'+mn/10), byte('0'+mn%10), ':', byte('0'+ss/10), byte('0'+ss%10), ' ', 'G', 'M', 'T') } var errTooLarge = errors.New("http: request too large") // Read next request from connection. func (c *conn) readRequest() (w *response, err error) { if c.hijacked() { return nil, ErrHijacked } if d := c.server.ReadTimeout; d != 0 { c.rwc.SetReadDeadline(time.Now().Add(d)) } if d := c.server.WriteTimeout; d != 0 { defer func() { c.rwc.SetWriteDeadline(time.Now().Add(d)) }() } c.r.setReadLimit(c.server.initialReadLimitSize()) c.mu.Lock() // while using bufr if c.lastMethod == "POST" { // RFC 2616 section 4.1 tolerance for old buggy clients. 
peek, _ := c.bufr.Peek(4) // ReadRequest will get err below c.bufr.Discard(numLeadingCRorLF(peek)) } req, err := readRequest(c.bufr, keepHostHeader) c.mu.Unlock() if err != nil { if c.r.hitReadLimit() { return nil, errTooLarge } return nil, err } c.lastMethod = req.Method c.r.setInfiniteReadLimit() hosts, haveHost := req.Header["Host"] if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) { return nil, badRequestError("missing required Host header") } if len(hosts) > 1 { return nil, badRequestError("too many Host headers") } if len(hosts) == 1 && !validHostHeader(hosts[0]) { return nil, badRequestError("malformed Host header") } for k, vv := range req.Header { if !validHeaderName(k) { return nil, badRequestError("invalid header name") } for _, v := range vv { if !validHeaderValue(v) { return nil, badRequestError("invalid header value") } } } delete(req.Header, "Host") req.RemoteAddr = c.remoteAddr req.TLS = c.tlsState if body, ok := req.Body.(*body); ok { body.doEarlyClose = true } w = &response{ conn: c, req: req, reqBody: req.Body, handlerHeader: make(Header), contentLength: -1, } w.cw.res = w w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize) return w, nil } func (w *response) Header() Header { if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader { // Accessing the header between logically writing it // and physically writing it means we need to allocate // a clone to snapshot the logically written state. w.cw.header = w.handlerHeader.clone() } w.calledHeader = true return w.handlerHeader } // maxPostHandlerReadBytes is the max number of Request.Body bytes not // consumed by a handler that the server will read from the client // in order to keep a connection alive. If there are more bytes than // this then the server to be paranoid instead sends a "Connection: // close" response. // // This number is approximately what a typical machine's TCP buffer // size is anyway. (if we have the bytes on the machine, we might as // well read them) const maxPostHandlerReadBytes = 256 << 10 func (w *response) WriteHeader(code int) { if w.conn.hijacked() { w.conn.server.logf("http: response.WriteHeader on hijacked connection") return } if w.wroteHeader { w.conn.server.logf("http: multiple response.WriteHeader calls") return } w.wroteHeader = true w.status = code if w.calledHeader && w.cw.header == nil { w.cw.header = w.handlerHeader.clone() } if cl := w.handlerHeader.get("Content-Length"); cl != "" { v, err := strconv.ParseInt(cl, 10, 64) if err == nil && v >= 0 { w.contentLength = v } else { w.conn.server.logf("http: invalid Content-Length of %q", cl) w.handlerHeader.Del("Content-Length") } } } // extraHeader is the set of headers sometimes added by chunkWriter.writeHeader. // This type is used to avoid extra allocations from cloning and/or populating // the response Header map and all its 1-element slices. type extraHeader struct { contentType string connection string transferEncoding string date []byte // written if not nil contentLength []byte // written if not nil } // Sorted the same as extraHeader.Write's loop. var extraHeaderKeys = [][]byte{ []byte("Content-Type"), []byte("Connection"), []byte("Transfer-Encoding"), } var ( headerContentLength = []byte("Content-Length: ") headerDate = []byte("Date: ") ) // Write writes the headers described in h to w. // // This method has a value receiver, despite the somewhat large size // of h, because it prevents an allocation. The escape analysis isn't // smart enough to realize this function doesn't mutate h. 
func (h extraHeader) Write(w *bufio.Writer) { if h.date != nil { w.Write(headerDate) w.Write(h.date) w.Write(crlf) } if h.contentLength != nil { w.Write(headerContentLength) w.Write(h.contentLength) w.Write(crlf) } for i, v := range []string{h.contentType, h.connection, h.transferEncoding} { if v != "" { w.Write(extraHeaderKeys[i]) w.Write(colonSpace) w.WriteString(v) w.Write(crlf) } } } // writeHeader finalizes the header sent to the client and writes it // to cw.res.conn.bufw. // // p is not written by writeHeader, but is the first chunk of the body // that will be written. It is sniffed for a Content-Type if none is // set explicitly. It's also used to set the Content-Length, if the // total body size was small and the handler has already finished // running. func (cw *chunkWriter) writeHeader(p []byte) { if cw.wroteHeader { return } cw.wroteHeader = true w := cw.res keepAlivesEnabled := w.conn.server.doKeepAlives() isHEAD := w.req.Method == "HEAD" // header is written out to w.conn.buf below. Depending on the // state of the handler, we either own the map or not. If we // don't own it, the exclude map is created lazily for // WriteSubset to remove headers. The setHeader struct holds // headers we need to add. header := cw.header owned := header != nil if !owned { header = w.handlerHeader } var excludeHeader map[string]bool delHeader := func(key string) { if owned { header.Del(key) return } if _, ok := header[key]; !ok { return } if excludeHeader == nil { excludeHeader = make(map[string]bool) } excludeHeader[key] = true } var setHeader extraHeader trailers := false for _, v := range cw.header["Trailer"] { trailers = true foreachHeaderElement(v, cw.res.declareTrailer) } te := header.get("Transfer-Encoding") hasTE := te != "" // If the handler is done but never sent a Content-Length // response header and this is our first (and last) write, set // it, even to zero. This helps HTTP/1.0 clients keep their // "keep-alive" connections alive. // Exceptions: 304/204/1xx responses never get Content-Length, and if // it was a HEAD request, we don't know the difference between // 0 actual bytes and 0 bytes because the handler noticed it // was a HEAD request and chose not to write anything. So for // HEAD, the handler should either write the Content-Length or // write non-zero bytes. If it's actually 0 bytes and the // handler never looked at the Request.Method, we just don't // send a Content-Length header. // Further, we don't send an automatic Content-Length if they // set a Transfer-Encoding, because they're generally incompatible. if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) { w.contentLength = int64(len(p)) setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10) } // If this was an HTTP/1.0 request with keep-alive and we sent a // Content-Length back, we can make this a keep-alive response ... if w.req.wantsHttp10KeepAlive() && keepAlivesEnabled { sentLength := header.get("Content-Length") != "" if sentLength && header.get("Connection") == "keep-alive" { w.closeAfterReply = false } } // Check for a explicit (and valid) Content-Length header. 
hasCL := w.contentLength != -1 if w.req.wantsHttp10KeepAlive() && (isHEAD || hasCL) { _, connectionHeaderSet := header["Connection"] if !connectionHeaderSet { setHeader.connection = "keep-alive" } } else if !w.req.ProtoAtLeast(1, 1) || w.req.wantsClose() { w.closeAfterReply = true } if header.get("Connection") == "close" || !keepAlivesEnabled { w.closeAfterReply = true } // If the client wanted a 100-continue but we never sent it to // them (or, more strictly: we never finished reading their // request body), don't reuse this connection because it's now // in an unknown state: we might be sending this response at // the same time the client is now sending its request body // after a timeout. (Some HTTP clients send Expect: // 100-continue but knowing that some servers don't support // it, the clients set a timer and send the body later anyway) // If we haven't seen EOF, we can't skip over the unread body // because we don't know if the next bytes on the wire will be // the body-following-the-timer or the subsequent request. // See Issue 11549. if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF { w.closeAfterReply = true } // Per RFC 2616, we should consume the request body before // replying, if the handler hasn't already done so. But we // don't want to do an unbounded amount of reading here for // DoS reasons, so we only try up to a threshold. if w.req.ContentLength != 0 && !w.closeAfterReply { var discard, tooBig bool switch bdy := w.req.Body.(type) { case *expectContinueReader: if bdy.resp.wroteContinue { discard = true } case *body: bdy.mu.Lock() switch { case bdy.closed: if !bdy.sawEOF { // Body was closed in handler with non-EOF error. w.closeAfterReply = true } case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes: tooBig = true default: discard = true } bdy.mu.Unlock() default: discard = true } if discard { _, err := io.CopyN(ioutil.Discard, w.reqBody, maxPostHandlerReadBytes+1) switch err { case nil: // There must be even more data left over. tooBig = true case ErrBodyReadAfterClose: // Body was already consumed and closed. case io.EOF: // The remaining body was just consumed, close it. err = w.reqBody.Close() if err != nil { w.closeAfterReply = true } default: // Some other kind of error occurred, like a read timeout, or // corrupt chunked encoding. In any case, whatever remains // on the wire must not be parsed as another HTTP request. w.closeAfterReply = true } } if tooBig { w.requestTooLarge() delHeader("Connection") setHeader.connection = "close" } } code := w.status if bodyAllowedForStatus(code) { // If no content type, apply sniffing algorithm to body. _, haveType := header["Content-Type"] if !haveType && !hasTE { setHeader.contentType = DetectContentType(p) } } else { for _, k := range suppressedHeaders(code) { delHeader(k) } } if _, ok := header["Date"]; !ok { setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now()) } if hasCL && hasTE && te != "identity" { // TODO: return an error if WriteHeader gets a return parameter // For now just ignore the Content-Length. 
w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d", te, w.contentLength) delHeader("Content-Length") hasCL = false } if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) { // do nothing } else if code == StatusNoContent { delHeader("Transfer-Encoding") } else if hasCL { delHeader("Transfer-Encoding") } else if w.req.ProtoAtLeast(1, 1) { // HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no // content-length has been provided. The connection must be closed after the // reply is written, and no chunking is to be done. This is the setup // recommended in the Server-Sent Events candidate recommendation 11, // section 8. if hasTE && te == "identity" { cw.chunking = false w.closeAfterReply = true } else { // HTTP/1.1 or greater: use chunked transfer encoding // to avoid closing the connection at EOF. cw.chunking = true setHeader.transferEncoding = "chunked" } } else { // HTTP version < 1.1: cannot do chunked transfer // encoding and we don't know the Content-Length so // signal EOF by closing connection. w.closeAfterReply = true delHeader("Transfer-Encoding") // in case already set } // Cannot use Content-Length with non-identity Transfer-Encoding. if cw.chunking { delHeader("Content-Length") } if !w.req.ProtoAtLeast(1, 0) { return } if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) { delHeader("Connection") if w.req.ProtoAtLeast(1, 1) { setHeader.connection = "close" } } w.conn.bufw.WriteString(statusLine(w.req, code)) cw.header.WriteSubset(w.conn.bufw, excludeHeader) setHeader.Write(w.conn.bufw) w.conn.bufw.Write(crlf) } // foreachHeaderElement splits v according to the "#rule" construction // in RFC 2616 section 2.1 and calls fn for each non-empty element. func foreachHeaderElement(v string, fn func(string)) { v = textproto.TrimString(v) if v == "" { return } if !strings.Contains(v, ",") { fn(v) return } for _, f := range strings.Split(v, ",") { if f = textproto.TrimString(f); f != "" { fn(f) } } } // statusLines is a cache of Status-Line strings, keyed by code (for // HTTP/1.1) or negative code (for HTTP/1.0). This is faster than a // map keyed by struct of two fields. This map's max size is bounded // by 2*len(statusText), two protocol types for each known official // status code in the statusText map. var ( statusMu sync.RWMutex statusLines = make(map[int]string) ) // statusLine returns a response Status-Line (RFC 2616 Section 6.1) // for the given request and response status code. func statusLine(req *Request, code int) string { // Fast path: key := code proto11 := req.ProtoAtLeast(1, 1) if !proto11 { key = -key } statusMu.RLock() line, ok := statusLines[key] statusMu.RUnlock() if ok { return line } // Slow path: proto := "HTTP/1.0" if proto11 { proto = "HTTP/1.1" } codestring := strconv.Itoa(code) text, ok := statusText[code] if !ok { text = "status code " + codestring } line = proto + " " + codestring + " " + text + "\r\n" if ok { statusMu.Lock() defer statusMu.Unlock() statusLines[key] = line } return line } // bodyAllowed reports whether a Write is allowed for this response type. // It's illegal to call this before the header has been flushed. func (w *response) bodyAllowed() bool { if !w.wroteHeader { panic("") } return bodyAllowedForStatus(w.status) } // The Life Of A Write is like this: // // Handler starts. No header has been sent. The handler can either // write a header, or just start writing. 
Writing before sending a header // sends an implicitly empty 200 OK header. // // If the handler didn't declare a Content-Length up front, we either // go into chunking mode or, if the handler finishes running before // the chunking buffer size, we compute a Content-Length and send that // in the header instead. // // Likewise, if the handler didn't set a Content-Type, we sniff that // from the initial chunk of output. // // The Writers are wired together like: // // 1. *response (the ResponseWriter) -> // 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes // 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type) // and which writes the chunk headers, if needed. // 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to -> // 5. checkConnErrorWriter{c}, which notes any non-nil error on Write // and populates c.werr with it if so. but otherwise writes to: // 6. the rwc, the net.Conn. // // TODO(bradfitz): short-circuit some of the buffering when the // initial header contains both a Content-Type and Content-Length. // Also short-circuit in (1) when the header's been sent and not in // chunking mode, writing directly to (4) instead, if (2) has no // buffered data. More generally, we could short-circuit from (1) to // (3) even in chunking mode if the write size from (1) is over some // threshold and nothing is in (2). The answer might be mostly making // bufferBeforeChunkingSize smaller and having bufio's fast-paths deal // with this instead. func (w *response) Write(data []byte) (n int, err error) { return w.write(len(data), data, "") } func (w *response) WriteString(data string) (n int, err error) { return w.write(len(data), nil, data) } // either dataB or dataS is non-zero. func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) { if w.conn.hijacked() { w.conn.server.logf("http: response.Write on hijacked connection") return 0, ErrHijacked } if !w.wroteHeader { w.WriteHeader(StatusOK) } if lenData == 0 { return 0, nil } if !w.bodyAllowed() { return 0, ErrBodyNotAllowed } w.written += int64(lenData) // ignoring errors, for errorKludge if w.contentLength != -1 && w.written > w.contentLength { return 0, ErrContentLength } if dataB != nil { return w.w.Write(dataB) } else { return w.w.WriteString(dataS) } } func (w *response) finishRequest() { w.handlerDone.setTrue() if !w.wroteHeader { w.WriteHeader(StatusOK) } w.w.Flush() putBufioWriter(w.w) w.cw.close() w.conn.bufw.Flush() // Close the body (regardless of w.closeAfterReply) so we can // re-use its bufio.Reader later safely. w.reqBody.Close() if w.req.MultipartForm != nil { w.req.MultipartForm.RemoveAll() } } // shouldReuseConnection reports whether the underlying TCP connection can be reused. // It must only be called after the handler is done executing. func (w *response) shouldReuseConnection() bool { if w.closeAfterReply { // The request or something set while executing the // handler indicated we shouldn't reuse this // connection. return false } if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written { // Did not write enough. Avoid getting out of sync. return false } // There was some error writing to the underlying connection // during the request, so don't re-use this conn. 
if w.conn.werr != nil { return false } if w.closedRequestBodyEarly() { return false } return true } func (w *response) closedRequestBodyEarly() bool { body, ok := w.req.Body.(*body) return ok && body.didEarlyClose() } func (w *response) Flush() { if !w.wroteHeader { w.WriteHeader(StatusOK) } w.w.Flush() w.cw.flush() } func (c *conn) finalFlush() { if c.bufr != nil { // Steal the bufio.Reader (~4KB worth of memory) and its associated // reader for a future connection. putBufioReader(c.bufr) c.bufr = nil } if c.bufw != nil { c.bufw.Flush() // Steal the bufio.Writer (~4KB worth of memory) and its associated // writer for a future connection. putBufioWriter(c.bufw) c.bufw = nil } } // Close the connection. func (c *conn) close() { c.finalFlush() c.rwc.Close() } // rstAvoidanceDelay is the amount of time we sleep after closing the // write side of a TCP connection before closing the entire socket. // By sleeping, we increase the chances that the client sees our FIN // and processes its final data before they process the subsequent RST // from closing a connection with known unread data. // This RST seems to occur mostly on BSD systems. (And Windows?) // This timeout is somewhat arbitrary (~latency around the planet). const rstAvoidanceDelay = 500 * time.Millisecond type closeWriter interface { CloseWrite() error } var _ closeWriter = (*net.TCPConn)(nil) // closeWrite flushes any outstanding data and sends a FIN packet (if // client is connected via TCP), signalling that we're done. We then // pause for a bit, hoping the client processes it before any // subsequent RST. // // See https://golang.org/issue/3595 func (c *conn) closeWriteAndWait() { c.finalFlush() if tcp, ok := c.rwc.(closeWriter); ok { tcp.CloseWrite() } time.Sleep(rstAvoidanceDelay) } // validNPN reports whether the proto is not a blacklisted Next // Protocol Negotiation protocol. Empty and built-in protocol types // are blacklisted and can't be overridden with alternate // implementations. func validNPN(proto string) bool { switch proto { case "", "http/1.1", "http/1.0": return false } return true } func (c *conn) setState(nc net.Conn, state ConnState) { if hook := c.server.ConnState; hook != nil { hook(nc, state) } } // badRequestError is a literal string (used by in the server in HTML, // unescaped) to tell the user why their request was bad. It should // be plain text without user info or other embedded errors. type badRequestError string func (e badRequestError) Error() string { return "Bad Request: " + string(e) } // Serve a new connection. 
func (c *conn) serve() { c.remoteAddr = c.rwc.RemoteAddr().String() defer func() { if err := recover(); err != nil { const size = 64 << 10 buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf) } if !c.hijacked() { c.close() c.setState(c.rwc, StateClosed) } }() if tlsConn, ok := c.rwc.(*tls.Conn); ok { if d := c.server.ReadTimeout; d != 0 { c.rwc.SetReadDeadline(time.Now().Add(d)) } if d := c.server.WriteTimeout; d != 0 { c.rwc.SetWriteDeadline(time.Now().Add(d)) } if err := tlsConn.Handshake(); err != nil { c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err) return } c.tlsState = new(tls.ConnectionState) *c.tlsState = tlsConn.ConnectionState() if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) { if fn := c.server.TLSNextProto[proto]; fn != nil { h := initNPNRequest{tlsConn, serverHandler{c.server}} fn(c.server, tlsConn, h) } return } } c.r = &connReader{r: c.rwc} c.bufr = newBufioReader(c.r) c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10) for { w, err := c.readRequest() if c.r.remain != c.server.initialReadLimitSize() { // If we read any bytes off the wire, we're active. c.setState(c.rwc, StateActive) } if err != nil { if err == errTooLarge { // Their HTTP client may or may not be // able to read this if we're // responding to them and hanging up // while they're still writing their // request. Undefined behavior. io.WriteString(c.rwc, "HTTP/1.1 431 Request Header Fields Too Large\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n431 Request Header Fields Too Large") c.closeWriteAndWait() return } if err == io.EOF { return // don't reply } if neterr, ok := err.(net.Error); ok && neterr.Timeout() { return // don't reply } var publicErr string if v, ok := err.(badRequestError); ok { publicErr = ": " + string(v) } io.WriteString(c.rwc, "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n400 Bad Request"+publicErr) return } // Expect 100 Continue support req := w.req if req.expectsContinue() { if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 { // Wrap the Body reader with one that replies on the connection req.Body = &expectContinueReader{readCloser: req.Body, resp: w} } } else if req.Header.get("Expect") != "" { w.sendExpectationFailed() return } // HTTP cannot have multiple simultaneous active requests.[*] // Until the server replies to this request, it can't read another, // so we might as well run the handler in this goroutine. // [*] Not strictly true: HTTP pipelining. We could let them all process // in parallel even if their responses need to be serialized. serverHandler{c.server}.ServeHTTP(w, w.req) if c.hijacked() { return } w.finishRequest() if !w.shouldReuseConnection() { if w.requestBodyLimitHit || w.closedRequestBodyEarly() { c.closeWriteAndWait() } return } c.setState(c.rwc, StateIdle) } } func (w *response) sendExpectationFailed() { // TODO(bradfitz): let ServeHTTP handlers handle // requests with non-standard expectation[s]? Seems // theoretical at best, and doesn't fit into the // current ServeHTTP model anyway. We'd need to // make the ResponseWriter an optional // "ExpectReplier" interface or something. // // For now we'll just obey RFC 2616 14.20 which says // "If a server receives a request containing an // Expect field that includes an expectation- // extension that it does not support, it MUST // respond with a 417 (Expectation Failed) status." 
w.Header().Set("Connection", "close") w.WriteHeader(StatusExpectationFailed) w.finishRequest() } // Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter // and a Hijacker. func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { if w.handlerDone.isSet() { panic("net/http: Hijack called after ServeHTTP finished") } if w.wroteHeader { w.cw.flush() } c := w.conn c.mu.Lock() defer c.mu.Unlock() if w.closeNotifyCh != nil { return nil, nil, errors.New("http: Hijack is incompatible with use of CloseNotifier in same ServeHTTP call") } // Release the bufioWriter that writes to the chunk writer, it is not // used after a connection has been hijacked. rwc, buf, err = c.hijackLocked() if err == nil { putBufioWriter(w.w) w.w = nil } return rwc, buf, err } func (w *response) CloseNotify() <-chan bool { if w.handlerDone.isSet() { panic("net/http: CloseNotify called after ServeHTTP finished") } c := w.conn c.mu.Lock() defer c.mu.Unlock() if w.closeNotifyCh != nil { return w.closeNotifyCh } ch := make(chan bool, 1) w.closeNotifyCh = ch if w.conn.hijackedv { // CloseNotify is undefined after a hijack, but we have // no place to return an error, so just return a channel, // even though it'll never receive a value. return ch } var once sync.Once notify := func() { once.Do(func() { ch <- true }) } if requestBodyRemains(w.reqBody) { // They're still consuming the request body, so we // shouldn't notify yet. registerOnHitEOF(w.reqBody, func() { c.mu.Lock() defer c.mu.Unlock() startCloseNotifyBackgroundRead(c, notify) }) } else { startCloseNotifyBackgroundRead(c, notify) } return ch } // c.mu must be held. func startCloseNotifyBackgroundRead(c *conn, notify func()) { if c.bufr.Buffered() > 0 { // They've consumed the request body, so anything // remaining is a pipelined request, which we // document as firing on. notify() } else { c.r.startBackgroundRead(notify) } } func registerOnHitEOF(rc io.ReadCloser, fn func()) { switch v := rc.(type) { case *expectContinueReader: registerOnHitEOF(v.readCloser, fn) case *body: v.registerOnHitEOF(fn) default: panic("unexpected type " + fmt.Sprintf("%T", rc)) } } // requestBodyRemains reports whether future calls to Read // on rc might yield more data. func requestBodyRemains(rc io.ReadCloser) bool { if rc == eofReader { return false } switch v := rc.(type) { case *expectContinueReader: return requestBodyRemains(v.readCloser) case *body: return v.bodyRemains() default: panic("unexpected type " + fmt.Sprintf("%T", rc)) } } // The HandlerFunc type is an adapter to allow the use of // ordinary functions as HTTP handlers. If f is a function // with the appropriate signature, HandlerFunc(f) is a // Handler that calls f. type HandlerFunc func(ResponseWriter, *Request) // ServeHTTP calls f(w, r). func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { f(w, r) } // Helper handlers // Error replies to the request with the specified error message and HTTP code. // The error message should be plain text. func Error(w ResponseWriter, error string, code int) { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("X-Content-Type-Options", "nosniff") w.WriteHeader(code) fmt.Fprintln(w, error) } // NotFound replies to the request with an HTTP 404 not found error. func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) } // NotFoundHandler returns a simple request handler // that replies to each request with a ``404 page not found'' reply. 
func NotFoundHandler() Handler { return HandlerFunc(NotFound) } // StripPrefix returns a handler that serves HTTP requests // by removing the given prefix from the request URL's Path // and invoking the handler h. StripPrefix handles a // request for a path that doesn't begin with prefix by // replying with an HTTP 404 not found error. func StripPrefix(prefix string, h Handler) Handler { if prefix == "" { return h } return HandlerFunc(func(w ResponseWriter, r *Request) { if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) { r.URL.Path = p h.ServeHTTP(w, r) } else { NotFound(w, r) } }) } // Redirect replies to the request with a redirect to url, // which may be a path relative to the request path. // // The provided code should be in the 3xx range and is usually // StatusMovedPermanently, StatusFound or StatusSeeOther. func Redirect(w ResponseWriter, r *Request, urlStr string, code int) { if u, err := url.Parse(urlStr); err == nil { // If url was relative, make absolute by // combining with request path. // The browser would probably do this for us, // but doing it ourselves is more reliable. // NOTE(rsc): RFC 2616 says that the Location // line must be an absolute URI, like // "http://www.google.com/redirect/", // not a path like "/redirect/". // Unfortunately, we don't know what to // put in the host name section to get the // client to connect to us again, so we can't // know the right absolute URI to send back. // Because of this problem, no one pays attention // to the RFC; they all send back just a new path. // So do we. if u.Scheme == "" && u.Host == "" { oldpath := r.URL.Path if oldpath == "" { // should not happen, but avoid a crash if it does oldpath = "/" } // no leading http://server if urlStr == "" || urlStr[0] != '/' { // make relative path absolute olddir, _ := path.Split(oldpath) urlStr = olddir + urlStr } var query string if i := strings.Index(urlStr, "?"); i != -1 { urlStr, query = urlStr[:i], urlStr[i:] } // clean up but preserve trailing slash trailing := strings.HasSuffix(urlStr, "/") urlStr = path.Clean(urlStr) if trailing && !strings.HasSuffix(urlStr, "/") { urlStr += "/" } urlStr += query } } w.Header().Set("Location", urlStr) w.WriteHeader(code) // RFC2616 recommends that a short note "SHOULD" be included in the // response because older user agents may not understand 301/307. // Shouldn't send the response for POST or HEAD; that leaves GET. if r.Method == "GET" { note := "<a href=\"" + htmlEscape(urlStr) + "\">" + statusText[code] + "</a>.\n" fmt.Fprintln(w, note) } } var htmlReplacer = strings.NewReplacer( "&", "&amp;", "<", "&lt;", ">", "&gt;", // "&#34;" is shorter than "&quot;". `"`, "&#34;", // "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5. "'", "&#39;", ) func htmlEscape(s string) string { return htmlReplacer.Replace(s) } // Redirect to a fixed URL type redirectHandler struct { url string code int } func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) { Redirect(w, r, rh.url, rh.code) } // RedirectHandler returns a request handler that redirects // each request it receives to the given url using the given // status code. // // The provided code should be in the 3xx range and is usually // StatusMovedPermanently, StatusFound or StatusSeeOther. func RedirectHandler(url string, code int) Handler { return &redirectHandler{url, code} } // ServeMux is an HTTP request multiplexer. 
// It matches the URL of each incoming request against a list of registered // patterns and calls the handler for the pattern that // most closely matches the URL. // // Patterns name fixed, rooted paths, like "/favicon.ico", // or rooted subtrees, like "/images/" (note the trailing slash). // Longer patterns take precedence over shorter ones, so that // if there are handlers registered for both "/images/" // and "/images/thumbnails/", the latter handler will be // called for paths beginning "/images/thumbnails/" and the // former will receive requests for any other paths in the // "/images/" subtree. // // Note that since a pattern ending in a slash names a rooted subtree, // the pattern "/" matches all paths not matched by other registered // patterns, not just the URL with Path == "/". // // If a subtree has been registered and a request is received naming the // subtree root without its trailing slash, ServeMux redirects that // request to the subtree root (adding the trailing slash). This behavior can // be overridden with a separate registration for the path without // the trailing slash. For example, registering "/images/" causes ServeMux // to redirect a request for "/images" to "/images/", unless "/images" has // been registered separately. // // Patterns may optionally begin with a host name, restricting matches to // URLs on that host only. Host-specific patterns take precedence over // general patterns, so that a handler might register for the two patterns // "/codesearch" and "codesearch.google.com/" without also taking over // requests for "http://www.google.com/". // // ServeMux also takes care of sanitizing the URL request path, // redirecting any request containing . or .. elements or repeated slashes // to an equivalent, cleaner URL. type ServeMux struct { mu sync.RWMutex m map[string]muxEntry hosts bool // whether any patterns contain hostnames } type muxEntry struct { explicit bool h Handler pattern string } // NewServeMux allocates and returns a new ServeMux. func NewServeMux() *ServeMux { return &ServeMux{m: make(map[string]muxEntry)} } // DefaultServeMux is the default ServeMux used by Serve. var DefaultServeMux = NewServeMux() // Does path match pattern? func pathMatch(pattern, path string) bool { if len(pattern) == 0 { // should not happen return false } n := len(pattern) if pattern[n-1] != '/' { return pattern == path } return len(path) >= n && path[0:n] == pattern } // Return the canonical path for p, eliminating . and .. elements. func cleanPath(p string) string { if p == "" { return "/" } if p[0] != '/' { p = "/" + p } np := path.Clean(p) // path.Clean removes trailing slash except for root; // put the trailing slash back if necessary. if p[len(p)-1] == '/' && np != "/" { np += "/" } return np } // Find a handler on a handler map given a path string // Most-specific (longest) pattern wins func (mux *ServeMux) match(path string) (h Handler, pattern string) { var n = 0 for k, v := range mux.m { if !pathMatch(k, path) { continue } if h == nil || len(k) > n { n = len(k) h = v.h pattern = v.pattern } } return } // Handler returns the handler to use for the given request, // consulting r.Method, r.Host, and r.URL.Path. It always returns // a non-nil handler. If the path is not in its canonical form, the // handler will be an internally-generated handler that redirects // to the canonical path. 
// // Handler also returns the registered pattern that matches the // request or, in the case of internally-generated redirects, // the pattern that will match after following the redirect. // // If there is no registered handler that applies to the request, // Handler returns a ``page not found'' handler and an empty pattern. func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) { if r.Method != "CONNECT" { if p := cleanPath(r.URL.Path); p != r.URL.Path { _, pattern = mux.handler(r.Host, p) url := *r.URL url.Path = p return RedirectHandler(url.String(), StatusMovedPermanently), pattern } } return mux.handler(r.Host, r.URL.Path) } // handler is the main implementation of Handler. // The path is known to be in canonical form, except for CONNECT methods. func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) { mux.mu.RLock() defer mux.mu.RUnlock() // Host-specific pattern takes precedence over generic ones if mux.hosts { h, pattern = mux.match(host + path) } if h == nil { h, pattern = mux.match(path) } if h == nil { h, pattern = NotFoundHandler(), "" } return } // ServeHTTP dispatches the request to the handler whose // pattern most closely matches the request URL. func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) { if r.RequestURI == "*" { if r.ProtoAtLeast(1, 1) { w.Header().Set("Connection", "close") } w.WriteHeader(StatusBadRequest) return } h, _ := mux.Handler(r) h.ServeHTTP(w, r) } // Handle registers the handler for the given pattern. // If a handler already exists for pattern, Handle panics. func (mux *ServeMux) Handle(pattern string, handler Handler) { mux.mu.Lock() defer mux.mu.Unlock() if pattern == "" { panic("http: invalid pattern " + pattern) } if handler == nil { panic("http: nil handler") } if mux.m[pattern].explicit { panic("http: multiple registrations for " + pattern) } mux.m[pattern] = muxEntry{explicit: true, h: handler, pattern: pattern} if pattern[0] != '/' { mux.hosts = true } // Helpful behavior: // If pattern is /tree/, insert an implicit permanent redirect for /tree. // It can be overridden by an explicit registration. n := len(pattern) if n > 0 && pattern[n-1] == '/' && !mux.m[pattern[0:n-1]].explicit { // If pattern contains a host name, strip it and use remaining // path for redirect. path := pattern if pattern[0] != '/' { // In pattern, at least the last character is a '/', so // strings.Index can't be -1. path = pattern[strings.Index(pattern, "/"):] } url := &url.URL{Path: path} mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(url.String(), StatusMovedPermanently), pattern: pattern} } } // HandleFunc registers the handler function for the given pattern. func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { mux.Handle(pattern, HandlerFunc(handler)) } // Handle registers the handler for the given pattern // in the DefaultServeMux. // The documentation for ServeMux explains how patterns are matched. func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } // HandleFunc registers the handler function for the given pattern // in the DefaultServeMux. // The documentation for ServeMux explains how patterns are matched. func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { DefaultServeMux.HandleFunc(pattern, handler) } // Serve accepts incoming HTTP connections on the listener l, // creating a new service goroutine for each. The service goroutines // read requests and then call handler to reply to them. 
// Handler is typically nil, in which case the DefaultServeMux is used. func Serve(l net.Listener, handler Handler) error { srv := &Server{Handler: handler} return srv.Serve(l) } // A Server defines parameters for running an HTTP server. // The zero value for Server is a valid configuration. type Server struct { Addr string // TCP address to listen on, ":http" if empty Handler Handler // handler to invoke, http.DefaultServeMux if nil ReadTimeout time.Duration // maximum duration before timing out read of the request WriteTimeout time.Duration // maximum duration before timing out write of the response MaxHeaderBytes int // maximum size of request headers, DefaultMaxHeaderBytes if 0 TLSConfig *tls.Config // optional TLS config, used by ListenAndServeTLS // TLSNextProto optionally specifies a function to take over // ownership of the provided TLS connection when an NPN // protocol upgrade has occurred. The map key is the protocol // name negotiated. The Handler argument should be used to // handle HTTP requests and will initialize the Request's TLS // and RemoteAddr if not already set. The connection is // automatically closed when the function returns. // If TLSNextProto is nil, HTTP/2 support is enabled automatically. TLSNextProto map[string]func(*Server, *tls.Conn, Handler) // ConnState specifies an optional callback function that is // called when a client connection changes state. See the // ConnState type and associated constants for details. ConnState func(net.Conn, ConnState) // ErrorLog specifies an optional logger for errors accepting // connections and unexpected behavior from handlers. // If nil, logging goes to os.Stderr via the log package's // standard logger. ErrorLog *log.Logger disableKeepAlives int32 // accessed atomically. nextProtoOnce sync.Once // guards initialization of TLSNextProto in Serve nextProtoErr error } // A ConnState represents the state of a client connection to a server. // It's used by the optional Server.ConnState hook. type ConnState int const ( // StateNew represents a new connection that is expected to // send a request immediately. Connections begin at this // state and then transition to either StateActive or // StateClosed. StateNew ConnState = iota // StateActive represents a connection that has read 1 or more // bytes of a request. The Server.ConnState hook for // StateActive fires before the request has entered a handler // and doesn't fire again until the request has been // handled. After the request is handled, the state // transitions to StateClosed, StateHijacked, or StateIdle. // For HTTP/2, StateActive fires on the transition from zero // to one active request, and only transitions away once all // active requests are complete. That means that ConnState // cannot be used to do per-request work; ConnState only notes // the overall state of the connection. StateActive // StateIdle represents a connection that has finished // handling a request and is in the keep-alive state, waiting // for a new request. Connections transition from StateIdle // to either StateActive or StateClosed. StateIdle // StateHijacked represents a hijacked connection. // This is a terminal state. It does not transition to StateClosed. StateHijacked // StateClosed represents a closed connection. // This is a terminal state. Hijacked connections do not // transition to StateClosed. 
StateClosed ) var stateName = map[ConnState]string{ StateNew: "new", StateActive: "active", StateIdle: "idle", StateHijacked: "hijacked", StateClosed: "closed", } func (c ConnState) String() string { return stateName[c] } // serverHandler delegates to either the server's Handler or // DefaultServeMux and also handles "OPTIONS *" requests. type serverHandler struct { srv *Server } func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) { handler := sh.srv.Handler if handler == nil { handler = DefaultServeMux } if req.RequestURI == "*" && req.Method == "OPTIONS" { handler = globalOptionsHandler{} } handler.ServeHTTP(rw, req) } // ListenAndServe listens on the TCP network address srv.Addr and then // calls Serve to handle requests on incoming connections. // Accepted connections are configured to enable TCP keep-alives. // If srv.Addr is blank, ":http" is used. // ListenAndServe always returns a non-nil error. func (srv *Server) ListenAndServe() error { addr := srv.Addr if addr == "" { addr = ":http" } ln, err := net.Listen("tcp", addr) if err != nil { return err } return srv.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)}) } var testHookServerServe func(*Server, net.Listener) // used if non-nil // Serve accepts incoming connections on the Listener l, creating a // new service goroutine for each. The service goroutines read requests and // then call srv.Handler to reply to them. // Serve always returns a non-nil error. func (srv *Server) Serve(l net.Listener) error { defer l.Close() if fn := testHookServerServe; fn != nil { fn(srv, l) } var tempDelay time.Duration // how long to sleep on accept failure if err := srv.setupHTTP2(); err != nil { return err } for { rw, e := l.Accept() if e != nil { if ne, ok := e.(net.Error); ok && ne.Temporary() { if tempDelay == 0 { tempDelay = 5 * time.Millisecond } else { tempDelay *= 2 } if max := 1 * time.Second; tempDelay > max { tempDelay = max } srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay) time.Sleep(tempDelay) continue } return e } tempDelay = 0 c := srv.newConn(rw) c.setState(c.rwc, StateNew) // before Serve can return go c.serve() } } func (s *Server) doKeepAlives() bool { return atomic.LoadInt32(&s.disableKeepAlives) == 0 } // SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled. // By default, keep-alives are always enabled. Only very // resource-constrained environments or servers in the process of // shutting down should disable them. func (srv *Server) SetKeepAlivesEnabled(v bool) { if v { atomic.StoreInt32(&srv.disableKeepAlives, 0) } else { atomic.StoreInt32(&srv.disableKeepAlives, 1) } } func (s *Server) logf(format string, args ...interface{}) { if s.ErrorLog != nil { s.ErrorLog.Printf(format, args...) } else { log.Printf(format, args...) } } // ListenAndServe listens on the TCP network address addr // and then calls Serve with handler to handle requests // on incoming connections. // Accepted connections are configured to enable TCP keep-alives. // Handler is typically nil, in which case the DefaultServeMux is // used. // // A trivial example server is: // // package main // // import ( // "io" // "net/http" // "log" // ) // // // hello world, the web server // func HelloServer(w http.ResponseWriter, req *http.Request) { // io.WriteString(w, "hello, world!\n") // } // // func main() { // http.HandleFunc("/hello", HelloServer) // log.Fatal(http.ListenAndServe(":12345", nil)) // } // // ListenAndServe always returns a non-nil error. 
func ListenAndServe(addr string, handler Handler) error { server := &Server{Addr: addr, Handler: handler} return server.ListenAndServe() } // ListenAndServeTLS acts identically to ListenAndServe, except that it // expects HTTPS connections. Additionally, files containing a certificate and // matching private key for the server must be provided. If the certificate // is signed by a certificate authority, the certFile should be the concatenation // of the server's certificate, any intermediates, and the CA's certificate. // // A trivial example server is: // // import ( // "log" // "net/http" // ) // // func handler(w http.ResponseWriter, req *http.Request) { // w.Header().Set("Content-Type", "text/plain") // w.Write([]byte("This is an example server.\n")) // } // // func main() { // http.HandleFunc("/", handler) // log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/") // err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil) // log.Fatal(err) // } // // One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem. // // ListenAndServeTLS always returns a non-nil error. func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { server := &Server{Addr: addr, Handler: handler} return server.ListenAndServeTLS(certFile, keyFile) } // ListenAndServeTLS listens on the TCP network address srv.Addr and // then calls Serve to handle requests on incoming TLS connections. // Accepted connections are configured to enable TCP keep-alives. // // Filenames containing a certificate and matching private key for the // server must be provided if neither the Server's TLSConfig.Certificates // nor TLSConfig.GetCertificate are populated. If the certificate is // signed by a certificate authority, the certFile should be the // concatenation of the server's certificate, any intermediates, and // the CA's certificate. // // If srv.Addr is blank, ":https" is used. // // ListenAndServeTLS always returns a non-nil error. func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { addr := srv.Addr if addr == "" { addr = ":https" } // Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig // before we clone it and create the TLS Listener. if err := srv.setupHTTP2(); err != nil { return err } config := cloneTLSConfig(srv.TLSConfig) if !strSliceContains(config.NextProtos, "http/1.1") { config.NextProtos = append(config.NextProtos, "http/1.1") } configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil if !configHasCert || certFile != "" || keyFile != "" { var err error config.Certificates = make([]tls.Certificate, 1) config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) if err != nil { return err } } ln, err := net.Listen("tcp", addr) if err != nil { return err } tlsListener := tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, config) return srv.Serve(tlsListener) } func (srv *Server) setupHTTP2() error { srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults) return srv.nextProtoErr } // onceSetNextProtoDefaults configures HTTP/2, if the user hasn't // configured otherwise. (by setting srv.TLSNextProto non-nil) // It must only be called via srv.nextProtoOnce (use srv.setupHTTP2). func (srv *Server) onceSetNextProtoDefaults() { if strings.Contains(os.Getenv("GODEBUG"), "http2server=0") { return } // Enable HTTP/2 by default if the user hasn't otherwise // configured their TLSNextProto map. 
if srv.TLSNextProto == nil { srv.nextProtoErr = http2ConfigureServer(srv, nil) } } // TimeoutHandler returns a Handler that runs h with the given time limit. // // The new Handler calls h.ServeHTTP to handle each request, but if a // call runs for longer than its time limit, the handler responds with // a 503 Service Unavailable error and the given message in its body. // (If msg is empty, a suitable default message will be sent.) // After such a timeout, writes by h to its ResponseWriter will return // ErrHandlerTimeout. // // TimeoutHandler buffers all Handler writes to memory and does not // support the Hijacker or Flusher interfaces. func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler { return &timeoutHandler{ handler: h, body: msg, dt: dt, } } // ErrHandlerTimeout is returned on ResponseWriter Write calls // in handlers which have timed out. var ErrHandlerTimeout = errors.New("http: Handler timeout") type timeoutHandler struct { handler Handler body string dt time.Duration // When set, no timer will be created and this channel will // be used instead. testTimeout <-chan time.Time } func (h *timeoutHandler) errorBody() string { if h.body != "" { return h.body } return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>" } func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) { var t *time.Timer timeout := h.testTimeout if timeout == nil { t = time.NewTimer(h.dt) timeout = t.C } done := make(chan struct{}) tw := &timeoutWriter{ w: w, h: make(Header), } go func() { h.handler.ServeHTTP(tw, r) close(done) }() select { case <-done: tw.mu.Lock() defer tw.mu.Unlock() dst := w.Header() for k, vv := range tw.h { dst[k] = vv } w.WriteHeader(tw.code) w.Write(tw.wbuf.Bytes()) if t != nil { t.Stop() } case <-timeout: tw.mu.Lock() defer tw.mu.Unlock() w.WriteHeader(StatusServiceUnavailable) io.WriteString(w, h.errorBody()) tw.timedOut = true return } } type timeoutWriter struct { w ResponseWriter h Header wbuf bytes.Buffer mu sync.Mutex timedOut bool wroteHeader bool code int } func (tw *timeoutWriter) Header() Header { return tw.h } func (tw *timeoutWriter) Write(p []byte) (int, error) { tw.mu.Lock() defer tw.mu.Unlock() if tw.timedOut { return 0, ErrHandlerTimeout } if !tw.wroteHeader { tw.writeHeader(StatusOK) } return tw.wbuf.Write(p) } func (tw *timeoutWriter) WriteHeader(code int) { tw.mu.Lock() defer tw.mu.Unlock() if tw.timedOut || tw.wroteHeader { return } tw.writeHeader(code) } func (tw *timeoutWriter) writeHeader(code int) { tw.wroteHeader = true tw.code = code } // tcpKeepAliveListener sets TCP keep-alive timeouts on accepted // connections. It's used by ListenAndServe and ListenAndServeTLS so // dead TCP connections (e.g. closing laptop mid-download) eventually // go away. type tcpKeepAliveListener struct { *net.TCPListener } func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { tc, err := ln.AcceptTCP() if err != nil { return } tc.SetKeepAlive(true) tc.SetKeepAlivePeriod(3 * time.Minute) return tc, nil } // globalOptionsHandler responds to "OPTIONS *" requests. type globalOptionsHandler struct{} func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) { w.Header().Set("Content-Length", "0") if r.ContentLength != 0 { // Read up to 4KB of OPTIONS body (as mentioned in the // spec as being reserved for future use), but anything // over that is considered a waste of server resources // (or an attack) and we abort and close the connection, // courtesy of MaxBytesReader's EOF behavior. 
mb := MaxBytesReader(w, r.Body, 4<<10) io.Copy(ioutil.Discard, mb) } } type eofReaderWithWriteTo struct{} func (eofReaderWithWriteTo) WriteTo(io.Writer) (int64, error) { return 0, nil } func (eofReaderWithWriteTo) Read([]byte) (int, error) { return 0, io.EOF } // eofReader is a non-nil io.ReadCloser that always returns EOF. // It has a WriteTo method so io.Copy won't need a buffer. var eofReader = &struct { eofReaderWithWriteTo io.Closer }{ eofReaderWithWriteTo{}, ioutil.NopCloser(nil), } // Verify that an io.Copy from an eofReader won't require a buffer. var _ io.WriterTo = eofReader // initNPNRequest is an HTTP handler that initializes certain // uninitialized fields in its *Request. Such partially-initialized // Requests come from NPN protocol handlers. type initNPNRequest struct { c *tls.Conn h serverHandler } func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) { if req.TLS == nil { req.TLS = &tls.ConnectionState{} *req.TLS = h.c.ConnectionState() } if req.Body == nil { req.Body = eofReader } if req.RemoteAddr == "" { req.RemoteAddr = h.c.RemoteAddr().String() } h.h.ServeHTTP(rw, req) } // loggingConn is used for debugging. type loggingConn struct { name string net.Conn } var ( uniqNameMu sync.Mutex uniqNameNext = make(map[string]int) ) func newLoggingConn(baseName string, c net.Conn) net.Conn { uniqNameMu.Lock() defer uniqNameMu.Unlock() uniqNameNext[baseName]++ return &loggingConn{ name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]), Conn: c, } } func (c *loggingConn) Write(p []byte) (n int, err error) { log.Printf("%s.Write(%d) = ....", c.name, len(p)) n, err = c.Conn.Write(p) log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err) return } func (c *loggingConn) Read(p []byte) (n int, err error) { log.Printf("%s.Read(%d) = ....", c.name, len(p)) n, err = c.Conn.Read(p) log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err) return } func (c *loggingConn) Close() (err error) { log.Printf("%s.Close() = ...", c.name) err = c.Conn.Close() log.Printf("%s.Close() = %v", c.name, err) return } // checkConnErrorWriter writes to c.rwc and records any write errors to c.werr. // It only contains one field (and a pointer field at that), so it // fits in an interface value without an extra allocation. type checkConnErrorWriter struct { c *conn } func (w checkConnErrorWriter) Write(p []byte) (n int, err error) { n, err = w.c.rwc.Write(p) if err != nil && w.c.werr == nil { w.c.werr = err } return } func numLeadingCRorLF(v []byte) (n int) { for _, b := range v { if b == '\r' || b == '\n' { n++ continue } break } return } func strSliceContains(ss []string, s string) bool { for _, v := range ss { if v == s { return true } } return false }
[ "\"GODEBUG\"" ]
[]
[ "GODEBUG" ]
[]
["GODEBUG"]
go
1
0
jd_beauty_plant.py
#!/bin/env python3 # -*- coding: utf-8 -* ''' 感谢Curtin提供的其他脚本供我参考 感谢aburd ch大佬的指导 项目名称:xF_jd_beauty_plant.py Author: 一风一扬 功能:健康社区-种植园自动任务 Date: 2022-1-4 cron: 10 9,11,15,21 * * * jd_beauty_plant.py new Env('化妆馆-种植园自动任务'); 活动入口:25:/¥2EaeU74Gz07gJ% 教程:该活动与京东的ck通用,所以只需要填写第几个号运行改脚本就行了。 青龙变量填写export plant_cookie="1",代表京东CK的第一个号执行该脚本 多账号用&隔开,例如export plant_cookie="1&2",代表京东CK的第一、二个号执行该脚本。这样做,JD的ck过期就不用维护两次了,所以做出了更新。 青龙变量export choose_plant_id="true",表示自己选用浇水的ID,适用于种植了多个产品的人,默认为false,如果是false仅适用于种植了一个产品的人。 对于多账号的,只要有一个账号种植多个产品,都必须为true才能浇水。如果choose_plant_id="false",planted_id可以不填写变量值。 青龙变量export planted_id = 'xxxx',表示需要浇水的id,单账号可以先填写export planted_id = '111111',export choose_plant_id="true",运行一次脚本 日志输出会有planted_id,然后再重新修改export planted_id = 'xxxxxx'。多个账号也一样,如果2个账号export planted_id = '111111&111111' 3个账号export planted_id = '111111&111111&111111',以此类推。 注意:planted_id和ck位置要对应。而且你有多少个账号,就得填多少个planted_id,首次111111填写时,为6位数。 例如export plant_cookie="xxxx&xxxx&xxx",那export planted_id = "111111&111111&111111",也要写满3个id,这样才能保证所有账号都能跑 https://github.com/jsNO1/e ''' ######################################################以下代码请不要乱改###################################### UserAgent = '' account = '' cookie = '' cookies = [] choose_plant_id = 'false' planted_id = '' shop_id = '' beauty_plant_exchange = 'false' planted_ids = [] import requests import time, datetime import requests, re, os, sys, random, json from urllib.parse import quote, unquote import threading import urllib3 # urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) requests.packages.urllib3.disable_warnings () today = datetime.datetime.now ().strftime ('%Y-%m-%d') tomorrow = (datetime.datetime.now () + datetime.timedelta (days=1)).strftime ('%Y-%m-%d') nowtime = datetime.datetime.now ().strftime ('%Y-%m-%d %H:%M:%S.%f8') time1 = '21:00:00.00000000' time2 = '23:00:00.00000000' flag_time1 = '{} {}'.format (today, time1) flag_time2 = '{} {}'.format (today, time2) pwd = os.path.dirname (os.path.abspath (__file__)) + os.sep path = pwd + "env.sh" sid = ''.join (random.sample ('123456789abcdef123456789abcdef123456789abcdef123456789abcdef', 32)) sid_ck = ''.join (random.sample ('123456789abcdef123456789abcdef123456789abcdef123456789abcdefABCDEFGHIJKLMNOPQRSTUVWXYZ', 43)) def printT(s): print ("[{0}]: {1}".format (datetime.datetime.now ().strftime ("%Y-%m-%d %H:%M:%S"), s)) sys.stdout.flush () def getEnvs(label): try: if label == 'True' or label == 'yes' or label == 'true' or label == 'Yes': return True elif label == 'False' or label == 'no' or label == 'false' or label == 'No': return False except: pass try: if '.' 
in label: return float (label) elif '&' in label: return label.split ('&') elif '@' in label: return label.split ('@') else: return int (label) except: return label # 获取v4环境 特殊处理 try: with open (v4f, 'r', encoding='utf-8') as v4f: v4Env = v4f.read () r = re.compile (r'^export\s(.*?)=[\'\"]?([\w\.\-@#&=_,\[\]\{\}\(\)]{1,})+[\'\"]{0,1}$', re.M | re.S | re.I) r = r.findall (v4Env) curenv = locals () for i in r: if i[0] != 'JD_COOKIE': curenv[i[0]] = getEnvs (i[1]) except: pass ############# 在pycharm测试ql环境用,实际用下面的代码运行 ######### # with open(path, "r+", encoding="utf-8") as f: # ck = f.read() # if "JD_COOKIE" in ck: # r = re.compile (r"pt_key=.*?pt_pin=.*?;", re.M | re.S | re.I) # cookies = r.findall (ck) # # print(cookies) # # cookies = cookies[0] # # print(cookies) # # cookies = cookies.split ('&') # printT ("已获取并使用ck环境 Cookie") ####################################################################### if "plant_cookie" in os.environ: if len (os.environ["plant_cookie"]) == 1: is_ck = int(os.environ["plant_cookie"]) cookie1 = os.environ["JD_COOKIE"].split('&') cookie = cookie1[is_ck-1] printT ("已获取并使用Env环境cookie") elif len (os.environ["plant_cookie"]) > 1: cookies1 = [] cookies1 = os.environ["JD_COOKIE"] cookies1 = cookies1.split ('&') is_ck = os.environ["plant_cookie"].split('&') for i in is_ck: cookies.append(cookies1[int(i)-1]) printT ("已获取并使用Env环境plant_cookies") else: printT ("变量plant_cookie未填写") exit (0) if "choose_plant_id" in os.environ: choose_plant_id = os.environ["choose_plant_id"] printT (f"已获取并使用Env环境choose_plant_id={choose_plant_id}") else: printT ("变量choose_plant_id未填写,默认为false只种植了一个,如果种植了多个,请填写改变量planted_id") if "planted_id" in os.environ: if len (os.environ["planted_id"]) > 8: planted_ids = os.environ["planted_id"] planted_ids = planted_ids.split ('&') else: planted_id = os.environ["planted_id"] printT (f"已获取并使用Env环境planted_id={planted_id}") else: printT ("变量planted_id未填写,默认为false只种植了一个,如果种植了多个,请填写改变量planted_id") if "beauty_plant_exchange" in os.environ: beauty_plant_exchange = os.environ["beauty_plant_exchange"] printT (f"已获取并使用Env环境beauty_plant_exchange={beauty_plant_exchange}") else: printT ("变量beauty_plant_exchange未填写,默认为false,不用美妆币兑换肥料") def userAgent(): """ 随机生成一个UA :return: jdapp;iPhone;9.4.8;14.3;xxxx;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1 """ if not UserAgent: uuid = ''.join (random.sample ('123456789abcdef123456789abcdef123456789abcdef123456789abcdef', 40)) addressid = ''.join (random.sample ('1234567898647', 10)) iosVer = ''.join ( random.sample (["14.5.1", "14.4", "14.3", "14.2", "14.1", "14.0.1", "13.7", "13.1.2", "13.1.1"], 1)) iosV = iosVer.replace ('.', '_') iPhone = ''.join (random.sample (["8", "9", "10", "11", "12", "13"], 1)) ADID = ''.join (random.sample ('0987654321ABCDEF', 8)) + '-' + ''.join ( random.sample ('0987654321ABCDEF', 4)) + '-' + ''.join ( random.sample ('0987654321ABCDEF', 4)) + '-' + ''.join ( random.sample ('0987654321ABCDEF', 4)) + '-' + ''.join (random.sample ('0987654321ABCDEF', 12)) return f'jdapp;iPhone;10.0.4;{iosVer};{uuid};network/wifi;ADID/{ADID};supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone{iPhone},1;addressid/{addressid};supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS {iosV} like Mac OS X) AppleWebKit/605.1.15 (KHTML, 
like Gecko) Mobile/15E148;supportJDSHWK/1' else: return UserAgent ## 获取通知服务 class msg (object): def __init__(self, m=''): self.str_msg = m self.message () def message(self): global msg_info printT (self.str_msg) try: msg_info = "{}\n{}".format (msg_info, self.str_msg) except: msg_info = "{}".format (self.str_msg) sys.stdout.flush () # 这代码的作用就是刷新缓冲区。 # 当我们打印一些字符时,并不是调用print函数后就立即打印的。一般会先将字符送到缓冲区,然后再打印。 # 这就存在一个问题,如果你想等时间间隔的打印一些字符,但由于缓冲区没满,不会打印。就需要采取一些手段。如每次打印后强行刷新缓冲区。 def getsendNotify(self, a=0): if a == 0: a += 1 try: url = 'https://gitee.com/curtinlv/Public/raw/master/sendNotify.py' response = requests.get (url) if 'curtinlv' in response.text: with open ('sendNotify.py', "w+", encoding="utf-8") as f: f.write (response.text) else: if a < 5: a += 1 return self.getsendNotify (a) else: pass except: if a < 5: a += 1 return self.getsendNotify (a) else: pass def main(self): global send cur_path = os.path.abspath (os.path.dirname (__file__)) sys.path.append (cur_path) if os.path.exists (cur_path + "/sendNotify.py"): try: from sendNotify import send except: self.getsendNotify () try: from sendNotify import send except: printT ("加载通知服务失败~") else: self.getsendNotify () try: from sendNotify import send except: printT ("加载通知服务失败~") ################### msg ().main () def setName(cookie): try: r = re.compile (r"pt_pin=(.*?);") # 指定一个规则:查找pt_pin=与;之前的所有字符,但pt_pin=与;不复制。r"" 的作用是去除转义字符. userName = r.findall (cookie) # 查找pt_pin=与;之前的所有字符,并复制给r,其中pt_pin=与;不复制。 # print (userName) userName = unquote (userName[0]) # r.findall(cookie)赋值是list列表,这个赋值为字符串 # print(userName) return userName except Exception as e: print (e, "cookie格式有误!") exit (2) # 获取ck def get_ck(token, sid_ck, account): try: url = 'https://api.m.jd.com/client.action?functionId=isvObfuscator' headers = { # 'Connection': 'keep-alive', 'accept': '*/*', "cookie": f"{token}", 'host': 'api.m.jd.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'user-Agent': "JD4iPhone/167922%20(iPhone;%20iOS;%20Scale/2.00)", 'accept-Encoding': 'gzip, deflate, br', 'accept-Language': 'zh-Hans-CN;q=1', "content-type": "application/x-www-form-urlencoded", # "content-length":"1348", } timestamp = int (round (time.time () * 1000)) timestamp1 = int (timestamp / 1000) data = r'body=%7B%22url%22%3A%22https%3A%5C/%5C/xinruismzd-isv.isvjcloud.com%22%2C%22id%22%3A%22%22%7D&build=167922&client=apple&clientVersion=10.3.2&d_brand=apple&d_model=iPhone12%2C1&ef=1&eid=eidI4a9081236as4w7JpXa5zRZuwROIEo3ORpcOyassXhjPBIXtrtbjusqCxeW3E1fOtHUlGhZUCur1Q1iocDze1pQ9jBDGfQs8UXxMCTz02fk0RIHpB&ep=%7B%22ciphertype%22%3A5%2C%22cipher%22%3A%7B%22screen%22%3A%22ENS4AtO3EJS%3D%22%2C%22wifiBssid%22%3A%22' + f"{sid_ck}" + 
r'%3D%22%2C%22osVersion%22%3A%22CJUkCK%3D%3D%22%2C%22area%22%3A%22CJvpCJY1DV80ENY2XzK%3D%22%2C%22openudid%22%3A%22Ytq3YtKyDzO5CJuyZtu4CWSyZtC0Ytc1CJLsDwC5YwO0YtS5CNrsCK%3D%3D%22%2C%22uuid%22%3A%22aQf1ZRdxb2r4ovZ1EJZhcxYlVNZSZz09%22%7D%2C%22ts%22%3A1642002985%2C%22hdid%22%3A%22JM9F1ywUPwflvMIpYPok0tt5k9kW4ArJEU3lfLhxBqw%3D%22%2C%22version%22%3A%221.0.3%22%2C%22appname%22%3A%22com.360buy.jdmobile%22%2C%22ridx%22%3A-1%7D&ext=%7B%22prstate%22%3A%220%22%2C%22pvcStu%22%3A%221%22%7D&isBackground=N&joycious=88&lang=zh_CN&networkType=wifi&networklibtype=JDNetworkBaseAF&partner=apple&rfs=0000&scope=01&sign=946db60626658b250cf47aafb6f67691&st=1642002999847&sv=112&uemps=0-0&uts=0f31TVRjBSu3kkqwe7t25AkQCKuzV3pz8JrojVuU0630g%2BkZigs9kTwRghT26sE72/e92RRKan/%2B9SRjIJYCLuhew91djUwnIY47k31Rwne/U1fOHHr9FmR31X03JKJjwao/EC1gy4fj7PV1Co0ZOjiCMTscFo/8id2r8pCHYMZcaeH3yPTLq1MyFF3o3nkStM/993MbC9zim7imw8b1Fg%3D%3D' # data = '{"token":"AAFh3ANjADAPSunyKSzXTA-UDxrs3Tn9hoy92x4sWmVB0Kv9ey-gAMEdJaSDWLWtnMX8lqLujBo","source":"01"}' # print(data) response = requests.post (url=url, verify=False, headers=headers, data=data) result = response.json () # print(result) access_token = result['token'] # print(access_token) return access_token except Exception as e: msg ("账号【{0}】获取ck失败,cookie过期".format (account)) # 获取Authorization def get_Authorization(access_token, account): try: url = 'https://xinruimz-isv.isvjcloud.com/papi/auth' headers = { 'Connection': 'keep-alive', 'Accept': 'application/x.jd-school-raffle.v1+json', "Authorization": 'Bearer undefined', 'Referer': 'https://xinruimz-isv.isvjcloud.com/plantation/logined_jd/', 'Host': 'xinruimz-isv.isvjcloud.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'User-Agent': userAgent (), 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'zh-CN,zh-Hans;q=0.9', "Origin": "https://xinruimz-isv.isvjcloud.com", "Content-Type": "application/json;charset=utf-8", } data = '{"token":"' + f"{access_token}" + r'","source":"01"}' # print(data) response = requests.post (url=url, verify=False, headers=headers, data=data) result = response.json () print (result) access_token = result['access_token'] access_token = r"Bearer " + access_token # print(access_token) return access_token except Exception as e: msg ("账号【{0}】获取Authorization失败,cookie过期".format (account)) # 获取已种植的信息 def get_planted_info(cookie, sid, account): name_list = [] planted_id_list = [] position_list = [] shop_id_list = [] url = 'https://xinruimz-isv.isvjcloud.com/papi/get_home_info' headers = { 'Connection': 'keep-alive', 'Accept': 'application/x.jd-school-raffle.v1+json', "Authorization": cookie, 'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/?sid={sid}&un_area=19_1655_4866_0', 'Host': 'xinruimz-isv.isvjcloud.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'User-Agent': userAgent (), 'Accept-Encoding': 'gzip, deflate, 
br', 'Accept-Language': 'zh-CN,zh-Hans;q=0.9' } response = requests.get (url=url, verify=False, headers=headers) result = response.json () # print(result) planted_list = result['plant_info'] # print(planted_list) for i in range (len (planted_list)): try: name = result['plant_info'][f'{i + 1}']['data']['name'] planted_id = result['plant_info'][f'{i + 1}']['data']['id'] position = result['plant_info'][f'{i + 1}']['data']['position'] shop_id = result['plant_info'][f'{i + 1}']['data']['shop_id'] # print(name,planted_id,position,shop_id) name_list.append (name) planted_id_list.append (planted_id) position_list.append (position) shop_id_list.append (shop_id) print (f"账号{account}种植的种子为", name, "planted_id:", planted_id, ",shop_id:", shop_id) except Exception as e: pass return name_list, position_list, shop_id_list, planted_id_list # 领取每日水滴 def get_water(cookie, position, sid, account): try: j = 0 url = 'https://xinruimz-isv.isvjcloud.com/papi/collect_water' headers = { 'Connection': 'keep-alive', 'Accept': 'application/x.jd-school-raffle.v1+json', "Authorization": cookie, 'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/?sid={sid}&un_area=19_1655_4866_0', 'Host': 'xinruimz-isv.isvjcloud.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'User-Agent': userAgent (), 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'zh-CN,zh-Hans;q=0.9', "Content-Type": "application/json;charset=utf-8", } for i in position: data = r'{"position":' + f"{i}" + r'}' response = requests.post (url=url, verify=False, headers=headers, data=data) # print(response.status_code) if response.status_code == 204: j += 1 total = j * 10 if response.status_code == 204: msg ("账号【{0}】成功领取每日水滴{1}".format (account, total)) except Exception as e: msg ("账号【{0}】领取每日水滴失败,可能是cookie过期".format (account)) # 领取每日肥料 def get_fertilizer(cookie, shop_id, account): try: j = 0 url = 'https://xinruimz-isv.isvjcloud.com/papi/collect_fertilizer' headers = { 'Connection': 'keep-alive', 'Accept': 'application/x.jd-school-raffle.v1+json', "Authorization": cookie, 'Referer': 'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id=12&channel=index', 'Host': 'xinruimz-isv.isvjcloud.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'User-Agent': userAgent (), 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'zh-CN,zh-Hans;q=0.9', "Content-Type": "application/json;charset=utf-8", } for i in shop_id: data = r'{"shop_id":' + f"{i}" + r'}' response = requests.post (url=url, verify=False, headers=headers, data=data) if response.status_code == 204: j += 1 total = j * 10 if response.status_code == 204: msg ("账号【{0}】成功领取每日肥料{1}".format (account, total)) except Exception as e: msg ("账号【{0}】领取每日肥料失败,可能是cookie过期".format (account)) # 获取任务信息 def get_task(cookie, account): try: taskName_list = [] taskId_list = [] taskName_list2 = [] taskId_list2 
= [] taskName_list3 = [] taskId_list3 = [] url = 'https://xinruimz-isv.isvjcloud.com/papi/water_task_info' headers = { 'Connection': 'keep-alive', 'Accept': 'application/x.jd-school-raffle.v1+json', "Authorization": cookie, 'Referer': 'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id=12&channel=index', 'Host': 'xinruimz-isv.isvjcloud.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'User-Agent': userAgent (), 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'zh-CN,zh-Hans;q=0.9', "Content-Type": "application/json;charset=utf-8", } response = requests.get (url=url, verify=False, headers=headers) result = response.json () # print(result) task_list = result['shops'] task_list2 = result['meetingplaces'] task_list3 = result['prodcuts'] # 浏览加购 # print(task_list) for i in range (len (task_list)): try: taskName = task_list[i]['name'] taskId = task_list[i]['id'] taskId_list.append (taskId) taskName_list.append (taskName) except Exception as e: print (e) for i in range (len (task_list2)): try: taskName2 = task_list2[i]['name'] taskId2 = task_list2[i]['id'] taskId_list2.append (taskId2) taskName_list2.append (taskName2) except Exception as e: print (e) for i in range (len (task_list3)): try: taskName3 = task_list3[i]['name'] taskId3 = task_list3[i]['id'] taskId_list3.append (taskId3) taskName_list3.append (taskName3) except Exception as e: print (e) # print(taskName_list,taskId_list,taskName_list2,taskId_list2,taskName_list3,taskId_list3) return taskName_list, taskId_list, taskName_list2, taskId_list2, taskName_list3, taskId_list3 except Exception as e: print (e) message = result['message'] if "非法店铺" in message: msg ("【账号{0}】种子过期,请重新种植".format (account)) # 获取任务信息 def get_fertilizer_task(cookie, shop_id, account): try: # taskName_list = [] # taskId_list = [] taskName_list2 = [] taskId_list2 = [] taskName_list3 = [] taskId_list3 = [] taskName_list4 = [] taskId_list4 = [] url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_task_info?shop_id={shop_id}' headers = { 'Connection': 'keep-alive', 'Accept': 'application/x.jd-school-raffle.v1+json', "Authorization": cookie, 'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index', 'Host': 'xinruimz-isv.isvjcloud.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'User-Agent': userAgent (), 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'zh-CN,zh-Hans;q=0.9', # "Content-Type": "application/json;charset=utf-8", } response = requests.get (url=url, verify=False, headers=headers) result = response.json () # print(result) # task_list = result['shops'] task_list2 = result['meetingplaces'] task_list3 = result['prodcuts'] # 浏览加购 task_list4 = result['live'] # 浏览直播 # print(task_list) # for i in range (len (task_list)): # try: # taskName = task_list[i]['name'] # taskId = task_list[i]['id'] # 
taskId_list.append(taskId) # taskName_list.append(taskName) # except Exception as e: # print(e) for i in range (len (task_list2)): try: taskName2 = task_list2[i]['name'] taskId2 = task_list2[i]['id'] taskId_list2.append (taskId2) taskName_list2.append (taskName2) except Exception as e: print (e) for i in range (len (task_list3)): try: taskName3 = task_list3[i]['name'] taskId3 = task_list3[i]['id'] taskId_list3.append (taskId3) taskName_list3.append (taskName3) except Exception as e: print (e) for i in range (len (task_list4)): try: taskName4 = task_list4[i]['name'] taskId4 = task_list4[i]['id'] taskId_list4.append (taskId4) taskName_list4.append (taskName4) except Exception as e: print (e) # print(taskName_list,taskId_list,taskName_list2,taskId_list2,taskName_list3,taskId_list3) return taskName_list2, taskId_list2, taskName_list3, taskId_list3, taskName_list4, taskId_list4 except Exception as e: print (e) message = result['message'] if "非法店铺" in message: msg ("【账号{0}】种子过期,请重新种植".format (account)) # 做任务1 def do_task1(cookie, taskName, taskId, account): try: url = f'https://xinruimz-isv.isvjcloud.com/papi/water_shop_view?shop_id={taskId}' headers = { 'Connection': 'keep-alive', 'Accept': 'application/x.jd-school-raffle.v1+json', "Authorization": cookie, 'Referer': 'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id=12&channel=index', 'Host': 'xinruimz-isv.isvjcloud.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'User-Agent': userAgent (), 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'zh-CN,zh-Hans;q=0.9', "Content-Type": "application/json;charset=utf-8", } response = requests.get (url=url, verify=False, headers=headers) # data中有汉字,需要encode为utf-8 result = response.json () print (result) score = result['inc'] print ("账号【{0}】执行浏览任务【{1}】等待10秒".format (account, taskName)) msg ("账号【{0}】执行浏览任务【{1}】成功,获取【{2}】水滴".format (account, taskName, score)) time.sleep (10) except Exception as e: print (e) time.sleep (1) # 做浏览任务 def do_task2(cookie, taskName, taskId, account): try: url = f'https://xinruimz-isv.isvjcloud.com/papi/water_meetingplace_view?meetingplace_id={taskId}' headers = { 'Connection': 'keep-alive', 'Accept': 'application/x.jd-school-raffle.v1+json', "Authorization": cookie, 'Referer': 'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id=12&channel=index', 'Host': 'xinruimz-isv.isvjcloud.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'User-Agent': userAgent (), 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'zh-CN,zh-Hans;q=0.9', "Content-Type": "application/json;charset=utf-8", } response = requests.get (url=url, verify=False, headers=headers) # data中有汉字,需要encode为utf-8 result = response.json () print (result) score = result['inc'] print ("账号【{0}】执行浏览任务【{1}】等待10秒".format (account, taskName)) msg ("账号【{0}】执行浏览任务【{1}】成功,获取【{2}】水滴".format 
(account, taskName, score)) time.sleep (10) except Exception as e: print (e) time.sleep (1) # 浏览加购 def do_task3(cookie, taskName, taskId, sid, account): try: url = f'https://xinruimz-isv.isvjcloud.com/papi/water_product_view?product_id={taskId}' headers = { 'Connection': 'keep-alive', 'Accept': 'application/x.jd-school-raffle.v1+json', "Authorization": cookie, 'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/?sid={sid}&un_area=19_1655_4866_0', 'Host': 'xinruimz-isv.isvjcloud.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'User-Agent': userAgent (), # 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'zh-CN,zh-Hans;q=0.9', # "Content-Type": "application/json;charset=utf-8", } response = requests.get (url=url, verify=False, headers=headers) # data中有汉字,需要encode为utf-8 result = response.json () print (result) score = result['inc'] print ("账号【{0}】执行浏览加购【{1}】等待10秒".format (account, taskName)) msg ("账号【{0}】执行浏览加购【{1}】成功,获取【{2}】水滴".format (account, taskName, score)) time.sleep (10) except Exception as e: print (e) time.sleep (1) # 施肥中的任务-浏览关注 def do_fertilizer_task(cookie, shop_id, account): try: url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_shop_view?shop_id={shop_id}' headers = { 'Connection': 'keep-alive', 'Accept': 'application/x.jd-school-raffle.v1+json', "Authorization": cookie, 'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index', 'Host': 'xinruimz-isv.isvjcloud.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'User-Agent': userAgent (), # 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'zh-CN,zh-Hans;q=0.9', # "Content-Type": "application/json;charset=utf-8", } while True: response = requests.get (url=url, verify=False, headers=headers) # data中有汉字,需要encode为utf-8 result = response.json () print (result) score = result['inc'] print ("账号【{0}】执行【浏览关注】等待10秒".format (account)) msg ("账号【{0}】执行【浏览关注】任务成功,获取【{1}】肥料".format (account, score)) time.sleep (10) except Exception as e: print (e) time.sleep (1) # 施肥中的任务-浏览 def do_fertilizer_task2(cookie, name, meetingplace_id, shop_id, account): try: url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_meetingplace_view?meetingplace_id={meetingplace_id}&shop_id={shop_id}' headers = { 'Connection': 'keep-alive', 'Accept': 'application/x.jd-school-raffle.v1+json', "Authorization": cookie, 'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index', 'Host': 'xinruimz-isv.isvjcloud.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 
(KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'User-Agent': userAgent (), 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'zh-CN,zh-Hans;q=0.9', # "Content-Type": "application/json;charset=utf-8", } response = requests.get (url=url, verify=False, headers=headers) # data中有汉字,需要encode为utf-8 result = response.json () print (result) score = result['inc'] print ("账号【{0}】执行浏览关注{1}等待10秒".format (account, name)) msg ("账号【{0}】执行浏览关注{1}任务成功,获取【{2}】肥料".format (account, name, score)) time.sleep (10) except Exception as e: print (e) time.sleep (1) # 施肥中的任务-加购 def do_fertilizer_task3(cookie, name, product_id, shop_id, account): try: url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_product_view?product_id={product_id}&shop_id={shop_id}' headers = { 'Connection': 'keep-alive', 'Accept': 'application/x.jd-school-raffle.v1+json', "Authorization": cookie, 'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index', 'Host': 'xinruimz-isv.isvjcloud.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'User-Agent': userAgent (), # 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'zh-CN,zh-Hans;q=0.9', # "Content-Type": "application/json;charset=utf-8", } while True: response = requests.get (url=url, verify=False, headers=headers) # data中有汉字,需要encode为utf-8 result = response.json () print (result) score = result['inc'] print ("账号【{0}】执行浏览并加购{1}等待10秒".format (account, name)) msg ("账号【{0}】执行浏览并加购{1}任务成功,获取【{2}】肥料".format (account, name, score)) time.sleep (10) except Exception as e: print (e) time.sleep (1) # 施肥中的任务-观看其他小样 def do_fertilizer_task4(cookie, shop_id, account): try: url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_sample_view?shop_id={shop_id}' headers = { 'Connection': 'keep-alive', 'Accept': 'application/x.jd-school-raffle.v1+json', "Authorization": cookie, 'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index', 'Host': 'xinruimz-isv.isvjcloud.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'User-Agent': userAgent (), # 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'zh-CN,zh-Hans;q=0.9', # "Content-Type": "application/json;charset=utf-8", } response = requests.get (url=url, verify=False, headers=headers) # data中有汉字,需要encode为utf-8 result = response.json () print (result) score = result['inc'] print ("账号【{0}】执行【观看其他小样】等待10秒".format (account)) msg ("账号【{0}】执行【观看其他小样】任务成功,获取【{1}】肥料".format (account, score)) time.sleep (10) except Exception as e: print (e) time.sleep (1) # 施肥中的任务-浏览化妆馆 def do_fertilizer_task5(cookie, shop_id, account): try: url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_chanel_view?shop_id={shop_id}' headers = { 'Connection': 'keep-alive', 'Accept': 'application/x.jd-school-raffle.v1+json', "Authorization": cookie, 'Referer': 
f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index', 'Host': 'xinruimz-isv.isvjcloud.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'User-Agent': userAgent (), # 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'zh-CN,zh-Hans;q=0.9', # "Content-Type": "application/json;charset=utf-8", } response = requests.get (url=url, verify=False, headers=headers) # data中有汉字,需要encode为utf-8 result = response.json () print (result) score = result['inc'] print ("账号【{0}】执行【浏览化妆馆】等待10秒".format (account)) msg ("账号【{0}】执行【浏览化妆馆】任务成功,获取【{1}】肥料".format (account, score)) time.sleep (10) except Exception as e: print (e) time.sleep (1) # 施肥中的任务-美妆币兑换,每天5次 def do_fertilizer_task6(cookie, shop_id, account): try: url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_exchange?shop_id={shop_id}' headers = { 'Connection': 'keep-alive', 'Accept': 'application/x.jd-school-raffle.v1+json', "Authorization": cookie, 'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index', 'Host': 'xinruimz-isv.isvjcloud.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'User-Agent': userAgent (), # 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'zh-CN,zh-Hans;q=0.9', # "Content-Type": "application/json;charset=utf-8", } for i in range (5): response = requests.get (url=url, verify=False, headers=headers) # data中有汉字,需要encode为utf-8 result = response.json () # print(result) score = result['inc'] print ("账号【{0}】【shop_id:{1}】正在【兑换肥料】等待10秒".format (account, shop_id)) msg ("账号【{0}】【shop_id:{2}】执行【兑换肥料】任务成功,获取【{1}】肥料".format (account, score, shop_id)) time.sleep (10) except Exception as e: print (e) msg ("账号【{0}】【shop_id:{1}】肥料兑换已达上限".format (account, shop_id)) time.sleep (1) # 浇水 def watering(cookie, plant_id, sid, account): try: url = 'https://xinruimz-isv.isvjcloud.com/papi/watering' headers = { 'Connection': 'keep-alive', 'Accept': 'application/x.jd-school-raffle.v1+json', "Authorization": cookie, 'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/?sid={sid}&un_area=19_1655_4866_0', 'Host': 'xinruimz-isv.isvjcloud.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'User-Agent': userAgent (), # 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'zh-CN,zh-Hans;q=0.9', "Content-Type": "application/json;charset=utf-8", } data = r'{"plant_id":' + f"{plant_id}" + r'}' while True: response = requests.post (url=url, verify=False, headers=headers, data=data.encode ()) # 
data中有汉字,需要encode为utf-8 result = response.json () # print(result) level = result['level'] # 当前等级 complete_level = result['complete_level'] # 完成等级 msg ("【账号{0}】【plant_id:{3}】成功浇水10g,当前等级{1},种子成熟等级为{2}".format (account, level, complete_level, plant_id)) time.sleep (5) except Exception as e: print(e) # pass # 施肥 def fertilization(cookie, plant_id, shop_id, account): url = 'https://xinruimz-isv.isvjcloud.com/papi/fertilization' headers = { 'Connection': 'keep-alive', 'Accept': 'application/x.jd-school-raffle.v1+json', "Authorization": cookie, 'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index', 'Host': 'xinruimz-isv.isvjcloud.com', # 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1', 'User-Agent': userAgent (), # 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'zh-CN,zh-Hans;q=0.9', "Content-Type": "application/json;charset=utf-8", } data = r'{"plant_id":' + f"{plant_id}" + r'}' i = 1 while True: try: response = requests.post (url=url, verify=False, headers=headers, data=data) # data中有汉字,需要encode为utf-8 result = response.json () # print(result) level = result['level'] # 当前等级 complete_level = result['complete_level'] # 完成等级 printT ("【账号{0}】【plant_id:{3}】成功施肥10g,当前等级{1},种子成熟等级为{2}".format (account, level, complete_level, plant_id)) time.sleep (5) i += 1 except Exception as e: # print(e) message = result['message'] total = i * 10 if "肥料不足" in message: msg("【账号{0}】【plant_id:{1}】本次一共施肥{2}g".format (account, plant_id,total)) printT ("【账号{0}】【plant_id:{1}】肥料不足10g".format (account, plant_id)) break def start(): global cookie, cookies print (f"\n【准备开始...】\n") nowtime = datetime.datetime.now ().strftime ('%Y-%m-%d %H:%M:%S.%f8') if cookie != '': account = setName (cookie) access_token = get_ck (cookie, sid_ck, account) cookie = get_Authorization (access_token, account) name_list, position_list, shop_id_list, planted_id_list = get_planted_info (cookie, sid, account) taskName_list, taskId_list, taskName_list2, taskId_list2, taskName_list3, taskId_list3 = get_task (cookie,account) get_water (cookie, position_list, sid, account) get_fertilizer (cookie, shop_id_list, account) for i, j in zip (taskName_list, taskId_list): do_task1 (cookie, i, j, account) for i, j in zip (taskName_list2, taskId_list2): do_task2 (cookie, i, j, account) for i, j in zip (taskName_list3, taskId_list3): do_task3 (cookie, i, j, sid, account) flag = 0 for i in shop_id_list: do_fertilizer_task (cookie, i, account) # 浏览关注 for k in shop_id_list: taskName_list2, taskId_list2, taskName_list3, taskId_list3, taskName_list4, taskId_list4 = get_fertilizer_task (cookie, k, account) do_fertilizer_task4 (cookie, k, account) do_fertilizer_task5 (cookie, k, account) if beauty_plant_exchange == 'true': do_fertilizer_task6 (cookie, k, account) for i, j in zip (taskName_list2, taskId_list2): print (i, j, k) do_fertilizer_task2 (cookie, i, j, k, account) # 浏览 for i, j in zip (taskName_list3, taskId_list3): print (i, j, k) do_fertilizer_task3 (cookie, i, j, k, account) # 加购 if choose_plant_id == 'false': for i in planted_id_list: watering (cookie, i, sid, account) fertilization (cookie, i, k, account) else: fertilization (cookie, planted_id_list[flag], k, 
account) watering (cookie, planted_id, sid, account) flag += 1 elif cookies != '': for cookie, planted_id in zip (cookies, planted_ids): try: account = setName (cookie) access_token = get_ck (cookie, sid_ck, account) cookie = get_Authorization (access_token, account) name_list, position_list, shop_id_list, planted_id_list = get_planted_info (cookie, sid, account) except Exception as e: pass for cookie, planted_id in zip (cookies, planted_ids): try: account = setName (cookie) access_token = get_ck (cookie, sid_ck, account) cookie = get_Authorization (access_token, account) name_list, position_list, shop_id_list, planted_id_list = get_planted_info (cookie, sid, account) taskName_list, taskId_list, taskName_list2, taskId_list2, taskName_list3, taskId_list3 = get_task (cookie, account) get_water (cookie, position_list, sid, account) get_fertilizer (cookie, shop_id_list, account) for i, j in zip (taskName_list, taskId_list): do_task1 (cookie, i, j, account) for i, j in zip (taskName_list2, taskId_list2): do_task2 (cookie, i, j, account) for i, j in zip (taskName_list3, taskId_list3): do_task3 (cookie, i, j, sid, account) flag = 0 for i in shop_id_list: do_fertilizer_task (cookie, i, account) # 浏览关注 for k in shop_id_list: taskName_list2, taskId_list2, taskName_list3, taskId_list3, taskName_list4, taskId_list4 = get_fertilizer_task ( cookie, k, account) do_fertilizer_task4 (cookie, k, account) do_fertilizer_task5 (cookie, k, account) if beauty_plant_exchange == 'true': do_fertilizer_task6 (cookie, k, account) for i, j in zip (taskName_list2, taskId_list2): print (i, j, k) do_fertilizer_task2 (cookie, i, j, k, account) # 浏览 for i, j in zip (taskName_list3, taskId_list3): print (i, j, k) do_fertilizer_task3 (cookie, i, j, k, account) # 加购 if choose_plant_id == 'false': for i in planted_id_list: fertilization (cookie, i, k, account) watering (cookie, i, sid, account) else: print("【账号{}现在开始施肥】".format(account)) fertilization (cookie, planted_id_list[flag], k, account) print ("【账号{}现在开始浇水】".format (account)) watering (cookie, planted_id, sid, account) flag += 1 except Exception as e: pass else: printT ("请检查变量plant_cookie是否已填写") if __name__ == '__main__': printT ("美丽研究院-种植园") start () # if '成熟' in msg_info: # send ("美丽研究院-种植园", msg_info) if '成功' in msg_info: send ("美丽研究院-种植园", msg_info)
[]
[]
[ "JD_COOKIE", "choose_plant_id", "beauty_plant_exchange", "planted_id", "plant_cookie" ]
[]
["JD_COOKIE", "choose_plant_id", "beauty_plant_exchange", "planted_id", "plant_cookie"]
python
5
0
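A minimal Python sketch of the cookie-selection convention the script's docstring describes: plant_cookie holds 1-based positions into the shared JD_COOKIE list, and planted_id entries are matched positionally to those accounts. The values shown are illustrative placeholders, not real credentials.

import os

jd_cookies = os.environ.get("JD_COOKIE", "").split("&")
indexes = [i for i in os.environ.get("plant_cookie", "").split("&") if i]
plant_ids = [p for p in os.environ.get("planted_id", "").split("&") if p]

for idx, pid in zip(indexes, plant_ids):
    ck = jd_cookies[int(idx) - 1]  # plant_cookie entries are 1-based positions into JD_COOKIE
    print("account", idx, "waters plant_id", pid, "using cookie", ck[:20] + "...")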
djangoapi/djangoapi/asgi.py
""" ASGI config for djangoapi project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoapi.settings') application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
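Because the asgi.py above uses os.environ.setdefault, DJANGO_SETTINGS_MODULE is only filled in when nothing is exported already; an externally provided value wins. A small sketch of that behaviour, assuming the djangoapi project is importable and using a hypothetical settings-module name:

import os

os.environ["DJANGO_SETTINGS_MODULE"] = "djangoapi.settings_production"  # hypothetical override
from djangoapi.asgi import application  # setdefault() inside the module keeps the value set above
print(os.environ["DJANGO_SETTINGS_MODULE"])  # still the override, not 'djangoapi.settings'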
v3/integrations/nrgrpc/nrgrpc_server.go
// Copyright 2020 New Relic Corporation. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package nrgrpc import ( "context" "net/http" "strings" "github.com/newrelic/go-agent/v3/newrelic" "google.golang.org/grpc" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) func startTransaction(ctx context.Context, app *newrelic.Application, fullMethod string) *newrelic.Transaction { method := strings.TrimPrefix(fullMethod, "/") var hdrs http.Header if md, ok := metadata.FromIncomingContext(ctx); ok { hdrs = make(http.Header, len(md)) for k, vs := range md { for _, v := range vs { hdrs.Add(k, v) } } } target := hdrs.Get(":authority") url := getURL(method, target) webReq := newrelic.WebRequest{ Header: hdrs, URL: url, Method: method, Transport: newrelic.TransportHTTP, } txn := app.StartTransaction(method) txn.SetWebRequest(webReq) return txn } // UnaryServerInterceptor instruments server unary RPCs. // // Use this function with grpc.UnaryInterceptor and a newrelic.Application to // create a grpc.ServerOption to pass to grpc.NewServer. This interceptor // records each unary call with a transaction. You must use both // UnaryServerInterceptor and StreamServerInterceptor to instrument unary and // streaming calls. // // Example: // // app, _ := newrelic.NewApplication( // newrelic.ConfigAppName("gRPC Server"), // newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), // newrelic.ConfigDebugLogger(os.Stdout), // ) // server := grpc.NewServer( // grpc.UnaryInterceptor(nrgrpc.UnaryServerInterceptor(app)), // grpc.StreamInterceptor(nrgrpc.StreamServerInterceptor(app)), // ) // // These interceptors add the transaction to the call context so it may be // accessed in your method handlers using newrelic.FromContext. // // Full example: // https://github.com/newrelic/go-agent/blob/master/v3/integrations/nrgrpc/example/server/server.go // func UnaryServerInterceptor(app *newrelic.Application) grpc.UnaryServerInterceptor { if app == nil { return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { return handler(ctx, req) } } return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { txn := startTransaction(ctx, app, info.FullMethod) defer txn.End() ctx = newrelic.NewContext(ctx, txn) resp, err = handler(ctx, req) txn.SetWebResponse(nil).WriteHeader(int(status.Code(err))) if err != nil { txn.NoticeError(err) } return } } type wrappedServerStream struct { grpc.ServerStream txn *newrelic.Transaction } func (s wrappedServerStream) Context() context.Context { ctx := s.ServerStream.Context() return newrelic.NewContext(ctx, s.txn) } func newWrappedServerStream(stream grpc.ServerStream, txn *newrelic.Transaction) grpc.ServerStream { return wrappedServerStream{ ServerStream: stream, txn: txn, } } // StreamServerInterceptor instruments server streaming RPCs. // // Use this function with grpc.StreamInterceptor and a newrelic.Application to // create a grpc.ServerOption to pass to grpc.NewServer. This interceptor // records each streaming call with a transaction. You must use both // UnaryServerInterceptor and StreamServerInterceptor to instrument unary and // streaming calls. 
// // Example: // // app, _ := newrelic.NewApplication( // newrelic.ConfigAppName("gRPC Server"), // newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), // newrelic.ConfigDebugLogger(os.Stdout), // ) // server := grpc.NewServer( // grpc.UnaryInterceptor(nrgrpc.UnaryServerInterceptor(app)), // grpc.StreamInterceptor(nrgrpc.StreamServerInterceptor(app)), // ) // // These interceptors add the transaction to the call context so it may be // accessed in your method handlers using newrelic.FromContext. // // Full example: // https://github.com/newrelic/go-agent/blob/master/v3/integrations/nrgrpc/example/server/server.go // func StreamServerInterceptor(app *newrelic.Application) grpc.StreamServerInterceptor { if app == nil { return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { return handler(srv, ss) } } return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { txn := startTransaction(ss.Context(), app, info.FullMethod) defer txn.End() err := handler(srv, newWrappedServerStream(ss, txn)) txn.SetWebResponse(nil).WriteHeader(int(status.Code(err))) if err != nil { txn.NoticeError(err) } return err } }
[ "\"NEW_RELIC_LICENSE_KEY\"", "\"NEW_RELIC_LICENSE_KEY\"" ]
[]
[ "NEW_RELIC_LICENSE_KEY" ]
[]
["NEW_RELIC_LICENSE_KEY"]
go
1
0
contrib/devtools/github-merge.py
#!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # This script will locally construct a merge commit for a pull request on a # github repository, inspect it, sign it and optionally push it. # The following temporary branches are created/overwritten and deleted: # * pull/$PULL/base (the current master we're merging onto) # * pull/$PULL/head (the current state of the remote pull request) # * pull/$PULL/merge (github's merge) # * pull/$PULL/local-merge (our merge) # In case of a clean merge that is accepted by the user, the local branch with # name $BRANCH is overwritten with the merged result, and optionally pushed. from __future__ import division,print_function,unicode_literals import os from sys import stdin,stdout,stderr import argparse import hashlib import subprocess import json,codecs try: from urllib.request import Request,urlopen except: from urllib2 import Request,urlopen # External tools (can be overridden using environment) GIT = os.getenv('GIT','git') BASH = os.getenv('BASH','bash') # OS specific configuration for terminal attributes ATTR_RESET = '' ATTR_PR = '' COMMIT_FORMAT = '%h %s (%an)%d' if os.name == 'posix': # if posix, assume we can use basic terminal escapes ATTR_RESET = '\033[0m' ATTR_PR = '\033[1;36m' COMMIT_FORMAT = '%C(audax blue)%h%Creset %s %C(cyan)(%an)%Creset%C(green)%d%Creset' def git_config_get(option, default=None): ''' Get named configuration option from git repository. ''' try: return subprocess.check_output([GIT,'config','--get',option]).rstrip().decode('utf-8') except subprocess.CalledProcessError as e: return default def retrieve_pr_info(repo,pull): ''' Retrieve pull request information from github. Return None if no title can be found, or an error happens. 
''' try: req = Request("https://api.github.com/repos/"+repo+"/pulls/"+pull) result = urlopen(req) reader = codecs.getreader('utf-8') obj = json.load(reader(result)) return obj except Exception as e: print('Warning: unable to retrieve pull information from github: %s' % e) return None def ask_prompt(text): print(text,end=" ",file=stderr) stderr.flush() reply = stdin.readline().rstrip() print("",file=stderr) return reply def get_symlink_files(): files = sorted(subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', 'HEAD']).splitlines()) ret = [] for f in files: if (int(f.decode('utf-8').split(" ")[0], 8) & 0o170000) == 0o120000: ret.append(f.decode('utf-8').split("\t")[1]) return ret def tree_sha512sum(commit='HEAD'): # request metadata for entire tree, recursively files = [] blob_by_name = {} for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines(): name_sep = line.index(b'\t') metadata = line[:name_sep].split() # perms, 'blob', blobid assert(metadata[1] == b'blob') name = line[name_sep+1:] files.append(name) blob_by_name[name] = metadata[2] files.sort() # open connection to git-cat-file in batch mode to request data for all blobs # this is much faster than launching it per file p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE) overall = hashlib.sha512() for f in files: blob = blob_by_name[f] # request blob p.stdin.write(blob + b'\n') p.stdin.flush() # read header: blob, "blob", size reply = p.stdout.readline().split() assert(reply[0] == blob and reply[1] == b'blob') size = int(reply[2]) # hash the blob data intern = hashlib.sha512() ptr = 0 while ptr < size: bs = min(65536, size - ptr) piece = p.stdout.read(bs) if len(piece) == bs: intern.update(piece) else: raise IOError('Premature EOF reading git cat-file output') ptr += bs dig = intern.hexdigest() assert(p.stdout.read(1) == b'\n') # ignore LF that follows blob data # update overall hash with file hash overall.update(dig.encode("utf-8")) overall.update(" ".encode("utf-8")) overall.update(f) overall.update("\n".encode("utf-8")) p.stdin.close() if p.wait(): raise IOError('Non-zero return value executing git cat-file') return overall.hexdigest() def parse_arguments(): epilog = ''' In addition, you can set the following git configuration variables: githubmerge.repository (mandatory), user.signingkey (mandatory), githubmerge.host (default: [email protected]), githubmerge.branch (no default), githubmerge.testcmd (default: none). ''' parser = argparse.ArgumentParser(description='Utility to merge, sign and push github pull requests', epilog=epilog) parser.add_argument('pull', metavar='PULL', type=int, nargs=1, help='Pull request ID to merge') parser.add_argument('branch', metavar='BRANCH', type=str, nargs='?', default=None, help='Branch to merge against (default: githubmerge.branch setting, or base branch for pull, or \'master\')') return parser.parse_args() def main(): # Extract settings from git repo repo = git_config_get('githubmerge.repository') host = git_config_get('githubmerge.host','[email protected]') opt_branch = git_config_get('githubmerge.branch',None) testcmd = git_config_get('githubmerge.testcmd') signingkey = git_config_get('user.signingkey') if repo is None: print("ERROR: No repository configured. Use this command to set:", file=stderr) print("git config githubmerge.repository <owner>/<repo>", file=stderr) exit(1) if signingkey is None: print("ERROR: No GPG signing key set. 
Set one using:",file=stderr) print("git config --global user.signingkey <key>",file=stderr) exit(1) host_repo = host+":"+repo # shortcut for push/pull target # Extract settings from command line args = parse_arguments() pull = str(args.pull[0]) # Receive pull information from github info = retrieve_pr_info(repo,pull) if info is None: exit(1) title = info['title'] # precedence order for destination branch argument: # - command line argument # - githubmerge.branch setting # - base branch for pull (as retrieved from github) # - 'master' branch = args.branch or opt_branch or info['base']['ref'] or 'master' # Initialize source branches head_branch = 'pull/'+pull+'/head' base_branch = 'pull/'+pull+'/base' merge_branch = 'pull/'+pull+'/merge' local_merge_branch = 'pull/'+pull+'/local-merge' devnull = open(os.devnull,'w') try: subprocess.check_call([GIT,'checkout','-q',branch]) except subprocess.CalledProcessError as e: print("ERROR: Cannot check out branch %s." % (branch), file=stderr) exit(3) try: subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/pull/'+pull+'/*:refs/heads/pull/'+pull+'/*']) except subprocess.CalledProcessError as e: print("ERROR: Cannot find pull request #%s on %s." % (pull,host_repo), file=stderr) exit(3) try: subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+head_branch], stdout=devnull, stderr=stdout) except subprocess.CalledProcessError as e: print("ERROR: Cannot find head of pull request #%s on %s." % (pull,host_repo), file=stderr) exit(3) try: subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+merge_branch], stdout=devnull, stderr=stdout) except subprocess.CalledProcessError as e: print("ERROR: Cannot find merge of pull request #%s on %s." % (pull,host_repo), file=stderr) exit(3) try: subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/heads/'+branch+':refs/heads/'+base_branch]) except subprocess.CalledProcessError as e: print("ERROR: Cannot find branch %s on %s." % (branch,host_repo), file=stderr) exit(3) subprocess.check_call([GIT,'checkout','-q',base_branch]) subprocess.call([GIT,'branch','-q','-D',local_merge_branch], stderr=devnull) subprocess.check_call([GIT,'checkout','-q','-b',local_merge_branch]) try: # Go up to the repository's root. toplevel = subprocess.check_output([GIT,'rev-parse','--show-toplevel']).strip() os.chdir(toplevel) # Create unsigned merge commit. 
if title: firstline = 'Merge #%s: %s' % (pull,title) else: firstline = 'Merge #%s' % (pull,) message = firstline + '\n\n' message += subprocess.check_output([GIT,'log','--no-merges','--topo-order','--pretty=format:%h %s (%an)',base_branch+'..'+head_branch]).decode('utf-8') try: subprocess.check_call([GIT,'merge','-q','--commit','--no-edit','--no-ff','-m',message.encode('utf-8'),head_branch]) except subprocess.CalledProcessError as e: print("ERROR: Cannot be merged cleanly.",file=stderr) subprocess.check_call([GIT,'merge','--abort']) exit(4) logmsg = subprocess.check_output([GIT,'log','--pretty=format:%s','-n','1']).decode('utf-8') if logmsg.rstrip() != firstline.rstrip(): print("ERROR: Creating merge failed (already merged?).",file=stderr) exit(4) symlink_files = get_symlink_files() for f in symlink_files: print("ERROR: File %s was a symlink" % f) if len(symlink_files) > 0: exit(4) # Put tree SHA512 into the message try: first_sha512 = tree_sha512sum() message += '\n\nTree-SHA512: ' + first_sha512 except subprocess.CalledProcessError as e: print("ERROR: Unable to compute tree hash") exit(4) try: subprocess.check_call([GIT,'commit','--amend','-m',message.encode('utf-8')]) except subprocess.CalledProcessError as e: print("ERROR: Cannot update message.",file=stderr) exit(4) print('%s#%s%s %s %sinto %s%s' % (ATTR_RESET+ATTR_PR,pull,ATTR_RESET,title,ATTR_RESET+ATTR_PR,branch,ATTR_RESET)) subprocess.check_call([GIT,'log','--graph','--topo-order','--pretty=format:'+COMMIT_FORMAT,base_branch+'..'+head_branch]) print() # Run test command if configured. if testcmd: if subprocess.call(testcmd,shell=True): print("ERROR: Running %s failed." % testcmd,file=stderr) exit(5) # Show the created merge. diff = subprocess.check_output([GIT,'diff',merge_branch+'..'+local_merge_branch]) subprocess.check_call([GIT,'diff',base_branch+'..'+local_merge_branch]) if diff: print("WARNING: merge differs from github!",file=stderr) reply = ask_prompt("Type 'ignore' to continue.") if reply.lower() == 'ignore': print("Difference with github ignored.",file=stderr) else: exit(6) reply = ask_prompt("Press 'd' to accept the diff.") if reply.lower() == 'd': print("Diff accepted.",file=stderr) else: print("ERROR: Diff rejected.",file=stderr) exit(6) else: # Verify the result manually. print("Dropping you on a shell so you can try building/testing the merged source.",file=stderr) print("Run 'git diff HEAD~' to show the changes being merged.",file=stderr) print("Type 'exit' when done.",file=stderr) if os.path.isfile('/etc/debian_version'): # Show pull number on Debian default prompt os.putenv('debian_chroot',pull) subprocess.call([BASH,'-i']) reply = ask_prompt("Type 'm' to accept the merge.") if reply.lower() == 'm': print("Merge accepted.",file=stderr) else: print("ERROR: Merge rejected.",file=stderr) exit(7) second_sha512 = tree_sha512sum() if first_sha512 != second_sha512: print("ERROR: Tree hash changed unexpectedly",file=stderr) exit(8) # Sign the merge commit. reply = ask_prompt("Type 's' to sign off on the merge.") if reply == 's': try: subprocess.check_call([GIT,'commit','-q','--gpg-sign','--amend','--no-edit']) except subprocess.CalledProcessError as e: print("Error signing, exiting.",file=stderr) exit(1) else: print("Not signing off on merge, exiting.",file=stderr) exit(1) # Put the result in branch. subprocess.check_call([GIT,'checkout','-q',branch]) subprocess.check_call([GIT,'reset','-q','--hard',local_merge_branch]) finally: # Clean up temporary branches.
subprocess.call([GIT,'checkout','-q',branch]) subprocess.call([GIT,'branch','-q','-D',head_branch],stderr=devnull) subprocess.call([GIT,'branch','-q','-D',base_branch],stderr=devnull) subprocess.call([GIT,'branch','-q','-D',merge_branch],stderr=devnull) subprocess.call([GIT,'branch','-q','-D',local_merge_branch],stderr=devnull) # Push the result. reply = ask_prompt("Type 'push' to push the result to %s, branch %s." % (host_repo,branch)) if reply.lower() == 'push': subprocess.check_call([GIT,'push',host_repo,'refs/heads/'+branch]) if __name__ == '__main__': main()
[]
[]
[ "GIT", "BASH" ]
[]
["GIT", "BASH"]
python
2
0
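github-merge.py resolves its external tools through the GIT and BASH environment variables, falling back to the plain binary names when they are unset. A minimal sketch of the same os.getenv override pattern, assuming git is available on PATH:

import os
import subprocess

GIT = os.getenv("GIT", "git")  # same fallback convention as in the script above
BASH = os.getenv("BASH", "bash")

print(subprocess.check_output([GIT, "--version"]).decode().strip())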
cmd/testrunner/cmd/run_gardener/run.go
// Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package run_gardener import ( "context" "fmt" "os" "time" ociopts "github.com/gardener/component-cli/ociclient/options" "github.com/gardener/test-infra/pkg/common" "github.com/gardener/test-infra/pkg/hostscheduler/gardenerscheduler" "github.com/gardener/test-infra/pkg/shootflavors" "github.com/gardener/test-infra/pkg/testmachinery/controller/watch" metadata2 "github.com/gardener/test-infra/pkg/testmachinery/metadata" "github.com/gardener/test-infra/pkg/testmachinery/testrun" "github.com/gardener/test-infra/pkg/testrun_renderer" _default "github.com/gardener/test-infra/pkg/testrun_renderer/default" "github.com/gardener/test-infra/pkg/testrun_renderer/templates" "github.com/gardener/test-infra/pkg/testrunner/componentdescriptor" "github.com/gardener/test-infra/pkg/util/cmdvalues" "github.com/gardener/test-infra/pkg/util/gardensetup" "github.com/gardener/test-infra/pkg/logger" "github.com/gardener/test-infra/pkg/util" "github.com/spf13/cobra" "github.com/gardener/test-infra/pkg/testmachinery" "github.com/gardener/test-infra/pkg/testrunner" "github.com/gardener/test-infra/pkg/testrunner/result" ) var ( tmKubeconfigPath string failOnError bool testrunnerConfig = testrunner.Config{} collectConfig = result.Config{} defaultConfig = _default.Config{} metadata = metadata2.Metadata{} ociOpts = &ociopts.Options{} testrunNamePrefix string kubernetesVersions []string cloudproviders []common.CloudProvider gardenerExtensions string testLabel string hibernation bool ) // AddCommand adds run-gardener to a command. 
func AddCommand(cmd *cobra.Command) { cmd.AddCommand(runCmd) } var runCmd = &cobra.Command{ Use: "run-gardener", Short: "Run the testrunner with the default gardener test", Aliases: []string{ "gardener", }, Run: func(cmd *cobra.Command, args []string) { var ( err error ctx, stopFunc = context.WithCancel(context.Background()) ) defer stopFunc() dryRun, _ := cmd.Flags().GetBool("dry-run") logger.Log.Info("Start testmachinery testrunner") collectConfig.OCIOpts = ociOpts components, err := componentdescriptor.GetComponentsFromFileWithOCIOptions(ctx, logger.Log, ociOpts, collectConfig.ComponentDescriptorPath) if err != nil { logger.Log.Error(err, "unable to render default testrun") os.Exit(1) } rawFlavors := make([]*common.ShootFlavor, len(cloudproviders)) for i, cp := range cloudproviders { versions := util.ConvertStringArrayToVersions(kubernetesVersions) rawFlavors[i] = &common.ShootFlavor{ Provider: cp, KubernetesVersions: common.ShootKubernetesVersionFlavor{ Versions: &versions, }, } } flavors, err := shootflavors.New(rawFlavors) if err != nil { logger.Log.Error(err, "unable to render default testrun") os.Exit(1) } defaultConfig.Shoots.Flavors = flavors defaultConfig.Components = components defaultConfig.Namespace = testrunnerConfig.Namespace defaultConfig.Shoots.DefaultTest = templates.TestWithLabels(testLabel) if hibernation { defaultConfig.Shoots.Tests = []testrun_renderer.TestsFunc{templates.HibernationLifecycle} } defaultConfig.GardenerExtensions, err = gardensetup.ParseFlag(gardenerExtensions) if err != nil { logger.Log.Error(err, "unable to parse gardener extensions") os.Exit(1) } tr, err := _default.Render(&defaultConfig) if err != nil { logger.Log.Error(err, "unable to render default testrun") os.Exit(1) } runs := testrunner.RunList{ &testrunner.Run{ Testrun: tr, Metadata: &metadata, Error: nil, }, } if dryRun { fmt.Print(util.PrettyPrintStruct(tr)) if err, _ := testrun.Validate(logger.Log.WithName("validation"), tr); err != nil { fmt.Println(err.Error()) } os.Exit(0) } watcher, err := watch.NewFromFile(logger.Log, tmKubeconfigPath, nil) if err != nil { logger.Log.Error(err, "unable to start testrun watch controller") os.Exit(1) } go func() { if err := watcher.Start(ctx); err != nil { logger.Log.Error(err, "unable to start testrun watch controller") os.Exit(1) } }() if err := watch.WaitForCacheSyncWithTimeout(watcher, 2*time.Minute); err != nil { logger.Log.Error(err, "unable to wait for cache") os.Exit(1) } testrunnerConfig.Watch = watcher testrunName := fmt.Sprintf("%s-", testrunNamePrefix) collector, err := result.New(logger.Log.WithName("collector"), collectConfig, tmKubeconfigPath) if err != nil { logger.Log.Error(err, "unable to initialize collector") os.Exit(1) } if err = testrunner.ExecuteTestruns(logger.Log.WithName("Execute"), &testrunnerConfig, runs, testrunName); err != nil { logger.Log.Error(err, "unable to run testruns") os.Exit(1) } failed, err := collector.Collect(ctx, logger.Log.WithName("Collect"), testrunnerConfig.Watch.Client(), testrunnerConfig.Namespace, runs) if err != nil { logger.Log.Error(err, "unable to collect results") os.Exit(1) } result.GenerateNotificationConfigForAlerting(runs.GetTestruns(), collectConfig.ConcourseOnErrorDir) logger.Log.Info("Testrunner finished") // Fail when one testrun is failed and we should fail on failed testruns. // Otherwise only fail when the testrun execution is erroneous. 
if runs.HasErrors() { os.Exit(1) } if failOnError && failed { os.Exit(1) } }, } func init() { // configuration flags runCmd.Flags().StringVar(&tmKubeconfigPath, "kubeconfig", os.Getenv("KUBECONFIG"), "Path to the testmachinery cluster kubeconfig") if err := runCmd.MarkFlagFilename("kubeconfig"); err != nil { logger.Log.Error(err, "mark flag filename", "flag", "kubeconfig") } runCmd.Flags().StringVar(&testrunNamePrefix, "testrun-prefix", "default-", "Testrun name prefix which is used to generate a unique testrun name.") runCmd.Flags().StringVarP(&testrunnerConfig.Namespace, "namespace", "n", "default", "Namespace where the testrun should be deployed.") runCmd.Flags().Var(cmdvalues.NewDurationValue(&testrunnerConfig.Timeout, time.Hour), "timeout", "Timeout for the testrunner to wait for the complete testrun to finish. Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'.") runCmd.Flags().String("interval", "20s", "[DEPRECATED] Value has no effect on the testrunner") runCmd.Flags().BoolVar(&failOnError, "fail-on-error", true, "Testrunner exits with 1 if one testrun failed.") runCmd.Flags().StringVar(&collectConfig.ConcourseOnErrorDir, "concourse-onError-dir", os.Getenv("ON_ERROR_DIR"), "On error dir which is used by Concourse.") runCmd.Flags().StringVar(&collectConfig.ComponentDescriptorPath, "component-descriptor-path", "", "Path to the component descriptor (BOM) of the current landscape.") runCmd.Flags().Var(cmdvalues.NewHostProviderValue(&defaultConfig.HostProvider, gardenerscheduler.Name), "hostprovider", "Specify the provider for selecting the base cluster") runCmd.Flags().StringVar(&defaultConfig.GardenSetupRevision, "garden-setup-version", "master", "Specify the garden setup version to setup gardener") runCmd.Flags().Var(cmdvalues.NewCloudProviderValue(&defaultConfig.BaseClusterCloudprovider, common.CloudProviderGCP, common.CloudProviderGCP, common.CloudProviderAWS, common.CloudProviderAzure), "host-cloudprovider", "Specify the cloudprovider of the host cluster.
Optional and only affects gardener base clusters") runCmd.Flags().StringVar(&defaultConfig.Gardener.Version, "gardener-version", "", "Specify the gardener version to be deployed by garden setup") runCmd.Flags().StringVar(&defaultConfig.Gardener.ImageTag, "gardener-image", "", "Specify the gardener image tag to be deployed by garden setup") runCmd.Flags().StringVar(&defaultConfig.Gardener.Commit, "gardener-commit", "", "Specify the gardener commit that is deployed by garden setup") runCmd.Flags().StringVar(&gardenerExtensions, "gardener-extensions", "provider-gcp=github.com/gardener/gardener-extensions.git::master", "Specify the gardener extensions versions to be deployed by garden setup") runCmd.Flags().StringVar(&defaultConfig.Shoots.Namespace, "project-namespace", "garden-core", "Specify the shoot namespace where the shoots should be created") runCmd.Flags().StringArrayVar(&kubernetesVersions, "kubernetes-version", []string{}, "Specify the kubernetes version to test") runCmd.Flags().VarP(cmdvalues.NewCloudProviderArrayValue(&cloudproviders, common.CloudProviderGCP, common.CloudProviderAWS, common.CloudProviderAzure), "cloudprovider", "p", "Specify the cloudproviders to test.") runCmd.Flags().StringVarP(&testLabel, "label", "l", string(testmachinery.TestLabelDefault), "Specify test label that should be fetched by the testmachinery") runCmd.Flags().BoolVar(&hibernation, "hibernation", false, "test hibernation") // status asset upload runCmd.Flags().BoolVar(&collectConfig.UploadStatusAsset, "upload-status-asset", false, "Upload testrun status as a github release asset.") runCmd.Flags().StringVar(&collectConfig.GithubUser, "github-user", os.Getenv("GITHUB_USER"), "GitHub username.") runCmd.Flags().StringVar(&collectConfig.GithubPassword, "github-password", os.Getenv("GITHUB_PASSWORD"), "GitHub password.") runCmd.Flags().StringArrayVar(&collectConfig.AssetComponents, "asset-component", []string{}, "The github components to which the testrun status shall be attached as an asset.") runCmd.Flags().StringVar(&collectConfig.AssetPrefix, "asset-prefix", "", "Prefix of the asset name.") // metadata runCmd.Flags().StringVar(&metadata.Landscape, "landscape", "", "gardener landscape name") ociOpts.AddFlags(runCmd.Flags()) // DEPRECATED FLAGS // is now handled by the testmachinery runCmd.Flags().StringVar(&collectConfig.OutputDir, "output-dir-path", "./testout", "The filepath where the summary should be written to.") runCmd.Flags().String("es-config-name", "sap_internal", "The elasticsearch secret-server config name.") runCmd.Flags().String("es-endpoint", "", "endpoint of the elasticsearch instance") runCmd.Flags().String("es-username", "", "username to authenticate against an elasticsearch instance") runCmd.Flags().String("es-password", "", "password to authenticate against an elasticsearch instance") runCmd.Flags().String("s3-endpoint", os.Getenv("S3_ENDPOINT"), "S3 endpoint of the testmachinery cluster.") runCmd.Flags().Bool("s3-ssl", false, "S3 has SSL enabled.") _ = runCmd.Flags().MarkDeprecated("output-dir-path", "DEPRECATED: will not be used anymore") _ = runCmd.Flags().MarkDeprecated("es-config-name", "DEPRECATED: will not be used anymore") _ = runCmd.Flags().MarkDeprecated("es-endpoint", "DEPRECATED: will not be used anymore") _ = runCmd.Flags().MarkDeprecated("es-username", "DEPRECATED: will not be used anymore") _ = runCmd.Flags().MarkDeprecated("es-password", "DEPRECATED: will not be used anymore") _ = runCmd.Flags().MarkDeprecated("s3-endpoint", "DEPRECATED: will not be used anymore") _ = runCmd.Flags().MarkDeprecated("s3-ssl", "DEPRECATED: will not be used anymore") }
[ "\"KUBECONFIG\"", "\"ON_ERROR_DIR\"", "\"GITHUB_USER\"", "\"GITHUB_PASSWORD\"", "\"S3_ENDPOINT\"" ]
[]
[ "GITHUB_PASSWORD", "GITHUB_USER", "KUBECONFIG", "ON_ERROR_DIR", "S3_ENDPOINT" ]
[]
["GITHUB_PASSWORD", "GITHUB_USER", "KUBECONFIG", "ON_ERROR_DIR", "S3_ENDPOINT"]
go
5
0
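Note: the record above seeds its CLI flag defaults from environment variables via os.Getenv (KUBECONFIG, ON_ERROR_DIR, GITHUB_USER, GITHUB_PASSWORD, S3_ENDPOINT), which is exactly what the extracted environment list reflects. As a minimal illustrative sketch of the same env-var-as-flag-default pattern (a hypothetical Python analogue, not part of the record):
import argparse
import os

# Hypothetical sketch: flag defaults seeded from environment variables,
# mirroring the os.Getenv(...) defaults used by the run-gardener command above.
parser = argparse.ArgumentParser(prog="run-gardener-sketch")
parser.add_argument("--kubeconfig", default=os.getenv("KUBECONFIG", ""))
parser.add_argument("--concourse-on-error-dir", default=os.getenv("ON_ERROR_DIR", ""))
parser.add_argument("--github-user", default=os.getenv("GITHUB_USER", ""))
args = parser.parse_args([])  # explicitly passed flags would still override the env defaults
print(args.kubeconfig)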
students/K33421/Samoshchenkov_Alexei/lr_2/untitled1/wsgi.py
""" WSGI config for untitled1 project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'untitled1.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
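Note on the empty environment lists for this record: wsgi.py only calls os.environ.setdefault, which writes a default rather than reading a variable, so presumably no environment read is extracted. A minimal sketch of the setdefault semantics (the value "untitled1.settings_prod" is a hypothetical override, not from the record):
import os

# setdefault only writes when the variable is absent; an existing value wins.
os.environ.pop("DJANGO_SETTINGS_MODULE", None)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "untitled1.settings")
assert os.environ["DJANGO_SETTINGS_MODULE"] == "untitled1.settings"

# If the variable is already exported (e.g. by the WSGI server), setdefault is a no-op.
os.environ["DJANGO_SETTINGS_MODULE"] = "untitled1.settings_prod"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "untitled1.settings")
assert os.environ["DJANGO_SETTINGS_MODULE"] == "untitled1.settings_prod"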
google/cloud/retail/v2beta/retail-v2beta-py/tests/unit/gapic/retail_v2beta/test_prediction_service.py
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import mock import packaging.version import grpc from grpc.experimental import aio import math import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule from google.api_core import client_options from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.retail_v2beta.services.prediction_service import PredictionServiceAsyncClient from google.cloud.retail_v2beta.services.prediction_service import PredictionServiceClient from google.cloud.retail_v2beta.services.prediction_service import transports from google.cloud.retail_v2beta.services.prediction_service.transports.base import _GOOGLE_AUTH_VERSION from google.cloud.retail_v2beta.types import common from google.cloud.retail_v2beta.types import prediction_service from google.cloud.retail_v2beta.types import product from google.cloud.retail_v2beta.types import user_event from google.oauth2 import service_account from google.protobuf import duration_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import struct_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.protobuf import wrappers_pb2 # type: ignore import google.auth # TODO(busunkim): Once google-auth >= 1.25.0 is required transitively # through google-api-core: # - Delete the auth "less than" test cases # - Delete these pytest markers (Make the "greater than or equal to" tests the default). requires_google_auth_lt_1_25_0 = pytest.mark.skipif( packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), reason="This test requires google-auth < 1.25.0", ) requires_google_auth_gte_1_25_0 = pytest.mark.skipif( packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), reason="This test requires google-auth >= 1.25.0", ) def client_cert_source_callback(): return b"cert bytes", b"key bytes" # If default endpoint is localhost, then default mtls endpoint will be the same. # This method modifies the default endpoint so the client can produce a different # mtls endpoint for endpoint testing purposes. 
def modify_default_endpoint(client): return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" sandbox_endpoint = "example.sandbox.googleapis.com" sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" non_googleapi = "api.example.com" assert PredictionServiceClient._get_default_mtls_endpoint(None) is None assert PredictionServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint assert PredictionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint assert PredictionServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi @pytest.mark.parametrize("client_class", [ PredictionServiceClient, PredictionServiceAsyncClient, ]) def test_prediction_service_client_from_service_account_info(client_class): creds = ga_credentials.AnonymousCredentials() with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: factory.return_value = creds info = {"valid": True} client = client_class.from_service_account_info(info) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == 'retail.googleapis.com:443' @pytest.mark.parametrize("transport_class,transport_name", [ (transports.PredictionServiceGrpcTransport, "grpc"), (transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), ]) def test_prediction_service_client_service_account_always_use_jwt(transport_class, transport_name): with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=True) use_jwt.assert_called_once_with(True) with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=False) use_jwt.assert_not_called() @pytest.mark.parametrize("client_class", [ PredictionServiceClient, PredictionServiceAsyncClient, ]) def test_prediction_service_client_from_service_account_file(client_class): creds = ga_credentials.AnonymousCredentials() with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == 'retail.googleapis.com:443' def test_prediction_service_client_get_transport_class(): transport = PredictionServiceClient.get_transport_class() available_transports = [ transports.PredictionServiceGrpcTransport, ] assert transport in available_transports transport = PredictionServiceClient.get_transport_class("grpc") assert transport == transports.PredictionServiceGrpcTransport @pytest.mark.parametrize("client_class,transport_class,transport_name", [ (PredictionServiceClient, 
transports.PredictionServiceGrpcTransport, "grpc"), (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), ]) @mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) @mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) def test_prediction_service_client_client_options(client_class, transport_class, transport_name): # Check that if channel is provided we won't create a new one. with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: transport = transport_class( credentials=ga_credentials.AnonymousCredentials() ) client = client_class(transport=transport) gtc.assert_not_called() # Check that if channel is provided via str we will create a new one. with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: client = client_class(transport=transport_name) gtc.assert_called() # Check the case api_endpoint is provided. options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") with mock.patch.object(transport_class, '__init__') as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): with mock.patch.object(transport_class, '__init__') as patched: patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "always". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): with mock.patch.object(transport_class, '__init__') as patched: patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): with pytest.raises(MutualTLSChannelError): client = client_class() # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): with pytest.raises(ValueError): client = client_class() # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, '__init__') as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "true"), (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "false"), (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), ]) @mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) @mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_prediction_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. # Check the case client_cert_source is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) with mock.patch.object(transport_class, '__init__') as patched: patched.return_value = None client = client_class(client_options=options) if use_client_cert_env == "false": expected_client_cert_source = None expected_host = client.DEFAULT_ENDPOINT else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): with mock.patch.object(transport_class, '__init__') as patched: with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): if use_client_cert_env == "false": expected_host = client.DEFAULT_ENDPOINT expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT expected_client_cert_source = client_cert_source_callback patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case client_cert_source and ADC client cert are not provided. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): with mock.patch.object(transport_class, '__init__') as patched: with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize("client_class,transport_class,transport_name", [ (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), ]) def test_prediction_service_client_client_options_scopes(client_class, transport_class, transport_name): # Check the case scopes are provided. options = client_options.ClientOptions( scopes=["1", "2"], ) with mock.patch.object(transport_class, '__init__') as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize("client_class,transport_class,transport_name", [ (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), ]) def test_prediction_service_client_client_options_credentials_file(client_class, transport_class, transport_name): # Check the case credentials file is provided. 
options = client_options.ClientOptions( credentials_file="credentials.json" ) with mock.patch.object(transport_class, '__init__') as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) def test_prediction_service_client_client_options_from_dict(): with mock.patch('google.cloud.retail_v2beta.services.prediction_service.transports.PredictionServiceGrpcTransport.__init__') as grpc_transport: grpc_transport.return_value = None client = PredictionServiceClient( client_options={'api_endpoint': 'squid.clam.whelk'} ) grpc_transport.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) def test_predict(transport: str = 'grpc', request_type=prediction_service.PredictRequest): client = PredictionServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.predict), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = prediction_service.PredictResponse( attribution_token='attribution_token_value', missing_ids=['missing_ids_value'], validate_only=True, ) response = client.predict(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == prediction_service.PredictRequest() # Establish that the response is the type that we expect. assert isinstance(response, prediction_service.PredictResponse) assert response.attribution_token == 'attribution_token_value' assert response.missing_ids == ['missing_ids_value'] assert response.validate_only is True def test_predict_from_dict(): test_predict(request_type=dict) def test_predict_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = PredictionServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.predict), '__call__') as call: client.predict() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == prediction_service.PredictRequest() @pytest.mark.asyncio async def test_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.PredictRequest): client = PredictionServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.predict), '__call__') as call: # Designate an appropriate return value for the call. 
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse( attribution_token='attribution_token_value', missing_ids=['missing_ids_value'], validate_only=True, )) response = await client.predict(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == prediction_service.PredictRequest() # Establish that the response is the type that we expect. assert isinstance(response, prediction_service.PredictResponse) assert response.attribution_token == 'attribution_token_value' assert response.missing_ids == ['missing_ids_value'] assert response.validate_only is True @pytest.mark.asyncio async def test_predict_async_from_dict(): await test_predict_async(request_type=dict) def test_predict_field_headers(): client = PredictionServiceClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = prediction_service.PredictRequest() request.placement = 'placement/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.predict), '__call__') as call: call.return_value = prediction_service.PredictResponse() client.predict(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'placement=placement/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_predict_field_headers_async(): client = PredictionServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = prediction_service.PredictRequest() request.placement = 'placement/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.predict), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) await client.predict(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'placement=placement/value', ) in kw['metadata'] def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.PredictionServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = PredictionServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. transport = transports.PredictionServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = PredictionServiceClient( client_options={"credentials_file": "credentials.json"}, transport=transport, ) # It is an error to provide scopes and a transport instance. 
transport = transports.PredictionServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = PredictionServiceClient( client_options={"scopes": ["1", "2"]}, transport=transport, ) def test_transport_instance(): # A client may be instantiated with a custom transport instance. transport = transports.PredictionServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) client = PredictionServiceClient(transport=transport) assert client.transport is transport def test_transport_get_channel(): # A client may be instantiated with a custom transport instance. transport = transports.PredictionServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel transport = transports.PredictionServiceGrpcAsyncIOTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel @pytest.mark.parametrize("transport_class", [ transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport, ]) def test_transport_adc(transport_class): # Test default credentials are used if not provided. with mock.patch.object(google.auth, 'default') as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class() adc.assert_called_once() def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = PredictionServiceClient( credentials=ga_credentials.AnonymousCredentials(), ) assert isinstance( client.transport, transports.PredictionServiceGrpcTransport, ) def test_prediction_service_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.PredictionServiceTransport( credentials=ga_credentials.AnonymousCredentials(), credentials_file="credentials.json" ) def test_prediction_service_base_transport(): # Instantiate the base transport. with mock.patch('google.cloud.retail_v2beta.services.prediction_service.transports.PredictionServiceTransport.__init__') as Transport: Transport.return_value = None transport = transports.PredictionServiceTransport( credentials=ga_credentials.AnonymousCredentials(), ) # Every method on the transport should just blindly # raise NotImplementedError. 
methods = ( 'predict', ) for method in methods: with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) with pytest.raises(NotImplementedError): transport.close() @requires_google_auth_gte_1_25_0 def test_prediction_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.retail_v2beta.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.PredictionServiceTransport( credentials_file="credentials.json", quota_project_id="octopus", ) load_creds.assert_called_once_with("credentials.json", scopes=None, default_scopes=( 'https://www.googleapis.com/auth/cloud-platform', ), quota_project_id="octopus", ) @requires_google_auth_lt_1_25_0 def test_prediction_service_base_transport_with_credentials_file_old_google_auth(): # Instantiate the base transport with a credentials file with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.retail_v2beta.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.PredictionServiceTransport( credentials_file="credentials.json", quota_project_id="octopus", ) load_creds.assert_called_once_with("credentials.json", scopes=( 'https://www.googleapis.com/auth/cloud-platform', ), quota_project_id="octopus", ) def test_prediction_service_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.retail_v2beta.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: Transport.return_value = None adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.PredictionServiceTransport() adc.assert_called_once() @requires_google_auth_gte_1_25_0 def test_prediction_service_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(google.auth, 'default', autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) PredictionServiceClient() adc.assert_called_once_with( scopes=None, default_scopes=( 'https://www.googleapis.com/auth/cloud-platform', ), quota_project_id=None, ) @requires_google_auth_lt_1_25_0 def test_prediction_service_auth_adc_old_google_auth(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(google.auth, 'default', autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) PredictionServiceClient() adc.assert_called_once_with( scopes=( 'https://www.googleapis.com/auth/cloud-platform',), quota_project_id=None, ) @pytest.mark.parametrize( "transport_class", [ transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport, ], ) @requires_google_auth_gte_1_25_0 def test_prediction_service_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. 
with mock.patch.object(google.auth, 'default', autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) adc.assert_called_once_with( scopes=["1", "2"], default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), quota_project_id="octopus", ) @pytest.mark.parametrize( "transport_class", [ transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport, ], ) @requires_google_auth_lt_1_25_0 def test_prediction_service_transport_auth_adc_old_google_auth(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class(quota_project_id="octopus") adc.assert_called_once_with(scopes=( 'https://www.googleapis.com/auth/cloud-platform', ), quota_project_id="octopus", ) @pytest.mark.parametrize( "transport_class,grpc_helpers", [ (transports.PredictionServiceGrpcTransport, grpc_helpers), (transports.PredictionServiceGrpcAsyncIOTransport, grpc_helpers_async) ], ) def test_prediction_service_transport_create_channel(transport_class, grpc_helpers): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( grpc_helpers, "create_channel", autospec=True ) as create_channel: creds = ga_credentials.AnonymousCredentials() adc.return_value = (creds, None) transport_class( quota_project_id="octopus", scopes=["1", "2"] ) create_channel.assert_called_with( "retail.googleapis.com:443", credentials=creds, credentials_file=None, quota_project_id="octopus", default_scopes=( 'https://www.googleapis.com/auth/cloud-platform', ), scopes=["1", "2"], default_host="retail.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) def test_prediction_service_grpc_transport_client_cert_source_for_mtls( transport_class ): cred = ga_credentials.AnonymousCredentials() # Check ssl_channel_credentials is used if provided. with mock.patch.object(transport_class, "create_channel") as mock_create_channel: mock_ssl_channel_creds = mock.Mock() transport_class( host="squid.clam.whelk", credentials=cred, ssl_channel_credentials=mock_ssl_channel_creds ) mock_create_channel.assert_called_once_with( "squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls # is used. 
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: transport_class( credentials=cred, client_cert_source_for_mtls=client_cert_source_callback ) expected_cert, expected_key = client_cert_source_callback() mock_ssl_cred.assert_called_once_with( certificate_chain=expected_cert, private_key=expected_key ) def test_prediction_service_host_no_port(): client = PredictionServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions(api_endpoint='retail.googleapis.com'), ) assert client.transport._host == 'retail.googleapis.com:443' def test_prediction_service_host_with_port(): client = PredictionServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions(api_endpoint='retail.googleapis.com:8000'), ) assert client.transport._host == 'retail.googleapis.com:8000' def test_prediction_service_grpc_transport_channel(): channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.PredictionServiceGrpcTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None def test_prediction_service_grpc_asyncio_transport_channel(): channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.PredictionServiceGrpcAsyncIOTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) def test_prediction_service_transport_channel_mtls_with_client_cert_source( transport_class ): with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel cred = ga_credentials.AnonymousCredentials() with pytest.warns(DeprecationWarning): with mock.patch.object(google.auth, 'default') as adc: adc.return_value = (cred, None) transport = transport_class( host="squid.clam.whelk", api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=client_cert_source_callback, ) adc.assert_called_once() grpc_ssl_channel_cred.assert_called_once_with( certificate_chain=b"cert bytes", private_key=b"key bytes" ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. 
@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) def test_prediction_service_transport_channel_mtls_with_adc( transport_class ): mock_ssl_cred = mock.Mock() with mock.patch.multiple( "google.auth.transport.grpc.SslCredentials", __init__=mock.Mock(return_value=None), ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel mock_cred = mock.Mock() with pytest.warns(DeprecationWarning): transport = transport_class( host="squid.clam.whelk", credentials=mock_cred, api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=None, ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel def test_product_path(): project = "squid" location = "clam" catalog = "whelk" branch = "octopus" product = "oyster" expected = "projects/{project}/locations/{location}/catalogs/{catalog}/branches/{branch}/products/{product}".format(project=project, location=location, catalog=catalog, branch=branch, product=product, ) actual = PredictionServiceClient.product_path(project, location, catalog, branch, product) assert expected == actual def test_parse_product_path(): expected = { "project": "nudibranch", "location": "cuttlefish", "catalog": "mussel", "branch": "winkle", "product": "nautilus", } path = PredictionServiceClient.product_path(**expected) # Check that the path construction is reversible. actual = PredictionServiceClient.parse_product_path(path) assert expected == actual def test_common_billing_account_path(): billing_account = "scallop" expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) actual = PredictionServiceClient.common_billing_account_path(billing_account) assert expected == actual def test_parse_common_billing_account_path(): expected = { "billing_account": "abalone", } path = PredictionServiceClient.common_billing_account_path(**expected) # Check that the path construction is reversible. actual = PredictionServiceClient.parse_common_billing_account_path(path) assert expected == actual def test_common_folder_path(): folder = "squid" expected = "folders/{folder}".format(folder=folder, ) actual = PredictionServiceClient.common_folder_path(folder) assert expected == actual def test_parse_common_folder_path(): expected = { "folder": "clam", } path = PredictionServiceClient.common_folder_path(**expected) # Check that the path construction is reversible. actual = PredictionServiceClient.parse_common_folder_path(path) assert expected == actual def test_common_organization_path(): organization = "whelk" expected = "organizations/{organization}".format(organization=organization, ) actual = PredictionServiceClient.common_organization_path(organization) assert expected == actual def test_parse_common_organization_path(): expected = { "organization": "octopus", } path = PredictionServiceClient.common_organization_path(**expected) # Check that the path construction is reversible. 
actual = PredictionServiceClient.parse_common_organization_path(path) assert expected == actual def test_common_project_path(): project = "oyster" expected = "projects/{project}".format(project=project, ) actual = PredictionServiceClient.common_project_path(project) assert expected == actual def test_parse_common_project_path(): expected = { "project": "nudibranch", } path = PredictionServiceClient.common_project_path(**expected) # Check that the path construction is reversible. actual = PredictionServiceClient.parse_common_project_path(path) assert expected == actual def test_common_location_path(): project = "cuttlefish" location = "mussel" expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) actual = PredictionServiceClient.common_location_path(project, location) assert expected == actual def test_parse_common_location_path(): expected = { "project": "winkle", "location": "nautilus", } path = PredictionServiceClient.common_location_path(**expected) # Check that the path construction is reversible. actual = PredictionServiceClient.parse_common_location_path(path) assert expected == actual def test_client_withDEFAULT_CLIENT_INFO(): client_info = gapic_v1.client_info.ClientInfo() with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: client = PredictionServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: transport_class = PredictionServiceClient.get_transport_class() transport = transport_class( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) @pytest.mark.asyncio async def test_transport_close_async(): client = PredictionServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", ) with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: async with client: close.assert_not_called() close.assert_called_once() def test_transport_close(): transports = { "grpc": "_grpc_channel", } for transport, close_name in transports.items(): client = PredictionServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: with client: close.assert_not_called() close.assert_called_once() def test_client_ctx(): transports = [ 'grpc', ] for transport in transports: client = PredictionServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) # Test client calls underlying transport. with mock.patch.object(type(client.transport), "close") as close: close.assert_not_called() with client: pass close.assert_called()
[]
[]
[]
[]
[]
python
0
0
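Note: the tests in the record above steer endpoint and mTLS selection by patching os.environ with mock.patch.dict (GOOGLE_API_USE_MTLS_ENDPOINT, GOOGLE_API_USE_CLIENT_CERTIFICATE), which restores the original environment when the context exits. A minimal self-contained sketch of that mechanism; client_uses_mtls_endpoint is a hypothetical stand-in for the client logic, not part of the generated tests:
import os
from unittest import mock

def client_uses_mtls_endpoint() -> bool:
    # Hypothetical stand-in for the endpoint-selection logic exercised above.
    return os.environ.get("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") == "always"

def test_mtls_endpoint_forced():
    # patch.dict restores os.environ on exit, so the override cannot leak
    # into other tests in the module.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        assert client_uses_mtls_endpoint()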
src/cmd/compile/internal/ssa/debug_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ssa_test import ( "bytes" "flag" "fmt" "internal/testenv" "io" "io/ioutil" "os" "os/exec" "path/filepath" "regexp" "runtime" "strconv" "strings" "testing" "time" ) var ( update = flag.Bool("u", false, "update test reference files") verbose = flag.Bool("v", false, "print debugger interactions (very verbose)") dryrun = flag.Bool("n", false, "just print the command line and first debugging bits") useGdb = flag.Bool("g", false, "use Gdb instead of Delve (dlv), use gdb reference files") force = flag.Bool("f", false, "force run under not linux-amd64; also do not use tempdir") repeats = flag.Bool("r", false, "detect repeats in debug steps and don't ignore them") inlines = flag.Bool("i", false, "do inlining for gdb (makes testing flaky till inlining info is correct)") ) var ( hexRe = regexp.MustCompile("0x[a-zA-Z0-9]+") numRe = regexp.MustCompile("-?[0-9]+") stringRe = regexp.MustCompile("\"([^\\\"]|(\\.))*\"") leadingDollarNumberRe = regexp.MustCompile("^[$][0-9]+") optOutGdbRe = regexp.MustCompile("[<]optimized out[>]") numberColonRe = regexp.MustCompile("^ *[0-9]+:") ) var gdb = "gdb" // Might be "ggdb" on Darwin, because gdb no longer part of XCode var debugger = "dlv" // For naming files, etc. var gogcflags = os.Getenv("GO_GCFLAGS") // optimizedLibs usually means "not running in a noopt test builder". var optimizedLibs = (!strings.Contains(gogcflags, "-N") && !strings.Contains(gogcflags, "-l")) // TestNexting go-builds a file, then uses a debugger (default delve, optionally gdb) // to next through the generated executable, recording each line landed at, and // then compares those lines with reference file(s). // Flag -u updates the reference file(s). // Flag -g changes the debugger to gdb (and uses gdb-specific reference files) // Flag -v is ever-so-slightly verbose. // Flag -n is for dry-run, and prints the shell and first debug commands. // // Because this test (combined with existing compiler deficiencies) is flaky, // for gdb-based testing by default inlining is disabled // (otherwise output depends on library internals) // and for both gdb and dlv by default repeated lines in the next stream are ignored // (because this appears to be timing-dependent in gdb, and the cleanest fix is in code common to gdb and dlv). // // Also by default, any source code outside of .../testdata/ is not mentioned // in the debugging histories. This deals both with inlined library code once // the compiler is generating clean inline records, and also deals with // runtime code between return from main and process exit. This is hidden // so that those files (in the runtime/library) can change without affecting // this test. // // These choices can be reversed with -i (inlining on) and -r (repeats detected) which // will also cause their own failures against the expected outputs. Note that if the compiler // and debugger were behaving properly, the inlined code and repeated lines would not appear, // so the expected output is closer to what we hope to see, though it also encodes all our // current bugs. // // The file being tested may contain comments of the form // //DBG-TAG=(v1,v2,v3) // where DBG = {gdb,dlv} and TAG={dbg,opt} // each variable may optionally be followed by a / and one or more of S,A,N,O // to indicate normalization of Strings, (hex) addresses, and numbers. // "O" is an explicit indication that we expect it to be optimized out. 
// For example: // // if len(os.Args) > 1 { //gdb-dbg=(hist/A,cannedInput/A) //dlv-dbg=(hist/A,cannedInput/A) // // TODO: not implemented for Delve yet, but this is the plan // // After a compiler change that causes a difference in the debug behavior, check // to see if it is sensible or not, and if it is, update the reference files with // go test debug_test.go -args -u // (for Delve) // go test debug_test.go -args -u -d // func TestNexting(t *testing.T) { testenv.SkipFlaky(t, 37404) skipReasons := "" // Many possible skip reasons, list all that apply if testing.Short() { skipReasons = "not run in short mode; " } testenv.MustHaveGoBuild(t) if *useGdb && !*force && !(runtime.GOOS == "linux" && runtime.GOARCH == "amd64") { // Running gdb on OSX/darwin is very flaky. // Sometimes it is called ggdb, depending on how it is installed. // It also sometimes requires an admin password typed into a dialog box. // Various architectures tend to differ slightly sometimes, and keeping them // all in sync is a pain for people who don't have them all at hand, // so limit testing to amd64 (for now) skipReasons += "not run when testing gdb (-g) unless forced (-f) or linux-amd64; " } if !*useGdb && !*force && testenv.Builder() == "linux-386-longtest" { // The latest version of Delve does support linux/386. However, the version currently // installed in the linux-386-longtest builder does not. See golang.org/issue/39309. skipReasons += "not run when testing delve on linux-386-longtest builder unless forced (-f); " } if *useGdb { debugger = "gdb" _, err := exec.LookPath(gdb) if err != nil { if runtime.GOOS != "darwin" { skipReasons += "not run because gdb not on path; " } else { // On Darwin, MacPorts installs gdb as "ggdb". _, err = exec.LookPath("ggdb") if err != nil { skipReasons += "not run because gdb (and also ggdb) request by -g option not on path; " } else { gdb = "ggdb" } } } } else { // Delve debugger = "dlv" _, err := exec.LookPath("dlv") if err != nil { skipReasons += "not run because dlv not on path; " } } if skipReasons != "" { t.Skip(skipReasons[:len(skipReasons)-2]) } optFlags := "" // Whatever flags are needed to test debugging of optimized code. dbgFlags := "-N -l" if *useGdb && !*inlines { // For gdb (default), disable inlining so that a compiler test does not depend on library code. // TODO: Technically not necessary in 1.10 and later, but it causes a largish regression that needs investigation. optFlags += " -l" } moreargs := []string{} if *useGdb && (runtime.GOOS == "darwin" || runtime.GOOS == "windows") { // gdb and lldb on Darwin do not deal with compressed dwarf. // also, Windows. moreargs = append(moreargs, "-ldflags=-compressdwarf=false") } subTest(t, debugger+"-dbg", "hist", dbgFlags, moreargs...) subTest(t, debugger+"-dbg", "scopes", dbgFlags, moreargs...) subTest(t, debugger+"-dbg", "i22558", dbgFlags, moreargs...) subTest(t, debugger+"-dbg-race", "i22600", dbgFlags, append(moreargs, "-race")...) optSubTest(t, debugger+"-opt", "hist", optFlags, 1000, moreargs...) optSubTest(t, debugger+"-opt", "scopes", optFlags, 1000, moreargs...) // Was optSubtest, this test is observed flaky on Linux in Docker on (busy) macOS, probably because of timing // glitches in this harness. // TODO get rid of timing glitches in this harness. skipSubTest(t, debugger+"-opt", "infloop", optFlags, 10, moreargs...) 
} // subTest creates a subtest that compiles basename.go with the specified gcflags and additional compiler arguments, // then runs the debugger on the resulting binary, with any comment-specified actions matching tag triggered. func subTest(t *testing.T, tag string, basename string, gcflags string, moreargs ...string) { t.Run(tag+"-"+basename, func(t *testing.T) { if t.Name() == "TestNexting/gdb-dbg-i22558" { testenv.SkipFlaky(t, 31263) } testNexting(t, basename, tag, gcflags, 1000, moreargs...) }) } // skipSubTest is the same as subTest except that it skips the test if execution is not forced (-f) func skipSubTest(t *testing.T, tag string, basename string, gcflags string, count int, moreargs ...string) { t.Run(tag+"-"+basename, func(t *testing.T) { if *force { testNexting(t, basename, tag, gcflags, count, moreargs...) } else { t.Skip("skipping flaky test becaused not forced (-f)") } }) } // optSubTest is the same as subTest except that it skips the test if the runtime and libraries // were not compiled with optimization turned on. (The skip may not be necessary with Go 1.10 and later) func optSubTest(t *testing.T, tag string, basename string, gcflags string, count int, moreargs ...string) { // If optimized test is run with unoptimized libraries (compiled with -N -l), it is very likely to fail. // This occurs in the noopt builders (for example). t.Run(tag+"-"+basename, func(t *testing.T) { if *force || optimizedLibs { testNexting(t, basename, tag, gcflags, count, moreargs...) } else { t.Skip("skipping for unoptimized stdlib/runtime") } }) } func testNexting(t *testing.T, base, tag, gcflags string, count int, moreArgs ...string) { // (1) In testdata, build sample.go into test-sample.<tag> // (2) Run debugger gathering a history // (3) Read expected history from testdata/sample.<tag>.nexts // optionally, write out testdata/sample.<tag>.nexts testbase := filepath.Join("testdata", base) + "." + tag tmpbase := filepath.Join("testdata", "test-"+base+"."+tag) // Use a temporary directory unless -f is specified if !*force { tmpdir, err := ioutil.TempDir("", "debug_test") if err != nil { panic(fmt.Sprintf("Problem creating TempDir, error %v\n", err)) } tmpbase = filepath.Join(tmpdir, "test-"+base+"."+tag) if *verbose { fmt.Printf("Tempdir is %s\n", tmpdir) } defer os.RemoveAll(tmpdir) } exe := tmpbase runGoArgs := []string{"build", "-o", exe, "-gcflags=all=" + gcflags} runGoArgs = append(runGoArgs, moreArgs...) runGoArgs = append(runGoArgs, filepath.Join("testdata", base+".go")) runGo(t, "", runGoArgs...) nextlog := testbase + ".nexts" tmplog := tmpbase + ".nexts" var dbg dbgr if *useGdb { dbg = newGdb(tag, exe) } else { dbg = newDelve(tag, exe) } h1 := runDbgr(dbg, count) if *dryrun { fmt.Printf("# Tag for above is %s\n", dbg.tag()) return } if *update { h1.write(nextlog) } else { h0 := &nextHist{} h0.read(nextlog) if !h0.equals(h1) { // Be very noisy about exactly what's wrong to simplify debugging. h1.write(tmplog) cmd := exec.Command("diff", "-u", nextlog, tmplog) line := asCommandLine("", cmd) bytes, err := cmd.CombinedOutput() if err != nil && len(bytes) == 0 { t.Fatalf("step/next histories differ, diff command %s failed with error=%v", line, err) } t.Fatalf("step/next histories differ, diff=\n%s", string(bytes)) } } } type dbgr interface { start() stepnext(s string) bool // step or next, possible with parameter, gets line etc. 
returns true for success, false for unsure response quit() hist() *nextHist tag() string } func runDbgr(dbg dbgr, maxNext int) *nextHist { dbg.start() if *dryrun { return nil } for i := 0; i < maxNext; i++ { if !dbg.stepnext("n") { break } } dbg.quit() h := dbg.hist() return h } func runGo(t *testing.T, dir string, args ...string) string { var stdout, stderr bytes.Buffer cmd := exec.Command(testenv.GoToolPath(t), args...) cmd.Dir = dir if *dryrun { fmt.Printf("%s\n", asCommandLine("", cmd)) return "" } cmd.Stdout = &stdout cmd.Stderr = &stderr if err := cmd.Run(); err != nil { t.Fatalf("error running cmd (%s): %v\nstdout:\n%sstderr:\n%s\n", asCommandLine("", cmd), err, stdout.String(), stderr.String()) } if s := stderr.String(); s != "" { t.Fatalf("Stderr = %s\nWant empty", s) } return stdout.String() } // tstring provides two strings, o (stdout) and e (stderr) type tstring struct { o string e string } func (t tstring) String() string { return t.o + t.e } type pos struct { line uint32 file uint8 // Artifact of plans to implement differencing instead of calling out to diff. } type nextHist struct { f2i map[string]uint8 fs []string ps []pos texts []string vars [][]string } func (h *nextHist) write(filename string) { file, err := os.Create(filename) if err != nil { panic(fmt.Sprintf("Problem opening %s, error %v\n", filename, err)) } defer file.Close() var lastfile uint8 for i, x := range h.texts { p := h.ps[i] if lastfile != p.file { fmt.Fprintf(file, " %s\n", h.fs[p.file-1]) lastfile = p.file } fmt.Fprintf(file, "%d:%s\n", p.line, x) // TODO, normalize between gdb and dlv into a common, comparable format. for _, y := range h.vars[i] { y = strings.TrimSpace(y) fmt.Fprintf(file, "%s\n", y) } } file.Close() } func (h *nextHist) read(filename string) { h.f2i = make(map[string]uint8) bytes, err := ioutil.ReadFile(filename) if err != nil { panic(fmt.Sprintf("Problem reading %s, error %v\n", filename, err)) } var lastfile string lines := strings.Split(string(bytes), "\n") for i, l := range lines { if len(l) > 0 && l[0] != '#' { if l[0] == ' ' { // file -- first two characters expected to be " " lastfile = strings.TrimSpace(l) } else if numberColonRe.MatchString(l) { // line number -- <number>:<line> colonPos := strings.Index(l, ":") if colonPos == -1 { panic(fmt.Sprintf("Line %d (%s) in file %s expected to contain '<number>:' but does not.\n", i+1, l, filename)) } h.add(lastfile, l[0:colonPos], l[colonPos+1:]) } else { h.addVar(l) } } } } // add appends file (name), line (number) and text (string) to the history, // provided that the file+line combo does not repeat the previous position, // and provided that the file is within the testdata directory. The return // value indicates whether the append occurred. 
func (h *nextHist) add(file, line, text string) bool { // Only record source code in testdata unless the inlines flag is set if !*inlines && !strings.Contains(file, "/testdata/") { return false } fi := h.f2i[file] if fi == 0 { h.fs = append(h.fs, file) fi = uint8(len(h.fs)) h.f2i[file] = fi } line = strings.TrimSpace(line) var li int var err error if line != "" { li, err = strconv.Atoi(line) if err != nil { panic(fmt.Sprintf("Non-numeric line: %s, error %v\n", line, err)) } } l := len(h.ps) p := pos{line: uint32(li), file: fi} if l == 0 || *repeats || h.ps[l-1] != p { h.ps = append(h.ps, p) h.texts = append(h.texts, text) h.vars = append(h.vars, []string{}) return true } return false } func (h *nextHist) addVar(text string) { l := len(h.texts) h.vars[l-1] = append(h.vars[l-1], text) } func invertMapSU8(hf2i map[string]uint8) map[uint8]string { hi2f := make(map[uint8]string) for hs, i := range hf2i { hi2f[i] = hs } return hi2f } func (h *nextHist) equals(k *nextHist) bool { if len(h.f2i) != len(k.f2i) { return false } if len(h.ps) != len(k.ps) { return false } hi2f := invertMapSU8(h.f2i) ki2f := invertMapSU8(k.f2i) for i, hs := range hi2f { if hs != ki2f[i] { return false } } for i, x := range h.ps { if k.ps[i] != x { return false } } for i, hv := range h.vars { kv := k.vars[i] if len(hv) != len(kv) { return false } for j, hvt := range hv { if hvt != kv[j] { return false } } } return true } // canonFileName strips everything before "/src/" from a filename. // This makes file names portable across different machines, // home directories, and temporary directories. func canonFileName(f string) string { i := strings.Index(f, "/src/") if i != -1 { f = f[i+1:] } return f } /* Delve */ type delveState struct { cmd *exec.Cmd tagg string *ioState atLineRe *regexp.Regexp // "\n =>" funcFileLinePCre *regexp.Regexp // "^> ([^ ]+) ([^:]+):([0-9]+) .*[(]PC: (0x[a-z0-9]+)" line string file string function string } func newDelve(tag, executable string, args ...string) dbgr { cmd := exec.Command("dlv", "exec", executable) cmd.Env = replaceEnv(cmd.Env, "TERM", "dumb") if len(args) > 0 { cmd.Args = append(cmd.Args, "--") cmd.Args = append(cmd.Args, args...) } s := &delveState{tagg: tag, cmd: cmd} // HAHA Delve has control characters embedded to change the color of the => and the line number // that would be '(\\x1b\\[[0-9;]+m)?' OR TERM=dumb s.atLineRe = regexp.MustCompile("\n=>[[:space:]]+[0-9]+:(.*)") s.funcFileLinePCre = regexp.MustCompile("> ([^ ]+) ([^:]+):([0-9]+) .*[(]PC: (0x[a-z0-9]+)[)]\n") s.ioState = newIoState(s.cmd) return s } func (s *delveState) tag() string { return s.tagg } func (s *delveState) stepnext(ss string) bool { x := s.ioState.writeReadExpect(ss+"\n", "[(]dlv[)] ") excerpts := s.atLineRe.FindStringSubmatch(x.o) locations := s.funcFileLinePCre.FindStringSubmatch(x.o) excerpt := "" if len(excerpts) > 1 { excerpt = excerpts[1] } if len(locations) > 0 { fn := canonFileName(locations[2]) if *verbose { if s.file != fn { fmt.Printf("%s\n", locations[2]) // don't canonocalize verbose logging } fmt.Printf(" %s\n", locations[3]) } s.line = locations[3] s.file = fn s.function = locations[1] s.ioState.history.add(s.file, s.line, excerpt) // TODO: here is where variable processing will be added. See gdbState.stepnext as a guide. // Adding this may require some amount of normalization so that logs are comparable. 
return true } if *verbose { fmt.Printf("DID NOT MATCH EXPECTED NEXT OUTPUT\nO='%s'\nE='%s'\n", x.o, x.e) } return false } func (s *delveState) start() { if *dryrun { fmt.Printf("%s\n", asCommandLine("", s.cmd)) fmt.Printf("b main.test\n") fmt.Printf("c\n") return } err := s.cmd.Start() if err != nil { line := asCommandLine("", s.cmd) panic(fmt.Sprintf("There was an error [start] running '%s', %v\n", line, err)) } s.ioState.readExpecting(-1, 5000, "Type 'help' for list of commands.") s.ioState.writeReadExpect("b main.test\n", "[(]dlv[)] ") s.stepnext("c") } func (s *delveState) quit() { expect("", s.ioState.writeRead("q\n")) } /* Gdb */ type gdbState struct { cmd *exec.Cmd tagg string args []string *ioState atLineRe *regexp.Regexp funcFileLinePCre *regexp.Regexp line string file string function string } func newGdb(tag, executable string, args ...string) dbgr { // Turn off shell, necessary for Darwin apparently cmd := exec.Command(gdb, "-nx", "-iex", fmt.Sprintf("add-auto-load-safe-path %s/src/runtime", runtime.GOROOT()), "-ex", "set startup-with-shell off", executable) cmd.Env = replaceEnv(cmd.Env, "TERM", "dumb") s := &gdbState{tagg: tag, cmd: cmd, args: args} s.atLineRe = regexp.MustCompile("(^|\n)([0-9]+)(.*)") s.funcFileLinePCre = regexp.MustCompile( "([^ ]+) [(][^)]*[)][ \\t\\n]+at ([^:]+):([0-9]+)") // runtime.main () at /Users/drchase/GoogleDrive/work/go/src/runtime/proc.go:201 // function file line // Thread 2 hit Breakpoint 1, main.main () at /Users/drchase/GoogleDrive/work/debug/hist.go:18 s.ioState = newIoState(s.cmd) return s } func (s *gdbState) tag() string { return s.tagg } func (s *gdbState) start() { run := "run" for _, a := range s.args { run += " " + a // Can't quote args for gdb, it will pass them through including the quotes } if *dryrun { fmt.Printf("%s\n", asCommandLine("", s.cmd)) fmt.Printf("tbreak main.test\n") fmt.Printf("%s\n", run) return } err := s.cmd.Start() if err != nil { line := asCommandLine("", s.cmd) panic(fmt.Sprintf("There was an error [start] running '%s', %v\n", line, err)) } s.ioState.readSimpleExpecting("[(]gdb[)] ") x := s.ioState.writeReadExpect("b main.test\n", "[(]gdb[)] ") expect("Breakpoint [0-9]+ at", x) s.stepnext(run) } func (s *gdbState) stepnext(ss string) bool { x := s.ioState.writeReadExpect(ss+"\n", "[(]gdb[)] ") excerpts := s.atLineRe.FindStringSubmatch(x.o) locations := s.funcFileLinePCre.FindStringSubmatch(x.o) excerpt := "" addedLine := false if len(excerpts) == 0 && len(locations) == 0 { if *verbose { fmt.Printf("DID NOT MATCH %s", x.o) } return false } if len(excerpts) > 0 { excerpt = excerpts[3] } if len(locations) > 0 { fn := canonFileName(locations[2]) if *verbose { if s.file != fn { fmt.Printf("%s\n", locations[2]) } fmt.Printf(" %s\n", locations[3]) } s.line = locations[3] s.file = fn s.function = locations[1] addedLine = s.ioState.history.add(s.file, s.line, excerpt) } if len(excerpts) > 0 { if *verbose { fmt.Printf(" %s\n", excerpts[2]) } s.line = excerpts[2] addedLine = s.ioState.history.add(s.file, s.line, excerpt) } if !addedLine { // True if this was a repeat line return true } // Look for //gdb-<tag>=(v1,v2,v3) and print v1, v2, v3 vars := varsToPrint(excerpt, "//"+s.tag()+"=(") for _, v := range vars { response := printVariableAndNormalize(v, func(v string) string { return s.ioState.writeReadExpect("p "+v+"\n", "[(]gdb[)] ").String() }) s.ioState.history.addVar(response) } return true } // printVariableAndNormalize extracts any slash-indicated normalizing requests from the variable // name, then uses printer to 
get the value of the variable from the debugger, and then // normalizes and returns the response. func printVariableAndNormalize(v string, printer func(v string) string) string { slashIndex := strings.Index(v, "/") substitutions := "" if slashIndex != -1 { substitutions = v[slashIndex:] v = v[:slashIndex] } response := printer(v) // expect something like "$1 = ..." dollar := strings.Index(response, "$") cr := strings.Index(response, "\n") if dollar == -1 { // some not entirely expected response, whine and carry on. if cr == -1 { response = strings.TrimSpace(response) // discards trailing newline response = strings.Replace(response, "\n", "<BR>", -1) return "$ Malformed response " + response } response = strings.TrimSpace(response[:cr]) return "$ " + response } if cr == -1 { cr = len(response) } // Convert the leading $<number> into the variable name to enhance readability // and reduce scope of diffs if an earlier print-variable is added. response = strings.TrimSpace(response[dollar:cr]) response = leadingDollarNumberRe.ReplaceAllString(response, v) // Normalize value as requested. if strings.Contains(substitutions, "A") { response = hexRe.ReplaceAllString(response, "<A>") } if strings.Contains(substitutions, "N") { response = numRe.ReplaceAllString(response, "<N>") } if strings.Contains(substitutions, "S") { response = stringRe.ReplaceAllString(response, "<S>") } if strings.Contains(substitutions, "O") { response = optOutGdbRe.ReplaceAllString(response, "<Optimized out, as expected>") } return response } // varsToPrint takes a source code line, and extracts the comma-separated variable names // found between lookfor and the next ")". // For example, if line includes "... //gdb-foo=(v1,v2,v3)" and // lookfor="//gdb-foo=(", then varsToPrint returns ["v1", "v2", "v3"] func varsToPrint(line, lookfor string) []string { var vars []string if strings.Contains(line, lookfor) { x := line[strings.Index(line, lookfor)+len(lookfor):] end := strings.Index(x, ")") if end == -1 { panic(fmt.Sprintf("Saw variable list begin %s in %s but no closing ')'", lookfor, line)) } vars = strings.Split(x[:end], ",") for i, y := range vars { vars[i] = strings.TrimSpace(y) } } return vars } func (s *gdbState) quit() { response := s.ioState.writeRead("q\n") if strings.Contains(response.o, "Quit anyway? (y or n)") { defer func() { if r := recover(); r != nil { if s, ok := r.(string); !(ok && strings.Contains(s, "'Y\n'")) { // Not the panic that was expected. 
fmt.Printf("Expected a broken pipe panic, but saw the following panic instead") panic(r) } } }() s.ioState.writeRead("Y\n") } } type ioState struct { stdout io.ReadCloser stderr io.ReadCloser stdin io.WriteCloser outChan chan string errChan chan string last tstring // Output of previous step history *nextHist } func newIoState(cmd *exec.Cmd) *ioState { var err error s := &ioState{} s.history = &nextHist{} s.history.f2i = make(map[string]uint8) s.stdout, err = cmd.StdoutPipe() line := asCommandLine("", cmd) if err != nil { panic(fmt.Sprintf("There was an error [stdoutpipe] running '%s', %v\n", line, err)) } s.stderr, err = cmd.StderrPipe() if err != nil { panic(fmt.Sprintf("There was an error [stdouterr] running '%s', %v\n", line, err)) } s.stdin, err = cmd.StdinPipe() if err != nil { panic(fmt.Sprintf("There was an error [stdinpipe] running '%s', %v\n", line, err)) } s.outChan = make(chan string, 1) s.errChan = make(chan string, 1) go func() { buffer := make([]byte, 4096) for { n, err := s.stdout.Read(buffer) if n > 0 { s.outChan <- string(buffer[0:n]) } if err == io.EOF || n == 0 { break } if err != nil { fmt.Printf("Saw an error forwarding stdout") break } } close(s.outChan) s.stdout.Close() }() go func() { buffer := make([]byte, 4096) for { n, err := s.stderr.Read(buffer) if n > 0 { s.errChan <- string(buffer[0:n]) } if err == io.EOF || n == 0 { break } if err != nil { fmt.Printf("Saw an error forwarding stderr") break } } close(s.errChan) s.stderr.Close() }() return s } func (s *ioState) hist() *nextHist { return s.history } // writeRead writes ss, then reads stdout and stderr, waiting 500ms to // be sure all the output has appeared. func (s *ioState) writeRead(ss string) tstring { if *verbose { fmt.Printf("=> %s", ss) } _, err := io.WriteString(s.stdin, ss) if err != nil { panic(fmt.Sprintf("There was an error writing '%s', %v\n", ss, err)) } return s.readExpecting(-1, 500, "") } // writeReadExpect writes ss, then reads stdout and stderr until something // that matches expectRE appears. 
expectRE should not be "" func (s *ioState) writeReadExpect(ss, expectRE string) tstring { if *verbose { fmt.Printf("=> %s", ss) } if expectRE == "" { panic("expectRE should not be empty; use .* instead") } _, err := io.WriteString(s.stdin, ss) if err != nil { panic(fmt.Sprintf("There was an error writing '%s', %v\n", ss, err)) } return s.readSimpleExpecting(expectRE) } func (s *ioState) readExpecting(millis, interlineTimeout int, expectedRE string) tstring { timeout := time.Millisecond * time.Duration(millis) interline := time.Millisecond * time.Duration(interlineTimeout) s.last = tstring{} var re *regexp.Regexp if expectedRE != "" { re = regexp.MustCompile(expectedRE) } loop: for { var timer <-chan time.Time if timeout > 0 { timer = time.After(timeout) } select { case x, ok := <-s.outChan: if !ok { s.outChan = nil } s.last.o += x case x, ok := <-s.errChan: if !ok { s.errChan = nil } s.last.e += x case <-timer: break loop } if re != nil { if re.MatchString(s.last.o) { break } if re.MatchString(s.last.e) { break } } timeout = interline } if *verbose { fmt.Printf("<= %s%s", s.last.o, s.last.e) } return s.last } func (s *ioState) readSimpleExpecting(expectedRE string) tstring { s.last = tstring{} var re *regexp.Regexp if expectedRE != "" { re = regexp.MustCompile(expectedRE) } for { select { case x, ok := <-s.outChan: if !ok { s.outChan = nil } s.last.o += x case x, ok := <-s.errChan: if !ok { s.errChan = nil } s.last.e += x } if re != nil { if re.MatchString(s.last.o) { break } if re.MatchString(s.last.e) { break } } } if *verbose { fmt.Printf("<= %s%s", s.last.o, s.last.e) } return s.last } // replaceEnv returns a new environment derived from env // by removing any existing definition of ev and adding ev=evv. func replaceEnv(env []string, ev string, evv string) []string { evplus := ev + "=" var found bool for i, v := range env { if strings.HasPrefix(v, evplus) { found = true env[i] = evplus + evv } } if !found { env = append(env, evplus+evv) } return env } // asCommandLine renders cmd as something that could be copy-and-pasted into a command line // If cwd is not empty and different from the command's directory, prepend an appropriate "cd" func asCommandLine(cwd string, cmd *exec.Cmd) string { s := "(" if cmd.Dir != "" && cmd.Dir != cwd { s += "cd" + escape(cmd.Dir) + ";" } for _, e := range cmd.Env { if !strings.HasPrefix(e, "PATH=") && !strings.HasPrefix(e, "HOME=") && !strings.HasPrefix(e, "USER=") && !strings.HasPrefix(e, "SHELL=") { s += escape(e) } } for _, a := range cmd.Args { s += escape(a) } s += " )" return s } // escape inserts escapes appropriate for use in a shell command line func escape(s string) string { s = strings.Replace(s, "\\", "\\\\", -1) s = strings.Replace(s, "'", "\\'", -1) // Conservative guess at characters that will force quoting if strings.ContainsAny(s, "\\ ;#*&$~?!|[]()<>{}`") { s = " '" + s + "'" } else { s = " " + s } return s } func expect(want string, got tstring) { if want != "" { match, err := regexp.MatchString(want, got.o) if err != nil { panic(fmt.Sprintf("Error for regexp %s, %v\n", want, err)) } if match { return } // Ignore error as we have already checked for it before match, _ = regexp.MatchString(want, got.e) if match { return } fmt.Printf("EXPECTED '%s'\n GOT O='%s'\nAND E='%s'\n", want, got.o, got.e) } }
[ "\"GO_GCFLAGS\"" ]
[]
[ "GO_GCFLAGS" ]
[]
["GO_GCFLAGS"]
go
1
0
cienv/github_actions.go
package cienv import ( "encoding/json" "errors" "os" ) // https://help.github.com/en/articles/virtual-environments-for-github-actions#default-environment-variables type GitHubEvent struct { PullRequest GitHubPullRequest `json:"pull_request"` Repository struct { Owner struct { Login string `json:"login"` } `json:"owner"` Name string `json:"name"` } `json:"repository"` CheckSuite struct { After string `json:"after"` PullRequests []GitHubPullRequest `json:"pull_requests"` } `json:"check_suite"` HeadCommit struct { ID string `json:"id"` } `json:"head_commit"` } type GitHubRepo struct { Owner struct { ID int64 `json:"id"` } } type GitHubPullRequest struct { Number int `json:"number"` Head struct { Sha string `json:"sha"` Ref string `json:"ref"` Repo GitHubRepo `json:"repo"` } `json:"head"` Base struct { Repo GitHubRepo `json:"repo"` } `json:"base"` } // LoadGitHubEvent loads GitHubEvent if it's running in GitHub Actions. func LoadGitHubEvent() (*GitHubEvent, error) { eventPath := os.Getenv("GITHUB_EVENT_PATH") if eventPath == "" { return nil, errors.New("GITHUB_EVENT_PATH not found") } return loadGitHubEventFromPath(eventPath) } func loadGitHubEventFromPath(eventPath string) (*GitHubEvent, error) { f, err := os.Open(eventPath) if err != nil { return nil, err } defer f.Close() var event GitHubEvent if err := json.NewDecoder(f).Decode(&event); err != nil { return nil, err } return &event, nil } func getBuildInfoFromGitHubAction() (*BuildInfo, bool, error) { eventPath := os.Getenv("GITHUB_EVENT_PATH") if eventPath == "" { return nil, false, errors.New("GITHUB_EVENT_PATH not found") } return getBuildInfoFromGitHubActionEventPath(eventPath) } func getBuildInfoFromGitHubActionEventPath(eventPath string) (*BuildInfo, bool, error) { event, err := loadGitHubEventFromPath(eventPath) if err != nil { return nil, false, err } info := &BuildInfo{ Owner: event.Repository.Owner.Login, Repo: event.Repository.Name, PullRequest: event.PullRequest.Number, Branch: event.PullRequest.Head.Ref, SHA: event.PullRequest.Head.Sha, } // For re-run check_suite event. if info.PullRequest == 0 && len(event.CheckSuite.PullRequests) > 0 { pr := event.CheckSuite.PullRequests[0] info.PullRequest = pr.Number info.Branch = pr.Head.Ref info.SHA = pr.Head.Sha } if info.SHA == "" { info.SHA = event.HeadCommit.ID } return info, info.PullRequest != 0, nil } // IsInGitHubAction returns true if reviewdog is running in GitHub Actions. func IsInGitHubAction() bool { // https://help.github.com/en/articles/virtual-environments-for-github-actions#default-environment-variables return os.Getenv("GITHUB_ACTION") != "" } // IsGitHubPRFromForkedRepo returns true if reviewdog is running in GitHub // Actions and running for PullRequests from forked repository. func IsGitHubPRFromForkedRepo() bool { event, err := LoadGitHubEvent() if err != nil { return false } return event.PullRequest.Head.Repo.Owner.ID != event.PullRequest.Base.Repo.Owner.ID }
[ "\"GITHUB_EVENT_PATH\"", "\"GITHUB_EVENT_PATH\"", "\"GITHUB_ACTION\"" ]
[]
[ "GITHUB_EVENT_PATH", "GITHUB_ACTION" ]
[]
["GITHUB_EVENT_PATH", "GITHUB_ACTION"]
go
2
0
Packs/Zscaler/Integrations/Zscaler/Zscaler.py
import demistomock as demisto from CommonServerPython import * ''' IMPORTS ''' import requests import time import json import random # disable insecure warnings requests.packages.urllib3.disable_warnings() ''' GLOBAL VARS ''' CLOUD_NAME = demisto.params()['cloud'] USERNAME = demisto.params()['credentials']['identifier'] PASSWORD = demisto.params()['credentials']['password'] API_KEY = str(demisto.params()['key']) BASE_URL = CLOUD_NAME + '/api/v1' USE_SSL = not demisto.params().get('insecure', False) PROXY = demisto.params().get('proxy', True) DEFAULT_HEADERS = { 'content-type': 'application/json' } EXCEEDED_RATE_LIMIT_STATUS_CODE = 429 MAX_SECONDS_TO_WAIT = 30 ERROR_CODES_DICT = { 400: 'Invalid or bad request', 401: 'Session is not authenticated or timed out', 403: 'One of the following permission errors occurred:\n-The API key was disabled by your service provider\n' '-User role has no access permissions or functional scope\n-A required SKU subscription is missing\n' 'Contact support or your account team for assistance.', 404: 'Resource does not exist', 409: 'Request could not be processed because of possible edit conflict occurred. Another admin might be saving a ' 'configuration change at the same time. In this scenario, the client is expected to retry after a short ' 'time period.', 415: 'Unsupported media type.', 429: 'Exceeded the rate limit or quota.', 500: 'Unexpected error', 503: 'Service is temporarily unavailable' } ''' HANDLE PROXY ''' if not PROXY: del os.environ['HTTP_PROXY'] del os.environ['HTTPS_PROXY'] del os.environ['http_proxy'] del os.environ['https_proxy'] ''' HELPER FUNCTIONS ''' def http_request(method, url_suffix, data=None, headers=None, num_of_seconds_to_wait=3): if headers is None: headers = DEFAULT_HEADERS data = {} if data is None else data url = BASE_URL + url_suffix try: res = requests.request(method, url, verify=USE_SSL, data=data, headers=headers ) if res.status_code not in (200, 204): if res.status_code == EXCEEDED_RATE_LIMIT_STATUS_CODE and num_of_seconds_to_wait <= MAX_SECONDS_TO_WAIT: random_num_of_seconds = random.randint(num_of_seconds_to_wait, num_of_seconds_to_wait + 3) time.sleep(random_num_of_seconds) return http_request(method, url_suffix, data, headers=headers, num_of_seconds_to_wait=num_of_seconds_to_wait + 3) else: raise Exception('Your request failed with the following error: ' + ERROR_CODES_DICT[res.status_code]) except Exception as e: LOG('Zscaler request failed with url={url}\tdata={data}'.format(url=url, data=data)) LOG(e) raise e return res def validate_urls(urls): for url in urls: if url.startswith('http://') or url.startswith('https://'): return_error( 'Enter a valid URL address without an http:// or https:// prefix. URL should have at least host.' 
'domain pattern to qualify.') ''' FUNCTIONS ''' def login(): cmd_url = '/authenticatedSession' def obfuscateApiKey(seed): now = str(int(time.time() * 1000)) n = now[-6:] r = str(int(n) >> 1).zfill(6) key = "" for i in range(0, len(n), 1): key += seed[int(n[i])] for j in range(0, len(r), 1): key += seed[int(r[j]) + 2] return now, key ts, key = obfuscateApiKey(API_KEY) data = { 'username': USERNAME, 'timestamp': ts, 'password': PASSWORD, 'apiKey': key } json_data = json.dumps(data) result = http_request('POST', cmd_url, json_data, DEFAULT_HEADERS) return result.headers['Set-Cookie'] def activate_changes(): cmd_url = '/status/activate' http_request('POST', cmd_url, None, DEFAULT_HEADERS) def logout(): cmd_url = '/authenticatedSession' http_request('DELETE', cmd_url, None, DEFAULT_HEADERS) def blacklist_url(url): urls_to_blacklist = argToList(url) validate_urls(urls_to_blacklist) cmd_url = '/security/advanced/blacklistUrls?action=ADD_TO_LIST' data = { 'blacklistUrls': urls_to_blacklist } json_data = json.dumps(data) http_request('POST', cmd_url, json_data, DEFAULT_HEADERS) list_of_urls = '' for url in urls_to_blacklist: list_of_urls += '- ' + url + '\n' return 'Added the following URLs to the blacklist successfully:\n' + list_of_urls def unblacklist_url(url): urls_to_unblacklist = argToList(url) cmd_url = '/security/advanced/blacklistUrls?action=REMOVE_FROM_LIST' # Check if given URLs is blacklisted blacklisted_urls = get_blacklist()['blacklistUrls'] if len(urls_to_unblacklist) == 1: # Given only one URL to unblacklist if urls_to_unblacklist[0] not in blacklisted_urls: raise Exception('Given URL is not blacklisted.') elif not any(url in urls_to_unblacklist for url in blacklisted_urls): # Given more than one URL to blacklist raise Exception('Given URLs are not blacklisted.') data = { 'blacklistUrls': urls_to_unblacklist } json_data = json.dumps(data) http_request('POST', cmd_url, json_data, DEFAULT_HEADERS) list_of_urls = '' for url in urls_to_unblacklist: list_of_urls += '- ' + url + '\n' return 'Removed the following URLs from the blacklist successfully:\n' + list_of_urls def blacklist_ip(ip): ips_to_blacklist = argToList(ip) cmd_url = '/security/advanced/blacklistUrls?action=ADD_TO_LIST' data = { 'blacklistUrls': ips_to_blacklist } json_data = json.dumps(data) http_request('POST', cmd_url, json_data, DEFAULT_HEADERS) list_of_ips = '' for ip in ips_to_blacklist: list_of_ips += '- ' + ip + '\n' return 'Added the following IP addresses to the blacklist successfully:\n' + list_of_ips def unblacklist_ip(ip): ips_to_unblacklist = argToList(ip) cmd_url = '/security/advanced/blacklistUrls?action=REMOVE_FROM_LIST' # Check if given IPs is blacklisted blacklisted_ips = get_blacklist()['blacklistUrls'] if len(ips_to_unblacklist) == 1: # Given only one IP address to blacklist if ips_to_unblacklist[0] not in blacklisted_ips: raise Exception('Given IP address is not blacklisted.') elif not set(ips_to_unblacklist).issubset(set(blacklisted_ips)): # Given more than one IP address to blacklist raise Exception('Given IP addresses are not blacklisted.') data = { 'blacklistUrls': ips_to_unblacklist } json_data = json.dumps(data) http_request('POST', cmd_url, json_data, DEFAULT_HEADERS) list_of_ips = '' for ip in ips_to_unblacklist: list_of_ips += '- ' + ip + '\n' return 'Removed the following IP addresses from the blacklist successfully:\n' + list_of_ips def whitelist_url(url): cmd_url = '/security' urls_to_whitelist = argToList(url) # Get the current whitelist whitelist_urls = get_whitelist() if not 
whitelist_urls: whitelist_urls['whitelistUrls'] = [] whitelist_urls['whitelistUrls'] += urls_to_whitelist json_data = json.dumps(whitelist_urls) http_request('PUT', cmd_url, json_data, DEFAULT_HEADERS) list_of_urls = '' for url in urls_to_whitelist: list_of_urls += '- ' + url + '\n' return 'Added the following URLs to the whitelist successfully:\n' + list_of_urls def unwhitelist_url(url): cmd_url = '/security' urls_to_unwhitelist = argToList(url) # Get the current whitelist whitelist_urls = get_whitelist() if not whitelist_urls: whitelist_urls['whitelistUrls'] = [] # Check if given URL is whitelisted if len(urls_to_unwhitelist) == 1: # Given only one URL to whitelist if urls_to_unwhitelist[0] not in whitelist_urls['whitelistUrls']: raise Exception('Given host address is not whitelisted.') elif not set(urls_to_unwhitelist).issubset(set(whitelist_urls['whitelistUrls'])): # Given more than one URL to whitelist raise Exception('Given host addresses are not whitelisted.') # List comprehension to remove requested URLs from the whitelist whitelist_urls['whitelistUrls'] = [x for x in whitelist_urls['whitelistUrls'] if x not in urls_to_unwhitelist] json_data = json.dumps(whitelist_urls) http_request('PUT', cmd_url, json_data, DEFAULT_HEADERS) list_of_urls = '' for url in whitelist_urls: list_of_urls += '- ' + url + '\n' return 'Removed the following URLs from the whitelist successfully:\n' + list_of_urls def whitelist_ip(ip): cmd_url = '/security' ips_to_whitelist = argToList(ip) # Get the current whitelist whitelist_ips = get_whitelist() if not whitelist_ips: whitelist_ips['whitelistUrls'] = [] whitelist_ips['whitelistUrls'] += ips_to_whitelist json_data = json.dumps(whitelist_ips) http_request('PUT', cmd_url, json_data, DEFAULT_HEADERS) list_of_ips = '' for ip in ips_to_whitelist: list_of_ips += '- ' + ip + '\n' return 'Added the following URLs to the whitelist successfully:\n' + list_of_ips def unwhitelist_ip(ip): cmd_url = '/security' ips_to_unwhitelist = argToList(ip) # Get the current whitelist whitelist_ips = get_whitelist() if not whitelist_ips: whitelist_ips['whitelistUrls'] = [] # Check if given IP is whitelisted if len(ips_to_unwhitelist) == 1: # Given only one IP to whitelist if ips_to_unwhitelist[0] not in whitelist_ips['whitelistUrls']: raise Exception('Given IP address is not whitelisted.') elif not set(ips_to_unwhitelist).issubset(set(whitelist_ips['whitelistUrls'])): # Given more than one IP to whitelist raise Exception('Given IP address is not whitelisted.') # List comprehension to remove requested IPs from the whitelist whitelist_ips['whitelistUrls'] = [x for x in whitelist_ips['whitelistUrls'] if x not in ips_to_unwhitelist] json_data = json.dumps(whitelist_ips) http_request('PUT', cmd_url, json_data, DEFAULT_HEADERS) list_of_ips = '' for ip in ips_to_unwhitelist: list_of_ips += '- ' + ip + '\n' return 'Removed the following IP addresses from the whitelist successfully:\n' + list_of_ips def get_blacklist_command(): blacklist = get_blacklist().get('blacklistUrls') if blacklist: hr = '### Zscaler blacklist\n' for url in blacklist: hr += '- ' + url + '\n' ec = { 'Zscaler.Blacklist': blacklist } entry = { 'Type': entryTypes['note'], 'Contents': blacklist, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': hr, 'EntryContext': ec } return entry else: return 'No results found' def get_blacklist(): cmd_url = '/security/advanced' result = http_request('GET', cmd_url, None, DEFAULT_HEADERS) return json.loads(result.content) def 
get_whitelist_command(): whitelist = get_whitelist().get('whitelistUrls') if whitelist: hr = '### Zscaler whitelist\n' for url in whitelist: hr += '- ' + url + '\n' ec = { 'Zscaler.Whitelist': whitelist } entry = { 'Type': entryTypes['note'], 'Contents': whitelist, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': hr, 'EntryContext': ec } return entry else: return 'No results found' def get_whitelist(): cmd_url = '/security' result = http_request('GET', cmd_url, None, DEFAULT_HEADERS) return json.loads(result.content) def url_lookup(args): url = args.get('url', '') multiple = args.get('multiple', 'true').lower() == 'true' response = lookup_request(url, multiple) hr = json.loads(response.content) if hr: data = hr[0] suspicious_categories = ['SUSPICIOUS_DESTINATION', 'SPYWARE_OR_ADWARE'] ioc_context = {'Address': data['url'], 'Data': data['url']} score = 1 if len(data['urlClassifications']) == 0: data['urlClassifications'] = '' else: data['urlClassifications'] = ''.join(data['urlClassifications']) ioc_context['urlClassifications'] = data['urlClassifications'] if data['urlClassifications'] == 'MISCELLANEOUS_OR_UNKNOWN': score = 0 if len(data['urlClassificationsWithSecurityAlert']) == 0: data['urlClassificationsWithSecurityAlert'] = '' else: data['urlClassificationsWithSecurityAlert'] = ''.join(data['urlClassificationsWithSecurityAlert']) ioc_context['urlClassificationsWithSecurityAlert'] = data['urlClassificationsWithSecurityAlert'] if data['urlClassificationsWithSecurityAlert'] in suspicious_categories: score = 2 else: score = 3 ioc_context['Malicious'] = { 'Vendor': 'Zscaler', 'Description': data['urlClassificationsWithSecurityAlert'] } data['ip'] = data.pop('url') ioc_context = createContext(data=ioc_context, removeNull=True) ec = { outputPaths['url']: ioc_context, 'DBotScore': [ { "Indicator": url, "Score": score, "Type": "url", "Vendor": "Zscaler" } ] } title = 'Zscaler URL Lookup' entry = { 'Type': entryTypes['note'], 'Contents': hr, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown(title, data, removeNull=True), 'EntryContext': ec } else: entry = 'No results found.' 
# type: ignore return entry def ip_lookup(ip): response = lookup_request(ip) hr = json.loads(response.content) if hr: ioc_context = [None] * len(hr) # type: List[Any] suspicious_categories = ['SUSPICIOUS_DESTINATION', 'SPYWARE_OR_ADWARE'] dbot_score_array = [None] * len(hr) # type: List[Any] for i in range(len(hr)): ioc_context[i] = {} dbot_score_array[i] = {} ioc_context[i]['Address'] = hr[i]['url'] dbot_score_array[i]['Indicator'] = hr[i]['url'] score = 1 if len(hr[i]['urlClassifications']) == 0: hr[i]['iplClassifications'] = '' else: hr[i]['ipClassifications'] = ''.join(hr[i]['urlClassifications']) ioc_context[i]['ipClassifications'] = hr[i]['ipClassifications'] del hr[i]['urlClassifications'] if len(hr[i]['urlClassificationsWithSecurityAlert']) == 0: hr[i]['ipClassificationsWithSecurityAlert'] = '' else: hr[i]['ipClassificationsWithSecurityAlert'] = ''.join(hr[i]['urlClassificationsWithSecurityAlert']) if hr[i]['urlClassificationsWithSecurityAlert'] in suspicious_categories: score = 2 else: score = 3 ioc_context[i]['Malicious'] = { 'Vendor': 'Zscaler', 'Description': hr[i]['ipClassificationsWithSecurityAlert'] } del hr[i]['urlClassificationsWithSecurityAlert'] hr[i]['ip'] = hr[i].pop('url') dbot_score_array[i]['Score'] = score dbot_score_array[i]['Type'] = 'ip' dbot_score_array[i]['Vendor'] = 'Zscaler' ioc_context = createContext(data=ioc_context, removeNull=True) ec = { outputPaths['ip']: ioc_context, 'DBotScore': dbot_score_array } title = 'Zscaler IP Lookup' entry = { 'Type': entryTypes['note'], 'Contents': hr, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown(title, hr, removeNull=True), 'EntryContext': ec } else: entry = 'No results found.' # type: ignore return entry def lookup_request(ioc, multiple=True): cmd_url = '/urlLookup' if multiple: ioc_list = ioc.split(',') else: ioc_list = [ioc] ioc_list = [url.replace('https://', '').replace('http://', '') for url in ioc_list] json_data = json.dumps(ioc_list) response = http_request('POST', cmd_url, json_data, DEFAULT_HEADERS) return response def category_add_url(category_id, url): categories = get_categories() found_category = False for category in categories: if category['id'] == category_id: category_data = category found_category = True break if found_category: url_list = argToList(url) all_urls = url_list[:] all_urls.extend(list(map(lambda x: x.strip(), category_data['urls']))) category_data['urls'] = all_urls category_ioc_update(category_data) context = { 'ID': category_id, 'CustomCategory': category_data['customCategory'], 'URL': category_data['urls'] } if 'description' in category_data and category_data['description']: # Custom might not have description context['Description'] = category_data['description'] ec = { 'Zscaler.Category(val.ID && val.ID === obj.ID)': context } urls = '' for url in url_list: urls += '- ' + url + '\n' hr = 'Added the following URL addresses to category {}:\n{}'.format(category_id, urls) entry = { 'Type': entryTypes['note'], 'Contents': ec, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': hr, 'EntryContext': ec } return entry else: return return_error('Category could not be found.') def category_add_ip(category_id, ip): categories = get_categories() found_category = False for category in categories: if category['id'] == category_id: category_data = category found_category = True break if found_category: ip_list = argToList(ip) all_ips = ip_list[:] all_ips.extend(category_data['urls']) 
category_data['urls'] = all_ips response = category_ioc_update(category_data) context = { 'ID': category_id, 'CustomCategory': category_data['customCategory'], 'URL': category_data['urls'] } if 'description' in category_data and category_data['description']: # Custom might not have description context['Description'] = category_data['description'] ec = { 'Zscaler.Category(val.ID && val.ID === obj.ID)': context } ips = '' for ip in ip_list: ips += '- ' + ip + '\n' hr = 'Added the following IP addresses to category {}:\n{}'.format(category_id, ips) entry = { 'Type': entryTypes['note'], 'Contents': response, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': hr, 'EntryContext': ec } return entry else: return return_error('Category could not be found.') def category_remove_url(category_id, url): categories = get_categories() found_category = False for category in categories: if category['id'] == category_id: category_data = category found_category = True break if found_category: url_list = argToList(url) updated_urls = [url for url in category_data['urls'] if url not in url_list] # noqa if updated_urls == category_data['urls']: return return_error('Could not find given URL in the category.') category_data['urls'] = updated_urls response = category_ioc_update(category_data) context = { 'ID': category_id, 'CustomCategory': category_data['customCategory'], 'URL': category_data['urls'] } if 'description' in category_data and category_data['description']: # Custom might not have description context['Description'] = category_data['description'] ec = { 'Zscaler.Category(val.ID && val.ID === obj.ID)': context } urls = '' for url in url_list: urls += '- ' + url + '\n' hr = 'Removed the following URL addresses to category {}:\n{}'.format(category_id, urls) entry = { 'Type': entryTypes['note'], 'Contents': response, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': hr, 'EntryContext': ec } return entry else: return return_error('Category could not be found.') def category_remove_ip(category_id, ip): categories = get_categories() found_category = False for category in categories: if category['id'] == category_id: category_data = category found_category = True break if found_category: ip_list = ip.split(',') updated_ips = [ip for ip in category_data['urls'] if ip not in ip_list] # noqa if updated_ips == category_data['urls']: return return_error('Could not find given IP in the category.') category_data['urls'] = updated_ips response = category_ioc_update(category_data) context = { 'ID': category_id, 'CustomCategory': category_data['customCategory'], 'URL': category_data['urls'] } if 'description' in category_data and category_data['description']: # Custom might not have description context['Description'] = category_data['description'] ec = { 'Zscaler.Category(val.ID && val.ID === obj.ID)': context } ips = '' for ip in ip_list: ips += '- ' + ip + '\n' hr = 'Removed the following IP addresses to category {}:\n{}'.format(category_id, ips) entry = { 'Type': entryTypes['note'], 'Contents': response, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': hr, 'EntryContext': ec } return entry else: return return_error('Category could not be found.') def category_ioc_update(category_data): cmd_url = '/urlCategories/' + category_data['id'] data = { 'customCategory': category_data['customCategory'], 'urls': category_data['urls'], 'id': category_data['id'] } if 'description' in 
category_data: data['description'] = category_data['description'] if 'configuredName' in category_data: data['configuredName'] = category_data['configuredName'] json_data = json.dumps(data) response = http_request('PUT', cmd_url, json_data).json() return response def get_categories_command(display_url): display_urls = True if display_url == 'true' else False raw_categories = get_categories() categories = [] for raw_category in raw_categories: category = { 'ID': raw_category['id'], 'CustomCategory': raw_category['customCategory'] } if raw_category['urls']: category['URL'] = raw_category['urls'] if 'description' in raw_category: category['Description'] = raw_category['description'] if 'configuredName' in raw_category: category['Name'] = raw_category['configuredName'] categories.append(category) ec = { 'Zscaler.Category(val.ID && val.ID === obj.ID)': categories } if display_urls: headers = ['ID', 'Description', 'URL', 'CustomCategory', 'Name'] else: headers = ['ID', 'Description', 'CustomCategory', 'Name'] title = 'Zscaler Categories' entry = { 'Type': entryTypes['note'], 'Contents': raw_categories, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown(title, categories, headers), 'EntryContext': ec } return entry def get_categories(): cmd_url = '/urlCategories' response = http_request('GET', cmd_url).json() return response def sandbox_report_command(): md5 = demisto.getArg('md5') details = demisto.getArg('details') res = sandbox_report(md5, details) report = 'Full Details' if details == 'full' else 'Summary' ctype = demisto.get(res, '{}.Classification.Type'.format(report)) dbot_score = 3 if ctype == "MALICIOUS" else 2 if ctype == "SUSPICIOUS" else 1 if ctype == "BENIGN" else 0 ec = {outputPaths['dbotscore']: { 'Indicator': md5, 'Type': 'file', 'Vendor': 'Zscaler', 'Score': dbot_score }} human_readable_report = ec['DBotScore'].copy() human_readable_report["Detected Malware"] = str( demisto.get(res, '{}.Classification.DetectedMalware'.format(report))) human_readable_report["Zscaler Score"] = demisto.get(res, '{}.Classification.Score'.format(report)) human_readable_report["Category"] = demisto.get(res, '{}.Classification.Category'.format(report)) ec[outputPaths['file']] = { 'MD5': md5, 'Zscaler': { 'DetectedMalware': demisto.get(res, '{}.Classification.DetectedMalware'.format(report)), 'FileType': demisto.get(res, '{}.File Properties.File Type'.format(report)), } } if dbot_score == 3: ec[outputPaths['file']]['Malicious'] = { 'Vendor': 'Zscaler', 'Description': 'Classified as Malicious, with threat score: ' + str(human_readable_report["Zscaler Score"]) } demisto.results({ 'Type': entryTypes['note'], 'Contents': res, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Full Sandbox Report', human_readable_report, removeNull=True), 'EntryContext': ec }) def sandbox_report(md5, details): cmd_url = '/sandbox/report/{md5Hash}?details={details}'.format(md5Hash=md5, details=details) response = http_request('GET', cmd_url).json() return response ''' EXECUTION CODE ''' def main(): auth = login() jsession_id = auth[:auth.index(';')] DEFAULT_HEADERS['cookie'] = jsession_id LOG('command is %s' % (demisto.command(),)) try: if demisto.command() == 'test-module': # Checks if there is an authenticated session http_request('GET', '/authenticatedSession', None, DEFAULT_HEADERS) demisto.results('ok') elif demisto.command() == 'url': demisto.results(url_lookup(demisto.args())) elif 
demisto.command() == 'ip': demisto.results(ip_lookup(demisto.args()['ip'])) elif demisto.command() == 'zscaler-blacklist-url': demisto.results(blacklist_url(demisto.args()['url'])) elif demisto.command() == 'zscaler-undo-blacklist-url': demisto.results(unblacklist_url(demisto.args()['url'])) elif demisto.command() == 'zscaler-whitelist-url': demisto.results(whitelist_url(demisto.args()['url'])) elif demisto.command() == 'zscaler-undo-whitelist-url': demisto.results(unwhitelist_url(demisto.args()['url'])) elif demisto.command() == 'zscaler-blacklist-ip': demisto.results(blacklist_ip(demisto.args()['ip'])) elif demisto.command() == 'zscaler-undo-blacklist-ip': demisto.results(unblacklist_ip(demisto.args()['ip'])) elif demisto.command() == 'zscaler-whitelist-ip': demisto.results(whitelist_ip(demisto.args()['ip'])) elif demisto.command() == 'zscaler-undo-whitelist-ip': demisto.results(unwhitelist_ip(demisto.args()['ip'])) elif demisto.command() == 'zscaler-category-add-url': demisto.results(category_add_url(demisto.args()['category-id'], demisto.args()['url'])) elif demisto.command() == 'zscaler-category-add-ip': demisto.results(category_add_ip(demisto.args()['category-id'], demisto.args()['ip'])) elif demisto.command() == 'zscaler-category-remove-url': demisto.results(category_remove_url(demisto.args()['category-id'], demisto.args()['url'])) elif demisto.command() == 'zscaler-category-remove-ip': demisto.results(category_remove_ip(demisto.args()['category-id'], demisto.args()['ip'])) elif demisto.command() == 'zscaler-get-categories': demisto.results(get_categories_command(demisto.args()['displayURL'])) elif demisto.command() == 'zscaler-get-blacklist': demisto.results(get_blacklist_command()) elif demisto.command() == 'zscaler-get-whitelist': demisto.results(get_whitelist_command()) elif demisto.command() == 'zscaler-sandbox-report': demisto.results(sandbox_report_command()) except Exception as e: LOG(str(e)) LOG.print_log() raise finally: try: activate_changes() logout() except Exception as err: demisto.info("Zscaler error: " + str(err)) # python2 uses __builtin__ python3 uses builtins if __name__ == "__builtin__" or __name__ == "builtins": main()
[]
[]
[ "HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy" ]
[]
["HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy"]
python
4
0
pkg/event-consumers/kafka/kafka-consumer.go
/* Copyright (c) 2016-2017 Bitnami Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package kafka import ( "context" "os" "strconv" "strings" "sync" "time" "github.com/Shopify/sarama" backoff "github.com/cenkalti/backoff/v4" "github.com/sirupsen/logrus" "k8s.io/client-go/kubernetes" "github.com/kubeless/kafka-trigger/pkg/utils" ) var ( stopM map[string]chan struct{} stoppedM map[string]chan struct{} consumerM map[string]bool brokers string maxBackOff time.Duration config *sarama.Config ) const clientID = "kubeless-kafka-trigger-controller" const defaultBrokers = "kafka.kubeless:9092" const kafkatriggersNamespace = "kafkatriggers.kubeless.io" func init() { stopM = make(map[string]chan struct{}) stoppedM = make(map[string]chan struct{}) consumerM = make(map[string]bool) if os.Getenv("KUBELESS_LOG_LEVEL") == "DEBUG" { logrus.SetLevel(logrus.DebugLevel) } sarama.Logger = logrus.StandardLogger() brokers = os.Getenv("KAFKA_BROKERS") if brokers == "" { brokers = defaultBrokers } if s := os.Getenv("BACKOFF_INTERVAL"); len(s) > 0 { if d, err := time.ParseDuration(s); err == nil { maxBackOff = d } else { logrus.Errorf("Failed to parse maximum back off interval BACKOFF_INTERVAL: %v", err) } } config = sarama.NewConfig() config.ClientID = clientID config.Version = sarama.V0_10_2_0 // Min supported version for consumer groups. 
config.Consumer.Return.Errors = true var err error if enableTLS, _ := strconv.ParseBool(os.Getenv("KAFKA_ENABLE_TLS")); enableTLS { config.Net.TLS.Enable = true config.Net.TLS.Config, err = GetTLSConfiguration(os.Getenv("KAFKA_CACERTS"), os.Getenv("KAFKA_CERT"), os.Getenv("KAFKA_KEY"), os.Getenv("KAFKA_INSECURE")) if err != nil { logrus.Fatalf("Failed to set tls configuration: %v", err) } } if enableSASL, _ := strconv.ParseBool(os.Getenv("KAFKA_ENABLE_SASL")); enableSASL { config.Net.SASL.Enable = true config.Net.SASL.User, config.Net.SASL.Password, err = GetSASLConfiguration(os.Getenv("KAFKA_USERNAME"), os.Getenv("KAFKA_PASSWORD")) if err != nil { logrus.Fatalf("Failed to set SASL configuration: %v", err) } } } // createConsumerProcess gets messages to a Kafka topic from the broker and send the payload to function service func createConsumerProcess(topic, funcName, ns, consumerGroupID string, clientset kubernetes.Interface, stopchan, stoppedchan chan struct{}) { defer close(stoppedchan) group, err := sarama.NewConsumerGroup(strings.Split(brokers, ","), consumerGroupID, config) if err != nil { logrus.Fatalf("Create consumer group (brokers = %v topic = %v namespace = %v function = %v consumerID = %v): %v", brokers, topic, ns, funcName, consumerGroupID, err) } defer func() { if err := group.Close(); err != nil { logrus.Errorf("Close consumer group (brokers = %v topic = %v namespace = %v function = %v consumerID = %v): %v", brokers, topic, ns, funcName, consumerGroupID, err) } }() funcPort, err := utils.GetFunctionPort(clientset, ns, funcName) if err != nil { logrus.Fatalf("Cannot get function port (namespace = %v function = %v): %v", ns, funcName, err) } ready := make(chan struct{}) consumer := NewConsumer(funcName, funcPort, ns, clientset, ready, maxBackOff) errchan := group.Errors() go func() { for err := range errchan { logrus.Errorf("Consumer group (brokers = %v topic = %v namespace = %v function = %v consumerID = %v): %v", brokers, topic, ns, funcName, consumerGroupID, err) } }() ctx, cancel := context.WithCancel(context.Background()) defer cancel() wg := &sync.WaitGroup{} wg.Add(1) go func() { defer wg.Done() for { if err := group.Consume(ctx, []string{topic}, consumer); err != nil { logrus.Errorf("Consumer group consuming (brokers = %v topic = %v namespace = %v function = %v consumerID = %v): %v", brokers, topic, ns, funcName, consumerGroupID, err) } if ctx.Err() != nil { return } consumer.Reset() } }() select { case <-ready: case <-stopchan: cancel() } wg.Wait() } // CreateKafkaConsumer creates a goroutine that subscribes to Kafka topic func CreateKafkaConsumer(triggerObjName, funcName, ns, topic string, clientset kubernetes.Interface) error { consumerID := generateUniqueConsumerGroupID(triggerObjName, funcName, ns, topic) if consumerM[consumerID] { logrus.Debugf("Creating consumer (namespace = %v function = %v trigger = %v topic = %v): already exists, skipping", ns, funcName, triggerObjName, topic) return nil } logrus.Debugf("Creating consumer (namespace = %v function = %v trigger = %v topic = %v)", ns, funcName, triggerObjName, topic) stopM[consumerID] = make(chan struct{}) stoppedM[consumerID] = make(chan struct{}) go createConsumerProcess(topic, funcName, ns, consumerID, clientset, stopM[consumerID], stoppedM[consumerID]) consumerM[consumerID] = true return nil } // DeleteKafkaConsumer deletes goroutine created by CreateKafkaConsumer func DeleteKafkaConsumer(triggerObjName, funcName, ns, topic string) error { consumerID := generateUniqueConsumerGroupID(triggerObjName, funcName, 
ns, topic) if !consumerM[consumerID] { logrus.Debugf("Stopping consumer (namespace = %v function = %v trigger = %v topic = %v): does not exist, skipping", ns, funcName, triggerObjName, topic) return nil } logrus.Debugf("Stopping consumer (namespace = %v function = %v trigger = %v topic = %v)", ns, funcName, triggerObjName, topic) close(stopM[consumerID]) <-stoppedM[consumerID] consumerM[consumerID] = false logrus.Debugf("Stopped consumer (namespace = %v function = %v trigger = %v topic = %v)", ns, funcName, triggerObjName, topic) return nil } func generateUniqueConsumerGroupID(triggerObjName, funcName, ns, topic string) string { return ns + "_" + triggerObjName + "_" + funcName + "_" + topic } // Consumer represents a Sarama consumer group consumer. type Consumer struct { funcName string funcPort int ns string clientset kubernetes.Interface ready chan struct{} backoff time.Duration } // NewConsumer returns new consumer. func NewConsumer(funcName string, funcPort int, ns string, clientset kubernetes.Interface, ready chan struct{}, backoff time.Duration) *Consumer { return &Consumer{ clientset: clientset, funcName: funcName, funcPort: funcPort, ns: ns, ready: ready, backoff: backoff, } } // Reset resets the consumer for new session. func (c *Consumer) Reset() { c.ready = make(chan struct{}) } // Setup is run at the beginning of a new session, before ConsumeClaim. func (c *Consumer) Setup(sarama.ConsumerGroupSession) error { // Mark the consumer as ready. close(c.ready) return nil } // Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited. func (c *Consumer) Cleanup(sarama.ConsumerGroupSession) error { return nil } // ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages(). func (c *Consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { b := getBackOff(c.backoff) for msg := range claim.Messages() { req, err := utils.GetHTTPReq(c.funcName, c.funcPort, msg.Topic, c.ns, kafkatriggersNamespace, "POST", string(msg.Value)) if err != nil { logrus.Errorf("Unable to elaborate request (namespace = %v function = %v topic = %v partition = %v offset = %v): %v", c.ns, c.funcName, msg.Topic, msg.Partition, msg.Offset, err) continue } err = utils.SendMessage(req) session.MarkMessage(msg, "") if err != nil { d := b.NextBackOff() logrus.Errorf("Failed to send message (namespace = %v function = %v topic = %v partition = %v offset = %v): %v: backing off for %v", c.ns, c.funcName, msg.Topic, msg.Partition, msg.Offset, err, d) time.Sleep(d) continue } logrus.Infof("Message sent successfully (namespace = %v function = %v topic = %v partition = %v offset = %v)", c.ns, c.funcName, msg.Topic, msg.Partition, msg.Offset) b.Reset() } return nil } type backOff interface { NextBackOff() time.Duration Reset() } type noopBackOff struct{} func (noopBackOff) NextBackOff() time.Duration { return 0 } func (noopBackOff) Reset() {} func getBackOff(maxBackOff time.Duration) backOff { if maxBackOff < 1 { return noopBackOff{} } b := backoff.NewExponentialBackOff() b.MaxElapsedTime = 0 // ... so that b.NextBackOff() never returns backoff.Stop. b.MaxInterval = maxBackOff return b }
[ "\"KUBELESS_LOG_LEVEL\"", "\"KAFKA_BROKERS\"", "\"BACKOFF_INTERVAL\"", "\"KAFKA_ENABLE_TLS\"", "\"KAFKA_CACERTS\"", "\"KAFKA_CERT\"", "\"KAFKA_KEY\"", "\"KAFKA_INSECURE\"", "\"KAFKA_ENABLE_SASL\"", "\"KAFKA_USERNAME\"", "\"KAFKA_PASSWORD\"" ]
[]
[ "BACKOFF_INTERVAL", "KUBELESS_LOG_LEVEL", "KAFKA_CERT", "KAFKA_KEY", "KAFKA_PASSWORD", "KAFKA_ENABLE_SASL", "KAFKA_USERNAME", "KAFKA_INSECURE", "KAFKA_ENABLE_TLS", "KAFKA_BROKERS", "KAFKA_CACERTS" ]
[]
["BACKOFF_INTERVAL", "KUBELESS_LOG_LEVEL", "KAFKA_CERT", "KAFKA_KEY", "KAFKA_PASSWORD", "KAFKA_ENABLE_SASL", "KAFKA_USERNAME", "KAFKA_INSECURE", "KAFKA_ENABLE_TLS", "KAFKA_BROKERS", "KAFKA_CACERTS"]
go
11
0
axl_FAC.py
"""AXL <addFacInfo> / <updateFacInfo>sample script, using the zeep library Copyright (c) 2020 Cisco and/or its affiliates. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from lxml import etree from requests import Session from requests.auth import HTTPBasicAuth from zeep import Client, Settings, Plugin from zeep.transports import Transport from zeep.exceptions import Fault import sys import urllib3 # Edit .env file to specify your Webex site/user details import os from dotenv import load_dotenv load_dotenv() # Change to true to enable output of request/response headers and XML DEBUG = False # The WSDL is a local file in the working directory, see README WSDL_FILE = 'schema/AXLAPI.wsdl' # This class lets you view the incoming and outgoing http headers and XML class MyLoggingPlugin( Plugin ): def egress( self, envelope, http_headers, operation, binding_options ): # Format the request body as pretty printed XML xml = etree.tostring( envelope, pretty_print = True, encoding = 'unicode') print( f'\nRequest\n-------\nHeaders:\n{http_headers}\n\nBody:\n{xml}' ) def ingress( self, envelope, http_headers, operation ): # Format the response body as pretty printed XML xml = etree.tostring( envelope, pretty_print = True, encoding = 'unicode') print( f'\nResponse\n-------\nHeaders:\n{http_headers}\n\nBody:\n{xml}' ) # This is where the meat of the application starts # The first step is to create a SOAP client session session = Session() # We avoid certificate verification by default # And disable insecure request warnings to keep the output clear session.verify = False urllib3.disable_warnings( urllib3.exceptions.InsecureRequestWarning ) # To enabled SSL cert checking (production) # place the CUCM Tomcat cert .pem file in the root of the project # and uncomment the two lines below # CERT = 'changeme.pem' # session.verify = CERT session.auth = HTTPBasicAuth(os.getenv( 'AXL_USERNAME' ), os.getenv( 'AXL_PASSWORD' ) ) transport = Transport( session = session, timeout = 10 ) # strict=False is not always necessary, but it allows zeep to parse imperfect XML settings = Settings( strict = False, xml_huge_tree = True ) # If debug output is requested, add the MyLoggingPlugin callback plugin = [ MyLoggingPlugin() ] if DEBUG else [ ] # Create the Zeep client with the specified settings client = Client( WSDL_FILE, settings = settings, transport = transport, plugins = plugin ) service = client.create_service( "{http://www.cisco.com/AXLAPIService/}AXLAPIBinding", f'https://{os.getenv( "CUCM_ADDRESS" )}:8443/axl/' ) # Create an object with the new FAC fields 
fac_data = { 'name': 'testFAC', 'code': '1234', 'authorizationLevel': '0' } # Execute an addFacInfo request try: resp = service.addFacInfo( fac_data ) except Fault as err: print('\nZeep error: addFacInfo: {err}'.format( err = err)) else: print('\naddFacInfo response:') print(resp) input( '\nPress Enter to continue...') # Update FAC try: resp = service.updateFacInfo( name = 'testFAC', newName = 'newTestFAC', code = '5678', authorizationLevel = '1' ) except Fault as err: print('\nZeep error: updateFacInfo: {err}'.format( err = err)) else: print('\nupdateFacInfo response:') print( resp ) input( 'Press Enter to continue...') # Delete FAC try: resp = service.removeFacInfo( name = 'newTestFAC' ) except Fault as err: print('\nZeep error: removeFacInfo: {err}'.format( err = err)) else: print('\nremoveFacInfo response:') print(resp)
[]
[]
[ "AXL_USERNAME'", "CUCM_ADDRESS\"", "AXL_PASSWORD'" ]
[]
["AXL_USERNAME'", "CUCM_ADDRESS\"", "AXL_PASSWORD'"]
python
3
0
tools/mb/mb_unittest.py
#!/usr/bin/python # Copyright 2020 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Tests for mb.py.""" from __future__ import print_function from __future__ import absolute_import import json import os import re import sys import unittest if sys.version_info.major == 2: from StringIO import StringIO else: from io import StringIO sys.path.insert( 0, os.path.abspath( os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))) from mb import mb class FakeMBW(mb.MetaBuildWrapper): def __init__(self, win32=False): super(FakeMBW, self).__init__() # Override vars for test portability. if win32: self.chromium_src_dir = 'c:\\fake_src' self.default_config = 'c:\\fake_src\\tools\\mb\\mb_config.pyl' self.default_isolate_map = ('c:\\fake_src\\testing\\buildbot\\' 'gn_isolate_map.pyl') self.platform = 'win32' self.executable = 'c:\\python\\python.exe' self.sep = '\\' self.cwd = 'c:\\fake_src\\out\\Default' else: self.chromium_src_dir = '/fake_src' self.default_config = '/fake_src/tools/mb/mb_config.pyl' self.default_isolate_map = '/fake_src/testing/buildbot/gn_isolate_map.pyl' self.executable = '/usr/bin/python' self.platform = 'linux2' self.sep = '/' self.cwd = '/fake_src/out/Default' self.files = {} self.calls = [] self.cmds = [] self.cross_compile = None self.out = '' self.err = '' self.rmdirs = [] def ExpandUser(self, path): return '$HOME/%s' % path def Exists(self, path): return self.files.get(self._AbsPath(path)) is not None def MaybeMakeDirectory(self, path): abpath = self._AbsPath(path) self.files[abpath] = True def PathJoin(self, *comps): return self.sep.join(comps) def ReadFile(self, path): return self.files[self._AbsPath(path)] def WriteFile(self, path, contents, force_verbose=False): if self.args.dryrun or self.args.verbose or force_verbose: self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path)) abpath = self._AbsPath(path) self.files[abpath] = contents def Call(self, cmd, env=None, buffer_output=True, stdin=None): self.calls.append(cmd) if self.cmds: return self.cmds.pop(0) return 0, '', '' def Print(self, *args, **kwargs): sep = kwargs.get('sep', ' ') end = kwargs.get('end', '\n') f = kwargs.get('file', sys.stdout) if f == sys.stderr: self.err += sep.join(args) + end else: self.out += sep.join(args) + end def TempFile(self, mode='w'): return FakeFile(self.files) def RemoveFile(self, path): abpath = self._AbsPath(path) self.files[abpath] = None def RemoveDirectory(self, path): abpath = self._AbsPath(path) self.rmdirs.append(abpath) files_to_delete = [f for f in self.files if f.startswith(abpath)] for f in files_to_delete: self.files[f] = None def _AbsPath(self, path): if not ((self.platform == 'win32' and path.startswith('c:')) or (self.platform != 'win32' and path.startswith('/'))): path = self.PathJoin(self.cwd, path) if self.sep == '\\': return re.sub(r'\\+', r'\\', path) else: return re.sub('/+', '/', path) class FakeFile(object): def __init__(self, files): self.name = '/tmp/file' self.buf = '' self.files = files def write(self, contents): self.buf += contents def close(self): self.files[self.name] = self.buf TEST_CONFIG = """\ { 'masters': { 'chromium': {}, 'fake_master': { 'fake_builder': 'rel_bot', 'fake_debug_builder': 'debug_goma', 'fake_args_bot': '//build/args/bots/fake_master/fake_args_bot.gn', 'fake_multi_phase': { 'phase_1': 'phase_1', 'phase_2': 'phase_2'}, 'fake_args_file': 'args_file_goma', 'fake_ios_error': 'ios_error', }, }, 'configs': { 'args_file_goma': 
['args_file', 'goma'], 'rel_bot': ['rel', 'goma', 'fake_feature1'], 'debug_goma': ['debug', 'goma'], 'phase_1': ['phase_1'], 'phase_2': ['phase_2'], 'ios_error': ['error'], }, 'mixins': { 'error': { 'gn_args': 'error', }, 'fake_feature1': { 'gn_args': 'enable_doom_melon=true', }, 'goma': { 'gn_args': 'use_goma=true', }, 'args_file': { 'args_file': '//build/args/fake.gn', }, 'phase_1': { 'gn_args': 'phase=1', }, 'phase_2': { 'gn_args': 'phase=2', }, 'rel': { 'gn_args': 'is_debug=false', }, 'debug': { 'gn_args': 'is_debug=true', }, }, } """ TEST_BAD_CONFIG = """\ { 'configs': { 'rel_bot_1': ['rel', 'chrome_with_codecs'], 'rel_bot_2': ['rel', 'bad_nested_config'], }, 'masters': { 'chromium': { 'a': 'rel_bot_1', 'b': 'rel_bot_2', }, }, 'mixins': { 'chrome_with_codecs': { 'gn_args': 'proprietary_codecs=true', }, 'bad_nested_config': { 'mixins': ['chrome_with_codecs'], }, 'rel': { 'gn_args': 'is_debug=false', }, }, } """ TEST_ARGS_FILE_TWICE_CONFIG = """\ { 'masters': { 'chromium': {}, 'fake_master': { 'fake_args_file_twice': 'args_file_twice', }, }, 'configs': { 'args_file_twice': ['args_file', 'args_file'], }, 'mixins': { 'args_file': { 'args_file': '//build/args/fake.gn', }, }, } """ TEST_DUP_CONFIG = """\ { 'masters': { 'chromium': {}, 'fake_master': { 'fake_builder': 'some_config', 'other_builder': 'some_other_config', }, }, 'configs': { 'some_config': ['args_file'], 'some_other_config': ['args_file'], }, 'mixins': { 'args_file': { 'args_file': '//build/args/fake.gn', }, }, } """ TRYSERVER_CONFIG = """\ { 'masters': { 'not_a_tryserver': { 'fake_builder': 'fake_config', }, 'tryserver.chromium.linux': { 'try_builder': 'fake_config', }, 'tryserver.chromium.mac': { 'try_builder2': 'fake_config', }, }, 'configs': {}, 'mixins': {}, } """ class UnitTest(unittest.TestCase): def fake_mbw(self, files=None, win32=False): mbw = FakeMBW(win32=win32) mbw.files.setdefault(mbw.default_config, TEST_CONFIG) mbw.files.setdefault( mbw.ToAbsPath('//testing/buildbot/gn_isolate_map.pyl'), '''{ "foo_unittests": { "label": "//foo:foo_unittests", "type": "console_test_launcher", "args": [], }, }''') mbw.files.setdefault( mbw.ToAbsPath('//build/args/bots/fake_master/fake_args_bot.gn'), 'is_debug = false\n') if files: for path, contents in files.items(): mbw.files[path] = contents return mbw def check(self, args, mbw=None, files=None, out=None, err=None, ret=None, env=None): if not mbw: mbw = self.fake_mbw(files) try: prev_env = os.environ.copy() os.environ = env if env else prev_env actual_ret = mbw.Main(args) finally: os.environ = prev_env self.assertEqual(actual_ret, ret) if out is not None: self.assertEqual(mbw.out, out) if err is not None: self.assertEqual(mbw.err, err) return mbw def test_analyze(self): files = {'/tmp/in.json': '''{\ "files": ["foo/foo_unittest.cc"], "test_targets": ["foo_unittests"], "additional_compile_targets": ["all"] }''', '/tmp/out.json.gn': '''{\ "status": "Found dependency", "compile_targets": ["//foo:foo_unittests"], "test_targets": ["//foo:foo_unittests"] }'''} mbw = self.fake_mbw(files) mbw.Call = lambda cmd, env=None, buffer_output=True, stdin=None: (0, '', '') self.check(['analyze', '-c', 'debug_goma', '//out/Default', '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0) out = json.loads(mbw.files['/tmp/out.json']) self.assertEqual(out, { 'status': 'Found dependency', 'compile_targets': ['foo:foo_unittests'], 'test_targets': ['foo_unittests'] }) def test_analyze_optimizes_compile_for_all(self): files = {'/tmp/in.json': '''{\ "files": ["foo/foo_unittest.cc"], "test_targets": 
["foo_unittests"], "additional_compile_targets": ["all"] }''', '/tmp/out.json.gn': '''{\ "status": "Found dependency", "compile_targets": ["//foo:foo_unittests", "all"], "test_targets": ["//foo:foo_unittests"] }'''} mbw = self.fake_mbw(files) mbw.Call = lambda cmd, env=None, buffer_output=True, stdin=None: (0, '', '') self.check(['analyze', '-c', 'debug_goma', '//out/Default', '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0) out = json.loads(mbw.files['/tmp/out.json']) # check that 'foo_unittests' is not in the compile_targets self.assertEqual(['all'], out['compile_targets']) def test_analyze_handles_other_toolchains(self): files = {'/tmp/in.json': '''{\ "files": ["foo/foo_unittest.cc"], "test_targets": ["foo_unittests"], "additional_compile_targets": ["all"] }''', '/tmp/out.json.gn': '''{\ "status": "Found dependency", "compile_targets": ["//foo:foo_unittests", "//foo:foo_unittests(bar)"], "test_targets": ["//foo:foo_unittests"] }'''} mbw = self.fake_mbw(files) mbw.Call = lambda cmd, env=None, buffer_output=True, stdin=None: (0, '', '') self.check(['analyze', '-c', 'debug_goma', '//out/Default', '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0) out = json.loads(mbw.files['/tmp/out.json']) # crbug.com/736215: If GN returns a label containing a toolchain, # MB (and Ninja) don't know how to handle it; to work around this, # we give up and just build everything we were asked to build. The # output compile_targets should include all of the input test_targets and # additional_compile_targets. self.assertEqual(['all', 'foo_unittests'], out['compile_targets']) def test_analyze_handles_way_too_many_results(self): too_many_files = ', '.join(['"//foo:foo%d"' % i for i in range(40 * 1024)]) files = {'/tmp/in.json': '''{\ "files": ["foo/foo_unittest.cc"], "test_targets": ["foo_unittests"], "additional_compile_targets": ["all"] }''', '/tmp/out.json.gn': '''{\ "status": "Found dependency", "compile_targets": [''' + too_many_files + '''], "test_targets": ["//foo:foo_unittests"] }'''} mbw = self.fake_mbw(files) mbw.Call = lambda cmd, env=None, buffer_output=True, stdin=None: (0, '', '') self.check(['analyze', '-c', 'debug_goma', '//out/Default', '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0) out = json.loads(mbw.files['/tmp/out.json']) # If GN returns so many compile targets that we might have command-line # issues, we should give up and just build everything we were asked to # build. The output compile_targets should include all of the input # test_targets and additional_compile_targets. self.assertEqual(['all', 'foo_unittests'], out['compile_targets']) def test_gen(self): mbw = self.fake_mbw() self.check(['gen', '-c', 'debug_goma', '//out/Default', '-g', '/goma'], mbw=mbw, ret=0) self.assertMultiLineEqual(mbw.files['/fake_src/out/Default/args.gn'], ('goma_dir = "/goma"\n' 'is_debug = true\n' 'use_goma = true\n')) # Make sure we log both what is written to args.gn and the command line. 
self.assertIn('Writing """', mbw.out) self.assertIn('/fake_src/buildtools/linux64/gn gen //out/Default --check', mbw.out) mbw = self.fake_mbw(win32=True) self.check(['gen', '-c', 'debug_goma', '-g', 'c:\\goma', '//out/Debug'], mbw=mbw, ret=0) self.assertMultiLineEqual(mbw.files['c:\\fake_src\\out\\Debug\\args.gn'], ('goma_dir = "c:\\\\goma"\n' 'is_debug = true\n' 'use_goma = true\n')) self.assertIn( 'c:\\fake_src\\buildtools\\win\\gn.exe gen //out/Debug ' '--check', mbw.out) mbw = self.fake_mbw() self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_bot', '//out/Debug'], mbw=mbw, ret=0) # TODO(https://crbug.com/1093038): This assert is inappropriately failing. # self.assertEqual( # mbw.files['/fake_src/out/Debug/args.gn'], # 'import("//build/args/bots/fake_master/fake_args_bot.gn")\n') def test_gen_args_file_mixins(self): mbw = self.fake_mbw() self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_file', '//out/Debug'], mbw=mbw, ret=0) self.assertEqual( mbw.files['/fake_src/out/Debug/args.gn'], ('import("//build/args/fake.gn")\n' 'use_goma = true\n')) def test_gen_args_file_twice(self): mbw = self.fake_mbw() mbw.files[mbw.default_config] = TEST_ARGS_FILE_TWICE_CONFIG self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_file_twice', '//out/Debug'], mbw=mbw, ret=1) def test_gen_fails(self): mbw = self.fake_mbw() mbw.Call = lambda cmd, env=None, buffer_output=True, stdin=None: (1, '', '') self.check(['gen', '-c', 'debug_goma', '//out/Default'], mbw=mbw, ret=1) def test_gen_swarming(self): files = { '/tmp/swarming_targets': 'base_unittests\n', '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( "{'base_unittests': {" " 'label': '//base:base_unittests'," " 'type': 'raw'," " 'args': []," "}}\n" ), } mbw = self.fake_mbw(files) def fake_call(cmd, env=None, buffer_output=True, stdin=None): del cmd del env del buffer_output del stdin mbw.files['/fake_src/out/Default/base_unittests.runtime_deps'] = ( 'base_unittests\n') return 0, '', '' mbw.Call = fake_call self.check(['gen', '-c', 'debug_goma', '--swarming-targets-file', '/tmp/swarming_targets', '//out/Default'], mbw=mbw, ret=0) self.assertIn('/fake_src/out/Default/base_unittests.isolate', mbw.files) self.assertIn('/fake_src/out/Default/base_unittests.isolated.gen.json', mbw.files) def test_gen_swarming_script(self): files = { '/tmp/swarming_targets': 'cc_perftests\n', '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( "{'cc_perftests': {" " 'label': '//cc:cc_perftests'," " 'type': 'script'," " 'script': '/fake_src/out/Default/test_script.py'," " 'args': []," "}}\n" ), } mbw = self.fake_mbw(files=files) def fake_call(cmd, env=None, buffer_output=True, stdin=None): del cmd del env del buffer_output del stdin mbw.files['/fake_src/out/Default/cc_perftests.runtime_deps'] = ( 'cc_perftests\n') return 0, '', '' mbw.Call = fake_call self.check(['gen', '-c', 'debug_goma', '--swarming-targets-file', '/tmp/swarming_targets', '--isolate-map-file', '/fake_src/testing/buildbot/gn_isolate_map.pyl', '//out/Default'], mbw=mbw, ret=0) self.assertIn('/fake_src/out/Default/cc_perftests.isolate', mbw.files) self.assertIn('/fake_src/out/Default/cc_perftests.isolated.gen.json', mbw.files) def test_multiple_isolate_maps(self): files = { '/tmp/swarming_targets': 'cc_perftests\n', '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( "{'cc_perftests': {" " 'label': '//cc:cc_perftests'," " 'type': 'raw'," " 'args': []," "}}\n" ), '/fake_src/testing/buildbot/gn_isolate_map2.pyl': ( "{'cc_perftests2': {" " 'label': '//cc:cc_perftests'," " 'type': 'raw'," " 'args': []," 
"}}\n" ), } mbw = self.fake_mbw(files=files) def fake_call(cmd, env=None, buffer_output=True, stdin=None): del cmd del env del buffer_output del stdin mbw.files['/fake_src/out/Default/cc_perftests.runtime_deps'] = ( 'cc_perftests_fuzzer\n') return 0, '', '' mbw.Call = fake_call self.check(['gen', '-c', 'debug_goma', '--swarming-targets-file', '/tmp/swarming_targets', '--isolate-map-file', '/fake_src/testing/buildbot/gn_isolate_map.pyl', '--isolate-map-file', '/fake_src/testing/buildbot/gn_isolate_map2.pyl', '//out/Default'], mbw=mbw, ret=0) self.assertIn('/fake_src/out/Default/cc_perftests.isolate', mbw.files) self.assertIn('/fake_src/out/Default/cc_perftests.isolated.gen.json', mbw.files) def test_duplicate_isolate_maps(self): files = { '/tmp/swarming_targets': 'cc_perftests\n', '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( "{'cc_perftests': {" " 'label': '//cc:cc_perftests'," " 'type': 'raw'," " 'args': []," "}}\n" ), '/fake_src/testing/buildbot/gn_isolate_map2.pyl': ( "{'cc_perftests': {" " 'label': '//cc:cc_perftests'," " 'type': 'raw'," " 'args': []," "}}\n" ), 'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps': ( "cc_perftests\n" ), } mbw = self.fake_mbw(files=files, win32=True) # Check that passing duplicate targets into mb fails. self.check(['gen', '-c', 'debug_goma', '--swarming-targets-file', '/tmp/swarming_targets', '--isolate-map-file', '/fake_src/testing/buildbot/gn_isolate_map.pyl', '--isolate-map-file', '/fake_src/testing/buildbot/gn_isolate_map2.pyl', '//out/Default'], mbw=mbw, ret=1) def test_isolate(self): files = { '/fake_src/out/Default/toolchain.ninja': "", '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( "{'base_unittests': {" " 'label': '//base:base_unittests'," " 'type': 'raw'," " 'args': []," "}}\n" ), '/fake_src/out/Default/base_unittests.runtime_deps': ( "base_unittests\n" ), } self.check(['isolate', '-c', 'debug_goma', '//out/Default', 'base_unittests'], files=files, ret=0) # test running isolate on an existing build_dir files['/fake_src/out/Default/args.gn'] = 'is_debug = True\n' self.check(['isolate', '//out/Default', 'base_unittests'], files=files, ret=0) self.check(['isolate', '//out/Default', 'base_unittests'], files=files, ret=0) def test_isolate_dir(self): files = { '/fake_src/out/Default/toolchain.ninja': "", '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( "{'base_unittests': {" " 'label': '//base:base_unittests'," " 'type': 'raw'," " 'args': []," "}}\n" ), } mbw = self.fake_mbw(files=files) mbw.cmds.append((0, '', '')) # Result of `gn gen` mbw.cmds.append((0, '', '')) # Result of `autoninja` # Result of `gn desc runtime_deps` mbw.cmds.append((0, 'base_unitests\n../../test_data/\n', '')) self.check(['isolate', '-c', 'debug_goma', '//out/Default', 'base_unittests'], mbw=mbw, ret=0, err='') def test_isolate_generated_dir(self): files = { '/fake_src/out/Default/toolchain.ninja': "", '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( "{'base_unittests': {" " 'label': '//base:base_unittests'," " 'type': 'raw'," " 'args': []," "}}\n" ), } mbw = self.fake_mbw(files=files) mbw.cmds.append((0, '', '')) # Result of `gn gen` mbw.cmds.append((0, '', '')) # Result of `autoninja` # Result of `gn desc runtime_deps` mbw.cmds.append((0, 'base_unitests\ntest_data/\n', '')) expected_err = ('error: gn `data` items may not list generated directories;' ' list files in directory instead for:\n' '//out/Default/test_data/\n') self.check(['isolate', '-c', 'debug_goma', '//out/Default', 'base_unittests'], mbw=mbw, ret=1) 
self.assertEqual(mbw.out[-len(expected_err):], expected_err) def test_run(self): files = { '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( "{'base_unittests': {" " 'label': '//base:base_unittests'," " 'type': 'raw'," " 'args': []," "}}\n" ), '/fake_src/out/Default/base_unittests.runtime_deps': ( "base_unittests\n" ), } self.check(['run', '-c', 'debug_goma', '//out/Default', 'base_unittests'], files=files, ret=0) def test_run_swarmed(self): files = { '/fake_src/testing/buildbot/gn_isolate_map.pyl': ("{'base_unittests': {" " 'label': '//base:base_unittests'," " 'type': 'raw'," " 'args': []," "}}\n"), '/fake_src/out/Default/base_unittests.runtime_deps': ("base_unittests\n"), '/fake_src/out/Default/base_unittests.archive.json': ("{\"base_unittests\":\"fake_hash\"}"), '/fake_src/third_party/depot_tools/cipd_manifest.txt': ("# vpython\n" "/some/vpython/pkg git_revision:deadbeef\n"), } mbw = self.fake_mbw(files=files) original_impl = mbw.ToSrcRelPath def to_src_rel_path_stub(path): if path.endswith('base_unittests.archive.json'): return 'base_unittests.archive.json' return original_impl(path) mbw.ToSrcRelPath = to_src_rel_path_stub self.check(['run', '-s', '-c', 'debug_goma', '//out/Default', 'base_unittests'], mbw=mbw, ret=0) self.check(['run', '-s', '-c', 'debug_goma', '-d', 'os', 'Win7', '//out/Default', 'base_unittests'], mbw=mbw, ret=0) def test_lookup(self): self.check(['lookup', '-c', 'debug_goma'], ret=0, out=('\n' 'Writing """\\\n' 'is_debug = true\n' 'use_goma = true\n' '""" to _path_/args.gn.\n\n' '/fake_src/buildtools/linux64/gn gen _path_\n')) def test_quiet_lookup(self): self.check(['lookup', '-c', 'debug_goma', '--quiet'], ret=0, out=('is_debug = true\n' 'use_goma = true\n')) def test_lookup_goma_dir_expansion(self): self.check(['lookup', '-c', 'rel_bot', '-g', '/foo'], ret=0, out=('\n' 'Writing """\\\n' 'enable_doom_melon = true\n' 'goma_dir = "/foo"\n' 'is_debug = false\n' 'use_goma = true\n' '""" to _path_/args.gn.\n\n' '/fake_src/buildtools/linux64/gn gen _path_\n')) def test_help(self): orig_stdout = sys.stdout try: sys.stdout = StringIO() self.assertRaises(SystemExit, self.check, ['-h']) self.assertRaises(SystemExit, self.check, ['help']) self.assertRaises(SystemExit, self.check, ['help', 'gen']) finally: sys.stdout = orig_stdout def test_multiple_phases(self): # Check that not passing a --phase to a multi-phase builder fails. mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase'], ret=1) self.assertIn('Must specify a build --phase', mbw.out) # Check that passing a --phase to a single-phase builder fails. mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_builder', '--phase', 'phase_1'], ret=1) self.assertIn('Must not specify a build --phase', mbw.out) # Check that passing a wrong phase key to a multi-phase builder fails. mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase', '--phase', 'wrong_phase'], ret=1) self.assertIn('Phase wrong_phase doesn\'t exist', mbw.out) # Check that passing a correct phase key to a multi-phase builder passes. 
mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase', '--phase', 'phase_1'], ret=0) self.assertIn('phase = 1', mbw.out) mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase', '--phase', 'phase_2'], ret=0) self.assertIn('phase = 2', mbw.out) def test_recursive_lookup(self): files = { '/fake_src/build/args/fake.gn': ( 'enable_doom_melon = true\n' 'enable_antidoom_banana = true\n' ) } self.check(['lookup', '-m', 'fake_master', '-b', 'fake_args_file', '--recursive'], files=files, ret=0, out=('enable_antidoom_banana = true\n' 'enable_doom_melon = true\n' 'use_goma = true\n')) def test_validate(self): mbw = self.fake_mbw() self.check(['validate'], mbw=mbw, ret=0) def test_bad_validate(self): mbw = self.fake_mbw() mbw.files[mbw.default_config] = TEST_BAD_CONFIG self.check(['validate', '-f', mbw.default_config], mbw=mbw, ret=1) def test_duplicate_validate(self): mbw = self.fake_mbw() mbw.files[mbw.default_config] = TEST_DUP_CONFIG self.check(['validate'], mbw=mbw, ret=1) self.assertIn( 'Duplicate configs detected. When evaluated fully, the ' 'following configs are all equivalent: \'some_config\', ' '\'some_other_config\'.', mbw.out) def test_build_command_unix(self): files = { '/fake_src/out/Default/toolchain.ninja': '', '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( '{"base_unittests": {' ' "label": "//base:base_unittests",' ' "type": "raw",' ' "args": [],' '}}\n') } mbw = self.fake_mbw(files) self.check(['run', '//out/Default', 'base_unittests'], mbw=mbw, ret=0) self.assertIn(['autoninja', '-C', 'out/Default', 'base_unittests'], mbw.calls) def test_build_command_windows(self): files = { 'c:\\fake_src\\out\\Default\\toolchain.ninja': '', 'c:\\fake_src\\testing\\buildbot\\gn_isolate_map.pyl': ( '{"base_unittests": {' ' "label": "//base:base_unittests",' ' "type": "raw",' ' "args": [],' '}}\n') } mbw = self.fake_mbw(files, True) self.check(['run', '//out/Default', 'base_unittests'], mbw=mbw, ret=0) self.assertIn(['autoninja.bat', '-C', 'out\\Default', 'base_unittests'], mbw.calls) def test_ios_error_config_with_ios_json(self): """Ensures that ios_error config finds the correct iOS JSON file for args""" files = { '/fake_src/ios/build/bots/fake_master/fake_ios_error.json': ('{"gn_args": ["is_debug=true"]}\n') } mbw = self.fake_mbw(files) self.check(['lookup', '-m', 'fake_master', '-b', 'fake_ios_error'], mbw=mbw, ret=0, out=('\n' 'Writing """\\\n' 'is_debug = true\n' '""" to _path_/args.gn.\n\n' '/fake_src/buildtools/linux64/gn gen _path_\n')) def test_bot_definition_in_ios_json_only(self): """Ensures that logic checks iOS JSON file for args When builder definition is not present, ensure that ios/build/bots/ is checked. """ files = { '/fake_src/ios/build/bots/fake_master/fake_ios_bot.json': ('{"gn_args": ["is_debug=true"]}\n') } mbw = self.fake_mbw(files) self.check(['lookup', '-m', 'fake_master', '-b', 'fake_ios_bot'], mbw=mbw, ret=0, out=('\n' 'Writing """\\\n' 'is_debug = true\n' '""" to _path_/args.gn.\n\n' '/fake_src/buildtools/linux64/gn gen _path_\n')) def test_ios_error_config_missing_json_definition(self): """Ensures MBErr is thrown Expect MBErr with 'No iOS definition ...' for iOS bots when the bot config is ios_error, but there is no iOS JSON definition for it. 
""" mbw = self.fake_mbw() self.check(['lookup', '-m', 'fake_master', '-b', 'fake_ios_error'], mbw=mbw, ret=1) self.assertIn('MBErr: No iOS definition was found.', mbw.out) def test_bot_missing_definition(self): """Ensures builder missing MBErr is thrown Expect the original MBErr to be thrown for iOS bots when the bot definition doesn't exist at all. """ mbw = self.fake_mbw() self.check(['lookup', '-m', 'fake_master', '-b', 'random_bot'], mbw=mbw, ret=1) self.assertIn('MBErr: Builder name "random_bot" not found under masters', mbw.out) if __name__ == '__main__': unittest.main()
[]
[]
[]
[]
[]
python
0
0
example/runtests.py
# This file mainly exists to allow python setup.py test to work.
import os, sys

os.environ['DJANGO_SETTINGS_MODULE'] = 'example.example.settings'
test_dir = os.path.dirname(__file__)
print(test_dir)
sys.path.insert(0, test_dir)

from django.test.utils import get_runner
from django.conf import settings

def runtests():
    TestRunner = get_runner(settings)
    runner = TestRunner(verbosity=1, interactive=True)
    failures = runner.run_tests([])
    sys.exit(failures)

if __name__ == '__main__':
    runtests()
[]
[]
[ "DJANGO_SETTINGS_MODULE" ]
[]
["DJANGO_SETTINGS_MODULE"]
python
1
0
tests/layers/test_layers_merge.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import unittest

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

import numpy as np
import tensorflow as tf
import tensorlayer as tl

from tests.utils import CustomTestCase


class Layer_Merge_Test(CustomTestCase):

    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def test_concat(self):

        class CustomModel(tl.models.Model):

            def __init__(self):
                super(CustomModel, self).__init__(name="custom")
                self.dense1 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu1_1')
                self.dense2 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu2_1')
                self.concat = tl.layers.Concat(concat_dim=1, name='concat_layer')

            def forward(self, inputs):
                d1 = self.dense1(inputs)
                d2 = self.dense2(inputs)
                outputs = self.concat([d1, d2])
                return outputs

        model = CustomModel()
        model.train()
        inputs = tf.convert_to_tensor(np.random.random([4, 20]).astype(np.float32))
        outputs = model(inputs)
        print(model)
        self.assertEqual(outputs.get_shape().as_list(), [4, 20])

    def test_elementwise(self):

        class CustomModel(tl.models.Model):

            def __init__(self):
                super(CustomModel, self).__init__(name="custom")
                self.dense1 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu1_1')
                self.dense2 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu2_1')
                self.element = tl.layers.Elementwise(combine_fn=tf.minimum, name='minimum', act=tf.identity)

            def forward(self, inputs):
                d1 = self.dense1(inputs)
                d2 = self.dense2(inputs)
                outputs = self.element([d1, d2])
                return outputs, d1, d2

        model = CustomModel()
        model.train()
        inputs = tf.convert_to_tensor(np.random.random([4, 20]).astype(np.float32))
        outputs, d1, d2 = model(inputs)
        print(model)
        min = tf.minimum(d1, d2)
        self.assertEqual(outputs.get_shape().as_list(), [4, 10])
        self.assertTrue(np.array_equal(min.numpy(), outputs.numpy()))


if __name__ == '__main__':
    unittest.main()
[]
[]
[ "TF_CPP_MIN_LOG_LEVEL" ]
[]
["TF_CPP_MIN_LOG_LEVEL"]
python
1
0
varats-core/varats/utils/cli_util.py
"""Command line utilities.""" import logging import os import sys import typing as tp from enum import Enum from select import select import click from plumbum.lib import read_fd_decode_safely from plumbum.machines.local import PlumbumLocalPopen from rich.traceback import install def cli_yn_choice(question: str, default: str = 'y') -> bool: """Ask the user to make a y/n decision on the cli.""" choices = 'Y/n' if default.lower() in ('y', 'yes') else 'y/N' choice: str = str( input( "{message} ({choices}) ".format(message=question, choices=choices) ) ) values: tp.Union[tp.Tuple[str, str], tp.Tuple[str, str, str]] = ('y', 'yes', '' ) if choices == 'Y/n' else ('y', 'yes') return choice.strip().lower() in values ListType = tp.TypeVar("ListType") def cli_list_choice( question: str, choices: tp.List[ListType], choice_to_str: tp.Callable[[ListType], str], on_choice_callback: tp.Callable[[ListType], None], start_label: int = 0, default: int = 0, repeat: bool = False ) -> None: """ Ask the user to select an item from a list on the cli. Args: question: the question to ask the user choices: the choices the user has choice_to_str: a function converting a choice to a string on_choice_callback: action to perform when a choice has been made start_label: the number label of the first choice default: the default choice that is taken if no input is given repeat: whether to ask for another choice after ``on_choice_callback`` has finished """ if repeat: prompt = f"{question} or enter 'q' to quit (default={default}): " else: prompt = f"{question} (default={default}): " max_idx_digits = len(str(len(choices) - 1)) for idx, choice in enumerate(choices, start=start_label): idx_str = f"{idx}.".ljust(max_idx_digits + 1, " ") print(f"{idx_str} {choice_to_str(choice)}") user_choice = input(prompt) while not user_choice.startswith("q"): if not user_choice: user_choice = str(default) if user_choice.isdigit( ) and start_label <= int(user_choice) < start_label + len(choices): on_choice_callback(choices[int(user_choice) - start_label]) if not repeat: return user_choice = input(prompt) def initialize_cli_tool() -> None: """Initializes all relevant context and tools for varats cli tools.""" install(width=120) initialize_logger_config() def initialize_logger_config() -> None: """Initializes the logging framework with a basic config, allowing the user to pass the warning level via an environment variable ``LOG_LEVEL``.""" log_level = os.environ.get('LOG_LEVEL', "WARNING").upper() logging.basicConfig(level=log_level) EnumTy = tp.TypeVar("EnumTy", bound=Enum) class EnumChoice(click.Choice, tp.Generic[EnumTy]): """ Enum choice type for click. This type can be used with click to specify a choice from the given enum. """ def __init__(self, enum: tp.Type[EnumTy], case_sensitive: bool = True): self.__enum = enum super().__init__(list(dict(enum.__members__).keys()), case_sensitive) def convert( self, value: str, param: tp.Optional[click.Parameter], ctx: tp.Optional[click.Context] ) -> EnumTy: return self.__enum[super().convert(value, param, ctx)] def tee(process: PlumbumLocalPopen, buffered: bool = True) -> tp.Tuple[int, str, str]: """ Adapted from from plumbum's TEE implementation. Plumbum's TEE does not allow access to the underlying popen object, which we need to properly handle keyboard interrupts. Therefore, we just copy the relevant portion of plumbum's implementation and create the popen object by ourself. 
""" outbuf: tp.List[bytes] = [] errbuf: tp.List[bytes] = [] out = process.stdout err = process.stderr buffers = {out: outbuf, err: errbuf} tee_to = {out: sys.stdout, err: sys.stderr} done = False while not done: # After the process exits, we have to do one more # round of reading in order to drain any data in the # pipe buffer. Thus, we check poll() here, # unconditionally enter the read loop, and only then # break out of the outer loop if the process has # exited. done = process.poll() is not None # We continue this loop until we've done a full # `select()` call without collecting any input. This # ensures that our final pass -- after process exit -- # actually drains the pipe buffers, even if it takes # multiple calls to read(). progress = True while progress: progress = False ready, _, _ = select((out, err), (), ()) # logging.info(f"Streams ready: {[r.fileno() for r in ready]}") for file_descriptor in ready: buf = buffers[file_descriptor] data, text = read_fd_decode_safely(file_descriptor, 4096) if not data: # eof continue progress = True # Python conveniently line-buffers stdout and stderr for # us, so all we need to do is write to them # This will automatically add up to three bytes if it cannot be # decoded tee_to[file_descriptor].write(text) # And then "unbuffered" is just flushing after each write if not buffered: tee_to[file_descriptor].flush() buf.append(data) stdout = "".join([x.decode("utf-8") for x in outbuf]) stderr = "".join([x.decode("utf-8") for x in errbuf]) return process.returncode, stdout, stderr
[]
[]
[ "LOG_LEVEL" ]
[]
["LOG_LEVEL"]
python
1
0
terminusdb_client/woqlclient/woqlClient.py
"""woqlClient.py WOQLClient is the Python public API for TerminusDB""" import copy import gzip import json import os import urllib.parse as urlparse import warnings from collections.abc import Iterable from datetime import datetime from enum import Enum from typing import Any, Dict, List, Optional, Union import requests from ..__version__ import __version__ from ..errors import DatabaseError, InterfaceError from ..woql_utils import ( _clean_dict, _dt_dict, _dt_list, _finish_response, _result2stream, ) from ..woqlquery.woql_query import WOQLQuery # WOQL client object # license Apache Version 2 # summary Python module for accessing the Terminus DB API class JWTAuth(requests.auth.AuthBase): """Class for JWT Authentication in requests""" def __init__(self, token): self._token = token def __call__(self, r): r.headers["Authorization"] = f"Bearer {self._token}" return r class APITokenAuth(requests.auth.AuthBase): """Class for API Token Authentication in requests""" def __init__(self, token): self._token = token def __call__(self, r): r.headers["API_TOKEN"] = f"{self._token}" return r class ResourceType(Enum): """Enum for the different TerminusDB resources""" DB = 1 META = 2 REPO = 3 COMMITS = 4 REF = 5 BRANCH = 6 class Patch: def __init__(self, json=None): if json: self.from_json(json) else: self.content = None @property def update(self): def swap_value(swap_item): result_dict = {} for key, item in swap_item.items(): if isinstance(item, dict): operation = item.get("@op") if operation is not None and operation == "SwapValue": result_dict[key] = item.get("@after") elif operation is None: result_dict[key] = swap_value(item) return result_dict return swap_value(self.content) @update.setter def update(self): raise Exception("Cannot set update for patch") @update.deleter def update(self): raise Exception("Cannot delete update for patch") @property def before(self): def extract_before(extract_item): before_dict = {} for key, item in extract_item.items(): if isinstance(item, dict): value = item.get("@before") if value is not None: before_dict[key] = value else: before_dict[key] = extract_before(item) else: before_dict[key] = item return before_dict return extract_before(self.content) @before.setter def before(self): raise Exception("Cannot set before for patch") @before.deleter def before(self): raise Exception("Cannot delete before for patch") def from_json(self, json_str): content = json.loads(json_str) if isinstance(content, dict): self.content = _dt_dict(content) else: self.content = _dt_list(content) def to_json(self): return json.dumps(_clean_dict(self.content)) def copy(self): return copy.deepcopy(self) class WOQLClient: """Client for querying a TerminusDB server using WOQL queries. Attributes ---------- server_url: str URL of the server that this client connected. api: str API endpoint for this client. team: str Team that this client is using. "admin" for local dbs. db: str Database that this client is connected to. user: str TerminiusDB user that this client is using. "admin" for local dbs. branch: str Branch of the database that this client is connected to. Default to "main". ref: str, None Ref setting for the client. Default to None. repo: str Repo identifier of the database that this client is connected to. Default to "local". """ def __init__(self, server_url: str, **kwargs) -> None: r"""The WOQLClient constructor. Parameters ---------- server_url : str URL of the server that this client will connect to. 
\**kwargs Extra configuration options """ self.server_url = server_url.strip("/") self.api = f"{self.server_url}/api" self._connected = False # properties with get/setters self._team = None self._db = None self._user = None self._branch = None self._ref = None self._repo = None @property def team(self): if isinstance(self._team, str): return urlparse.unquote(self._team) else: return self._team @team.setter def team(self, value): if isinstance(value, str): self._team = urlparse.quote(value) else: self._team = value @property def db(self): if isinstance(self._db, str): return urlparse.unquote(self._db) else: return self._db @db.setter def db(self, value): if isinstance(value, str): self._db = urlparse.quote(value) else: self._db = value @property def user(self): if isinstance(self._user, str): return urlparse.unquote(self._user) else: return self._user @user.setter def user(self, value): if isinstance(value, str): self._user = urlparse.quote(value) else: self._user = value @property def branch(self): if isinstance(self._branch, str): return urlparse.unquote(self._branch) else: return self._branch @branch.setter def branch(self, value): if isinstance(value, str): self._branch = urlparse.quote(value) else: self._branch = value @property def repo(self): if isinstance(self._repo, str): return urlparse.unquote(self._repo) else: self._repo @repo.setter def repo(self, value): if isinstance(value, str): self._repo = urlparse.quote(value) else: self._repo = value @property def ref(self): return self._ref @ref.setter def ref(self, value): if isinstance(value, str): value = value.lower() if value in ["local", "remote", None]: self._ref = value else: raise ValueError("ref can only be 'local' or 'remote'") def connect( self, team: str = "admin", db: Optional[str] = None, remote_auth: str = None, use_token: bool = False, jwt_token: Optional[str] = None, api_token: Optional[str] = None, key: str = "root", user: str = "admin", branch: str = "main", ref: Optional[str] = None, repo: str = "local", **kwargs, ) -> None: r"""Connect to a Terminus server at the given URI with an API key. Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations. Parameters ---------- team: str Name of the team, default to be "admin" db: optional, str Name of the database connected remote_auth: optional, str Remote Auth setting key: optional, str API key for connecting, default to be "root" user: optional, str Name of the user, default to be "admin" use_token: bool Use token to connect. If both `jwt_token` and `api_token` is not provided (None), then it will use the ENV variable TERMINUSDB_ACCESS_TOKEN to connect as the API token jwt_token: optional, str The Bearer JWT token to connect. Default to be None. api_token: optional, strs The API token to connect. Default to be None. branch: optional, str Branch to be connected, default to be "main" ref: optional, str Ref setting repo: optional, str Local or remote repo, default to be "local" \**kwargs Extra configuration options. 
Examples ------- >>> client = WOQLClient("https://127.0.0.1:6363") >>> client.connect(key="root", team="admin", user="admin", db="example_db") """ self.team = team self.db = db self._remote_auth = remote_auth self._key = key self.user = user self._use_token = use_token self._jwt_token = jwt_token self._api_token = api_token self.branch = branch self.ref = ref self.repo = repo self._connected = True try: self._db_info = json.loads( _finish_response( requests.get( self.api + "/info", headers={ "user-agent": f"terminusdb-client-python/{__version__}" }, auth=self._auth(), ) ) ) except Exception as error: raise InterfaceError( f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}" ) from None if self.db is not None: try: _finish_response( requests.head( self._db_url(), headers={ "user-agent": f"terminusdb-client-python/{__version__}" }, params={"exists": "true"}, auth=self._auth(), ) ) except DatabaseError: raise InterfaceError(f"Connection fail, {self.db} does not exist.") self._author = self.user def close(self) -> None: """Undo connect and close the connection. The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is call again.""" self._connected = False def _check_connection(self, check_db=True) -> None: """Raise connection InterfaceError if not connected Defaults to check if a db is connected""" if not self._connected: raise InterfaceError("Client is not connected to a TerminusDB server.") if check_db and self.db is None: raise InterfaceError( "No database is connected. Please either connect to a database or create a new database." ) def get_commit_history(self, max_history: int = 500) -> list: """Get the whole commit history. Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the follow format: {"commit_id": {"author": "commit_author", "message": "commit_message", "timestamp: <datetime object of the timestamp>" } } Parameters ---------- max_history: int, optional maximum number of commit that would return, counting backwards from your current commit. Default is set to 500. It need to be nop-negitive, if input is 0 it will still give the last commit. 
Example ------- >>> from terminusdb_client import WOQLClient >>> client = WOQLClient("https://127.0.0.1:6363" >>> client.connect(db="bank_balance_example") >>> client.get_commit_history() [{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.da tetime(2020, 9, 3, 15, 29, 34)}, {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': '[email protected]', 'm essage': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': 'rciy1rfu5foj67ch00ow6f6n njjxe3i', 'author': '[email protected]', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': '[email protected]', 'message': 'Add mike', ' timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gav [email protected]', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33) }, {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': '[email protected]', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '9egc4h0m36l5rbq1alr1fki6jbfu kuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timstamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}] Returns ------- list """ if max_history < 0: raise ValueError("max_history needs to be non-negative.") if max_history > 1: limit_history = max_history - 1 else: limit_history = 1 woql_query = ( WOQLQuery() .using("_commits") .limit(limit_history) .triple("v:branch", "name", WOQLQuery().string(self.branch)) .triple("v:branch", "head", "v:commit") .path("v:commit", "parent*", "v:target_commit") .triple("v:target_commit", "identifier", "v:cid") .triple("v:target_commit", "author", "v:author") .triple("v:target_commit", "message", "v:message") .triple("v:target_commit", "timestamp", "v:timestamp") ) result = self.query(woql_query).get("bindings") if not result: return result else: result_list = [] for result_item in result: result_list.append( { "commit": result_item["cid"]["@value"], "author": result_item["author"]["@value"], "message": result_item["message"]["@value"], "timestamp": datetime.fromtimestamp( int(result_item["timestamp"]["@value"]) ), } ) return result_list def _get_current_commit(self): woql_query = ( WOQLQuery() .using("_commits") .triple("v:branch", "name", WOQLQuery().string(self.branch)) .triple("v:branch", "head", "v:commit") .triple("v:commit", "identifier", "v:cid") ) result = self.query(woql_query) if not result: return None current_commit = result.get("bindings")[0].get("cid").get("@value") return current_commit def _get_target_commit(self, step): woql_query = ( WOQLQuery() .using("_commits") .path( "v:commit", f"parent{{{step},{step}}}", "v:target_commit", ) .triple("v:branch", "name", WOQLQuery().string(self.branch)) .triple("v:branch", "head", "v:commit") .triple("v:target_commit", "identifier", "v:cid") ) result = self.query(woql_query) target_commit = result.get("bindings")[0].get("cid").get("@value") return target_commit def get_all_branches(self, get_data_version=False): """Get all the branches available in the database.""" self._check_connection() api_url = self._documents_url().split("/") api_url = api_url[:-2] api_url = "/".join(api_url) + "/_commits" result = requests.get( api_url, headers={"user-agent": f"terminusdb-client-python/{__version__}"}, params={"type": "Branch"}, auth=self._auth(), ) if get_data_version: result, version = 
_finish_response(result, get_data_version) return list(_result2stream(result)), version return list(_result2stream(_finish_response(result))) def rollback(self, steps=1) -> None: """Curently not implementated. Please check back later. Raises ---------- NotImplementedError Since TerminusDB currently does not support open transactions. This method is not applicable to it's usage. To reset commit head, use WOQLClient.reset """ raise NotImplementedError( "Open transactions are currently not supported. To reset commit head, check WOQLClient.reset" ) def copy(self) -> "WOQLClient": """Create a deep copy of this client. Returns ------- WOQLClient The copied client instance. Examples -------- >>> client = WOQLClient("https://127.0.0.1:6363/") >>> clone = client.copy() >>> assert client is not clone """ return copy.deepcopy(self) def set_db(self, dbid: str, team: Optional[str] = None) -> str: """Set the connection to another database. This will reset the connection. Parameters ---------- dbid : str Database identifer to set in the config. team : str Team identifer to set in the config. If not passed in, it will use the current one. Returns ------- str The current database identifier. Examples -------- >>> client = WOQLClient("https://127.0.0.1:6363") >>> client.set_db("database1") 'database1' """ self._check_connection(check_db=False) if team is None: team = self.team return self.connect( team=team, db=dbid, remote_auth=self._remote_auth, key=self._key, user=self.user, branch=self.branch, ref=self.ref, repo=self.repo, ) def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str: """Create a resource identifier string based on the current config. Parameters ---------- ttype : ResourceType Type of resource. val : str, optional Branch or commit identifier. Returns ------- str The constructed resource string. Examples -------- >>> client = WOQLClient("https://127.0.0.1:6363") >>> client.resource(ResourceType.DB) '<team>/<db>/' >>> client.resource(ResourceType.META) '<team>/<db>/_meta' >>> client.resource(ResourceType.COMMITS) '<team>/<db>/<repo>/_commits' >>> client.resource(ResourceType.REF, "<reference>") '<team>/<db>/<repo>/commit/<reference>' >>> client.resource(ResourceType.BRANCH, "<branch>") '<team>/<db>/<repo>/branch/<branch>' """ base = self.team + "/" + self.db + "/" ref_value = val if val else self.ref branch_value = val if val else self.branch urls = { ResourceType.DB: base, ResourceType.META: f"{base}_meta", ResourceType.REPO: f"{base}{self.repo}/_meta", ResourceType.COMMITS: f"{base}{self.repo}/_commits", ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}", ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}", } return urls[ttype] def _get_prefixes(self): """Get the prefixes for a given database""" self._check_connection() result = requests.get( self._db_base("prefixes"), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, auth=self._auth(), ) return json.loads(_finish_response(result)) def create_database( self, dbid: str, team: Optional[str] = None, label: Optional[str] = None, description: Optional[str] = None, prefixes: Optional[dict] = None, include_schema: bool = True, ) -> None: """Create a TerminusDB database by posting a terminus:Database document to the Terminus Server. Parameters ---------- dbid : str Unique identifier of the database. team : str, optional ID of the Team in which to create the DB (defaults to 'admin') label : str, optional Database name. description : str, optional Database description. 
prefixes : dict, optional Optional dict containing ``"@base"`` and ``"@schema"`` keys. @base (str) IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``. @schema (str) IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``. include_schema : bool If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created. Raises ------ InterfaceError if the client does not connect to a server Examples -------- >>> client = WOQLClient("https://127.0.0.1:6363/") >>> client.create_database("someDB", "admin", "Database Label", "My Description") """ self._check_connection(check_db=False) details: Dict[str, Any] = {} if label: details["label"] = label else: details["label"] = dbid if description: details["comment"] = description else: details["comment"] = "" if include_schema: details["schema"] = True if prefixes: details["prefixes"] = prefixes if team is None: team = self.team self.team = team self._connected = True self.db = dbid _finish_response( requests.post( self._db_url(), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, json=details, auth=self._auth(), ) ) def delete_database( self, dbid: Optional[str] = None, team: Optional[str] = None, force: bool = False, ) -> None: """Delete a TerminusDB database. If ``team`` is provided, then the team in the config will be updated and the new value will be used in future requests to the server. Parameters ---------- dbid : str ID of the database to delete team : str, optional the team in which the database resides (defaults to "admin") force: bool Raises ------ UserWarning If the value of dbid is None. InterfaceError if the client does not connect to a server. Examples ------- >>> client = WOQLClient("https://127.0.0.1:6363/") >>> client.delete_database("<database>", "<team>") """ self._check_connection(check_db=False) if dbid is None: raise UserWarning( f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead." ) self.db = dbid if team is None: warnings.warn( f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}" ) else: self.team = team payload = {"force": force} _finish_response( requests.delete( self._db_url(), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, auth=self._auth(), params=payload, ) ) self.db = None def _validate_graph_type(self, graph_type): if graph_type not in ["instance", "schema"]: raise ValueError("graph_type can only be 'instance' or 'schema'") def get_triples(self, graph_type: str) -> str: """Retrieves the contents of the specified graph as triples encoded in turtle format Parameters ---------- graph_type : str Graph type, either "instance" or "schema". Raises ------ InterfaceError if the client does not connect to a database Returns ------- str """ ### TODO: make triples works again raise InterfaceError("get_triples is temporary not avaliable in this version") self._check_connection() self._validate_graph_type(graph_type) result = requests.get( self._triples_url(graph_type), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, auth=self._auth(), ) return json.loads(_finish_response(result)) def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None: """Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents Parameters ---------- graph_type : str Graph type, either "instance" or "schema". 
turtle Valid set of triples in Turtle format. commit_msg : str Commit message. Raises ------ InterfaceError if the client does not connect to a database """ ### TODO: make triples works again raise InterfaceError( "update_triples is temporary not avaliable in this version" ) self._check_connection() self._validate_graph_type(graph_type) params = {"commit_info": self._generate_commit(commit_msg)} params["turtle"] = turtle result = requests.post( self._triples_url(graph_type), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, params=params, auth=self._auth(), ) return json.loads(_finish_response(result)) def insert_triples( self, graph_type: str, turtle, commit_msg: Optional[str] = None ) -> None: """Inserts into the specified graph with the triples encoded in turtle format. Parameters ---------- graph_type : str Graph type, either "instance" or "schema". turtle Valid set of triples in Turtle format. commit_msg : str Commit message. Raises ------ InterfaceError if the client does not connect to a database """ ### TODO: make triples works again raise InterfaceError( "insert_triples is temporary not avaliable in this version" ) self._check_connection() self._validate_graph_type(graph_type) params = {"commit_info": self._generate_commit(commit_msg)} params["turtle"] = turtle result = requests.put( self._triples_url(graph_type), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, params=params, auth=self._auth(), ) return json.loads(_finish_response(result)) def query_document( self, document_template: dict, graph_type: str = "instance", skip: int = 0, count: Optional[int] = None, as_list: bool = False, get_data_version: bool = False, **kwargs, ) -> Union[Iterable, list]: """Retrieves all documents that match a given document template Parameters ---------- document_template : dict Template for the document that is being retrived graph_type : str, optional Graph type, either "instance" or "schema". as_list: bool If the result returned as list rather than an iterator. get_data_version: bool If the data version of the document(s) should be obtained. If True, the method return the result and the version as a tuple. Raises ------ InterfaceError if the client does not connect to a database Returns ------- Iterable """ self._validate_graph_type(graph_type) self._check_connection() payload = {"query": document_template, "graph_type": graph_type} payload["skip"] = skip if count is not None: payload["count"] = count add_args = ["prefixed", "minimized", "unfold"] for the_arg in add_args: if the_arg in kwargs: payload[the_arg] = kwargs[the_arg] result = requests.post( self._documents_url(), headers={ "user-agent": f"terminusdb-client-python/{__version__}", "X-HTTP-Method-Override": "GET", }, json=payload, auth=self._auth(), ) if get_data_version: result, version = _finish_response(result, get_data_version) return_obj = _result2stream(result) if as_list: return list(return_obj), version else: return return_obj, version return_obj = _result2stream(_finish_response(result)) if as_list: return list(return_obj) else: return return_obj def get_document( self, iri_id: str, graph_type: str = "instance", get_data_version: bool = False, **kwargs, ) -> dict: """Retrieves the document of the iri_id Parameters ---------- iri_id : str Iri id for the docuemnt that is retriving graph_type : str, optional Graph type, either "instance" or "schema". get_data_version: bool If the data version of the document(s) should be obtained. If True, the method return the result and the version as a tuple. 
kwargs: Additional boolean flags for retriving. Currently avaliable: "prefixed", "minimized", "unfold" Raises ------ InterfaceError if the client does not connect to a database Returns ------- dict """ self._validate_graph_type(graph_type) add_args = ["prefixed", "minimized", "unfold"] self._check_connection() payload = {"id": iri_id, "graph_type": graph_type} for the_arg in add_args: if the_arg in kwargs: payload[the_arg] = kwargs[the_arg] result = requests.get( self._documents_url(), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, params=payload, auth=self._auth(), ) if get_data_version: result, version = _finish_response(result, get_data_version) return json.loads(result), version return json.loads(_finish_response(result)) def get_documents_by_type( self, doc_type: str, graph_type: str = "instance", skip: int = 0, count: Optional[int] = None, as_list: bool = False, get_data_version=False, **kwargs, ) -> Union[Iterable, list]: """Retrieves the documents by type Parameters ---------- doc_type : str Specific type for the docuemnts that is retriving graph_type : str, optional Graph type, either "instance" or "schema". skip: int The starting posiion of the returning results, default to be 0 count: int or None The maximum number of returned result, if None (default) it will return all of the avalible result. as_list: bool If the result returned as list rather than an iterator. get_data_version: bool If the version of the document(s) should be obtained. If True, the method return the result and the version as a tuple. kwargs: Additional boolean flags for retriving. Currently avaliable: "prefixed", "unfold" Raises ------ InterfaceError if the client does not connect to a database Returns ------- iterable Stream of dictionaries """ self._validate_graph_type(graph_type) add_args = ["prefixed", "unfold"] self._check_connection() payload = {"type": doc_type, "graph_type": graph_type} payload["skip"] = skip if count is not None: payload["count"] = count for the_arg in add_args: if the_arg in kwargs: payload[the_arg] = kwargs[the_arg] result = requests.get( self._documents_url(), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, params=payload, auth=self._auth(), ) if get_data_version: result, version = _finish_response(result, get_data_version) return_obj = _result2stream(result) if as_list: return list(return_obj), version else: return return_obj, version return_obj = _result2stream(_finish_response(result)) if as_list: return list(return_obj) else: return return_obj def get_all_documents( self, graph_type: str = "instance", skip: int = 0, count: Optional[int] = None, as_list: bool = False, get_data_version: bool = False, **kwargs, ) -> Union[Iterable, list, tuple]: """Retrieves all avalibale the documents Parameters ---------- graph_type : str, optional Graph type, either "instance" or "schema". skip: int The starting posiion of the returning results, default to be 0 count: int or None The maximum number of returned result, if None (default) it will return all of the avalible result. as_list: bool If the result returned as list rather than an iterator. get_data_version: bool If the version of the document(s) should be obtained. If True, the method return the result and the version as a tuple. kwargs: Additional boolean flags for retriving. 
Currently avaliable: "prefixed", "unfold" Raises ------ InterfaceError if the client does not connect to a database Returns ------- iterable Stream of dictionaries """ self._validate_graph_type(graph_type) add_args = ["prefixed", "unfold"] self._check_connection() payload = {"graph_type": graph_type} payload["skip"] = skip if count is not None: payload["count"] = count for the_arg in add_args: if the_arg in kwargs: payload[the_arg] = kwargs[the_arg] result = requests.get( self._documents_url(), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, params=payload, auth=self._auth(), ) if get_data_version: result, version = _finish_response(result, get_data_version) return_obj = _result2stream(result) if as_list: return list(return_obj), version else: return return_obj, version return_obj = _result2stream(_finish_response(result)) if as_list: return list(return_obj) else: return return_obj def get_existing_classes(self): """Get all the existing classes (only ids) in a database.""" all_existing_obj = self.get_all_documents(graph_type="schema") all_existing_class = {} for item in all_existing_obj: if item.get("@id"): all_existing_class[item["@id"]] = item return all_existing_class def _conv_to_dict(self, obj): if isinstance(obj, dict): return _clean_dict(obj) elif hasattr(obj, "to_dict"): return obj.to_dict() elif hasattr(obj, "_to_dict"): if hasattr(obj, "_isinstance") and obj._isinstance: if hasattr(obj.__class__, "_subdocument"): raise ValueError("Subdocument cannot be added directly") return obj._obj_to_dict() else: return obj._to_dict() else: raise ValueError("Object cannot convert to dictionary") def _ref_extract(self, target_key, search_item): if hasattr(search_item, "items"): for key, value in search_item.items(): if key == target_key: yield value if isinstance(value, dict): yield from self._ref_extract(target_key, value) elif isinstance(value, list): for item in value: yield from self._ref_extract(target_key, item) def _convert_dcoument(self, document, graph_type): if isinstance(document, list): new_doc = [] captured = [] referenced = [] for item in document: item_dict = self._conv_to_dict(item) new_doc.append(item_dict) item_capture = item_dict.get("@capture") if item_capture: captured.append(item_capture) referenced += list(self._ref_extract("@ref", item_dict)) referenced = list(set(referenced)) for item in referenced: if item not in captured: raise ValueError( f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)." ) else: if hasattr(document, "to_dict") and graph_type != "schema": raise InterfaceError( "Inserting WOQLSchema object into non-schema graph." ) new_doc = self._conv_to_dict(document) if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)): raise ValueError( "There are uncaptured references. Seems you forgot to submit one or more object(s)." ) return new_doc def insert_document( self, document: Union[ dict, List[dict], "WOQLSchema", # noqa:F821 "DocumentTemplate", # noqa:F821 List["DocumentTemplate"], # noqa:F821 ], graph_type: str = "instance", full_replace: bool = False, commit_msg: Optional[str] = None, last_data_version: Optional[str] = None, compress: Union[str, int] = 1024, ) -> None: """Inserts the specified document(s) Parameters ---------- document: dict or list of dict Document(s) to be inserted. graph_type : str Graph type, either "inference", "instance" or "schema". full_replace:: bool If True then the whole graph will be replaced. 
WARNING: you should also supply the context object as the first element in the list of documents if using this option. commit_msg : str Commit message. last_data_version : str Last version before the update, used to check if the document has been changed unknowingly compress : str or int If it is an integer, size of the data larger than this (in bytes) will be compress with gzip in the request (assume encoding as UTF-8, 0 = always compress). If it is `never` it will never compress the data. Raises ------ InterfaceError if the client does not connect to a database Returns ------- list list of ids of the inseted docuemnts """ self._validate_graph_type(graph_type) self._check_connection() params = self._generate_commit(commit_msg) params["graph_type"] = graph_type if full_replace: params["full_replace"] = "true" else: params["full_replace"] = "false" headers = {"user-agent": f"terminusdb-client-python/{__version__}"} if last_data_version is not None: headers["TerminusDB-Data-Version"] = last_data_version new_doc = self._convert_dcoument(document, graph_type) if len(new_doc) == 0: return elif not isinstance(new_doc, list): new_doc = [new_doc] if full_replace: if new_doc[0].get("@type") != "@context": raise ValueError( "The first item in docuemnt need to be dictionary representing the context object." ) else: if new_doc[0].get("@type") == "@context": warnings.warn( "To replace context, need to use `full_replace` or `replace_document`, skipping context object now." ) new_doc.pop(0) json_string = json.dumps(new_doc).encode("utf-8") if compress != "never" and len(json_string) > compress: headers.update( {"Content-Encoding": "gzip", "Content-Type": "application/json"} ) result = requests.post( self._documents_url(), headers=headers, params=params, data=gzip.compress(json_string), auth=self._auth(), ) else: result = requests.post( self._documents_url(), headers=headers, params=params, json=new_doc, auth=self._auth(), ) result = json.loads(_finish_response(result)) if isinstance(document, list): for idx, item in enumerate(document): if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"): item._backend_id = result[idx][len("terminusdb:///data/") :] return result def replace_document( self, document: Union[ dict, List[dict], "WOQLSchema", # noqa:F821 "DocumentTemplate", # noqa:F821 List["DocumentTemplate"], # noqa:F821 ], graph_type: str = "instance", commit_msg: Optional[str] = None, last_data_version: Optional[str] = None, compress: Union[str, int] = 1024, create: bool = False, ) -> None: """Updates the specified document(s) Parameters ---------- document: dict or list of dict Document(s) to be updated. graph_type : str Graph type, either "instance" or "schema". commit_msg : str Commit message. last_data_version : str Last version before the update, used to check if the document has been changed unknowingly compress : str or int If it is an integer, size of the data larger than this (in bytes) will be compress with gzip in the request (assume encoding as UTF-8, 0 = always compress). If it is `never` it will never compress the data. create : bool Create the document if it does not yet exist. 
Raises ------ InterfaceError if the client does not connect to a database """ self._validate_graph_type(graph_type) self._check_connection() params = self._generate_commit(commit_msg) params["graph_type"] = graph_type params["create"] = "true" if create else "false" headers = {"user-agent": f"terminusdb-client-python/{__version__}"} if last_data_version is not None: headers["TerminusDB-Data-Version"] = last_data_version new_doc = self._convert_dcoument(document, graph_type) json_string = json.dumps(new_doc).encode("utf-8") if compress != "never" and len(json_string) > compress: headers.update( {"Content-Encoding": "gzip", "Content-Type": "application/json"} ) result = requests.put( self._documents_url(), headers=headers, params=params, data=gzip.compress(json_string), auth=self._auth(), ) else: result = requests.put( self._documents_url(), headers=headers, params=params, json=new_doc, auth=self._auth(), ) result = json.loads(_finish_response(result)) if isinstance(document, list): for idx, item in enumerate(document): if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"): item._backend_id = result[idx][len("terminusdb:///data/") :] return result def update_document( self, document: Union[ dict, List[dict], "WOQLSchema", # noqa:F821 "DocumentTemplate", # noqa:F821 List["DocumentTemplate"], # noqa:F821 ], graph_type: str = "instance", commit_msg: Optional[str] = None, last_data_version: Optional[str] = None, compress: Union[str, int] = 1024, ) -> None: """Updates the specified document(s). Add the document if not existed. Parameters ---------- document: dict or list of dict Document(s) to be updated. graph_type : str Graph type, either "instance" or "schema". commit_msg : str Commit message. last_data_version : str Last version before the update, used to check if the document has been changed unknowingly compress : str or int If it is an integer, size of the data larger than this (in bytes) will be compress with gzip in the request (assume encoding as UTF-8, 0 = always compress). If it is `never` it will never compress the data. Raises ------ InterfaceError if the client does not connect to a database """ self.replace_document( document, graph_type, commit_msg, last_data_version, compress, True ) def delete_document( self, document: Union[str, list, dict, Iterable], graph_type: str = "instance", commit_msg: Optional[str] = None, last_data_version: Optional[str] = None, ) -> None: """Delete the specified document(s) Parameters ---------- document: str or list of str Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be updated. graph_type : str Graph type, either "instance" or "schema". commit_msg : str Commit message. 
last_data_version : str Last version before the update, used to check if the document has been changed unknowingly Raises ------ InterfaceError if the client does not connect to a database """ self._validate_graph_type(graph_type) self._check_connection() doc_id = [] if not isinstance(document, (str, list, dict)) and hasattr( document, "__iter__" ): document = list(document) if not isinstance(document, list): document = [document] for doc in document: if hasattr(doc, "_obj_to_dict"): doc = doc._obj_to_dict() if isinstance(doc, dict) and doc.get("@id"): doc_id.append(doc.get("@id")) elif isinstance(doc, str): doc_id.append(doc) params = self._generate_commit(commit_msg) params["graph_type"] = graph_type headers = {"user-agent": f"terminusdb-client-python/{__version__}"} if last_data_version is not None: headers["TerminusDB-Data-Version"] = last_data_version _finish_response( requests.delete( self._documents_url(), headers=headers, params=params, json=doc_id, auth=self._auth(), ) ) def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool: """Check if a certain document exist in a database Parameters ---------- doc_id: str Id of document to be checked. graph_type : str Graph type, either "instance" or "schema". returns ------- Bool if the document exist """ self._validate_graph_type(graph_type) self._check_connection() all_existing_obj = self.get_all_documents(graph_type=graph_type) all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj)) return doc_id in all_existing_id def get_class_frame(self, class_name): """Get the frame of the class of class_name. Provide information about all the avaliable properties of that class. Parameters ---------- class_name: str Name of the class returns ------- dict Dictionary containing information """ self._check_connection() opts = {"type": class_name} result = requests.get( self._class_frame_url(), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, params=opts, auth=self._auth(), ) return json.loads(_finish_response(result)) def commit(self): """Not implementated: open transactions currently not suportted. Please check back later.""" def query( self, woql_query: Union[dict, WOQLQuery], commit_msg: Optional[str] = None, get_data_version: bool = False, last_data_version: Optional[str] = None, # file_dict: Optional[dict] = None, ) -> Union[dict, str]: """Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents Parameters ---------- woql_query : dict or WOQLQuery object A woql query as an object or dict commit_mg : str A message that will be written to the commit log to describe the change get_data_version: bool If the data version of the query result(s) should be obtained. If True, the method return the result and the version as a tuple. 
last_data_version : str Last version before the update, used to check if the document has been changed unknowingly file_dict: **deprecated** File dictionary to be associated with post name => filename, for multipart POST Raises ------ InterfaceError if the client does not connect to a database Examples ------- >>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph") Returns ------- dict """ self._check_connection() query_obj = {"commit_info": self._generate_commit(commit_msg)} if isinstance(woql_query, WOQLQuery): request_woql_query = woql_query.to_dict() else: request_woql_query = woql_query query_obj["query"] = request_woql_query headers = {"user-agent": f"terminusdb-client-python/{__version__}"} if last_data_version is not None: headers["TerminusDB-Data-Version"] = last_data_version result = requests.post( self._query_url(), headers=headers, json=query_obj, auth=self._auth(), ) if get_data_version: result, version = _finish_response(result, get_data_version) result = json.loads(result) else: result = json.loads(_finish_response(result)) if result.get("inserts") or result.get("deletes"): return "Commit successfully made." elif get_data_version: return result, version else: return result def create_branch(self, new_branch_id: str, empty: bool = False) -> None: """Create a branch starting from the current branch. Parameters ---------- new_branch_id : str New branch identifier. empty : bool Create an empty branch if true (no starting commit) Raises ------ InterfaceError if the client does not connect to a database """ self._check_connection() if empty: source = {} elif self.ref: source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"} else: source = { "origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}" } _finish_response( requests.post( self._branch_url(new_branch_id), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, json=source, auth=self._auth(), ) ) def delete_branch(self, branch_id: str) -> None: """Delete a branch Parameters ---------- branch_id : str Branch to delete Raises ------ InterfaceError if the client does not connect to a database """ self._check_connection() _finish_response( requests.delete( self._branch_url(branch_id), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, auth=self._auth(), ) ) def pull( self, remote: str = "origin", remote_branch: Optional[str] = None, message: Optional[str] = None, author: Optional[str] = None, ) -> dict: """Pull updates from a remote repository to the current database. 
Parameters ---------- remote: str remote to pull from, default "origin" remote_branch: str, optional remote branch to pull from, default to be your current barnch message: str, optional optional commit message author: str, optional option to overide the author of the operation Raises ------ InterfaceError if the client does not connect to a database Returns ------- dict Examples -------- >>> client = WOQLClient("https://127.0.0.1:6363/") >>> client.pull() """ self._check_connection() if remote_branch is None: remote_branch = self.branch if author is None: author = self.author if message is None: message = ( f"Pulling from {remote}/{remote_branch} by Python client {__version__}" ) rc_args = { "remote": remote, "remote_branch": remote_branch, "author": author, "message": message, } result = requests.post( self._pull_url(), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, json=rc_args, auth=self._auth(), ) return json.loads(_finish_response(result)) def fetch(self, remote_id: str) -> dict: """Fatch the brach from a remote Parameters ---------- remote_id: str id of the remote Raises ------ InterfaceError if the client does not connect to a database""" self._check_connection() result = requests.post( self._fetch_url(remote_id), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, auth=self._auth(), ) return json.loads(_finish_response(result)) def push( self, remote: str = "origin", remote_branch: Optional[str] = None, message: Optional[str] = None, author: Optional[str] = None, ) -> dict: """Push changes from a branch to a remote repo Parameters ---------- remote: str remote to push to, default "origin" remote_branch: str, optional remote branch to push to, default to be your current barnch message: str, optional optional commit message author: str, optional option to overide the author of the operation Raises ------ InterfaceError if the client does not connect to a database Examples ------- >>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch = "main", author = "admin", message = "commit message"}) Returns ------- dict """ self._check_connection() if remote_branch is None: remote_branch = self.branch if author is None: author = self._author if message is None: message = ( f"Pushing to {remote}/{remote_branch} by Python client {__version__}" ) rc_args = { "remote": remote, "remote_branch": remote_branch, "author": author, "message": message, } result = requests.post( self._push_url(), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, json=rc_args, auth=self._auth(), ) return json.loads(_finish_response(result)) def rebase( self, branch: Optional[str] = None, commit: Optional[str] = None, rebase_source: Optional[str] = None, message: Optional[str] = None, author: Optional[str] = None, ) -> dict: """Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'. Notes ----- The "remote" repo can live in the local database. 
Parameters ---------- branch : str, optional the branch for the rebase rebase_source : str, optional the source branch for the rebase message : str, optional the commit message author : str, optional the commit author Raises ------ InterfaceError if the client does not connect to a database Returns ------- dict Examples -------- >>> client = WOQLClient("https://127.0.0.1:6363/") >>> client.rebase("the_branch") """ self._check_connection() if branch is not None and commit is None: rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch]) elif branch is None and commit is not None: rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit]) elif branch is not None or commit is not None: raise RuntimeError("Cannot specify both branch and commit.") elif rebase_source is None: raise RuntimeError( "Need to specify one of 'branch', 'commit' or the 'rebase_source'" ) if author is None: author = self._author if message is None: message = f"Rebase from {rebase_source} by Python client {__version__}" rc_args = {"rebase_from": rebase_source, "author": author, "message": message} result = requests.post( self._rebase_url(), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, json=rc_args, auth=self._auth(), ) return json.loads(_finish_response(result)) def reset( self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False ) -> None: """Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done. Raises ------ InterfaceError if the client does not connect to a database Notes ----- The "remote" repo can live in the local database. Parameters ---------- commit: string Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when need to go back after a soft reset). soft: bool Flag indicating if the reset if soft, that is referencing to a previous commit instead of resetting to a previous commit in the backend and wipping newer commits. use_path : bool Wheather or not the commit given is an id or path. Default using id and use_path is False. Examples -------- >>> client = WOQLClient("https://127.0.0.1:6363/") >>> client.reset('234980523ffaf93') >>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True) """ self._check_connection() if soft: if use_path: self._ref = commit.split("/")[-1] else: self._ref = commit return None else: self._ref = None if commit is None: return None if use_path: commit_path = commit else: commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}" _finish_response( requests.post( self._reset_url(), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, json={"commit_descriptor": commit_path}, auth=self._auth(), ) ) def optimize(self, path: str) -> None: """Optimize the specified path. Raises ------ InterfaceError if the client does not connect to a database Notes ----- The "remote" repo can live in the local database. Parameters ---------- path : string Path to optimize, for instance admin/database/_meta for the repo graph. 
Examples -------- >>> client = WOQLClient("https://127.0.0.1:6363/") >>> client.optimize('admin/database') # optimise database branch (here main) >>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer) >>> client.optimize('admin/database/local/_commits') # commit graph is optimised """ self._check_connection() _finish_response( requests.post( self._optimize_url(path), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, auth=self._auth(), ) ) def squash( self, message: Optional[str] = None, author: Optional[str] = None, reset: bool = False, ) -> str: """Squash the current branch HEAD into a commit Raises ------ InterfaceError if the client does not connect to a database Notes ----- The "remote" repo can live in the local database. Parameters ---------- message : string Message for the newly created squash commit author : string Author of the commit reset : bool Perform reset after squash Returns ------- str commit id to be reset Examples -------- >>> client = WOQLClient("https://127.0.0.1:6363/") >>> client.connect(user="admin", key="root", team="admin", db="some_db") >>> client.squash('This is a squash commit message!') """ self._check_connection() result = requests.post( self._squash_url(), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, json={"commit_info": self._generate_commit(message, author)}, auth=self._auth(), ) # API response: # {'@type' : 'api:SquashResponse', # 'api:commit' : Commit, # 'api:old_commit' : Old_Commit, # 'api:status' : "api:success"} commit_id = json.loads(_finish_response(result)).get("api:commit") if reset: self.reset(commit_id) return commit_id def _convert_diff_dcoument(self, document): if isinstance(document, list): new_doc = [] for item in document: item_dict = self._conv_to_dict(item) new_doc.append(item_dict) else: new_doc = self._conv_to_dict(document) return new_doc def diff( self, before: Union[ str, dict, List[dict], "WOQLSchema", # noqa:F821 "DocumentTemplate", # noqa:F821 List["DocumentTemplate"], # noqa:F821 ], after: Union[ str, dict, List[dict], "WOQLSchema", # noqa:F821 "DocumentTemplate", # noqa:F821 List["DocumentTemplate"], # noqa:F821 ], document_id: Union[str, None] = None ): """Perform diff on 2 set of document(s), result in a Patch object. Do not connect when using public API. 
Returns ------- obj Patch object Examples -------- >>> client = WOQLClient("https://127.0.0.1:6363/") >>> client.connect(user="admin", key="root", team="admin", db="some_db") >>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}) >>> result.to_json = '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'""" request_dict = {} for key, item in {"before": before, "after": after}.items(): if isinstance(item, str): request_dict[f"{key}_data_version"] = item else: request_dict[key] = self._convert_diff_dcoument(item) if document_id is not None: if "before_data_version" in request_dict: if document_id[:len("terminusdb:///data")] == "terminusdb:///data": request_dict["document_id"] = document_id else: raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}") else: raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object") if self._connected: result = _finish_response( requests.post( self._diff_url(), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, json=request_dict, auth=self._auth(), ) ) else: result = _finish_response( requests.post( self.server_url, headers={"user-agent": f"terminusdb-client-python/{__version__}"}, json=request_dict, ) ) return Patch(json=result) def patch( self, before: Union[ dict, List[dict], "WOQLSchema", # noqa:F821 "DocumentTemplate", # noqa:F821 List["DocumentTemplate"], # noqa:F821 ], patch: Patch, ): """Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph. Do not connect when using public API. Returns ------- dict After object Examples -------- >>> client = WOQLClient("https://127.0.0.1:6363/") >>> client.connect(user="admin", key="root", team="admin", db="some_db") >>> patch_obj = Patch(json='{"name" : { "@op" : "ValueSwap", "@before" : "Jane", "@after": "Janine" }}') >>> result = client.patch({ "@id" : "Person/Jane", "@type" : Person", "name" : "Jane"}, patch_obj) >>> print(result) '{ "@id" : "Person/Jane", "@type" : Person", "name" : "Janine"}'""" request_dict = { "before": self._convert_diff_dcoument(before), "patch": patch.content, } if self._connected: result = _finish_response( requests.post( self._patch_url(), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, json=request_dict, auth=self._auth(), ) ) else: result = _finish_response( requests.post( self.server_url, headers={"user-agent": f"terminusdb-client-python/{__version__}"}, json=request_dict, ) ) return json.loads(result) def clonedb( self, clone_source: str, newid: str, description: Optional[str] = None ) -> None: """Clone a remote repository and create a local copy. Parameters ---------- clone_source : str The source url of the repo to be cloned. newid : str Identifier of the new repository to create. Description : str, optional Optional description about the cloned database. 
Raises ------ InterfaceError if the client does not connect to a database Examples -------- >>> client = WOQLClient("https://127.0.0.1:6363/") >>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db") """ self._check_connection() if description is None: description = f"New database {newid}" rc_args = {"remote_url": clone_source, "label": newid, "comment": description} _finish_response( requests.post( self._clone_url(newid), headers={"user-agent": f"terminusdb-client-python/{__version__}"}, json=rc_args, auth=self._auth(), ) ) def _generate_commit( self, msg: Optional[str] = None, author: Optional[str] = None ) -> dict: """Pack the specified commit info into a dict format expected by the server. Parameters ---------- msg : str Commit message. author : str Commit author. Returns ------- dict Formatted commit info. Examples -------- >>> client = WOQLClient("https://127.0.0.1:6363/") >>> client._generate_commit("<message>", "<author>") {'author': '<author>', 'message': '<message>'} """ if author: mes_author = author else: mes_author = self._author if not msg: msg = f"Commit via python client {__version__}" return {"author": mes_author, "message": msg} def _auth(self): # if https basic if not self._use_token and self._connected and self._key and self.user: return (self.user, self._key) elif self._connected and self._jwt_token is not None: return JWTAuth(self._jwt_token) elif self._connected and self._api_token is not None: return APITokenAuth(self._api_token) elif self._connected: return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"]) else: raise RuntimeError("Client not connected.") # TODO: remote_auth def get_database(self, dbid: str) -> Optional[dict]: """ Returns metadata (id, organization, label, comment) about the requested database Parameters ---------- dbid : str The id of the database Raises ------ InterfaceError if the client does not connect to a server Returns ------- dict or None if not found """ self._check_connection(check_db=False) for this_db in self.get_databases(): if this_db["name"] == dbid: return this_db return None def get_databases(self) -> List[dict]: """ Returns a list of database metadata records for all databases the user has access to Raises ------ InterfaceError if the client does not connect to a server Returns ------- list of dicts """ self._check_connection(check_db=False) result = requests.get( self.api + "/", headers={"user-agent": f"terminusdb-client-python/{__version__}"}, auth=self._auth(), ) return json.loads(_finish_response(result)) def list_databases(self) -> List[Dict]: """ Returns a list of database ids for all databases the user has access to Raises ------ InterfaceError if the client does not connect to a server Returns ------- list of dicts """ self._check_connection(check_db=False) all_dbs = [] for data in self.get_databases(): all_dbs.append(data["name"]) return all_dbs def _db_url_fragment(self): if self._db == "_system": return self._db return f"{self._team}/{self._db}" def _db_base(self, action: str): return f"{self.api}/{action}/{self._db_url_fragment()}" def _branch_url(self, branch_id: str): base_url = self._repo_base("branch") branch_id = urlparse.quote(branch_id) return f"{base_url}/branch/{branch_id}" def _repo_base(self, action: str): return self._db_base(action) + f"/{self._repo}" def _branch_base(self, action: str): base = self._repo_base(action) if self._repo == "_meta": return base if self._branch == "_commits": return base + f"/{self._branch}" elif self.ref: return base + f"/commit/{self._ref}" else: return 
base + f"/branch/{self._branch}" return base def _query_url(self): if self._db == "_system": return self._db_base("woql") return self._branch_base("woql") def _class_frame_url(self): if self._db == "_system": return self._db_base("schema") return self._branch_base("schema") def _documents_url(self): if self._db == "_system": base_url = self._db_base("document") else: base_url = self._branch_base("document") return base_url def _triples_url(self, graph_type="instance"): if self._db == "_system": base_url = self._db_base("triples") else: base_url = self._branch_base("triples") return f"{base_url}/{graph_type}" def _clone_url(self, new_repo_id: str): new_repo_id = urlparse.quote(new_repo_id) return f"{self.api}/clone/{self._team}/{new_repo_id}" def _cloneable_url(self): crl = f"{self.server_url}/{self._team}/{self._db}" return crl def _pull_url(self): return self._branch_base("pull") def _fetch_url(self, remote_name: str): furl = self._branch_base("fetch") remote_name = urlparse.quote(remote_name) return furl + "/" + remote_name + "/_commits" def _rebase_url(self): return self._branch_base("rebase") def _reset_url(self): return self._branch_base("reset") def _optimize_url(self, path: str): path = urlparse.quote(path) return f"{self.api}/optimize/{path}" def _squash_url(self): return self._branch_base("squash") def _diff_url(self): return self._branch_base("diff") def _patch_url(self): return self._branch_base("patch") def _push_url(self): return self._branch_base("push") def _db_url(self): return self._db_base("db")
[]
[]
[ "TERMINUSDB_ACCESS_TOKEN" ]
[]
["TERMINUSDB_ACCESS_TOKEN"]
python
1
0
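Note on the record above: its only constant environment lookup is TERMINUSDB_ACCESS_TOKEN, which the client reads in the last-resort branch of its _auth() helper. The following is a minimal, hypothetical sketch of that lookup pattern in isolation; the function name read_access_token is an illustration and is not part of the client itself.

import os


def read_access_token() -> str:
    # os.environ[...] raises KeyError when the variable is unset, mirroring
    # the client's fallback branch in _auth().
    return os.environ["TERMINUSDB_ACCESS_TOKEN"]


if __name__ == "__main__":
    try:
        token = read_access_token()
        print("token found, length:", len(token))
    except KeyError:
        print("TERMINUSDB_ACCESS_TOKEN is not set")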
pkg/tests/observability_install_test.go
// Copyright (c) 2021 Red Hat, Inc. // Copyright Contributors to the Open Cluster Management project package tests import ( "fmt" "os" "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/stolostron/observability-e2e-test/pkg/kustomize" "github.com/stolostron/observability-e2e-test/pkg/utils" ) func installMCO() { if os.Getenv("SKIP_INSTALL_STEP") == "true" { return } hubClient := utils.NewKubeClient( testOptions.HubCluster.MasterURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext) dynClient := utils.NewKubeClientDynamic( testOptions.HubCluster.MasterURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext) By("Checking MCO operator is existed") podList, err := hubClient.CoreV1().Pods("").List(metav1.ListOptions{LabelSelector: MCO_LABEL}) Expect(len(podList.Items)).To(Equal(1)) Expect(err).NotTo(HaveOccurred()) var ( mcoPod = "" mcoNs = "" ) for _, pod := range podList.Items { mcoPod = pod.GetName() mcoNs = pod.GetNamespace() Expect(string(mcoPod)).NotTo(Equal("")) Expect(string(pod.Status.Phase)).To(Equal("Running")) } // print mco logs if MCO installation failed defer func(testOptions utils.TestOptions, isHub bool, namespace, podName, containerName string, previous bool, tailLines int64) { if testFailed { mcoLogs, err := utils.GetPodLogs(testOptions, isHub, namespace, podName, containerName, previous, tailLines) Expect(err).NotTo(HaveOccurred()) fmt.Fprintf(GinkgoWriter, "[DEBUG] MCO is installed failed, checking MCO operator logs:\n%s\n", mcoLogs) } else { fmt.Fprintf(GinkgoWriter, "[DEBUG] MCO is installed successfully!\n") } }(testOptions, false, mcoNs, mcoPod, "multicluster-observability-operator", false, 1000) By("Checking Required CRDs is existed") Eventually(func() error { return utils.HaveCRDs(testOptions.HubCluster, testOptions.KubeConfig, []string{ "multiclusterobservabilities.observability.open-cluster-management.io", "observatoria.core.observatorium.io", "observabilityaddons.observability.open-cluster-management.io", }) }).Should(Succeed()) Expect(utils.CreateMCONamespace(testOptions)).NotTo(HaveOccurred()) if os.Getenv("IS_CANARY_ENV") == "true" { Expect(utils.CreatePullSecret(testOptions, mcoNs)).NotTo(HaveOccurred()) Expect(utils.CreateObjSecret(testOptions)).NotTo(HaveOccurred()) } //set resource quota and limit range for canary environment to avoid destruct the node yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../observability-gitops/policy"}) Expect(err).NotTo(HaveOccurred()) Expect(utils.Apply(testOptions.HubCluster.MasterURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) if os.Getenv("IS_CANARY_ENV") != "true" { By("Creating the MCO testing RBAC resources") Expect(utils.CreateMCOTestingRBAC(testOptions)).NotTo(HaveOccurred()) } if os.Getenv("SKIP_INTEGRATION_CASES") != "true" { By("Creating MCO instance of v1beta1") v1beta1KustomizationPath := "../../observability-gitops/mco/e2e/v1beta1" yamlB, err = kustomize.Render(kustomize.Options{KustomizationPath: v1beta1KustomizationPath}) Expect(err).NotTo(HaveOccurred()) Expect(utils.Apply(testOptions.HubCluster.MasterURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) By("Waiting for MCO ready status") allPodsIsReady := false Eventually(func() error { instance, err := dynClient.Resource(utils.NewMCOGVRV1BETA1()).Get(MCO_CR_NAME, metav1.GetOptions{}) if err == nil { allPodsIsReady = utils.StatusContainsTypeEqualTo(instance, 
"Ready") if allPodsIsReady { testFailed = false return nil } } testFailed = true if instance != nil && instance.Object != nil { return fmt.Errorf("MCO componnets cannot be running in 20 minutes. check the MCO CR status for the details: %v", instance.Object["status"]) } else { return fmt.Errorf("Wait for reconciling.") } }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) By("Check clustermanagementaddon CR is created") Eventually(func() error { _, err := dynClient.Resource(utils.NewMCOClusterManagementAddonsGVR()).Get("observability-controller", metav1.GetOptions{}) if err != nil { testFailed = true return err } testFailed = false return nil }).Should(Succeed()) By("Check the api conversion is working as expected") v1beta1Tov1beta2GoldenPath := "../../observability-gitops/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml" err = utils.CheckMCOConversion(testOptions, v1beta1Tov1beta2GoldenPath) Expect(err).NotTo(HaveOccurred()) } By("Apply MCO instance of v1beta2") v1beta2KustomizationPath := "../../observability-gitops/mco/e2e/v1beta2" yamlB, err = kustomize.Render(kustomize.Options{KustomizationPath: v1beta2KustomizationPath}) Expect(err).NotTo(HaveOccurred()) // add retry for update mco object failure Eventually(func() error { return utils.Apply(testOptions.HubCluster.MasterURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB) }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) // wait for pod restarting time.Sleep(60 * time.Second) By("Waiting for MCO ready status") Eventually(func() error { err = utils.CheckMCOComponents(testOptions) if err != nil { testFailed = true utils.PrintAllMCOPodsStatus(testOptions) return err } testFailed = false return nil }, EventuallyTimeoutMinute*25, EventuallyIntervalSecond*10).Should(Succeed()) By("Checking placementrule CR is created") Eventually(func() error { _, err := dynClient.Resource(utils.NewOCMPlacementRuleGVR()).Namespace(utils.MCO_NAMESPACE).Get("observability", metav1.GetOptions{}) if err != nil { testFailed = true return err } testFailed = false return nil }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) if os.Getenv("IS_CANARY_ENV") != "true" { // TODO(morvencao): remove the patch from placement is implemented by server foundation. By("Patching the placementrule CR's status") token, err := utils.FetchBearerToken(testOptions) Expect(err).NotTo(HaveOccurred()) Eventually(func() error { err = utils.PatchPlacementRule(testOptions, token) if err != nil { testFailed = true return err } testFailed = false return nil }).Should(Succeed()) } By("Check endpoint-operator and metrics-collector pods are created") Eventually(func() error { err = utils.CheckMCOAddon(testOptions) if err != nil { testFailed = true return err } testFailed = false return nil }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) By("Check clustermanagementaddon CR is created") Eventually(func() error { _, err := dynClient.Resource(utils.NewMCOClusterManagementAddonsGVR()).Get("observability-controller", metav1.GetOptions{}) if err != nil { testFailed = true return err } testFailed = false return nil }).Should(Succeed()) }
[ "\"SKIP_INSTALL_STEP\"", "\"IS_CANARY_ENV\"", "\"IS_CANARY_ENV\"", "\"SKIP_INTEGRATION_CASES\"", "\"IS_CANARY_ENV\"" ]
[]
[ "SKIP_INSTALL_STEP", "IS_CANARY_ENV", "SKIP_INTEGRATION_CASES" ]
[]
["SKIP_INSTALL_STEP", "IS_CANARY_ENV", "SKIP_INTEGRATION_CASES"]
go
3
0
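The Go test in this record gates its install steps on three string flags that are compared against the literal "true". A reduced sketch of the same guard pattern, written in Python for consistency with the other examples in this section; the flag names come from the record, the helper env_flag is illustrative only.

import os


def env_flag(name: str) -> bool:
    # The test above treats only the exact string "true" as enabled.
    return os.getenv(name) == "true"


if __name__ == "__main__":
    for flag in ("SKIP_INSTALL_STEP", "IS_CANARY_ENV", "SKIP_INTEGRATION_CASES"):
        print(flag, "=", env_flag(flag))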
manage.py
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'spotify.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
[]
[]
[]
[]
[]
python
0
0
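The manage.py record above calls os.environ.setdefault, and its argument lists are empty, presumably because setdefault is a write with a fallback rather than a plain lookup. A small sketch of the setdefault behaviour, reusing the settings module name from the file:

import os

# setdefault leaves an existing value untouched and only fills in the default
# when the key is missing.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "spotify.settings")
print(os.environ["DJANGO_SETTINGS_MODULE"])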
001146StepikPyBegin/Stepik001146PyBeginсh02p04st07TASK03_Cube_20200411.py
'''
Cube
Write a program that computes the volume of a cube and the area of its full
surface from a given edge length.
Stepik001146PyBeginсh02p04st07TASK03_Cube_20200411.py
'''
a = int(input())
v = a * a * a
s = 6 * a * a
print("Volume =", v)
print("Total surface area =", s)
[]
[]
[]
[]
[]
python
null
null
null
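A worked check of the formulas in the exercise above (volume a^3, full surface area 6*a^2), with the edge length hard-coded instead of read from input:

a = 3          # example edge length
v = a ** 3     # volume: 27
s = 6 * a * a  # full surface area: 54
print("Volume =", v)
print("Total surface area =", s)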
setup.py
import numpy as np import os import subprocess import sys from setuptools import find_packages, setup from setuptools.command.build_ext import build_ext from Cython.Build import cythonize is_posix = (os.name == "posix") if is_posix: os_name = subprocess.check_output("uname").decode("utf8") if "Darwin" in os_name: os.environ["CFLAGS"] = "-stdlib=libc++ -std=c++11" else: os.environ["CFLAGS"] = "-std=c++11" if os.environ.get('WITHOUT_CYTHON_OPTIMIZATIONS'): os.environ["CFLAGS"] += " -O0" # Avoid a gcc warning below: # cc1plus: warning: command line option ???-Wstrict-prototypes??? is valid # for C/ObjC but not for C++ class BuildExt(build_ext): def build_extensions(self): if os.name != "nt" and '-Wstrict-prototypes' in self.compiler.compiler_so: self.compiler.compiler_so.remove('-Wstrict-prototypes') super().build_extensions() def main(): cpu_count = os.cpu_count() or 8 version = "20211201" packages = find_packages(include=["hummingbot", "hummingbot.*"]) package_data = { "hummingbot": [ "core/cpp/*", "VERSION", "templates/*TEMPLATE.yml" ], } install_requires = [ "0x-contract-addresses", "0x-contract-wrappers", "0x-order-utils", "aioconsole", "aiohttp", "aiokafka", "appdirs", "appnope" "sync-timeout", "cachetools", "certifi", "cryptography", "cython", "cytoolz", "diff-cover", "dydx-python", "dydx-v3-python", "eth-abi", "eth-account", "eth-bloom", "eth-keyfile", "eth-typing", "eth-utils", "ethsnarks-loopring", "flake8", "hexbytes", "importlib-metadata", "mypy-extensions", "numpy", "pandas", "pip", "pre-commit", "prompt-toolkit", "psutil", "pyjwt", "pyperclip", "python-binance==0.7.5", "python-dateutil" "python-telegram-bot", "requests", "rsa", "ruamel-yaml", "scipy", "signalr-client-aio", "simplejson", "six", "sqlalchemy", "tzlocal", "ujson", "web3", "websockets", "yarl", ] cython_kwargs = { "language": "c++", "language_level": 3, } cython_sources = ["hummingbot/**/*.pyx"] if os.path.exists('test'): cython_sources.append("test/**/*.pyx") if os.environ.get('WITHOUT_CYTHON_OPTIMIZATIONS'): compiler_directives = { "optimize.use_switch": False, "optimize.unpack_method_calls": False, } else: compiler_directives = {} if is_posix: cython_kwargs["nthreads"] = cpu_count if "DEV_MODE" in os.environ: version += ".dev1" package_data[""] = [ "*.pxd", "*.pyx", "*.h" ] package_data["hummingbot"].append("core/cpp/*.cpp") if len(sys.argv) > 1 and sys.argv[1] == "build_ext" and is_posix: sys.argv.append(f"--parallel={cpu_count}") setup(name="hummingbot", version=version, description="Hummingbot", url="https://github.com/CoinAlpha/hummingbot", author="CoinAlpha, Inc.", author_email="[email protected]", license="Apache 2.0", packages=packages, package_data=package_data, install_requires=install_requires, ext_modules=cythonize(cython_sources, compiler_directives=compiler_directives, **cython_kwargs), include_dirs=[ np.get_include() ], scripts=[ "bin/hummingbot.py", "bin/hummingbot_quickstart.py" ], cmdclass={'build_ext': BuildExt}, ) if __name__ == "__main__": main()
[]
[]
[ "CFLAGS", "WITHOUT_CYTHON_OPTIMIZATIONS" ]
[]
["CFLAGS", "WITHOUT_CYTHON_OPTIMIZATIONS"]
python
2
0
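The build script in this record mutates CFLAGS in place and uses WITHOUT_CYTHON_OPTIMIZATIONS as an opt-out switch for optimisation. A reduced sketch of that pattern on its own; platform handling is omitted and the values shown are only the non-Darwin baseline from the file.

import os

# Start from a baseline and disable optimisation only when the switch is set.
os.environ["CFLAGS"] = "-std=c++11"
if os.environ.get("WITHOUT_CYTHON_OPTIMIZATIONS"):
    os.environ["CFLAGS"] += " -O0"
print(os.environ["CFLAGS"])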
CI/src/integration_tests/test_site_mounts.py
# Sarus # # Copyright (c) 2018-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause import unittest import subprocess import os import sys import shutil import json import common.util as util class TestSiteMounts(unittest.TestCase): """ These tests verify that using the siteMounts parameter in sarus.json correctly bind-mounts the requested directories into the container. """ TEST_FILES = {"/test_sitefs/": ["a.txt", "b.md", "c.py", "subdir/d.cpp", "subdir/e.pdf"], "/test_resources/": ["f.txt", "g.md", "h.py", "subdir/i.cpp", "subdir/j.pdf"], "/test_files/test_dir/": ["k.txt", "m.md", "l.py", "subdir/m.cpp", "subdir/n.pdf"], "/dev/test_site_mount/": ["testdev0"]} CHECK_TEMPLATE = ("for fname in {files}; do" " if [ ! -f $fname ]; then" " echo \"FAIL\"; " " fi; " "done; " "echo \"PASS\" ") @classmethod def setUpClass(cls): cls._create_source_directories() cls._modify_sarusjson_file() cls.container_image = "quay.io/ethcscs/ubuntu:20.04" util.pull_image_if_necessary(is_centralized_repository=False, image=cls.container_image) @classmethod def tearDownClass(cls): cls._remove_source_directories() cls._restore_sarusjson_file() @classmethod def _create_source_directories(cls): for k, v in cls.TEST_FILES.items(): source_dir = os.getcwd() + k os.makedirs(os.path.join(source_dir, "subdir"), exist_ok=True) for fname in v: open(source_dir+fname, 'w').close() @classmethod def _remove_source_directories(cls): for dir in cls.TEST_FILES.keys(): source_dir = "{}/{}".format(os.getcwd(), dir.split("/")[1]) shutil.rmtree(source_dir) @classmethod def _modify_sarusjson_file(cls): cls._sarusjson_filename = os.environ["CMAKE_INSTALL_PREFIX"] + "/etc/sarus.json" # Create backup subprocess.check_output(["sudo", "cp", cls._sarusjson_filename, cls._sarusjson_filename+'.bak']) # Build site mounts sitemounts = [] for destination_path in cls.TEST_FILES.keys(): source_path = os.getcwd() + destination_path sitemounts.append({"type": "bind", "source": source_path, "destination": destination_path}) # Modify config file with open(cls._sarusjson_filename, 'r') as f: data = f.read().replace('\n', '') config = json.loads(data) config["siteMounts"] = sitemounts data = json.dumps(config) with open("sarus.json.dummy", 'w') as f: f.write(data) subprocess.check_output(["sudo", "cp", "sarus.json.dummy", cls._sarusjson_filename]) os.remove("sarus.json.dummy") @classmethod def _restore_sarusjson_file(cls): subprocess.check_output(["sudo", "cp", cls._sarusjson_filename+'.bak', cls._sarusjson_filename]) def setUp(self): pass def test_sitefs_mounts(self): expected_files = [] for dir,files in self.TEST_FILES.items(): expected_files.extend([dir+f for f in files]) self.assertTrue(self._files_exist_in_container(expected_files, [])) def _files_exist_in_container(self, file_paths, sarus_options): file_names = ['"{}"'.format(fpath) for fpath in file_paths] check_script = self.__class__.CHECK_TEMPLATE.format(files=" ".join(file_names)) command = ["bash", "-c"] + [check_script] out = util.run_command_in_container(is_centralized_repository=False, image=self.__class__.container_image, command=command, options_of_run_command=sarus_options) return out == ["PASS"] if __name__ == "__main__": unittest.main()
[]
[]
[ "CMAKE_INSTALL_PREFIX" ]
[]
["CMAKE_INSTALL_PREFIX"]
python
1
0
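The test in this record derives the location of sarus.json from CMAKE_INSTALL_PREFIX. A minimal sketch of that derivation; the fallback default used here is illustrative and not something the test itself provides.

import os

prefix = os.environ.get("CMAKE_INSTALL_PREFIX", "/usr/local")
sarus_config = os.path.join(prefix, "etc", "sarus.json")
print(sarus_config)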
download_from_google_storage.py
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Download files from Google Storage based on SHA1 sums.""" import hashlib import optparse import os import Queue import re import stat import sys import threading import time import subprocess2 GSUTIL_DEFAULT_PATH = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'third_party', 'gsutil', 'gsutil') # Maps sys.platform to what we actually want to call them. PLATFORM_MAPPING = { 'cygwin': 'win', 'darwin': 'mac', 'linux2': 'linux', 'win32': 'win', } class FileNotFoundError(IOError): pass class InvalidFileError(IOError): pass class InvalidPlatformError(Exception): pass def GetNormalizedPlatform(): """Returns the result of sys.platform accounting for cygwin. Under cygwin, this will always return "win32" like the native Python.""" if sys.platform == 'cygwin': return 'win32' return sys.platform # Common utilities class Gsutil(object): """Call gsutil with some predefined settings. This is a convenience object, and is also immutable.""" def __init__(self, path, boto_path, timeout=None, bypass_prodaccess=False): if not os.path.exists(path): raise FileNotFoundError('GSUtil not found in %s' % path) self.path = path self.timeout = timeout self.boto_path = boto_path self.bypass_prodaccess = bypass_prodaccess def get_sub_env(self): env = os.environ.copy() if self.boto_path == os.devnull: env['AWS_CREDENTIAL_FILE'] = '' env['BOTO_CONFIG'] = '' elif self.boto_path: env['AWS_CREDENTIAL_FILE'] = self.boto_path env['BOTO_CONFIG'] = self.boto_path else: custompath = env.get('AWS_CREDENTIAL_FILE', '~/.boto') + '.depot_tools' custompath = os.path.expanduser(custompath) if os.path.exists(custompath): env['AWS_CREDENTIAL_FILE'] = custompath return env def call(self, *args): cmd = [sys.executable, self.path] if self.bypass_prodaccess: cmd.append('--bypass_prodaccess') cmd.extend(args) return subprocess2.call(cmd, env=self.get_sub_env(), timeout=self.timeout) def check_call(self, *args): cmd = [sys.executable, self.path] if self.bypass_prodaccess: cmd.append('--bypass_prodaccess') cmd.extend(args) ((out, err), code) = subprocess2.communicate( cmd, stdout=subprocess2.PIPE, stderr=subprocess2.PIPE, env=self.get_sub_env(), timeout=self.timeout) # Parse output. status_code_match = re.search('status=([0-9]+)', err) if status_code_match: return (int(status_code_match.group(1)), out, err) if ('You are attempting to access protected data with ' 'no configured credentials.' in err): return (403, out, err) if 'No such object' in err: return (404, out, err) return (code, out, err) def check_bucket_permissions(bucket, gsutil): if not bucket: print >> sys.stderr, 'Missing bucket %s.' return (None, 1) base_url = 'gs://%s' % bucket code, _, ls_err = gsutil.check_call('ls', base_url) if code != 0: print >> sys.stderr, ls_err if code == 403: print >> sys.stderr, 'Got error 403 while authenticating to %s.' % base_url print >> sys.stderr, 'Try running "download_from_google_storage --config".' elif code == 404: print >> sys.stderr, '%s not found.' 
% base_url return (base_url, code) def check_platform(target): """Checks if any parent directory of target matches (win|mac|linux).""" assert os.path.isabs(target) root, target_name = os.path.split(target) if not target_name: return None if target_name in ('linux', 'mac', 'win'): return target_name return check_platform(root) def get_sha1(filename): sha1 = hashlib.sha1() with open(filename, 'rb') as f: while True: # Read in 1mb chunks, so it doesn't all have to be loaded into memory. chunk = f.read(1024*1024) if not chunk: break sha1.update(chunk) return sha1.hexdigest() # Download-specific code starts here def enumerate_work_queue(input_filename, work_queue, directory, recursive, ignore_errors, output, sha1_file, auto_platform): if sha1_file: if not os.path.exists(input_filename): if not ignore_errors: raise FileNotFoundError('%s not found.' % input_filename) print >> sys.stderr, '%s not found.' % input_filename with open(input_filename, 'rb') as f: sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip()) if sha1_match: work_queue.put( (sha1_match.groups(1)[0], input_filename.replace('.sha1', ''))) return 1 if not ignore_errors: raise InvalidFileError('No sha1 sum found in %s.' % input_filename) print >> sys.stderr, 'No sha1 sum found in %s.' % input_filename return 0 if not directory: work_queue.put((input_filename, output)) return 1 work_queue_size = 0 for root, dirs, files in os.walk(input_filename): if not recursive: for item in dirs[:]: dirs.remove(item) else: for exclude in ['.svn', '.git']: if exclude in dirs: dirs.remove(exclude) for filename in files: full_path = os.path.join(root, filename) if full_path.endswith('.sha1'): if auto_platform: # Skip if the platform does not match. target_platform = check_platform(os.path.abspath(full_path)) if not target_platform: err = ('--auto_platform passed in but no platform name found in ' 'the path of %s' % full_path) if not ignore_errors: raise InvalidFileError(err) print >> sys.stderr, err continue current_platform = PLATFORM_MAPPING[sys.platform] if current_platform != target_platform: continue with open(full_path, 'rb') as f: sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip()) if sha1_match: work_queue.put( (sha1_match.groups(1)[0], full_path.replace('.sha1', ''))) work_queue_size += 1 else: if not ignore_errors: raise InvalidFileError('No sha1 sum found in %s.' % filename) print >> sys.stderr, 'No sha1 sum found in %s.' % filename return work_queue_size def _downloader_worker_thread(thread_num, q, force, base_url, gsutil, out_q, ret_codes, verbose): while True: input_sha1_sum, output_filename = q.get() if input_sha1_sum is None: return if os.path.exists(output_filename) and not force: if get_sha1(output_filename) == input_sha1_sum: if verbose: out_q.put( '%d> File %s exists and SHA1 matches. Skipping.' % ( thread_num, output_filename)) continue # Check if file exists. file_url = '%s/%s' % (base_url, input_sha1_sum) if gsutil.check_call('ls', file_url)[0] != 0: out_q.put('%d> File %s for %s does not exist, skipping.' % ( thread_num, file_url, output_filename)) ret_codes.put((1, 'File %s for %s does not exist.' % ( file_url, output_filename))) continue # Fetch the file. out_q.put('%d> Downloading %s...' % (thread_num, output_filename)) try: os.remove(output_filename) # Delete the file if it exists already. except OSError: if os.path.exists(output_filename): out_q.put('%d> Warning: deleting %s failed.' 
% ( thread_num, output_filename)) code, _, err = gsutil.check_call('cp', '-q', file_url, output_filename) if code != 0: out_q.put('%d> %s' % (thread_num, err)) ret_codes.put((code, err)) # Set executable bit. if sys.platform == 'cygwin': # Under cygwin, mark all files as executable. The executable flag in # Google Storage will not be set when uploading from Windows, so if # this script is running under cygwin and we're downloading an # executable, it will be unrunnable from inside cygwin without this. st = os.stat(output_filename) os.chmod(output_filename, st.st_mode | stat.S_IEXEC) elif sys.platform != 'win32': # On non-Windows platforms, key off of the custom header # "x-goog-meta-executable". # # TODO(hinoka): It is supposedly faster to use "gsutil stat" but that # doesn't appear to be supported by the gsutil currently in our tree. When # we update, this code should use that instead of "gsutil ls -L". code, out, _ = gsutil.check_call('ls', '-L', file_url) if code != 0: out_q.put('%d> %s' % (thread_num, err)) ret_codes.put((code, err)) elif re.search('x-goog-meta-executable:', out): st = os.stat(output_filename) os.chmod(output_filename, st.st_mode | stat.S_IEXEC) def printer_worker(output_queue): while True: line = output_queue.get() # Its plausible we want to print empty lines. if line is None: break print line def download_from_google_storage( input_filename, base_url, gsutil, num_threads, directory, recursive, force, output, ignore_errors, sha1_file, verbose, auto_platform): # Start up all the worker threads. all_threads = [] download_start = time.time() stdout_queue = Queue.Queue() work_queue = Queue.Queue() ret_codes = Queue.Queue() ret_codes.put((0, None)) for thread_num in range(num_threads): t = threading.Thread( target=_downloader_worker_thread, args=[thread_num, work_queue, force, base_url, gsutil, stdout_queue, ret_codes, verbose]) t.daemon = True t.start() all_threads.append(t) printer_thread = threading.Thread(target=printer_worker, args=[stdout_queue]) printer_thread.daemon = True printer_thread.start() # Enumerate our work queue. work_queue_size = enumerate_work_queue( input_filename, work_queue, directory, recursive, ignore_errors, output, sha1_file, auto_platform) for _ in all_threads: work_queue.put((None, None)) # Used to tell worker threads to stop. # Wait for all downloads to finish. for t in all_threads: t.join() stdout_queue.put(None) printer_thread.join() # See if we ran into any errors. max_ret_code = 0 for ret_code, message in ret_codes.queue: max_ret_code = max(ret_code, max_ret_code) if message: print >> sys.stderr, message if verbose and not max_ret_code: print 'Success!' if verbose: print 'Downloading %d files took %1f second(s)' % ( work_queue_size, time.time() - download_start) return max_ret_code def main(args): usage = ('usage: %prog [options] target\n' 'Target must be:\n' ' (default) a sha1 sum ([A-Za-z0-9]{40}).\n' ' (-s or --sha1_file) a .sha1 file, containing a sha1 sum on ' 'the first line.\n' ' (-d or --directory) A directory to scan for .sha1 files.') parser = optparse.OptionParser(usage) parser.add_option('-o', '--output', help='Specify the output file name. Defaults to: ' '(a) Given a SHA1 hash, the name is the SHA1 hash. 
' '(b) Given a .sha1 file or directory, the name will ' 'match (.*).sha1.') parser.add_option('-b', '--bucket', help='Google Storage bucket to fetch from.') parser.add_option('-e', '--boto', help='Specify a custom boto file.') parser.add_option('-c', '--no_resume', action='store_true', help='Resume download if file is partially downloaded.') parser.add_option('-f', '--force', action='store_true', help='Force download even if local file exists.') parser.add_option('-i', '--ignore_errors', action='store_true', help='Don\'t throw error if we find an invalid .sha1 file.') parser.add_option('-r', '--recursive', action='store_true', help='Scan folders recursively for .sha1 files. ' 'Must be used with -d/--directory') parser.add_option('-t', '--num_threads', default=1, type='int', help='Number of downloader threads to run.') parser.add_option('-d', '--directory', action='store_true', help='The target is a directory. ' 'Cannot be used with -s/--sha1_file.') parser.add_option('-s', '--sha1_file', action='store_true', help='The target is a file containing a sha1 sum. ' 'Cannot be used with -d/--directory.') parser.add_option('-g', '--config', action='store_true', help='Alias for "gsutil config". Run this if you want ' 'to initialize your saved Google Storage ' 'credentials. This will create a read-only ' 'credentials file in ~/.boto.depot_tools.') parser.add_option('-n', '--no_auth', action='store_true', help='Skip auth checking. Use if it\'s known that the ' 'target bucket is a public bucket.') parser.add_option('-p', '--platform', help='A regular expression that is compared against ' 'Python\'s sys.platform. If this option is specified, ' 'the download will happen only if there is a match.') parser.add_option('-a', '--auto_platform', action='store_true', help='Detects if any parent folder of the target matches ' '(linux|mac|win). If so, the script will only ' 'process files that are in the paths that ' 'that matches the current platform.') parser.add_option('-v', '--verbose', action='store_true', help='Output extra diagnostic and progress information.') (options, args) = parser.parse_args() # Make sure we should run at all based on platform matching. if options.platform: if options.auto_platform: parser.error('--platform can not be specified with --auto_platform') if not re.match(options.platform, GetNormalizedPlatform()): if options.verbose: print('The current platform doesn\'t match "%s", skipping.' % options.platform) return 0 # Set the boto file to /dev/null if we don't need auth. if options.no_auth: options.boto = os.devnull # Make sure gsutil exists where we expect it to. if os.path.exists(GSUTIL_DEFAULT_PATH): gsutil = Gsutil(GSUTIL_DEFAULT_PATH, boto_path=options.boto, bypass_prodaccess=options.no_auth) else: parser.error('gsutil not found in %s, bad depot_tools checkout?' % GSUTIL_DEFAULT_PATH) # Passing in -g/--config will run our copy of GSUtil, then quit. if options.config: return gsutil.call('config', '-r', '-o', os.path.expanduser('~/.boto.depot_tools')) if not args: parser.error('Missing target.') if len(args) > 1: parser.error('Too many targets.') if not options.bucket: parser.error('Missing bucket. 
Specify bucket with --bucket.') if options.sha1_file and options.directory: parser.error('Both --directory and --sha1_file are specified, ' 'can only specify one.') if options.recursive and not options.directory: parser.error('--recursive specified but --directory not specified.') if options.output and options.directory: parser.error('--directory is specified, so --output has no effect.') if (not (options.sha1_file or options.directory) and options.auto_platform): parser.error('--auto_platform must be specified with either ' '--sha1_file or --directory') input_filename = args[0] # Set output filename if not specified. if not options.output and not options.directory: if not options.sha1_file: # Target is a sha1 sum, so output filename would also be the sha1 sum. options.output = input_filename elif options.sha1_file: # Target is a .sha1 file. if not input_filename.endswith('.sha1'): parser.error('--sha1_file is specified, but the input filename ' 'does not end with .sha1, and no --output is specified. ' 'Either make sure the input filename has a .sha1 ' 'extension, or specify --output.') options.output = input_filename[:-5] else: parser.error('Unreachable state.') # Check if output file already exists. if not options.directory and not options.force and not options.no_resume: if os.path.exists(options.output): parser.error('Output file %s exists and --no_resume is specified.' % options.output) # Check we have a valid bucket with valid permissions. base_url, code = check_bucket_permissions(options.bucket, gsutil) if code: return code return download_from_google_storage( input_filename, base_url, gsutil, options.num_threads, options.directory, options.recursive, options.force, options.output, options.ignore_errors, options.sha1_file, options.verbose, options.auto_platform) if __name__ == '__main__': sys.exit(main(sys.argv))
[]
[]
[]
[]
[]
python
0
0
acceptance/openstack/blockstorage/v3/quotaset_test.go
//go:build acceptance || quotasets // +build acceptance quotasets package v3 import ( "os" "testing" "github.com/lxdcc/gophercloud" "github.com/lxdcc/gophercloud/acceptance/clients" "github.com/lxdcc/gophercloud/acceptance/tools" "github.com/lxdcc/gophercloud/openstack/blockstorage/extensions/quotasets" "github.com/lxdcc/gophercloud/openstack/blockstorage/v3/volumetypes" th "github.com/lxdcc/gophercloud/testhelper" ) func TestQuotasetGet(t *testing.T) { clients.RequireAdmin(t) client, projectID := getClientAndProject(t) quotaSet, err := quotasets.Get(client, projectID).Extract() th.AssertNoErr(t, err) tools.PrintResource(t, quotaSet) } func TestQuotasetGetDefaults(t *testing.T) { clients.RequireAdmin(t) client, projectID := getClientAndProject(t) quotaSet, err := quotasets.GetDefaults(client, projectID).Extract() th.AssertNoErr(t, err) tools.PrintResource(t, quotaSet) } func TestQuotasetGetUsage(t *testing.T) { clients.RequireAdmin(t) client, projectID := getClientAndProject(t) quotaSetUsage, err := quotasets.GetUsage(client, projectID).Extract() th.AssertNoErr(t, err) tools.PrintResource(t, quotaSetUsage) } var UpdateQuotaOpts = quotasets.UpdateOpts{ Volumes: gophercloud.IntToPointer(100), Snapshots: gophercloud.IntToPointer(200), Gigabytes: gophercloud.IntToPointer(300), PerVolumeGigabytes: gophercloud.IntToPointer(50), Backups: gophercloud.IntToPointer(2), BackupGigabytes: gophercloud.IntToPointer(300), Groups: gophercloud.IntToPointer(350), Extra: map[string]interface{}{ "volumes_foo": gophercloud.IntToPointer(100), }, } var UpdatedQuotas = quotasets.QuotaSet{ Volumes: 100, Snapshots: 200, Gigabytes: 300, PerVolumeGigabytes: 50, Backups: 2, BackupGigabytes: 300, Groups: 350, } var VolumeTypeIsPublic = true var VolumeTypeCreateOpts = volumetypes.CreateOpts{ Name: "foo", IsPublic: &VolumeTypeIsPublic, Description: "foo", ExtraSpecs: map[string]string{}, } func TestQuotasetUpdate(t *testing.T) { clients.RequireAdmin(t) client, projectID := getClientAndProject(t) // save original quotas orig, err := quotasets.Get(client, projectID).Extract() th.AssertNoErr(t, err) // create volumeType to test volume type quota volumeType, err := volumetypes.Create(client, VolumeTypeCreateOpts).Extract() th.AssertNoErr(t, err) defer func() { restore := quotasets.UpdateOpts{} FillUpdateOptsFromQuotaSet(*orig, &restore) err := volumetypes.Delete(client, volumeType.ID).ExtractErr() th.AssertNoErr(t, err) _, err = quotasets.Update(client, projectID, restore).Extract() th.AssertNoErr(t, err) }() // test Update resultQuotas, err := quotasets.Update(client, projectID, UpdateQuotaOpts).Extract() th.AssertNoErr(t, err) // We dont know the default quotas, so just check if the quotas are not the // same as before newQuotas, err := quotasets.Get(client, projectID).Extract() th.AssertNoErr(t, err) th.AssertEquals(t, resultQuotas.Volumes, newQuotas.Volumes) th.AssertEquals(t, resultQuotas.Extra["volumes_foo"], newQuotas.Extra["volumes_foo"]) // test that resultQuotas.Extra is populated with the 3 new quota types // for the new volumeType foo, don't take into account other volume types count := 0 for k, _ := range resultQuotas.Extra { tools.PrintResource(t, k) switch k { case "volumes_foo", "snapshots_foo", "gigabytes_foo": count += 1 } } th.AssertEquals(t, count, 3) // unpopulate resultQuotas.Extra as it is different per cloud and test // rest of the quotaSet resultQuotas.Extra = map[string]interface{}(nil) th.AssertDeepEquals(t, UpdatedQuotas, *resultQuotas) } func TestQuotasetDelete(t *testing.T) { 
clients.RequireAdmin(t) client, projectID := getClientAndProject(t) // save original quotas orig, err := quotasets.Get(client, projectID).Extract() th.AssertNoErr(t, err) defer func() { restore := quotasets.UpdateOpts{} FillUpdateOptsFromQuotaSet(*orig, &restore) _, err = quotasets.Update(client, projectID, restore).Extract() th.AssertNoErr(t, err) }() // Obtain environment default quotaset values to validate deletion. defaultQuotaSet, err := quotasets.GetDefaults(client, projectID).Extract() th.AssertNoErr(t, err) // Test Delete err = quotasets.Delete(client, projectID).ExtractErr() th.AssertNoErr(t, err) newQuotas, err := quotasets.Get(client, projectID).Extract() th.AssertNoErr(t, err) th.AssertEquals(t, newQuotas.Volumes, defaultQuotaSet.Volumes) } // getClientAndProject reduces boilerplate by returning a new blockstorage v3 // ServiceClient and a project ID obtained from the OS_PROJECT_NAME envvar. func getClientAndProject(t *testing.T) (*gophercloud.ServiceClient, string) { client, err := clients.NewBlockStorageV3Client() th.AssertNoErr(t, err) projectID := os.Getenv("OS_PROJECT_NAME") return client, projectID } func FillUpdateOptsFromQuotaSet(src quotasets.QuotaSet, dest *quotasets.UpdateOpts) { dest.Volumes = &src.Volumes dest.Snapshots = &src.Snapshots dest.Gigabytes = &src.Gigabytes dest.PerVolumeGigabytes = &src.PerVolumeGigabytes dest.Backups = &src.Backups dest.BackupGigabytes = &src.BackupGigabytes dest.Groups = &src.Groups }
[ "\"OS_PROJECT_NAME\"" ]
[]
[ "OS_PROJECT_NAME" ]
[]
["OS_PROJECT_NAME"]
go
1
0
engine/storage/backend/mysql/entity_storage_mysql_test.go
package entitystoragemysql import ( "testing" "os" "github.com/xiaonanln/goworld/engine/common" "github.com/xiaonanln/goworld/engine/gwlog" "github.com/xiaonanln/typeconv" ) func TestMySQLEntityStorage(t *testing.T) { pwd := "123456" if os.Getenv("TRAVIS") != "" { pwd = "" } es, err := OpenMySQL("root:" + pwd + "@tcp(127.0.0.1:3306)/goworld") if err != nil { t.Error(err) } gwlog.Infof("TestMySQLEntityStorage: %v", es) entityID := common.GenEntityID() gwlog.Infof("TESTING ENTITYID: %s", entityID) data, err := es.Read("Avatar", entityID) if data != nil { t.Errorf("should be nil") } testData := map[string]interface{}{ "a": 1, "b": "2", "c": true, "d": 1.11, } es.Write("Avatar", entityID, testData) verifyData, err := es.Read("Avatar", entityID) if err != nil { t.Error(err) } if typeconv.Int(verifyData.(map[string]interface{})["a"]) != 1 { t.Errorf("read wrong data: %v", verifyData) } if verifyData.(map[string]interface{})["b"].(string) != "2" { t.Errorf("read wrong data: %v", verifyData) } if verifyData.(map[string]interface{})["c"].(bool) != true { t.Errorf("read wrong data: %v", verifyData) } if verifyData.(map[string]interface{})["d"].(float64) != 1.11 { t.Errorf("read wrong data: %v", verifyData) } exists, err := es.Exists("Avatar", entityID) if err != nil { t.Error(err) } if !exists { t.Fatalf("should exist") } exists, err = es.Exists("Avatar", common.GenEntityID()) if err != nil { t.Error(err) } if exists { t.Fatalf("should not exist") } avatarIDs, err := es.List("Avatar") if err != nil { t.Error(err) } if len(avatarIDs) == 0 { t.Errorf("Avatar IDs is empty!") } gwlog.Infof("Found avatars saved: %v", avatarIDs) for _, avatarID := range avatarIDs { data, err := es.Read("Avatar", avatarID) if err != nil { t.Error(err) } t.Logf("Read Avatar %s => %v", avatarID, data) } }
[ "\"TRAVIS\"" ]
[]
[ "TRAVIS" ]
[]
["TRAVIS"]
go
1
0
integration-cli/docker_cli_pull_local_test.go
package main import ( "encoding/json" "fmt" "os" "path/filepath" "runtime" "strings" "testing" "github.com/docker/distribution" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema2" "github.com/docker/docker/integration-cli/cli/build" "github.com/opencontainers/go-digest" "gotest.tools/v3/assert" "gotest.tools/v3/icmd" ) // testPullImageWithAliases pulls a specific image tag and verifies that any aliases (i.e., other // tags for the same image) are not also pulled down. // // Ref: docker/docker#8141 func testPullImageWithAliases(c *testing.T) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) var repos []string for _, tag := range []string{"recent", "fresh"} { repos = append(repos, fmt.Sprintf("%v:%v", repoName, tag)) } // Tag and push the same image multiple times. for _, repo := range repos { dockerCmd(c, "tag", "busybox", repo) dockerCmd(c, "push", repo) } // Clear local images store. args := append([]string{"rmi"}, repos...) dockerCmd(c, args...) // Pull a single tag and verify it doesn't bring down all aliases. dockerCmd(c, "pull", repos[0]) dockerCmd(c, "inspect", repos[0]) for _, repo := range repos[1:] { _, _, err := dockerCmdWithError("inspect", repo) assert.ErrorContains(c, err, "", "Image %v shouldn't have been pulled down", repo) } } func (s *DockerRegistrySuite) TestPullImageWithAliases(c *testing.T) { testPullImageWithAliases(c) } func (s *DockerSchema1RegistrySuite) TestPullImageWithAliases(c *testing.T) { testPullImageWithAliases(c) } // testConcurrentPullWholeRepo pulls the same repo concurrently. func testConcurrentPullWholeRepo(c *testing.T) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) var repos []string for _, tag := range []string{"recent", "fresh", "todays"} { repo := fmt.Sprintf("%v:%v", repoName, tag) buildImageSuccessfully(c, repo, build.WithDockerfile(fmt.Sprintf(` FROM busybox ENTRYPOINT ["/bin/echo"] ENV FOO foo ENV BAR bar CMD echo %s `, repo))) dockerCmd(c, "push", repo) repos = append(repos, repo) } // Clear local images store. args := append([]string{"rmi"}, repos...) dockerCmd(c, args...) // Run multiple re-pulls concurrently numPulls := 3 results := make(chan error, numPulls) for i := 0; i != numPulls; i++ { go func() { result := icmd.RunCommand(dockerBinary, "pull", "-a", repoName) results <- result.Error }() } // These checks are separate from the loop above because the check // package is not goroutine-safe. for i := 0; i != numPulls; i++ { err := <-results assert.NilError(c, err, "concurrent pull failed with error: %v", err) } // Ensure all tags were pulled successfully for _, repo := range repos { dockerCmd(c, "inspect", repo) out, _ := dockerCmd(c, "run", "--rm", repo) assert.Equal(c, strings.TrimSpace(out), "/bin/sh -c echo "+repo) } } func (s *DockerRegistrySuite) TestConcurrentPullWholeRepo(c *testing.T) { testConcurrentPullWholeRepo(c) } func (s *DockerSchema1RegistrySuite) TestConcurrentPullWholeRepo(c *testing.T) { testConcurrentPullWholeRepo(c) } // testConcurrentFailingPull tries a concurrent pull that doesn't succeed. 
func testConcurrentFailingPull(c *testing.T) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) // Run multiple pulls concurrently numPulls := 3 results := make(chan error, numPulls) for i := 0; i != numPulls; i++ { go func() { result := icmd.RunCommand(dockerBinary, "pull", repoName+":asdfasdf") results <- result.Error }() } // These checks are separate from the loop above because the check // package is not goroutine-safe. for i := 0; i != numPulls; i++ { err := <-results assert.ErrorContains(c, err, "", "expected pull to fail") } } func (s *DockerRegistrySuite) TestConcurrentFailingPull(c *testing.T) { testConcurrentFailingPull(c) } func (s *DockerSchema1RegistrySuite) TestConcurrentFailingPull(c *testing.T) { testConcurrentFailingPull(c) } // testConcurrentPullMultipleTags pulls multiple tags from the same repo // concurrently. func testConcurrentPullMultipleTags(c *testing.T) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) var repos []string for _, tag := range []string{"recent", "fresh", "todays"} { repo := fmt.Sprintf("%v:%v", repoName, tag) buildImageSuccessfully(c, repo, build.WithDockerfile(fmt.Sprintf(` FROM busybox ENTRYPOINT ["/bin/echo"] ENV FOO foo ENV BAR bar CMD echo %s `, repo))) dockerCmd(c, "push", repo) repos = append(repos, repo) } // Clear local images store. args := append([]string{"rmi"}, repos...) dockerCmd(c, args...) // Re-pull individual tags, in parallel results := make(chan error, len(repos)) for _, repo := range repos { go func(repo string) { result := icmd.RunCommand(dockerBinary, "pull", repo) results <- result.Error }(repo) } // These checks are separate from the loop above because the check // package is not goroutine-safe. for range repos { err := <-results assert.NilError(c, err, "concurrent pull failed with error: %v", err) } // Ensure all tags were pulled successfully for _, repo := range repos { dockerCmd(c, "inspect", repo) out, _ := dockerCmd(c, "run", "--rm", repo) assert.Equal(c, strings.TrimSpace(out), "/bin/sh -c echo "+repo) } } func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *testing.T) { testConcurrentPullMultipleTags(c) } func (s *DockerSchema1RegistrySuite) TestConcurrentPullMultipleTags(c *testing.T) { testConcurrentPullMultipleTags(c) } // testPullIDStability verifies that pushing an image and pulling it back // preserves the image ID. 
func testPullIDStability(c *testing.T) { derivedImage := privateRegistryURL + "/dockercli/id-stability" baseImage := "busybox" buildImageSuccessfully(c, derivedImage, build.WithDockerfile(fmt.Sprintf(` FROM %s ENV derived true ENV asdf true RUN dd if=/dev/zero of=/file bs=1024 count=1024 CMD echo %s `, baseImage, derivedImage))) originalID := getIDByName(c, derivedImage) dockerCmd(c, "push", derivedImage) // Pull out, _ := dockerCmd(c, "pull", derivedImage) if strings.Contains(out, "Pull complete") { c.Fatalf("repull redownloaded a layer: %s", out) } derivedIDAfterPull := getIDByName(c, derivedImage) if derivedIDAfterPull != originalID { c.Fatal("image's ID unexpectedly changed after a repush/repull") } // Make sure the image runs correctly out, _ = dockerCmd(c, "run", "--rm", derivedImage) if strings.TrimSpace(out) != derivedImage { c.Fatalf("expected %s; got %s", derivedImage, out) } // Confirm that repushing and repulling does not change the computed ID dockerCmd(c, "push", derivedImage) dockerCmd(c, "rmi", derivedImage) dockerCmd(c, "pull", derivedImage) derivedIDAfterPull = getIDByName(c, derivedImage) if derivedIDAfterPull != originalID { c.Fatal("image's ID unexpectedly changed after a repush/repull") } // Make sure the image still runs out, _ = dockerCmd(c, "run", "--rm", derivedImage) if strings.TrimSpace(out) != derivedImage { c.Fatalf("expected %s; got %s", derivedImage, out) } } func (s *DockerRegistrySuite) TestPullIDStability(c *testing.T) { testPullIDStability(c) } func (s *DockerSchema1RegistrySuite) TestPullIDStability(c *testing.T) { testPullIDStability(c) } // #21213 func testPullNoLayers(c *testing.T) { repoName := fmt.Sprintf("%v/dockercli/scratch", privateRegistryURL) buildImageSuccessfully(c, repoName, build.WithDockerfile(` FROM scratch ENV foo bar`)) dockerCmd(c, "push", repoName) dockerCmd(c, "rmi", repoName) dockerCmd(c, "pull", repoName) } func (s *DockerRegistrySuite) TestPullNoLayers(c *testing.T) { testPullNoLayers(c) } func (s *DockerSchema1RegistrySuite) TestPullNoLayers(c *testing.T) { testPullNoLayers(c) } func (s *DockerRegistrySuite) TestPullManifestList(c *testing.T) { testRequires(c, NotArm) pushDigest, err := setupImage(c) assert.NilError(c, err, "error setting up image") // Inject a manifest list into the registry manifestList := &manifestlist.ManifestList{ Versioned: manifest.Versioned{ SchemaVersion: 2, MediaType: manifestlist.MediaTypeManifestList, }, Manifests: []manifestlist.ManifestDescriptor{ { Descriptor: distribution.Descriptor{ Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", Size: 3253, MediaType: schema2.MediaTypeManifest, }, Platform: manifestlist.PlatformSpec{ Architecture: "bogus_arch", OS: "bogus_os", }, }, { Descriptor: distribution.Descriptor{ Digest: pushDigest, Size: 3253, MediaType: schema2.MediaTypeManifest, }, Platform: manifestlist.PlatformSpec{ Architecture: runtime.GOARCH, OS: runtime.GOOS, }, }, }, } manifestListJSON, err := json.MarshalIndent(manifestList, "", " ") assert.NilError(c, err, "error marshalling manifest list") manifestListDigest := digest.FromBytes(manifestListJSON) hexDigest := manifestListDigest.Hex() registryV2Path := s.reg.Path() // Write manifest list to blob store blobDir := filepath.Join(registryV2Path, "blobs", "sha256", hexDigest[:2], hexDigest) err = os.MkdirAll(blobDir, 0755) assert.NilError(c, err, "error creating blob dir") blobPath := filepath.Join(blobDir, "data") err = os.WriteFile(blobPath, manifestListJSON, 0644) assert.NilError(c, err, "error writing 
manifest list") // Add to revision store revisionDir := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "revisions", "sha256", hexDigest) err = os.Mkdir(revisionDir, 0755) assert.Assert(c, err == nil, "error creating revision dir") revisionPath := filepath.Join(revisionDir, "link") err = os.WriteFile(revisionPath, []byte(manifestListDigest.String()), 0644) assert.Assert(c, err == nil, "error writing revision link") // Update tag tagPath := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "tags", "latest", "current", "link") err = os.WriteFile(tagPath, []byte(manifestListDigest.String()), 0644) assert.NilError(c, err, "error writing tag link") // Verify that the image can be pulled through the manifest list. out, _ := dockerCmd(c, "pull", repoName) // The pull output includes "Digest: <digest>", so find that matches := digestRegex.FindStringSubmatch(out) assert.Equal(c, len(matches), 2, fmt.Sprintf("unable to parse digest from pull output: %s", out)) pullDigest := matches[1] // Make sure the pushed and pull digests match assert.Equal(c, manifestListDigest.String(), pullDigest) // Was the image actually created? dockerCmd(c, "inspect", repoName) dockerCmd(c, "rmi", repoName) } // #23100 func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuthLoginWithScheme(c *testing.T) { workingDir, err := os.Getwd() assert.NilError(c, err) absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) assert.NilError(c, err) osPath := os.Getenv("PATH") testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) c.Setenv("PATH", testPath) repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) tmp, err := os.MkdirTemp("", "integration-cli-") assert.NilError(c, err) externalAuthConfig := `{ "credsStore": "shell-test" }` configPath := filepath.Join(tmp, "config.json") err = os.WriteFile(configPath, []byte(externalAuthConfig), 0644) assert.NilError(c, err) dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) b, err := os.ReadFile(configPath) assert.NilError(c, err) assert.Assert(c, !strings.Contains(string(b), "\"auth\":")) dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) dockerCmd(c, "--config", tmp, "push", repoName) dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), "https://"+privateRegistryURL) dockerCmd(c, "--config", tmp, "pull", repoName) // likewise push should work repoName2 := fmt.Sprintf("%v/dockercli/busybox:nocreds", privateRegistryURL) dockerCmd(c, "tag", repoName, repoName2) dockerCmd(c, "--config", tmp, "push", repoName2) // logout should work w scheme also because it will be stripped dockerCmd(c, "--config", tmp, "logout", "https://"+privateRegistryURL) } func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuth(c *testing.T) { workingDir, err := os.Getwd() assert.NilError(c, err) absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) assert.NilError(c, err) osPath := os.Getenv("PATH") testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) c.Setenv("PATH", testPath) repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) tmp, err := os.MkdirTemp("", "integration-cli-") assert.NilError(c, err) externalAuthConfig := `{ "credsStore": "shell-test" }` configPath := filepath.Join(tmp, "config.json") err = os.WriteFile(configPath, 
[]byte(externalAuthConfig), 0644) assert.NilError(c, err) dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) b, err := os.ReadFile(configPath) assert.NilError(c, err) assert.Assert(c, !strings.Contains(string(b), "\"auth\":")) dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) dockerCmd(c, "--config", tmp, "push", repoName) dockerCmd(c, "--config", tmp, "pull", repoName) } // TestRunImplicitPullWithNoTag should pull implicitly only the default tag (latest) func (s *DockerRegistrySuite) TestRunImplicitPullWithNoTag(c *testing.T) { testRequires(c, DaemonIsLinux) repo := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) repoTag1 := fmt.Sprintf("%v:latest", repo) repoTag2 := fmt.Sprintf("%v:t1", repo) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoTag1) dockerCmd(c, "tag", "busybox", repoTag2) dockerCmd(c, "push", repo) dockerCmd(c, "rmi", repoTag1) dockerCmd(c, "rmi", repoTag2) out, _ := dockerCmd(c, "run", repo) assert.Assert(c, strings.Contains(out, fmt.Sprintf("Unable to find image '%s:latest' locally", repo))) // There should be only one line for repo, the one with repo:latest outImageCmd, _ := dockerCmd(c, "images", repo) splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n") assert.Equal(c, len(splitOutImageCmd), 2) }
[ "\"PATH\"", "\"PATH\"" ]
[]
[ "PATH" ]
[]
["PATH"]
go
1
0
salesforce/examples/salesforce_versions/salesforce_versions.go
package main import ( "encoding/json" "fmt" "io" "log" "os" "github.com/grokify/goauth/credentials" "github.com/grokify/goauth/salesforce" "github.com/grokify/mogo/config" "github.com/grokify/mogo/fmt/fmtutil" "github.com/grokify/mogo/net/httputilmore" su "github.com/grokify/go-salesforce/clientutil" ) func main() { //err := config.LoadDotEnvSkipEmpty(os.Getenv("ENV_PATH"), "./.env") err := config.LoadDotEnvSkipEmpty("./.env") if err != nil { panic(err) } fmt.Printf(os.Getenv("SALESFORCE_CLIENT_SECRET")) client, err := salesforce.NewClientPassword( credentials.CredentialsOAuth2{ ClientID: os.Getenv("SALESFORCE_CLIENT_ID"), ClientSecret: os.Getenv("SALESFORCE_CLIENT_SECRET"), Username: os.Getenv("SALESFORCE_USERNAME"), Password: fmt.Sprintf("%v%v", os.Getenv("SALESFORCE_PASSWORD"), os.Getenv("SALESFORCE_SECURITY_KEY"))}) if err != nil { panic(err) } sc := salesforce.NewSalesforceClient(client, os.Getenv("SALESFORCE_INSTANCE_NAME")) apiURL := sc.URLBuilder.Build("services/data") resp, err := client.Get(apiURL.String()) if err != nil { panic(err) } httputilmore.PrintResponse(resp, true) if 1 == 1 { cu := su.ClientUtil{ HTTPClient: client, Instance: os.Getenv("SALESFORCE_INSTANCE_NAME"), Version: "v43.0"} resp, err := cu.Describe("ACCOUNT") if err != nil { log.Fatal(err) } //httputilmore.PrintResponse(resp, true) body, err := io.ReadAll(resp.Body) if err != nil { log.Fatal(err) } fmt.Println(string(body)) desc := su.Describe{} err = json.Unmarshal(body, &desc) if err != nil { log.Fatal(err) } fmtutil.PrintJSON(desc) types := map[string]int{} for _, f := range desc.Fields { if v, ok := types[f.Type]; ok { types[f.Type] = v + 1 } else { types[f.Type] = 1 } } fmtutil.PrintJSON(types) } if 1 == 0 { resp, err = sc.ExecSOQL("select id from contact") if err != nil { panic(err) } httputilmore.PrintResponse(resp, true) } if 1 == 0 { err = sc.DeleteContactsAll() if err != nil { panic(err) } } fmt.Println("DONE") }
[ "\"ENV_PATH\"", "\"SALESFORCE_CLIENT_SECRET\"", "\"SALESFORCE_CLIENT_ID\"", "\"SALESFORCE_CLIENT_SECRET\"", "\"SALESFORCE_USERNAME\"", "\"SALESFORCE_PASSWORD\"", "\"SALESFORCE_SECURITY_KEY\"", "\"SALESFORCE_INSTANCE_NAME\"", "\"SALESFORCE_INSTANCE_NAME\"" ]
[]
[ "SALESFORCE_USERNAME", "SALESFORCE_SECURITY_KEY", "ENV_PATH", "SALESFORCE_CLIENT_SECRET", "SALESFORCE_PASSWORD", "SALESFORCE_INSTANCE_NAME", "SALESFORCE_CLIENT_ID" ]
[]
["SALESFORCE_USERNAME", "SALESFORCE_SECURITY_KEY", "ENV_PATH", "SALESFORCE_CLIENT_SECRET", "SALESFORCE_PASSWORD", "SALESFORCE_INSTANCE_NAME", "SALESFORCE_CLIENT_ID"]
go
7
0
simcse/train_unsup.py
# -*- coding: utf-8 -*- # @Time : 2021/6/10 # @Author : kaka import argparse import logging import os from config import Params from datasets import load_dataset import torch import torch.nn.functional as F from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer import numpy as np from SimCSE import SimCSE os.environ["CUDA_VISIBLE_DEVICES"] = "1" def parse_args(): parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) # parser.add_argument("train_file", type=str, help="train text file") # parser.add_argument("--pretrained", type=str, default="hfl/chinese-bert-wwm-ext", help="huggingface pretrained model") # parser.add_argument("--model_out", type=str, default="./finder_model", help="model output path") parser.add_argument("--num_proc", type=int, default=1, help="dataset process thread num") parser.add_argument("--max_length", type=int, default=64, help="sentence max length") parser.add_argument("--batch_size", type=int, default=32, help="batch size") parser.add_argument("--epochs", type=int, default=101, help="epochs") parser.add_argument("--lr", type=float, default=1e-5, help="learning rate") parser.add_argument("--tao", type=float, default=0.05, help="temperature") parser.add_argument("--device", type=str, default="cuda", help="device") parser.add_argument("--display_interval", type=int, default=500, help="display interval") parser.add_argument("--save_interval", type=int, default=10, help="save interval") parser.add_argument("--pool_type", type=str, default="pooler", help="pool_type") parser.add_argument("--dropout_rate", type=float, default=0.3, help="dropout_rate") args = parser.parse_args() return args def read_data(args): with open(Params.dialogues_file, 'r') as f: sentences = f.readlines() dl = DataLoader(sentences, batch_size=args.batch_size) return dl def duplicate_batch(batch, tokenzier, args): ''' 句子进行重复 ''' new_batch = [] for sentence in batch: new_batch.append(sentence) new_batch.append(sentence) batch_encoding = tokenzier(new_batch, padding=True, truncation=True, max_length=args.max_length, return_tensors='pt') return batch_encoding def compute_loss(y_pred, tao=0.05, device="cuda"): idxs = torch.arange(0, y_pred.shape[0], device=device) y_true = idxs + 1 - idxs % 2 * 2 similarities = F.cosine_similarity(y_pred.unsqueeze(1), y_pred.unsqueeze(0), dim=2) similarities = similarities - torch.eye(y_pred.shape[0], device=device) * 1e12 similarities = similarities / tao loss = F.cross_entropy(similarities, y_true) return torch.mean(loss) def train(args): tokenizer = AutoTokenizer.from_pretrained(Params.pretrained_model_path) dl = read_data(args) model = SimCSE(Params.pretrained_model_path, args.pool_type, args.dropout_rate).to(args.device) optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr) model.train() batch_idx = 0 min_loss = 10000000 for epoch_idx in range(args.epochs): epoch_losses = [] for data in tqdm(dl): batch_idx += 1 new_batch_data = duplicate_batch(data, tokenizer, args) pred = model(input_ids=new_batch_data["input_ids"].to(args.device), attention_mask=new_batch_data["attention_mask"].to(args.device), token_type_ids=new_batch_data["token_type_ids"].to(args.device)) loss = compute_loss(pred, args.tao, args.device) optimizer.zero_grad() loss.backward() optimizer.step() loss = loss.item() epoch_losses.append(loss) if batch_idx % args.display_interval == 0: logging.info(f"epoch: {epoch_idx}, batch_idx: {batch_idx}, loss: {loss:>10f}") avg_epoch_loss = np.mean(epoch_losses) if 
avg_epoch_loss < min_loss: min_loss = avg_epoch_loss torch.save({ 'epoch': epoch_idx, 'model_state_dict': model.state_dict(), 'loss': avg_epoch_loss }, Params.simcse_model_path) def main(): args = parse_args() train(args) if __name__ == "__main__": log_fmt = "%(asctime)s|%(name)s|%(levelname)s|%(message)s" logging.basicConfig(level=logging.INFO, format=log_fmt) main()
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
auth/fido2_test.go
package auth_test import ( "context" "os" "os/exec" "path/filepath" "strings" "testing" "github.com/getchill-app/keyring/auth" "github.com/getchill-app/keyring/auth/api" "github.com/getchill-app/keyring/testutil" "github.com/keys-pub/keys-ext/auth/fido2" "github.com/stretchr/testify/require" ) func gopath(t *testing.T) string { cmd := exec.Command("go", "env", "GOPATH") out, err := cmd.Output() require.NoError(t, err) return strings.TrimSpace(string(out)) } func TestFIDO2(t *testing.T) { if os.Getenv("TEST_FIDO2") == "" { t.Skip() } path := testutil.Path() db, err := auth.NewDB(path) require.NoError(t, err) defer func() { _ = os.Remove(path) }() mk := testutil.Seed(0x01) pin := "12345" fido2Path := filepath.Join(gopath(t), "bin", "fido2.so") fido2Plugin, err := fido2.OpenPlugin(fido2Path) if err != nil { t.Skipf("No fido2 plugin found %s", fido2Path) } t.Logf("Generate...") hs, err := auth.GenerateFIDO2HMACSecret(context.TODO(), fido2Plugin, pin, "", "test") require.NoError(t, err) t.Logf("Register...") reg, err := db.RegisterFIDO2HMACSecret(context.TODO(), fido2Plugin, hs, mk, pin) require.NoError(t, err) auths, err := db.ListByType(api.FIDO2HMACSecretType) require.NoError(t, err) require.Equal(t, 1, len(auths)) t.Logf("Auth...") out, mko, err := db.FIDO2HMACSecret(context.TODO(), fido2Plugin, pin) require.NoError(t, err) require.Equal(t, mk, mko) require.Equal(t, out.ID, reg.ID) }
[ "\"TEST_FIDO2\"" ]
[]
[ "TEST_FIDO2" ]
[]
["TEST_FIDO2"]
go
1
0
tests/test_build.py
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import os import unittest from collections import Counter from pathlib import Path # This file groups together tests which look at the code without running it. # When running the tests inside conda's build, the code is not available. in_conda_build = os.environ.get('CONDA_BUILD_STATE', '') == 'TEST' class TestBuild(unittest.TestCase): @unittest.skipIf(in_conda_build, 'In conda build') def test_name_clash(self): # For setup.py, all translation units need distinct names, so we # cannot have foo.cu and foo.cpp, even in different directories. test_dir = Path(__file__).resolve().parent source_dir = test_dir.parent / 'pytorch3d' stems = [] for extension in ['.cu', '.cpp']: files = source_dir.glob(f'**/*{extension}') stems.extend(f.stem for f in files) counter = Counter(stems) for k, v in counter.items(): self.assertEqual(v, 1, f'Too many files with stem {k}.') @unittest.skipIf(in_conda_build, 'In conda build') def test_deprecated_usage(self): # Check certain expressions do not occur in the csrc code test_dir = Path(__file__).resolve().parent source_dir = test_dir.parent / 'pytorch3d' / 'csrc' files = sorted(source_dir.glob('**/*.*')) self.assertGreater(len(files), 4) patterns = ['.type()', '.data()'] for file in files: with open(file) as f: text = f.read() for pattern in patterns: found = pattern in text msg = ( f'{pattern} found in {file.name}' + ', this has been deprecated.' ) self.assertFalse(found, msg) @unittest.skipIf(in_conda_build, 'In conda build') def test_copyright(self): test_dir = Path(__file__).resolve().parent root_dir = test_dir.parent extensions = ('py', 'cu', 'cuh', 'cpp', 'h', 'hpp', 'sh') expect = ( 'Copyright (c) Facebook, Inc. and its affiliates.' + ' All rights reserved.\n' ) for extension in extensions: for i in root_dir.glob(f'**/*.{extension}'): with open(i) as f: firstline = f.readline() if firstline.startswith(('# -*-', '#!')): firstline = f.readline() self.assertTrue( firstline.endswith(expect), f'{i} missing copyright header.', )
[]
[]
[ "CONDA_BUILD_STATE" ]
[]
["CONDA_BUILD_STATE"]
python
1
0
azure-iot-hub/samples/iothub_registry_manager_token_credential_sample.py
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- import sys import os from azure.iot.hub import IoTHubRegistryManager from azure.iot.hub.models import Twin, TwinProperties from azure.identity import DefaultAzureCredential device_id = "test-device" # os.getenv("IOTHUB_DEVICE_ID") def print_device_info(title, iothub_device): print(title + ":") print("device_id = {0}".format(iothub_device.device_id)) print("authentication.type = {0}".format(iothub_device.authentication.type)) print("authentication.symmetric_key = {0}".format(iothub_device.authentication.symmetric_key)) print( "authentication.x509_thumbprint = {0}".format(iothub_device.authentication.x509_thumbprint) ) print("connection_state = {0}".format(iothub_device.connection_state)) print( "connection_state_updated_time = {0}".format(iothub_device.connection_state_updated_time) ) print( "cloud_to_device_message_count = {0}".format(iothub_device.cloud_to_device_message_count) ) print("device_scope = {0}".format(iothub_device.device_scope)) print("etag = {0}".format(iothub_device.etag)) print("generation_id = {0}".format(iothub_device.generation_id)) print("last_activity_time = {0}".format(iothub_device.last_activity_time)) print("status = {0}".format(iothub_device.status)) print("status_reason = {0}".format(iothub_device.status_reason)) print("status_updated_time = {0}".format(iothub_device.status_updated_time)) print("") # DefaultAzureCredential supports different authentication mechanisms and determines # the appropriate credential type based on the environment it is executing in. # It attempts to use multiple credential types in an order until it finds a working credential. 
# - AZURE_URL: The tenant ID in Azure Active Directory url = os.getenv("AZURE_AAD_HOST") # DefaultAzureCredential expects the following three environment variables: # - AZURE_TENANT_ID: The tenant ID in Azure Active Directory # - AZURE_CLIENT_ID: The application (client) ID registered in the AAD tenant # - AZURE_CLIENT_SECRET: The client secret for the registered application credential = DefaultAzureCredential() # This sample creates and uses device with SAS authentication # For other authentication types use the appropriate create and update APIs: # X509: # new_device = iothub_registry_manager.create_device_with_x509(device_id, primary_thumbprint, secondary_thumbprint, status) # device_updated = iothub_registry_manager.update_device_with_X509(device_id, etag, primary_thumbprint, secondary_thumbprint, status) # Certificate authority: # new_device = iothub_registry_manager.create_device_with_certificate_authority(device_id, status) # device_updated = iothub_registry_manager.update_device_with_certificate_authority(self, device_id, etag, status): try: # Create IoTHubRegistryManager iothub_registry_manager = IoTHubRegistryManager.from_token_credential(url, credential) # Create a device primary_key = "aaabbbcccdddeeefffggghhhiiijjjkkklllmmmnnnoo" secondary_key = "111222333444555666777888999000aaabbbcccdddee" device_state = "enabled" new_device = iothub_registry_manager.create_device_with_sas( device_id, primary_key, secondary_key, device_state ) print_device_info("create_device", new_device) # Get device information device = iothub_registry_manager.get_device(device_id) print_device_info("get_device", device) # Delete the device iothub_registry_manager.delete_device(device_id) print("GetServiceStatistics") registry_statistics = iothub_registry_manager.get_service_statistics() print(registry_statistics) print("GetDeviceRegistryStatistics") registry_statistics = iothub_registry_manager.get_device_registry_statistics() print(registry_statistics) except Exception as ex: print("Unexpected error {0}".format(ex)) except KeyboardInterrupt: print("iothub_registry_manager_sample stopped")
[]
[]
[ "IOTHUB_DEVICE_ID", "AZURE_AAD_HOST" ]
[]
["IOTHUB_DEVICE_ID", "AZURE_AAD_HOST"]
python
2
0
main.go
package main import ( "bufio" "flag" "fmt" "log" "os" "strings" "github.com/howeyc/gopass" "github.com/micanzhang/qb/backup" ) var ( // default config location defaultConf string // backup action action string // customize file name name string // directory where file downloaded to dirpath string // page size of list files size int // marker for pagination marker string // search prefix prefix string // args args []string // backup provider provider backup.BackupProvider ) func init() { flag.StringVar(&name, "name", "", "specific name for files") flag.StringVar(&dirpath, "dir", "./", "directory where files downloaded") flag.IntVar(&size, "size", 10, "page size of list files") flag.StringVar(&marker, "marker", "", "marker for pagination") flag.StringVar(&prefix, "search", "", "search file name which only supports prefix search") // set command usage flag.Usage = Usage // init configuration initConf() // check default folder exists or not, if not, create new one if !backup.FileExists(backup.Conf.Path) { if _, err := os.Create(backup.Conf.Path); err != nil { panic(err) } } // pg action [flags] [args] temp := os.Args if len(os.Args) == 1 { flag.Usage() os.Exit(2) } action = os.Args[1] os.Args = os.Args[1:] flag.Parse() args = flag.Args() os.Args = temp } func main() { switch action { case "put": files := args if len(files) == 0 { flag.Usage() return } var keys []string if name != "" { keys = strings.Split(name, ",") if len(keys) != len(files) { log.Fatal("invalid length of names") } } for i, file := range files { key := "" if name != "" && keys[i] != "" { key = keys[i] } err := putFile(file, key) if err != nil { // handle error log.Printf("put file %s, key %s failed: %s", file, key, err) } } case "info": files := args if len(files) == 0 { flag.Usage() return } for _, file := range files { if entry, err := provider.Info(file); err != nil { panic(err) } else { fmt.Printf("info : %+v", entry) } } case "get": if len(args) == 0 { flag.Usage() return } if _, err := os.Stat(dirpath); err != nil { if os.IsNotExist(err) { log.Fatalf("directory of %s not exists", dirpath) } if os.IsPermission(err) { log.Fatalf("permission denied") } log.Fatalf("%s", err) } for _, name := range args { if err := provider.Get(name, dirpath); err != nil { log.Printf("get file %s failed: %s", name, err) } } case "remove": files := args if len(files) == 0 { flag.Usage() return } for _, file := range files { if err := provider.Remove(file); err != nil { panic(err) } } case "list": if err := provider.List(prefix, marker, size); err != nil { panic(err) } default: flag.Usage() } } func Usage() { fmt.Fprintf(os.Stdout, "%s is a cli tools for files backup to cloud storage services, like qiniu.\n", os.Args[0]) fmt.Fprintf(os.Stdout, "\nUsage:\n\n") fmt.Fprintf(os.Stdout, "\t%s command [flags] [arguments]\n", os.Args[0]) fmt.Fprintf(os.Stdout, ` the commands are: put put files to cloud get get files from cloud info get files's info remove remove files list list files `) fmt.Fprintf(os.Stdout, "the flags are:\n\n") flag.PrintDefaults() fmt.Fprintln(os.Stdout) } func putFile(filepath string, key string) (err error) { if key == "" { key, err = backup.FileKey(filepath) if err != nil { return err } } // check out exists or not entry, err := provider.Info(key) if err == nil { // calculate etag etag, err := backup.QEtag(filepath) if err != nil { return err } // same file if entry.Hash == etag { return backup.ErrDuplicated } // same name reader := bufio.NewReader(os.Stdin) READPUTACTION: fmt.Fprintf(os.Stdout, "file: %s already exists!, type 
[A]bort, [O]veride or [R]ename this file?\nOption: ", filepath) text, _ := reader.ReadString('\n') switch strings.TrimSpace(strings.ToLower(text)) { case "a": return nil case "o": return provider.Put(filepath, key) case "r": fmt.Fprint(os.Stdout, "Please type new file name: ") name, _ := reader.ReadString('\n') return putFile(filepath, strings.TrimSpace(name)) default: goto READPUTACTION } } else if err != backup.ErrNotFound { return err } else { return provider.Put(filepath, key) } } func initConf() { defaultDir := fmt.Sprintf("%s/.config/qb", os.Getenv("HOME")) defaultConf = fmt.Sprintf("%s/config.json", defaultDir) backup.Conf = backup.NewConfig() err := backup.Conf.Restore(defaultConf) if err != nil { if os.IsNotExist(err) { if _, err := os.Stat(defaultDir); os.IsNotExist(err) { if err := os.MkdirAll(defaultDir, 0700); err != nil { panic(err) } } if _, err := os.Create(defaultConf); err != nil { fmt.Println(defaultConf) panic(err) } } else { panic(err) } } validated := backup.Conf.Validate() if validated == false { reader := bufio.NewReader(os.Stdin) if backup.Conf.AccessKey == "" { fmt.Fprintf(os.Stdout, "Qiniu Access Key:") ak, err := reader.ReadString('\n') if err != nil { panic(err) } backup.Conf.AccessKey = strings.TrimSpace(ak) } if backup.Conf.Secretkey == "" { fmt.Fprintf(os.Stdout, "Qiniu Secret Key:") sk, err := gopass.GetPasswdMasked() if err != nil { panic(err) } backup.Conf.Secretkey = string(sk) } if backup.Conf.Domain == "" { fmt.Fprintf(os.Stdout, "Qiniu Domain for file download:") domain, err := reader.ReadString('\n') if err != nil { panic(err) } backup.Conf.Domain = strings.TrimSpace(domain) } if backup.Conf.Bucket == "" { fmt.Fprintf(os.Stdout, "Qiniu bucket:") bucket, err := reader.ReadString('\n') if err != nil { panic(err) } backup.Conf.Bucket = strings.TrimSpace(bucket) } } provider = backup.NewQBackup(backup.Conf.AccessKey, backup.Conf.Secretkey, backup.Conf.Domain, backup.Conf.Bucket) if validated == false { err = provider.List("", "", 1) if err != nil { panic(err) } err = backup.Conf.Save(defaultConf) if err != nil { panic(err) } } }
[ "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
api/data/Song.py
import os import json os.environ['LIBROSA_CACHE_DIR'] = '/tmp/librosa_cache' os.environ['LIBROSA_CACHE_LEVEL'] = '30' import librosa import api.data.Constants class Song: def __init__(self, audio_filepath): self._audio_filepath = audio_filepath # Load the audio as a waveform `y` and the sampling rate as `sr` self._y, self._sr = librosa.load(audio_filepath) # Run the default beat tracker self._tempo, self._beat_frames = librosa.beat.beat_track(y=self._y, sr=self._sr) # Convert the frame indices of beat events into timestamps self._beat_times = librosa.frames_to_time(self._beat_frames, sr=self._sr) self._beats_per_minute = round(float("{:.2f}".format(self._tempo))) def get_beat_times(self): return self._beat_times def get_beats_per_minute(self): return self._beats_per_minute def _generate_map(self, output_directory, difficulty_name, map_generator): map_generator.generate() in_events = map_generator.get_events() in_notes = map_generator.get_notes() in_obstacles = map_generator.get_obstacles() data = {} data["_version"] = "1.5.0" data["_beatsPerMinute"] = self.get_beats_per_minute() data["_beatsPerBar"] = 16 data["_noteJumpSpeed"] = 10 data["_shuffle"] = 0 data["_shufflePeriod"] = 0.5 data["_time"] = 0 data["_events"] = in_events data["_notes"] = in_notes data["_obstacles"] = in_obstacles data["_bookmarks"] = [] with open(os.path.join(output_directory, "{}.json".format(difficulty_name)), 'w') as outfile: json.dump(data, outfile) def generate_maps(self, output_directory, map_generators): for (name, _, generator) in map_generators: self._generate_map(output_directory, name, generator) audio_filename = os.path.basename(self._audio_filepath) (audio_name, _) = os.path.splitext(audio_filename) info_data = {} info_data["authorName"] = "KeikakuB" info_data["beatsPerMinute"] = self.get_beats_per_minute() info_data["coverImagePath"] = "cover.jpg" difficulty_levels = [] for (name, rank, generator) in map_generators: difficulty_levels.append({ "audioPath": audio_filename, "difficulty": "Expert", "difficultyLabel": name, "difficultyRank": rank, "jsonPath": "{}{}".format(name, api.data.Constants.MAP_EXTENSION), "offset": 0, "oldOffset": 0 } ) info_data["difficultyLevels"] = difficulty_levels info_data["environmentName"] = "DefaultEnvironment" info_data["previewDuration"] = 10 info_data["previewStartTime"] = 12 info_data["songName"] = audio_name info_data["songSubName"] = "" with open(os.path.join(output_directory, "info.json"), 'w') as outfile: json.dump(info_data, outfile)
[]
[]
[ "LIBROSA_CACHE_DIR", "LIBROSA_CACHE_LEVEL" ]
[]
["LIBROSA_CACHE_DIR", "LIBROSA_CACHE_LEVEL"]
python
2
0
python-daemon/marvin_python_daemon/management/engine.py
#!/usr/bin/env python # coding=utf-8 # Copyright [2020] [Apache Software Foundation] # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import json import os import sys import time import os.path import subprocess import multiprocessing from ..common.profiling import profiling from ..common.data import MarvinData from ..common.log import get_logger from ..common.config import Config, load_conf_from_file logger = get_logger('management.engine') CLAZZES = { "acquisitor": "AcquisitorAndCleaner", "tpreparator": "TrainingPreparator", "trainer": "Trainer", "evaluator": "MetricsEvaluator", "ppreparator": "PredictionPreparator", "predictor": "Predictor", "feedback": "Feedback" } ARTIFACTS = { "AcquisitorAndCleaner": [], "TrainingPreparator": ["initialdataset"], "Trainer": ["dataset"], "MetricsEvaluator": ["dataset", "model"], "PredictionPreparator": ["model", "metrics"], "Predictor": ["model", "metrics"], "Feedback": [] } def dryrun(config, action, profiling): # setting spark configuration directory os.environ["SPARK_CONF_DIR"] = os.path.join( os.environ["SPARK_HOME"], "conf") os.environ["YARN_CONF_DIR"] = os.environ["SPARK_CONF_DIR"] params = read_file('engine.params') messages_file = read_file('engine.messages') feedback_file = read_file('feedback.messages') if action == 'all': pipeline = ['acquisitor', 'tpreparator', 'trainer', 'evaluator', 'ppreparator', 'predictor', 'feedback'] else: pipeline = [action] _dryrun = MarvinDryRun(config=config, messages=[ messages_file, feedback_file]) initial_start_time = time.time() for step in pipeline: _dryrun.execute(clazz=CLAZZES[step], params=params, profiling_enabled=profiling) logger.info("Total Time : {:.2f}s".format( time.time() - initial_start_time)) class MarvinDryRun(object): def __init__(self, config, messages): self.predictor_messages = messages[0] self.feedback_messages = messages[1] self.pmessages = [] self.package_name = config['marvin_package'] def execute(self, clazz, params, profiling_enabled=False): self.print_start_step(clazz) _Step = dynamic_import("{}.{}".format(self.package_name, clazz)) kwargs = generate_kwargs(self.package_name, _Step, params) step = _Step(**kwargs) def call_online_actions(step, msg, msg_idx): if profiling_enabled: with profiling(output_path=".profiling", uid=clazz) as prof: result = step.execute(input_message=msg, params=params) prof.disable logger.info( "\nProfile images created in {}\n".format(prof.image_path)) else: result = step.execute(input_message=msg, params=params) return result if clazz == 'PredictionPreparator': for idx, msg in enumerate(self.predictor_messages): self.pmessages.append(call_online_actions(step, msg, idx)) elif clazz == 'Feedback': for idx, msg in enumerate(self.feedback_messages): self.pmessages.append(call_online_actions(step, msg, idx)) elif clazz == 'Predictor': self.execute("PredictionPreparator", params) self.pmessages = self.messages if not self.pmessages else self.pmessages for idx, msg in enumerate(self.pmessages): call_online_actions(step, msg, idx) else: if 
profiling_enabled: with profiling(output_path=".profiling", uid=clazz) as prof: step.execute(params=params) prof.disable logger.info( "\nProfile images created in {}\n".format(prof.image_path)) else: step.execute(params=params) self.print_finish_step() def print_finish_step(self): logger.info("STEP TAKES {:.4f} (seconds) ".format( (time.time() - self.start_time))) def print_start_step(self, name): logger.info("MARVIN DRYRUN - STEP [{}]".format(name)) self.start_time = time.time() def dynamic_import(clazz): components = clazz.split('.') mod = __import__(components[0]) for comp in components[1:]: mod = getattr(mod, comp) return mod def read_file(filename): fname = os.path.join("", filename) if os.path.exists(fname): logger.info("Engine file {} loaded!".format(filename)) with open(fname, 'r') as fp: return json.load(fp) else: logger.info("Engine file {} doesn't exists...".format(filename)) return {} def generate_kwargs(package_name, clazz, params=None, initial_dataset='initialdataset', dataset='dataset', model='model', metrics='metrics'): kwargs = {} kwargs["persistence_mode"] = 'local' kwargs["default_root_path"] = os.path.join( os.getenv('MARVIN_DATA_PATH'), '.artifacts') kwargs["is_remote_calling"] = True _artifact_folder = package_name.replace( 'marvin_', '').replace('_engine', '') _artifacts_to_load = ARTIFACTS[clazz.__name__] logger.debug("clazz: {0}, artifacts to load: {1}".format(clazz, str(_artifacts_to_load))) if params: kwargs["params"] = params if dataset in _artifacts_to_load: kwargs["dataset"] = clazz.retrieve_obj(os.path.join(kwargs["default_root_path"], _artifact_folder, dataset)) if initial_dataset in _artifacts_to_load: kwargs["initial_dataset"] = clazz.retrieve_obj(os.path.join(kwargs["default_root_path"], _artifact_folder, initial_dataset)) if model in _artifacts_to_load: kwargs["model"] = clazz.retrieve_obj(os.path.join(kwargs["default_root_path"], _artifact_folder, model)) if metrics in _artifacts_to_load: kwargs["metrics"] = clazz.retrieve_obj(os.path.join(kwargs["default_root_path"], _artifact_folder, metrics)) return kwargs class MarvinEngineServer(object): @classmethod def create(self, config, action, port, workers, rpc_workers, params, pipeline): package_name = config['marvin_package'] def create_object(act): clazz = CLAZZES[act] _Action = dynamic_import("{}.{}".format(package_name, clazz)) kwargs = generate_kwargs(package_name, _Action, params) return _Action(**kwargs) root_obj = create_object(action) previous_object = root_obj if pipeline: for step in list(reversed(pipeline)): previous_object._previous_step = create_object(step) previous_object = previous_object._previous_step server = root_obj._prepare_remote_server( port=port, workers=workers, rpc_workers=rpc_workers) logger.info( "Starting GRPC server [{}] for {} Action".format(port, action)) server.start() return server def engine_server(config, action, max_workers, max_rpc_workers): logger.info("Starting server ...") # setting spark configuration directory os.environ["SPARK_CONF_DIR"] = os.path.join( os.environ["SPARK_HOME"], "conf") os.environ["YARN_CONF_DIR"] = os.environ["SPARK_CONF_DIR"] params = read_file('engine.params') metadata = read_file('engine.metadata') default_actions = {action['name'] : action for action in metadata['actions']} if action == 'all': action = default_actions else: action = {action: default_actions[action]} servers = [] for action_name in action.keys(): # initializing server configuration engine_server = MarvinEngineServer.create( config=config, action=action_name, 
port=action[action_name]["port"], workers=max_workers, rpc_workers=max_rpc_workers, params=params, pipeline=action[action_name]["pipeline"] ) servers.append(engine_server) return servers
[]
[]
[ "SPARK_CONF_DIR", "YARN_CONF_DIR", "MARVIN_DATA_PATH", "SPARK_HOME" ]
[]
["SPARK_CONF_DIR", "YARN_CONF_DIR", "MARVIN_DATA_PATH", "SPARK_HOME"]
python
4
0
src/gen-ext-timezones.py
#!/usr/bin/env python3 # @@@LICENSE # # Copyright (c) 2014 LG Electronics, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # LICENSE@@@ import sys, os.path, os from getopt import gnu_getopt as getopt from datetime import datetime from itertools import * import pytz import json from abbrevs import abbrevs def findDST(tz, months = [datetime(datetime.utcnow().year, n+1, 1) for n in range(12)]): try: std = next(dropwhile(lambda m: tz.dst(m).seconds != 0, months)) except StopIteration: # next raises this if empty list raise Exception("Standard time should be present in any time-zone (even in %s)" % (tz)) summer = next(chain(dropwhile(lambda m: tz.dst(m).seconds == 0, months), [None])) return (std, summer) def genTimeZones(do_guess = True): for (cc, zoneIds) in list(pytz.country_timezones.items()): for zoneId in zoneIds: tz = pytz.timezone(zoneId) try: (std, summer) = findDST(tz) except Exception as e: sys.stderr.write("Exception: %s\n Do some magic for %s\n" % (e, tz)) std = datetime(datetime.utcnow().year, 1, 1) if tz.dst(std).seconds != 0: summer = std else: summer = None except StopIteration: raise Exception("Unexpected StopIteration") # use Country from tzdata country = pytz.country_names[cc] info = uiInfo.get(zoneId, None) if info is None: if not do_guess: # so we shouldn't try to guess? 
# lets skip unknown time-zones continue # guess City (zregion, zpoint) = zoneId.split('/',1) if zpoint != country: city = zpoint.replace('_',' ') else: city = '' # guess Description tzname = tz.tzname(std) description = abbrevs.get(tzname, tzname) preferred = False else: country = info.get('Country', country) # allow override city = info['City'] description = info['Description'] preferred = info.get('preferred', False) entry = { 'Country': country, 'CountryCode': cc, 'ZoneID': zoneId, 'supportsDST': 0 if summer is None else 1, 'offsetFromUTC': int(tz.utcoffset(std).total_seconds()/60), 'Description': description, 'City': city } if preferred: entry['preferred'] = True yield entry def genSysZones(): for offset in takewhile(lambda x: x < 12.5, count(-14, 0.5)): offset_str = str(abs(int(offset))) if offset != int(offset): offset_str = offset_str + ":30" if offset > 0: ids = [('Etc/GMT+%s' % offset_str, 'GMT-%s' % offset_str)] elif offset < 0: ids = [('Etc/GMT-%s' % offset_str, 'GMT+%s' % offset_str)] else: ids = [('Etc/' + x, 'GMT') for x in ['GMT-0', 'GMT+0']] for (zoneId, id) in ids: yield { 'Country': '', 'CountryCode': '', 'ZoneID': zoneId, 'supportsDST': 0, 'offsetFromUTC': int(-offset*60), 'Description': id, 'City': '' } ### Parse options output = None source_dir = os.path.curdir is_zoneinfo_default = True def set_zoneinfo_dir(zoneinfo_dir): global is_zoneinfo_default is_zoneinfo_default = False def resource_path(name): if os.path.isabs(name): raise ValueError('Bad path (absolute): %r' % name) name_parts = os.path.split(name) for part in name_parts: if part == os.path.pardir: raise ValueError('Bad path segment: %r' % part) filepath = os.path.join(zoneinfo_dir, *name_parts) return filepath pytz.open_resource = lambda name: open(resource_path(name), 'rb') pytz.resource_exists = lambda name: os.path.exists(resource_path(name)) opts, args = getopt(sys.argv[1:], 'z:o:s:w', longopts=[ 'zoneinfo-dir=', 'output=', 'source-dir=', 'no-guess', 'white-list-only' ]) do_guess = True for (opt, val) in opts: if opt in ('--zoneinfo-dir', '-z'): set_zoneinfo_dir(val) elif opt in ('--output', '-o'): output = val elif opt in ('--source-dir', '-s'): source_dir = val elif opt in ('--no-guess', '--white-list-only', '-w'): do_guess = False # openembedded sets some env variables. lets guess from one of it where is our sysroot. guess_sysroot = os.environ.get('PKG_CONFIG_SYSROOT_DIR') if guess_sysroot is not None and is_zoneinfo_default: set_zoneinfo_dir(os.path.join(guess_sysroot, 'usr', 'share', 'zoneinfo')) ### load reference files mccInfo = json.load(open(os.path.join(source_dir, 'mccInfo.json'), 'rb')) uiInfo = json.load(open(os.path.join(source_dir, 'uiTzInfo.json'), 'rb')) ### load natural timezones from pytz timeZones = list(genTimeZones(do_guess = do_guess)) timeZones.sort(key = (lambda x: x['offsetFromUTC'])) # gen Etc/* time-zones sysZones = list(genSysZones()) content = { 'timeZone': timeZones, 'syszones': sysZones, 'mccInfo': mccInfo } if output is None: import re s = json.dumps(content, ensure_ascii = False, indent = 2) s = re.sub(r'\s+$', '', s, flags = re.MULTILINE) + '\n' sys.stdout.write(s.encode('utf8')) else: s = json.dumps(content, ensure_ascii = False, indent = None, separators = (',', ':')) + '\n' open(output,'wb').write(s.encode('utf8'))
[]
[]
[ "PKG_CONFIG_SYSROOT_DIR" ]
[]
["PKG_CONFIG_SYSROOT_DIR"]
python
1
0
chainer/links/connection/bilinear.py
import numpy from chainer import cuda from chainer.functions.connection import bilinear from chainer import initializers from chainer import link class Bilinear(link.Link): """Bilinear layer that performs tensor multiplication. Bilinear is a primitive link that wraps the :func:`~chainer.functions.bilinear` functions. It holds parameters ``W``, ``V1``, ``V2``, and ``b`` corresponding to the arguments of :func:`~chainer.functions.bilinear`. Args: left_size (int): Dimension of input vector :math:`e^1` (:math:`J`) right_size (int): Dimension of input vector :math:`e^2` (:math:`K`) out_size (int): Dimension of output vector :math:`y` (:math:`L`) nobias (bool): If ``True``, parameters ``V1``, ``V2``, and ``b`` are omitted. initialW (3-D numpy array): Initial value of :math:`W`. Shape of this argument must be ``(left_size, right_size, out_size)``. If ``None``, :math:`W` is initialized by centered Gaussian distribution properly scaled according to the dimension of inputs and outputs. May also be a callable that takes ``numpy.ndarray`` or ``cupy.ndarray`` and edits its value. initial_bias (tuple): Initial values of :math:`V^1`, :math:`V^2` and :math:`b`. The length this argument must be 3. Each element of this tuple must have the shapes of ``(left_size, output_size)``, ``(right_size, output_size)``, and ``(output_size,)``, respectively. If ``None``, :math:`V^1` and :math:`V^2` is initialized by scaled centered Gaussian distributions and :math:`b` is set to :math:`0`. May also be a tuple of callables that take ``numpy.ndarray`` or ``cupy.ndarray`` and edit its value. .. seealso:: See :func:`chainer.functions.bilinear` for details. Attributes: W (~chainer.Variable): Bilinear weight parameter. V1 (~chainer.Variable): Linear weight parameter for the first argument. V2 (~chainer.Variable): Linear weight parameter for the second argument. b (~chainer.Variable): Bias parameter. """ def __init__(self, left_size, right_size, out_size, nobias=False, initialW=None, initial_bias=None): super(Bilinear, self).__init__(W=(left_size, right_size, out_size)) self.in_sizes = (left_size, right_size) self.nobias = nobias # TODO(Kenta OONO): I do not know appropriate way of # initializing weights in tensor network. # This initialization is a modification of # that of Linear function. if isinstance(initialW, (numpy.ndarray, cuda.ndarray)): assert initialW.shape == self.W.data.shape initializers.init_weight(self.W.data, initialW) if not self.nobias: self.add_param('V1', (left_size, out_size)) self.add_param('V2', (right_size, out_size)) self.add_param('b', out_size) if isinstance(initial_bias, tuple): V1, V2, b = initial_bias elif initial_bias is None: V1 = V2 = None b = 0 else: raise ValueError('initial_bias must be tuple or None') if isinstance(V1, (numpy.ndarray, cuda.ndarray)): assert V1.shape == self.V1.data.shape if isinstance(V2, (numpy.ndarray, cuda.ndarray)): assert V2.shape == self.V2.data.shape if isinstance(b, (numpy.ndarray, cuda.ndarray)): assert b.shape == self.b.data.shape initializers.init_weight(self.V1.data, V1) initializers.init_weight(self.V2.data, V2) initializers.init_weight(self.b.data, b) def __call__(self, e1, e2): """Applies the bilinear function to inputs and the internal parameters. Args: e1 (~chainer.Variable): Left input. e2 (~chainer.Variable): Right input. Returns: ~chainer.Variable: Output variable. 
""" if self.nobias: return bilinear.bilinear(e1, e2, self.W) else: return bilinear.bilinear(e1, e2, self.W, self.V1, self.V2, self.b) def zero_grads(self): # Left for backward compatibility self.zerograds()
[]
[]
[]
[]
[]
python
null
null
null
gogen/generate.go
// +build generate package gogen //go:generate mkfunccontrolparamtype -d "This determines whether or not a go subcommand should be run with its output displayed" -t CmdIO -v NoCmdIO -v ShowCmdIO
[]
[]
[]
[]
[]
go
null
null
null
qa/rpc-tests/p2p-acceptblock.py
#!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * import time from test_framework.blocktools import create_block, create_coinbase ''' AcceptBlockTest -- test processing of unrequested blocks. Since behavior differs when receiving unrequested blocks from whitelisted peers versus non-whitelisted peers, this tests the behavior of both (effectively two separate tests running in parallel). Setup: two nodes, node0 and node1, not connected to each other. Node0 does not whitelist localhost, but node1 does. They will each be on their own chain for this test. We have one NodeConn connection to each, test_node and white_node respectively. The test: 1. Generate one block on each node, to leave IBD. 2. Mine a new block on each tip, and deliver to each node from node's peer. The tip should advance. 3. Mine a block that forks the previous block, and deliver to each node from corresponding peer. Node0 should not process this block (just accept the header), because it is unrequested and doesn't have more work than the tip. Node1 should process because this is coming from a whitelisted peer. 4. Send another block that builds on the forking block. Node0 should process this block but be stuck on the shorter chain, because it's missing an intermediate block. Node1 should reorg to this longer chain. 4b.Send 288 more blocks on the longer chain. Node0 should process all but the last block (too far ahead in height). Send all headers to Node1, and then send the last block in that chain. Node1 should accept the block because it's coming from a whitelisted peer. 5. Send a duplicate of the block in #3 to Node0. Node0 should not process the block because it is unrequested, and stay on the shorter chain. 6. Send Node0 an inv for the height 3 block produced in #4 above. Node0 should figure out that Node0 has the missing height 2 block and send a getdata. 7. Send Node0 the missing block again. Node0 should process and the tip should advance. ''' # TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending # p2p messages to a node, generating the messages in the main testing logic. class TestNode(NodeConnCB): def __init__(self): NodeConnCB.__init__(self) self.connection = None self.ping_counter = 1 self.last_pong = msg_pong() def add_connection(self, conn): self.connection = conn # Track the last getdata message we receive (used in the test) def on_getdata(self, conn, message): self.last_getdata = message # Spin until verack message is received from the node. # We use this to signal that our test can begin. This # is called from the testing thread, so it needs to acquire # the global lock. 
def wait_for_verack(self): while True: with mininode_lock: if self.verack_received: return time.sleep(0.05) # Wrapper for the NodeConn's send_message function def send_message(self, message): self.connection.send_message(message) def on_pong(self, conn, message): self.last_pong = message # Sync up with the node after delivery of a block def sync_with_ping(self, timeout=30): self.connection.send_message(msg_ping(nonce=self.ping_counter)) received_pong = False sleep_time = 0.05 while not received_pong and timeout > 0: time.sleep(sleep_time) timeout -= sleep_time with mininode_lock: if self.last_pong.nonce == self.ping_counter: received_pong = True self.ping_counter += 1 return received_pong class AcceptBlockTest(BitcoinTestFramework): def add_options(self, parser): parser.add_option("--testbinary", dest="testbinary", default=os.getenv("ECXD", "eclipsimd"), help="bitcoind binary to test") def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 2 def setup_network(self): # Node0 will be used to test behavior of processing unrequested blocks # from peers which are not whitelisted, while Node1 will be used for # the whitelisted case. self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"], binary=self.options.testbinary)) self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1"], binary=self.options.testbinary)) def run_test(self): # Setup the p2p connections and start up the network thread. test_node = TestNode() # connects to node0 (not whitelisted) white_node = TestNode() # connects to node1 (whitelisted) connections = [] connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)) connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node)) test_node.add_connection(connections[0]) white_node.add_connection(connections[1]) NetworkThread().start() # Start up network handling in another thread # Test logic begins here test_node.wait_for_verack() white_node.wait_for_verack() # 1. Have both nodes mine a block (leave IBD) [ n.generate(1) for n in self.nodes ] tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ] # 2. Send one block that builds on each tip. # This should be accepted. blocks_h2 = [] # the height 2 blocks on each node's chain block_time = get_mocktime() + 1 for i in range(2): blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time + 1)) blocks_h2[i].solve() block_time += 1 test_node.send_message(msg_block(blocks_h2[0])) white_node.send_message(msg_block(blocks_h2[1])) [ x.sync_with_ping() for x in [test_node, white_node] ] assert_equal(self.nodes[0].getblockcount(), 2) assert_equal(self.nodes[1].getblockcount(), 2) print("First height 2 block accepted by both nodes") # 3. Send another block that builds on the original tip. blocks_h2f = [] # Blocks at height 2 that fork off the main chain for i in range(2): blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1)) blocks_h2f[i].solve() test_node.send_message(msg_block(blocks_h2f[0])) white_node.send_message(msg_block(blocks_h2f[1])) [ x.sync_with_ping() for x in [test_node, white_node] ] for x in self.nodes[0].getchaintips(): if x['hash'] == blocks_h2f[0].hash: assert_equal(x['status'], "headers-only") for x in self.nodes[1].getchaintips(): if x['hash'] == blocks_h2f[1].hash: assert_equal(x['status'], "valid-headers") print("Second height 2 block accepted only from whitelisted peer") # 4. Now send another block that builds on the forking chain. 
blocks_h3 = [] for i in range(2): blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1)) blocks_h3[i].solve() test_node.send_message(msg_block(blocks_h3[0])) white_node.send_message(msg_block(blocks_h3[1])) [ x.sync_with_ping() for x in [test_node, white_node] ] # Since the earlier block was not processed by node0, the new block # can't be fully validated. for x in self.nodes[0].getchaintips(): if x['hash'] == blocks_h3[0].hash: assert_equal(x['status'], "headers-only") # But this block should be accepted by node0 since it has more work. try: self.nodes[0].getblock(blocks_h3[0].hash) print("Unrequested more-work block accepted from non-whitelisted peer") except: raise AssertionError("Unrequested more work block was not processed") # Node1 should have accepted and reorged. assert_equal(self.nodes[1].getblockcount(), 3) print("Successfully reorged to length 3 chain from whitelisted peer") # 4b. Now mine 288 more blocks and deliver; all should be processed but # the last (height-too-high) on node0. Node1 should process the tip if # we give it the headers chain leading to the tip. tips = blocks_h3 headers_message = msg_headers() all_blocks = [] # node0's blocks for j in range(2): for i in range(288): next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1) next_block.solve() if j==0: test_node.send_message(msg_block(next_block)) all_blocks.append(next_block) else: headers_message.headers.append(CBlockHeader(next_block)) tips[j] = next_block set_mocktime(get_mocktime() + 2) set_node_times(self.nodes, get_mocktime()) for x in all_blocks: try: self.nodes[0].getblock(x.hash) if x == all_blocks[287]: raise AssertionError("Unrequested block too far-ahead should have been ignored") except: if x == all_blocks[287]: print("Unrequested block too far-ahead not processed") else: raise AssertionError("Unrequested block with more work should have been accepted") headers_message.headers.pop() # Ensure the last block is unrequested white_node.send_message(headers_message) # Send headers leading to tip white_node.send_message(msg_block(tips[1])) # Now deliver the tip try: white_node.sync_with_ping() self.nodes[1].getblock(tips[1].hash) print("Unrequested block far ahead of tip accepted from whitelisted peer") except: raise AssertionError("Unrequested block from whitelisted peer not accepted") # 5. Test handling of unrequested block on the node that didn't process # Should still not be processed (even though it has a child that has more # work). test_node.send_message(msg_block(blocks_h2f[0])) # Here, if the sleep is too short, the test could falsely succeed (if the # node hasn't processed the block by the time the sleep returns, and then # the node processes it and incorrectly advances the tip). # But this would be caught later on, when we verify that an inv triggers # a getdata request for this block. test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 2) print("Unrequested block that would complete more-work chain was ignored") # 6. Try to get node to request the missing block. # Poke the node with an inv for block at height 3 and see if that # triggers a getdata on block 2 (it should if block 2 is missing). 
with mininode_lock: # Clear state so we can check the getdata request test_node.last_getdata = None test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)])) test_node.sync_with_ping() with mininode_lock: getdata = test_node.last_getdata # Check that the getdata includes the right block assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256) print("Inv at tip triggered getdata for unprocessed block") # 7. Send the missing block for the third time (now it is requested) test_node.send_message(msg_block(blocks_h2f[0])) test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 290) print("Successfully reorged to longer chain from non-whitelisted peer") [ c.disconnect_node() for c in connections ] if __name__ == '__main__': AcceptBlockTest().main()
[]
[]
[ "ECXD" ]
[]
["ECXD"]
python
1
0
tools/kafka-console-consumer/kafka-console-consumer.go
package main import ( "flag" "fmt" "log" "os" "os/signal" "strconv" "strings" "sync" "github.com/datatyp/sarama" "github.com/datatyp/sarama/tools/tls" ) var ( brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster") topic = flag.String("topic", "", "REQUIRED: the topic to consume") partitions = flag.String("partitions", "all", "The partitions to consume, can be 'all' or comma-separated numbers") offset = flag.String("offset", "newest", "The offset to start with. Can be `oldest`, `newest`") verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging") tlsEnabled = flag.Bool("tls-enabled", false, "Whether to enable TLS") tlsSkipVerify = flag.Bool("tls-skip-verify", false, "Whether skip TLS server cert verification") tlsClientCert = flag.String("tls-client-cert", "", "Client cert for client authentication (use with -tls-enabled and -tls-client-key)") tlsClientKey = flag.String("tls-client-key", "", "Client key for client authentication (use with tls-enabled and -tls-client-cert)") bufferSize = flag.Int("buffer-size", 256, "The buffer size of the message channel.") logger = log.New(os.Stderr, "", log.LstdFlags) ) func main() { flag.Parse() if *brokerList == "" { printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.") } if *topic == "" { printUsageErrorAndExit("-topic is required") } if *verbose { sarama.Logger = logger } var initialOffset int64 switch *offset { case "oldest": initialOffset = sarama.OffsetOldest case "newest": initialOffset = sarama.OffsetNewest default: printUsageErrorAndExit("-offset should be `oldest` or `newest`") } config := sarama.NewConfig() if *tlsEnabled { tlsConfig, err := tls.NewConfig(*tlsClientCert, *tlsClientKey) if err != nil { printErrorAndExit(69, "Failed to create TLS config: %s", err) } config.Net.TLS.Enable = true config.Net.TLS.Config = tlsConfig config.Net.TLS.Config.InsecureSkipVerify = *tlsSkipVerify } c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), config) if err != nil { printErrorAndExit(69, "Failed to start consumer: %s", err) } partitionList, err := getPartitions(c) if err != nil { printErrorAndExit(69, "Failed to get the list of partitions: %s", err) } var ( messages = make(chan *sarama.ConsumerMessage, *bufferSize) closing = make(chan struct{}) wg sync.WaitGroup ) go func() { signals := make(chan os.Signal, 1) signal.Notify(signals, os.Kill, os.Interrupt) <-signals logger.Println("Initiating shutdown of consumer...") close(closing) }() for _, partition := range partitionList { pc, err := c.ConsumePartition(*topic, partition, initialOffset) if err != nil { printErrorAndExit(69, "Failed to start consumer for partition %d: %s", partition, err) } go func(pc sarama.PartitionConsumer) { <-closing pc.AsyncClose() }(pc) wg.Add(1) go func(pc sarama.PartitionConsumer) { defer wg.Done() for message := range pc.Messages() { messages <- message } }(pc) } go func() { for msg := range messages { fmt.Printf("Partition:\t%d\n", msg.Partition) fmt.Printf("Offset:\t%d\n", msg.Offset) fmt.Printf("Key:\t%s\n", string(msg.Key)) fmt.Printf("Value:\t%s\n", string(msg.Value)) fmt.Println() } }() wg.Wait() logger.Println("Done consuming topic", *topic) close(messages) if err := c.Close(); err != nil { logger.Println("Failed to close consumer: ", err) } } func getPartitions(c sarama.Consumer) ([]int32, error) { if *partitions == "all" { return c.Partitions(*topic) } tmp := strings.Split(*partitions, ",") var pList 
[]int32 for i := range tmp { val, err := strconv.ParseInt(tmp[i], 10, 32) if err != nil { return nil, err } pList = append(pList, int32(val)) } return pList, nil } func printErrorAndExit(code int, format string, values ...interface{}) { fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) fmt.Fprintln(os.Stderr) os.Exit(code) } func printUsageErrorAndExit(format string, values ...interface{}) { fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) fmt.Fprintln(os.Stderr) fmt.Fprintln(os.Stderr, "Available command line options:") flag.PrintDefaults() os.Exit(64) }
[ "\"KAFKA_PEERS\"" ]
[]
[ "KAFKA_PEERS" ]
[]
["KAFKA_PEERS"]
go
1
0
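The consumer above takes its broker list from KAFKA_PEERS whenever -brokers is not given on the command line. Below is a hedged sketch of driving the compiled tool from Go; the binary name, topic, and broker address are placeholders, not values taken from the repository.

package main

import (
	"os"
	"os/exec"
)

func main() {
	// KAFKA_PEERS feeds the default of the -brokers flag, so only -topic is
	// strictly required on the command line.
	cmd := exec.Command("./kafka-console-consumer", "-topic", "access-logs", "-offset", "oldest")
	cmd.Env = append(os.Environ(), "KAFKA_PEERS=localhost:9092")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}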
iot/api-client/end_to_end_example/cloudiot_pubsub_example_server.py
# Copyright 2017 Google Inc. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Sample server that pushes configuration to Google Cloud IoT devices. This example represents a server that consumes telemetry data from multiple Cloud IoT devices. The devices report telemetry data, which the server consumes from a Cloud Pub/Sub topic. The server then decides whether to turn on or off individual devices fans. This example requires the Google Cloud Pub/Sub client library. Install it with $ pip install --upgrade google-cloud-pubsub If you are running this example from a Compute Engine VM, you will have to enable the Cloud Pub/Sub API for your project, which you can do from the Cloud Console. Create a pubsub topic, for example projects/my-project-id/topics/my-topic-name, and a subscription, for example projects/my-project-id/subscriptions/my-topic-subscription. You can then run the example with $ python cloudiot_pubsub_example_server.py \ --project_id=my-project-id \ --pubsub_subscription=my-topic-subscription \ """ import argparse import base64 import json import os import sys from threading import Lock import time from google.cloud import pubsub from google.oauth2 import service_account from googleapiclient import discovery from googleapiclient.errors import HttpError API_SCOPES = ['https://www.googleapis.com/auth/cloud-platform'] API_VERSION = 'v1' DISCOVERY_API = 'https://cloudiot.googleapis.com/$discovery/rest' SERVICE_NAME = 'cloudiot' class Server(object): """Represents the state of the server.""" def __init__(self, service_account_json): credentials = service_account.Credentials.from_service_account_file( service_account_json).with_scopes(API_SCOPES) if not credentials: sys.exit('Could not load service account credential ' 'from {}'.format(service_account_json)) discovery_url = '{}?version={}'.format(DISCOVERY_API, API_VERSION) self._service = discovery.build( SERVICE_NAME, API_VERSION, discoveryServiceUrl=discovery_url, credentials=credentials, cache_discovery=False) # Used to serialize the calls to the # modifyCloudToDeviceConfig REST method. This is needed # because the google-api-python-client library is built on top # of the httplib2 library, which is not thread-safe. For more # details, see: https://developers.google.com/ # api-client-library/python/guide/thread_safety self._update_config_mutex = Lock() def _update_device_config(self, project_id, region, registry_id, device_id, data): """Push the data to the given device as configuration.""" config_data = None print('The device ({}) has a temperature ' 'of: {}'.format(device_id, data['temperature'])) if data['temperature'] < 0: # Turn off the fan. config_data = {'fan_on': False} print('Setting fan state for device', device_id, 'to off.') elif data['temperature'] > 10: # Turn on the fan config_data = {'fan_on': True} print('Setting fan state for device', device_id, 'to on.') else: # Temperature is OK, don't need to push a new config. 
return config_data_json = json.dumps(config_data) body = { # The device configuration specifies a version to update, which # can be used to avoid having configuration updates race. In this # case, you use the special value of 0, which tells Cloud IoT to # always update the config. 'version_to_update': 0, # The data is passed as raw bytes, so you encode it as base64. # Note that the device will receive the decoded string, so you # do not need to base64 decode the string on the device. 'binary_data': base64.b64encode( config_data_json.encode('utf-8')).decode('ascii') } device_name = ('projects/{}/locations/{}/registries/{}/' 'devices/{}'.format( project_id, region, registry_id, device_id)) request = self._service.projects().locations().registries().devices( ).modifyCloudToDeviceConfig(name=device_name, body=body) # The http call for the device config change is thread-locked so # that there aren't competing threads simultaneously using the # httplib2 library, which is not thread-safe. self._update_config_mutex.acquire() try: request.execute() except HttpError as e: # If the server responds with a HtppError, log it here, but # continue so that the message does not stay NACK'ed on the # pubsub channel. print('Error executing ModifyCloudToDeviceConfig: {}'.format(e)) finally: self._update_config_mutex.release() def run(self, project_id, pubsub_subscription): """The main loop. Consumes messages from the Pub/Sub subscription. """ subscriber = pubsub.SubscriberClient() subscription_path = subscriber.subscription_path( project_id, pubsub_subscription) def callback(message): """Logic executed when a message is received from subscribed topic. """ try: data = json.loads(message.data.decode('utf-8')) except ValueError as e: print('Loading Payload ({}) threw an Exception: {}.'.format( message.data, e)) message.ack() return # Get the registry id and device id from the attributes. These are # automatically supplied by IoT, and allow the server to determine # which device sent the event. device_project_id = message.attributes['projectId'] device_registry_id = message.attributes['deviceRegistryId'] device_id = message.attributes['deviceId'] device_region = message.attributes['deviceRegistryLocation'] # Send the config to the device. self._update_device_config( device_project_id, device_region, device_registry_id, device_id, data) # Acknowledge the consumed message. This will ensure that they # are not redelivered to this subscription. message.ack() print('Listening for messages on {}'.format(subscription_path)) subscriber.subscribe(subscription_path, callback=callback) # The subscriber is non-blocking, so keep the main thread from # exiting to allow it to process messages in the background. 
while True: time.sleep(60) def parse_command_line_args(): """Parse command line arguments.""" parser = argparse.ArgumentParser( description='Example of Google Cloud IoT registry and ' 'device management.') # Required arguments parser.add_argument( '--project_id', default=os.environ.get("GOOGLE_CLOUD_PROJECT"), required=True, help='GCP cloud project name.') parser.add_argument( '--pubsub_subscription', required=True, help='Google Cloud Pub/Sub subscription name.') # Optional arguments parser.add_argument( '--service_account_json', default=os.environ.get("GOOGLE_APPLICATION_CREDENTIALS"), help='Path to service account json file.') return parser.parse_args() def main(): args = parse_command_line_args() server = Server(args.service_account_json) server.run(args.project_id, args.pubsub_subscription) if __name__ == '__main__': main()
[]
[]
[ "GOOGLE_CLOUD_PROJECT", "GOOGLE_APPLICATION_CREDENTIALS" ]
[]
["GOOGLE_CLOUD_PROJECT", "GOOGLE_APPLICATION_CREDENTIALS"]
python
2
0
cmd/virtual-kubelet/internal/commands/root/tracing_register_ocagent.go
// Copyright © 2017 The virtual-kubelet authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build !no_ocagent_exporter package root import ( "os" "contrib.go.opencensus.io/exporter/ocagent" "github.com/nuczzz/virtual-kubelet/errdefs" "go.opencensus.io/trace" ) func init() { RegisterTracingExporter("ocagent", NewOCAgentExporter) } // NewOCAgentExporter creates a new opencensus tracing exporter using the opencensus agent forwarder. func NewOCAgentExporter(opts TracingExporterOptions) (trace.Exporter, error) { agentOpts := append([]ocagent.ExporterOption{}, ocagent.WithServiceName(opts.ServiceName)) if endpoint := os.Getenv("OCAGENT_ENDPOINT"); endpoint != "" { agentOpts = append(agentOpts, ocagent.WithAddress(endpoint)) } else { return nil, errdefs.InvalidInput("must set endpoint address in OCAGENT_ENDPOINT") } switch os.Getenv("OCAGENT_INSECURE") { case "0", "no", "n", "off", "": case "1", "yes", "y", "on": agentOpts = append(agentOpts, ocagent.WithInsecure()) default: return nil, errdefs.InvalidInput("invalid value for OCAGENT_INSECURE") } return ocagent.NewExporter(agentOpts...) }
[ "\"OCAGENT_ENDPOINT\"", "\"OCAGENT_INSECURE\"" ]
[]
[ "OCAGENT_INSECURE", "OCAGENT_ENDPOINT" ]
[]
["OCAGENT_INSECURE", "OCAGENT_ENDPOINT"]
go
2
0
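Both OCAGENT_* variables shown above must be set before the exporter can be built. The sketch below is illustrative only and is written as if it lived inside the same root package (the package is internal, so it cannot be imported from outside); the ServiceName field is taken from the exporter's own use of opts.ServiceName, and the endpoint value is a placeholder.

// exampleRegisterOCAgent shows one way the exporter could be wired up.
// It relies on the "os" and "go.opencensus.io/trace" imports already
// present in the file above.
func exampleRegisterOCAgent() error {
	os.Setenv("OCAGENT_ENDPOINT", "localhost:55678") // required; NewOCAgentExporter errors without it
	os.Setenv("OCAGENT_INSECURE", "yes")             // "0", "no", "off", or unset keeps TLS on

	exporter, err := NewOCAgentExporter(TracingExporterOptions{ServiceName: "virtual-kubelet"})
	if err != nil {
		return err
	}
	trace.RegisterExporter(exporter) // sampled spans now reach the agent
	return nil
}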
horenso_slack_reporter.go
package reporter import ( "encoding/json" "fmt" "io" "io/ioutil" "os" ) type horensoOut struct { Command string `json:"command"` CommandArgs []string `json:"commandArgs"` Output string `json:"output"` Stdout string `json:"stdout"` Stderr string `json:"stderr"` ExitCode int `json:"exitCode"` Result string `json:"result"` Pid int `json:"pid"` StartAt string `json:"startAt"` EndAt string `json:"endAt"` Hostname string `json:"hostName"` SystemTime float32 `json:"systemTime"` UserTime float32 `json:"userTime"` } type options struct { SlackWebhookURL string IgnoreSucceeded bool } func optionsFromEnv() (*options, error) { opts := new(options) opts.SlackWebhookURL = os.Getenv("SLACK_WEBHOOK_URL") if opts.SlackWebhookURL == "" { return nil, fmt.Errorf("SLACK_WEBHOOK_URL is missing") } doesIgnore := os.Getenv("IGNORE_SUCCEEDED") if doesIgnore == "" { opts.IgnoreSucceeded = false } else { opts.IgnoreSucceeded = true } return opts, nil } func parseHorensoOut(stdin io.Reader) (*horensoOut, error) { ho := new(horensoOut) text, err := ioutil.ReadAll(stdin) if err != nil { return nil, err } err = json.Unmarshal([]byte(text), ho) return ho, err } // Run the reporter func Run(stdin io.Reader, stdout io.Writer, stderr io.Writer, c SlackClient) int { opts, err := optionsFromEnv() if err != nil { fmt.Fprintln(stderr, err.Error()) return 1 } ho, err := parseHorensoOut(stdin) if err != nil { fmt.Fprintln(stderr, err.Error()) return 2 } if ho.ExitCode == 0 && opts.IgnoreSucceeded { return 0 } err = c.Post(ho, opts.SlackWebhookURL) if err != nil { fmt.Fprintln(stderr, err.Error()) return 3 } return 0 }
[ "\"SLACK_WEBHOOK_URL\"", "\"IGNORE_SUCCEEDED\"" ]
[]
[ "SLACK_WEBHOOK_URL", "IGNORE_SUCCEEDED" ]
[]
["SLACK_WEBHOOK_URL", "IGNORE_SUCCEEDED"]
go
2
0
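A rough usage sketch for the reporter above, written as if it sat in the same reporter package: the SlackClient method signature is inferred from the c.Post call in Run and may differ from the real interface, the webhook URL is a placeholder, and the snippet assumes "strings" is added to the import list.

// nopSlack is a stand-in SlackClient that swallows the post.
type nopSlack struct{}

func (nopSlack) Post(ho *horensoOut, url string) error { return nil }

func exampleRun() int {
	os.Setenv("SLACK_WEBHOOK_URL", "https://hooks.slack.com/services/T000/B000/XXXX") // required by optionsFromEnv
	os.Unsetenv("IGNORE_SUCCEEDED")                                                   // report successful runs too

	// A trimmed horenso result; the JSON keys match the horensoOut tags above.
	in := strings.NewReader(`{"command":"run-batch","exitCode":0,"result":"done"}`)
	return Run(in, os.Stdout, os.Stderr, nopSlack{})
}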
cbs/client.go
package cbs import ( "os" "github.com/xawei/qcloudapi-sdk-go/common" ) const ( CbsHost = "cbs.api.qcloud.com" CbsPath = "/v2/index.php" ) type Client struct { *common.Client } func NewClient(credential common.CredentialInterface, opts common.Opts) (*Client, error) { if opts.Host == "" { opts.Host = CbsHost } if opts.Path == "" { opts.Path = CbsPath } client, err := common.NewClient(credential, opts) if err != nil { return &Client{}, err } return &Client{client}, nil } func NewClientFromEnv() (*Client, error) { secretId := os.Getenv("QCloudSecretId") secretKey := os.Getenv("QCloudSecretKey") region := os.Getenv("QCloudCbsAPIRegion") host := os.Getenv("QCloudCbsAPIHost") path := os.Getenv("QCloudCbsAPIPath") return NewClient( common.Credential{ secretId, secretKey, }, common.Opts{ Region: region, Host: host, Path: path, }, ) }
[ "\"QCloudSecretId\"", "\"QCloudSecretKey\"", "\"QCloudCbsAPIRegion\"", "\"QCloudCbsAPIHost\"", "\"QCloudCbsAPIPath\"" ]
[]
[ "QCloudSecretId", "QCloudCbsAPIHost", "QCloudSecretKey", "QCloudCbsAPIRegion", "QCloudCbsAPIPath" ]
[]
["QCloudSecretId", "QCloudCbsAPIHost", "QCloudSecretKey", "QCloudCbsAPIRegion", "QCloudCbsAPIPath"]
go
5
0
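A minimal sketch of calling NewClientFromEnv from the file above; the import path is inferred from the package's own qcloudapi-sdk-go imports, and every credential value is a placeholder.

package main

import (
	"log"
	"os"

	"github.com/xawei/qcloudapi-sdk-go/cbs"
)

func main() {
	// Placeholder credentials purely for illustration.
	os.Setenv("QCloudSecretId", "my-secret-id")
	os.Setenv("QCloudSecretKey", "my-secret-key")
	os.Setenv("QCloudCbsAPIRegion", "gz")
	// QCloudCbsAPIHost and QCloudCbsAPIPath can stay unset: NewClient falls
	// back to CbsHost and CbsPath when they are empty.

	client, err := cbs.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	_ = client // ready for CBS API calls
}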
rollout/replicaset_test.go
package rollout import ( "strconv" "testing" "time" "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sfake "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" "k8s.io/client-go/tools/record" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake" "github.com/argoproj/argo-rollouts/utils/annotations" logutil "github.com/argoproj/argo-rollouts/utils/log" ) func newRolloutControllerRef(r *v1alpha1.Rollout) *metav1.OwnerReference { isController := true return &metav1.OwnerReference{ APIVersion: "argoproj.io/v1alpha1", Kind: "Rollouts", Name: r.GetName(), UID: r.GetUID(), Controller: &isController, } } func int32Ptr(i int32) *int32 { return &i } func TestGetReplicaSetsForRollouts(t *testing.T) { newTimestamp := metav1.Date(2016, 5, 20, 2, 0, 0, 0, time.UTC) selector := map[string]string{ "app": "nginx", } diffSelector := map[string]string{ "app": "nginx2", } rollout := newRollout("foo", 1, int32Ptr(1), selector) diffRollout := newRollout("bar", 1, int32Ptr(1), selector) tests := []struct { name string existingRSs []*appsv1.ReplicaSet expectedSelectedRSs []*appsv1.ReplicaSet expectedError error }{ { name: "Grab corrected owned replicasets", existingRSs: []*appsv1.ReplicaSet{ rs("foo-v2", 1, selector, newTimestamp, newRolloutControllerRef(rollout)), rs("foo-v1", 1, selector, newTimestamp, newRolloutControllerRef(diffRollout)), }, expectedSelectedRSs: []*appsv1.ReplicaSet{ rs("foo-v2", 1, selector, newTimestamp, newRolloutControllerRef(rollout)), }, expectedError: nil, }, { name: "Adopt orphaned replica sets", existingRSs: []*appsv1.ReplicaSet{ rs("foo-v1", 1, selector, newTimestamp, nil), }, expectedSelectedRSs: []*appsv1.ReplicaSet{ rs("foo-v1", 1, selector, newTimestamp, newRolloutControllerRef(rollout)), }, expectedError: nil, }, { name: "No replica sets exist", existingRSs: []*appsv1.ReplicaSet{}, expectedSelectedRSs: []*appsv1.ReplicaSet{}, expectedError: nil, }, { name: "No selector provided so no adoption", existingRSs: []*appsv1.ReplicaSet{ rs("foo-v1", 1, nil, newTimestamp, newRolloutControllerRef(diffRollout)), }, expectedSelectedRSs: []*appsv1.ReplicaSet{}, expectedError: nil, }, { name: "Orphan RS with different selector", existingRSs: []*appsv1.ReplicaSet{ rs("foo-v1", 1, diffSelector, newTimestamp, newRolloutControllerRef(diffRollout)), }, expectedSelectedRSs: []*appsv1.ReplicaSet{}, expectedError: nil, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { f := newFixture(t) defer f.Close() f.rolloutLister = append(f.rolloutLister, rollout) f.objects = append(f.objects, rollout) f.replicaSetLister = append(f.replicaSetLister, test.existingRSs...) 
for _, rs := range test.existingRSs { f.kubeobjects = append(f.kubeobjects, rs) } c, informers, _ := f.newController(noResyncPeriodFunc) stopCh := make(chan struct{}) defer close(stopCh) informers.Start(stopCh) returnedRSs, err := c.getReplicaSetsForRollouts(rollout) assert.Equal(t, test.expectedError, err) assert.Equal(t, len(test.expectedSelectedRSs), len(returnedRSs)) for i, returnedRS := range returnedRSs { assert.Equal(t, test.expectedSelectedRSs[i].Name, returnedRS.Name) } }) } } func TestReconcileNewReplicaSet(t *testing.T) { tests := []struct { name string rolloutReplicas int newReplicas int scaleExpected bool expectedNewReplicas int }{ { name: "New Replica Set matches rollout replica: No scale", rolloutReplicas: 10, newReplicas: 10, scaleExpected: false, }, { name: "New Replica Set higher than rollout replica: Scale down", rolloutReplicas: 10, newReplicas: 12, scaleExpected: true, expectedNewReplicas: 10, }, { name: "New Replica Set lower than rollout replica: Scale up", rolloutReplicas: 10, newReplicas: 8, scaleExpected: true, expectedNewReplicas: 10, }, } for i := range tests { test := tests[i] t.Run(test.name, func(t *testing.T) { test := tests[i] newRS := rs("foo-v2", test.newReplicas, nil, noTimestamp, nil) rollout := newBlueGreenRollout("foo", test.rolloutReplicas, nil, "", "") fake := fake.Clientset{} k8sfake := k8sfake.Clientset{} roCtx := rolloutContext{ log: logutil.WithRollout(rollout), rollout: rollout, newRS: newRS, reconcilerBase: reconcilerBase{ argoprojclientset: &fake, kubeclientset: &k8sfake, recorder: &record.FakeRecorder{}, }, } scaled, err := roCtx.reconcileNewReplicaSet() if err != nil { t.Errorf("unexpected error: %v", err) return } if !test.scaleExpected { if scaled || len(fake.Actions()) > 0 { t.Errorf("unexpected scaling: %v", fake.Actions()) } return } if test.scaleExpected && !scaled { t.Errorf("expected scaling to occur") return } if len(k8sfake.Actions()) != 1 { t.Errorf("expected 1 action during scale, got: %v", fake.Actions()) return } updated := k8sfake.Actions()[0].(core.UpdateAction).GetObject().(*appsv1.ReplicaSet) if e, a := test.expectedNewReplicas, int(*(updated.Spec.Replicas)); e != a { t.Errorf("expected update to %d replicas, got %d", e, a) } }) } } func TestReconcileOldReplicaSet(t *testing.T) { tests := []struct { name string rolloutReplicas int oldReplicas int newReplicas int readyPodsFromOldRS int readyPodsFromNewRS int scaleExpected bool expectedOldReplicas int }{ { name: "No pods to scale down", rolloutReplicas: 10, oldReplicas: 0, newReplicas: 10, readyPodsFromOldRS: 0, readyPodsFromNewRS: 0, scaleExpected: false, }, { name: "Clean up unhealthy pods", rolloutReplicas: 10, oldReplicas: 10, newReplicas: 10, readyPodsFromOldRS: 8, readyPodsFromNewRS: 10, scaleExpected: true, expectedOldReplicas: 0, }, { name: "Normal scale down when new ReplicaSet is healthy", rolloutReplicas: 10, oldReplicas: 10, newReplicas: 10, readyPodsFromOldRS: 10, readyPodsFromNewRS: 10, scaleExpected: true, expectedOldReplicas: 0, }, } for i := range tests { test := tests[i] t.Run(test.name, func(t *testing.T) { newSelector := map[string]string{"foo": "new"} oldSelector := map[string]string{"foo": "old"} newRS := rs("foo-new", test.newReplicas, newSelector, noTimestamp, nil) newRS.Annotations = map[string]string{annotations.DesiredReplicasAnnotation: strconv.Itoa(test.newReplicas)} newRS.Status.AvailableReplicas = int32(test.readyPodsFromNewRS) oldRS := rs("foo-old", test.oldReplicas, oldSelector, noTimestamp, nil) oldRS.Annotations = 
map[string]string{annotations.DesiredReplicasAnnotation: strconv.Itoa(test.oldReplicas)} oldRS.Status.AvailableReplicas = int32(test.readyPodsFromOldRS) oldRSs := []*appsv1.ReplicaSet{oldRS} rollout := newBlueGreenRollout("foo", test.rolloutReplicas, nil, "", "") rollout.Spec.Selector = &metav1.LabelSelector{MatchLabels: newSelector} f := newFixture(t) defer f.Close() f.objects = append(f.objects, rollout) f.replicaSetLister = append(f.replicaSetLister, oldRS, newRS) f.kubeobjects = append(f.kubeobjects, oldRS, newRS) c, informers, _ := f.newController(noResyncPeriodFunc) stopCh := make(chan struct{}) informers.Start(stopCh) informers.WaitForCacheSync(stopCh) close(stopCh) roCtx, err := c.newRolloutContext(rollout) assert.NoError(t, err) scaled, err := roCtx.reconcileOldReplicaSets(oldRSs) if err != nil { t.Errorf("unexpected error: %v", err) return } if !test.scaleExpected && scaled { t.Errorf("unexpected scaling: %v", f.kubeclient.Actions()) } if test.scaleExpected && !scaled { t.Errorf("expected scaling to occur") return } }) } }
[]
[]
[]
[]
[]
go
null
null
null
go/engine/login_offline_test.go
package engine import ( "os" "testing" "time" "golang.org/x/net/context" "github.com/keybase/client/go/libkb" "github.com/keybase/clockwork" ) func TestLoginOffline(t *testing.T) { tc := SetupEngineTest(t, "login") defer tc.Cleanup() u1 := CreateAndSignupFakeUser(tc, "login") Logout(tc) u1.LoginOrBust(tc) // do a upak load to make sure it is cached arg := libkb.NewLoadUserByUIDArg(context.TODO(), tc.G, u1.UID()) tc.G.GetUPAKLoader().Load(arg) // Simulate restarting the service by wiping out the // passphrase stream cache and cached secret keys clearCaches(tc.G) tc.G.GetUPAKLoader().ClearMemory() // set server uri to nonexistent ip so api calls will fail prev := os.Getenv("KEYBASE_SERVER_URI") os.Setenv("KEYBASE_SERVER_URI", "http://127.0.0.127:3333") defer os.Setenv("KEYBASE_SERVER_URI", prev) tc.G.ConfigureAPI() eng := NewLoginOffline(tc.G) m := NewMetaContextForTest(tc) if err := RunEngine2(m, eng); err != nil { t.Fatal(err) } uv, deviceID, deviceName, skey, ekey := tc.G.ActiveDevice.AllFields() if uv.IsNil() { t.Errorf("uid is nil, expected it to exist") } if !uv.Uid.Equal(u1.UID()) { t.Errorf("uid: %v, expected %v", uv, u1) } if deviceID.IsNil() { t.Errorf("deviceID is nil, expected it to exist") } if deviceName != defaultDeviceName { t.Errorf("device name: %q, expected %q", deviceName, defaultDeviceName) } if skey == nil { t.Errorf("signing key is nil, expected it to exist") } if ekey == nil { t.Errorf("encryption key is nil, expected it to exist") } if tc.G.ActiveDevice.Name() != defaultDeviceName { t.Errorf("device name: %q, expected %q", tc.G.ActiveDevice.Name(), defaultDeviceName) } } // Use fake clock to test login offline after significant delay // (make sure upak loader won't use network) func TestLoginOfflineDelay(t *testing.T) { tc := SetupEngineTest(t, "login") defer tc.Cleanup() fakeClock := clockwork.NewFakeClockAt(time.Now()) tc.G.SetClock(fakeClock) u1 := CreateAndSignupFakeUser(tc, "login") Logout(tc) u1.LoginOrBust(tc) // do a upak load to make sure it is cached arg := libkb.NewLoadUserByUIDArg(context.TODO(), tc.G, u1.UID()) tc.G.GetUPAKLoader().Load(arg) // Simulate restarting the service by wiping out the // passphrase stream cache and cached secret keys clearCaches(tc.G) tc.G.GetUPAKLoader().ClearMemory() // set server uri to nonexistent ip so api calls will fail prev := os.Getenv("KEYBASE_SERVER_URI") os.Setenv("KEYBASE_SERVER_URI", "http://127.0.0.127:3333") defer os.Setenv("KEYBASE_SERVER_URI", prev) tc.G.ConfigureAPI() // advance the clock past the cache timeout fakeClock.Advance(libkb.CachedUserTimeout * 10) eng := NewLoginOffline(tc.G) m := NewMetaContextForTest(tc) if err := RunEngine2(m, eng); err != nil { t.Fatal(err) } uv, deviceID, deviceName, skey, ekey := tc.G.ActiveDevice.AllFields() if uv.IsNil() { t.Errorf("uid is nil, expected it to exist") } if !uv.Uid.Equal(u1.UID()) { t.Errorf("uid: %v, expected %v", uv, u1.UID()) } if deviceID.IsNil() { t.Errorf("deviceID is nil, expected it to exist") } if deviceName != defaultDeviceName { t.Errorf("device name: %q, expected %q", deviceName, defaultDeviceName) } if skey == nil { t.Errorf("signing key is nil, expected it to exist") } if ekey == nil { t.Errorf("encryption key is nil, expected it to exist") } } // Login offline with nothing in upak cache for self user. 
func TestLoginOfflineNoUpak(t *testing.T) { tc := SetupEngineTest(t, "login") defer tc.Cleanup() u1 := CreateAndSignupFakeUser(tc, "login") Logout(tc) u1.LoginOrBust(tc) // Simulate restarting the service by wiping out the // passphrase stream cache and cached secret keys tc.SimulateServiceRestart() tc.G.GetUPAKLoader().ClearMemory() // invalidate the cache for uid tc.G.GetUPAKLoader().Invalidate(context.Background(), u1.UID()) // set server uri to nonexistent ip so api calls will fail prev := os.Getenv("KEYBASE_SERVER_URI") os.Setenv("KEYBASE_SERVER_URI", "http://127.0.0.127:3333") defer os.Setenv("KEYBASE_SERVER_URI", prev) tc.G.ConfigureAPI() eng := NewLoginOffline(tc.G) m := NewMetaContextForTest(tc) err := RunEngine2(m, eng) if err != nil { t.Fatalf("LoginOffline should still work after upak cache invalidation; got %s", err) } }
[ "\"KEYBASE_SERVER_URI\"", "\"KEYBASE_SERVER_URI\"", "\"KEYBASE_SERVER_URI\"" ]
[]
[ "KEYBASE_SERVER_URI" ]
[]
["KEYBASE_SERVER_URI"]
go
1
0
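All three tests above repeat the same save/override/restore handling of KEYBASE_SERVER_URI. The helper below is a hedged sketch (standard library only, not part of the Keybase codebase) of how that pattern can be factored out in a Go test.

// setenvForTest overrides an environment variable for one test and restores
// the previous value when the test finishes.
func setenvForTest(t *testing.T, key, value string) {
	t.Helper()
	prev := os.Getenv(key)
	os.Setenv(key, value)
	t.Cleanup(func() { os.Setenv(key, prev) })
}

// Usage inside any of the tests above:
//	setenvForTest(t, "KEYBASE_SERVER_URI", "http://127.0.0.127:3333")
//	tc.G.ConfigureAPI()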