Dataset columns:
filename: string (lengths 4 to 198)
content: string (lengths 25 to 939k)
environment: list
variablearg: list
constarg: list
variableargjson: string (1 distinct value)
constargjson: string (lengths 2 to 3.9k)
lang: string (3 distinct values)
constargcount: float64 (range 0 to 129)
variableargcount: float64 (range 0 to 0)
sentence: string (1 distinct value)
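As a quick sanity check on this schema, the following sketch (plain Python; the dict-per-record access pattern and the exact field semantics are assumptions, not something stated in the data itself) decodes the JSON-encoded argument columns of one record and compares them against the stored counts:

import json

def decode_record(record):
    # constargjson / variableargjson appear to hold JSON-encoded lists of the
    # environment-variable names used as constant / variable arguments.
    const_args = json.loads(record["constargjson"])
    variable_args = json.loads(record["variableargjson"]) if record["variableargjson"] else []
    # The count columns are float64 in the schema, hence the int() casts.
    assert len(const_args) == int(record["constargcount"])
    assert len(variable_args) == int(record["variableargcount"])
    return const_args, variable_args

# Values copied from the first record below:
example = {
    "constargjson": '["PUBSUB_EMULATOR_HOST"]',
    "variableargjson": "[]",
    "constargcount": 1.0,
    "variableargcount": 0.0,
}
print(decode_record(example))  # (['PUBSUB_EMULATOR_HOST'], [])

The records themselves follow, one source file per record.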
eventbus/gcp/eventbus_test.go
// Copyright (c) 2014 - The Event Horizon authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package gcp import ( "crypto/rand" "encoding/hex" "os" "testing" "time" "github.com/looplab/eventhorizon/eventbus" ) func TestEventBus(t *testing.T) { // Connect to localhost if not running inside docker if os.Getenv("PUBSUB_EMULATOR_HOST") == "" { os.Setenv("PUBSUB_EMULATOR_HOST", "localhost:8793") } // Get a random app ID. b := make([]byte, 8) if _, err := rand.Read(b); err != nil { t.Fatal(err) } appID := "app-" + hex.EncodeToString(b) bus1, err := NewEventBus("project_id", appID) if err != nil { t.Fatal("there should be no error:", err) } bus2, err := NewEventBus("project_id", appID) if err != nil { t.Fatal("there should be no error:", err) } eventbus.AcceptanceTest(t, bus1, bus2, time.Second) } func TestEventBusLoad(t *testing.T) { // Connect to localhost if not running inside docker if os.Getenv("PUBSUB_EMULATOR_HOST") == "" { os.Setenv("PUBSUB_EMULATOR_HOST", "localhost:8793") } // Get a random app ID. bts := make([]byte, 8) if _, err := rand.Read(bts); err != nil { t.Fatal(err) } appID := "app-" + hex.EncodeToString(bts) bus, err := NewEventBus("project_id", appID) if err != nil { t.Fatal("there should be no error:", err) } eventbus.LoadTest(t, bus) } func BenchmarkEventBus(b *testing.B) { // Connect to localhost if not running inside docker if os.Getenv("PUBSUB_EMULATOR_HOST") == "" { os.Setenv("PUBSUB_EMULATOR_HOST", "localhost:8793") } // Get a random app ID. bts := make([]byte, 8) if _, err := rand.Read(bts); err != nil { b.Fatal(err) } appID := "app-" + hex.EncodeToString(bts) bus, err := NewEventBus("project_id", appID) if err != nil { b.Fatal("there should be no error:", err) } eventbus.Benchmark(b, bus) }
[ "\"PUBSUB_EMULATOR_HOST\"", "\"PUBSUB_EMULATOR_HOST\"", "\"PUBSUB_EMULATOR_HOST\"" ]
[]
[ "PUBSUB_EMULATOR_HOST" ]
[]
["PUBSUB_EMULATOR_HOST"]
go
1
0
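To show how the environment and constarg columns of this first record relate to its content, here is a rough illustration (an assumption about what these columns capture, not the pipeline that actually produced the dataset): collecting the string-literal arguments of os.Getenv in the Go source yields three occurrences of PUBSUB_EMULATOR_HOST (the environment column keeps the surrounding quotes) and a single deduplicated name, matching constarg and constargcount above.

import re

# String-literal argument of an os.Getenv(...) call, e.g. os.Getenv("TERM").
GETENV_LITERAL = re.compile(r'os\.Getenv\(\s*"([^"]+)"\s*\)')

def getenv_literals(go_source: str):
    """Return (all occurrences, deduplicated names) of constant os.Getenv arguments."""
    occurrences = GETENV_LITERAL.findall(go_source)
    deduped = list(dict.fromkeys(occurrences))  # preserve first-seen order
    return occurrences, deduped

# On eventbus_test.go above this gives
# (['PUBSUB_EMULATOR_HOST'] * 3, ['PUBSUB_EMULATOR_HOST']).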
tcp_client/tcp-client.go
package main import ( "net" "fmt" "io/ioutil" "os" "log" "strings" "strconv" ) func main() { query := "" // connect to this socket conn, err := net.Dial("tcp", "moss.stanford.edu:7690") //conn, err := net.Dial("tcp", "127.0.0.1:3333") if err != nil { log.Fatalln(err) } log.Printf("Connected to %s", conn.RemoteAddr().String()) defer conn.Close() // MOSS_USER_ID is an environment variable. userId := os.Getenv("MOSS_USER_ID") message := fmt.Sprintf("moss %s\n", userId) conn.Write([]byte(message)) conn.Write([]byte("directory 0\n")) conn.Write([]byte("X 0\n")) conn.Write([]byte("maxmatches 10\n")) conn.Write([]byte("show 250\n")) conn.Write([]byte("language java\n")) buff := make([]byte, 1024) n, _ := conn.Read(buff) message = string(buff[:n]) log.Printf("Receive: %s", message) message = strings.TrimSpace(message) if message == "no" { conn.Write([]byte("end\n")) conn.Close() log.Fatalln("Unrecognized language java") } var files = make([]string, 3) files[0] = "H1.java" files[1] = "H2.java" files[2] = "H3.java" for i := 1; i < 4; i++ { //uploadFile fname := files[i-1] log.Printf("Uploading %s ...", fname) dat, err := ioutil.ReadFile(fname) if err != nil { log.Fatalln(err) } size := len(dat) m1 := fmt.Sprintf("file %d %s %d %s\n", i, "java", size, fname) log.Printf(m1) conn.Write([]byte(m1)) conn.Write(dat) log.Printf("done.\n") } conn.Write([]byte("query 0 " + query + " \n")) log.Println("Query submitted. Waiting for the server's response.") // listen for reply buff = make([]byte, 1024) n, err = conn.Read(buff) message = string(buff[:n]) // close connection conn.Write([]byte("end\n")) log.Println("n = " + strconv.Itoa(n)) log.Println(message) conn.Close() }
[ "\"MOSS_USER_ID\"" ]
[]
[ "MOSS_USER_ID" ]
[]
["MOSS_USER_ID"]
go
1
0
contrib/spendfrom/spendfrom.py
#!/usr/bin/env python # # Use the raw transactions API to spend bitcoins received on particular addresses, # and send any change back to that same address. # # Example usage: # spendfrom.py # Lists available funds # spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00 # # Assumes it will talk to a bitcoind or Bitcoin-Qt running # on localhost. # # Depends on jsonrpc # from decimal import * import getpass import math import os import os.path import platform import sys import time from jsonrpc import ServiceProxy, json BASE_FEE=Decimal("0.001") def check_json_precision(): """Make sure json library being used does not lose precision converting BTC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n)))*1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def determine_db_dir(): """Return the default location of the bitcoin data directory""" if platform.system() == "Darwin": return os.path.expanduser("~/Library/Application Support/Bitcoin/") elif platform.system() == "Windows": return os.path.join(os.environ['APPDATA'], "Bitcoin") return os.path.expanduser("~/.bitcoin") def read_bitcoin_config(dbdir): """Read the bitcoin.conf file from dbdir, returns dictionary of settings""" from ConfigParser import SafeConfigParser class FakeSecHead(object): def __init__(self, fp): self.fp = fp self.sechead = '[all]\n' def readline(self): if self.sechead: try: return self.sechead finally: self.sechead = None else: s = self.fp.readline() if s.find('#') != -1: s = s[0:s.find('#')].strip() +"\n" return s config_parser = SafeConfigParser() config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf")))) return dict(config_parser.items("all")) def connect_JSON(config): """Connect to a bitcoin JSON-RPC server""" testnet = config.get('testnet', '0') testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False if not 'rpcport' in config: config['rpcport'] = 19031 if testnet else 9031 connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport']) try: result = ServiceProxy(connect) # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors, # but also make sure the bitcoind we're talking to is/isn't testnet: if result.getmininginfo()['testnet'] != testnet: sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n") sys.exit(1) return result except: sys.stderr.write("Error connecting to RPC server at "+connect+"\n") sys.exit(1) def unlock_wallet(bitcoind): info = bitcoind.getinfo() if 'unlocked_until' not in info: return True # wallet is not encrypted t = int(info['unlocked_until']) if t <= time.time(): try: passphrase = getpass.getpass("Wallet is locked; enter passphrase: ") bitcoind.walletpassphrase(passphrase, 5) except: sys.stderr.write("Wrong passphrase\n") info = bitcoind.getinfo() return int(info['unlocked_until']) > time.time() def list_available(bitcoind): address_summary = dict() address_to_account = dict() for info in bitcoind.listreceivedbyaddress(0): address_to_account[info["address"]] = info["account"] unspent = bitcoind.listunspent(0) for output in unspent: # listunspent doesn't give addresses, so: rawtx = bitcoind.getrawtransaction(output['txid'], 1) vout = rawtx["vout"][output['vout']] pk = vout["scriptPubKey"] # This code only deals with ordinary pay-to-bitcoin-address # or pay-to-script-hash outputs right now; anything exotic is ignored. 
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash": continue address = pk["addresses"][0] if address in address_summary: address_summary[address]["total"] += vout["value"] address_summary[address]["outputs"].append(output) else: address_summary[address] = { "total" : vout["value"], "outputs" : [output], "account" : address_to_account.get(address, "") } return address_summary def select_coins(needed, inputs): # Feel free to improve this, this is good enough for my simple needs: outputs = [] have = Decimal("0.0") n = 0 while have < needed and n < len(inputs): outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]}) have += inputs[n]["amount"] n += 1 return (outputs, have-needed) def create_tx(bitcoind, fromaddresses, toaddress, amount, fee): all_coins = list_available(bitcoind) total_available = Decimal("0.0") needed = amount+fee potential_inputs = [] for addr in fromaddresses: if addr not in all_coins: continue potential_inputs.extend(all_coins[addr]["outputs"]) total_available += all_coins[addr]["total"] if total_available < needed: sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed)); sys.exit(1) # # Note: # Python's json/jsonrpc modules have inconsistent support for Decimal numbers. # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode # Decimals, I'm casting amounts to float before sending them to bitcoind. # outputs = { toaddress : float(amount) } (inputs, change_amount) = select_coins(needed, potential_inputs) if change_amount > BASE_FEE: # don't bother with zero or tiny change change_address = fromaddresses[-1] if change_address in outputs: outputs[change_address] += float(change_amount) else: outputs[change_address] = float(change_amount) rawtx = bitcoind.createrawtransaction(inputs, outputs) signed_rawtx = bitcoind.signrawtransaction(rawtx) if not signed_rawtx["complete"]: sys.stderr.write("signrawtransaction failed\n") sys.exit(1) txdata = signed_rawtx["hex"] return txdata def compute_amount_in(bitcoind, txinfo): result = Decimal("0.0") for vin in txinfo['vin']: in_info = bitcoind.getrawtransaction(vin['txid'], 1) vout = in_info['vout'][vin['vout']] result = result + vout['value'] return result def compute_amount_out(txinfo): result = Decimal("0.0") for vout in txinfo['vout']: result = result + vout['value'] return result def sanity_test_fee(bitcoind, txdata_hex, max_fee): class FeeError(RuntimeError): pass try: txinfo = bitcoind.decoderawtransaction(txdata_hex) total_in = compute_amount_in(bitcoind, txinfo) total_out = compute_amount_out(txinfo) if total_in-total_out > max_fee: raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out)) tx_size = len(txdata_hex)/2 kb = tx_size/1000 # integer division rounds down if kb > 1 and fee < BASE_FEE: raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes") if total_in < 0.01 and fee < BASE_FEE: raise FeeError("Rejecting no-fee, tiny-amount transaction") # Exercise for the reader: compute transaction priority, and # warn if this is a very-low-priority transaction except FeeError as err: sys.stderr.write((str(err)+"\n")) sys.exit(1) def main(): import optparse parser = optparse.OptionParser(usage="%prog [options]") parser.add_option("--from", dest="fromaddresses", default=None, help="addresses to get bitcoins from") parser.add_option("--to", dest="to", default=None, help="address to get send bitcoins to") parser.add_option("--amount", dest="amount", default=None, help="amount to send") parser.add_option("--fee", dest="fee", 
default="0.0", help="fee to include") parser.add_option("--datadir", dest="datadir", default=determine_db_dir(), help="location of bitcoin.conf file with RPC username/password (default: %default)") parser.add_option("--testnet", dest="testnet", default=False, action="store_true", help="Use the test network") parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true", help="Don't broadcast the transaction, just create and print the transaction data") (options, args) = parser.parse_args() check_json_precision() config = read_bitcoin_config(options.datadir) if options.testnet: config['testnet'] = True bitcoind = connect_JSON(config) if options.amount is None: address_summary = list_available(bitcoind) for address,info in address_summary.iteritems(): n_transactions = len(info['outputs']) if n_transactions > 1: print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions)) else: print("%s %.8f %s"%(address, info['total'], info['account'])) else: fee = Decimal(options.fee) amount = Decimal(options.amount) while unlock_wallet(bitcoind) == False: pass # Keep asking for passphrase until they get it right txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee) sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01")) if options.dry_run: print(txdata) else: txid = bitcoind.sendrawtransaction(txdata) print(txid) if __name__ == '__main__': main()
[]
[]
[ "APPDATA" ]
[]
["APPDATA"]
python
1
0
vendor/github.com/gookit/color/utils.go
package color import ( "fmt" "io" "os" "strings" ) // Support color: // "TERM=xterm" // "TERM=xterm-vt220" // "TERM=xterm-256color" // "TERM=screen-256color" // "TERM=tmux-256color" // "TERM=rxvt-unicode-256color" // Don't support color: // "TERM=cygwin" var specialColorTerms = map[string]bool{ "screen-256color": true, "tmux-256color": true, "rxvt-unicode-256color": true, } // IsConsole 判断 w 是否为 stderr、stdout、stdin 三者之一 func IsConsole(out io.Writer) bool { o, ok := out.(*os.File) if !ok { return false } return o == os.Stdout || o == os.Stderr || o == os.Stdin } // IsMSys msys(MINGW64) 环境,不一定支持颜色 func IsMSys() bool { // like "MSYSTEM=MINGW64" if len(os.Getenv("MSYSTEM")) > 0 { return true } return false } // IsSupportColor check current console is support color. // // Supported: // linux, mac, or windows's ConEmu, Cmder, putty, git-bash.exe // Not support: // windows cmd.exe, powerShell.exe func IsSupportColor() bool { envTerm := os.Getenv("TERM") if strings.Contains(envTerm, "xterm") { return true } // it's special color term if _, ok := specialColorTerms[envTerm]; ok { return true } // like on ConEmu software, e.g "ConEmuANSI=ON" if os.Getenv("ConEmuANSI") == "ON" { return true } // like on ConEmu software, e.g "ANSICON=189x2000 (189x43)" if os.Getenv("ANSICON") != "" { return true } return false } // IsSupport256Color render func IsSupport256Color() bool { // "TERM=xterm-256color" // "TERM=screen-256color" // "TERM=tmux-256color" // "TERM=rxvt-unicode-256color" return strings.Contains(os.Getenv("TERM"), "256color") } // IsSupportTrueColor render. IsSupportRGBColor func IsSupportTrueColor() bool { // "COLORTERM=truecolor" return strings.Contains(os.Getenv("COLORTERM"), "truecolor") } // its Win system. linux windows darwin // func isWindows() bool { // return runtime.GOOS == "windows" // } func doPrint(code string, colors []Color, str string) { if isLikeInCmd { winPrint(str, colors...) } else { _, _ = fmt.Fprint(output, RenderString(code, str)) } } func doPrintln(code string, colors []Color, args []interface{}) { str := formatArgsForPrintln(args) if isLikeInCmd { winPrintln(str, colors...) } else { _, _ = fmt.Fprintln(output, RenderString(code, str)) } } func doPrintV2(code, str string) { if isLikeInCmd { renderColorCodeOnCmd(func() { _, _ = fmt.Fprint(output, RenderString(code, str)) }) } else { _, _ = fmt.Fprint(output, RenderString(code, str)) } } func doPrintlnV2(code string, args []interface{}) { str := formatArgsForPrintln(args) if isLikeInCmd { renderColorCodeOnCmd(func() { _, _ = fmt.Fprintln(output, RenderString(code, str)) }) } else { _, _ = fmt.Fprintln(output, RenderString(code, str)) } } func stringToArr(str, sep string) (arr []string) { str = strings.TrimSpace(str) if str == "" { return } ss := strings.Split(str, sep) for _, val := range ss { if val = strings.TrimSpace(val); val != "" { arr = append(arr, val) } } return } // if use Println, will add spaces for each arg func formatArgsForPrintln(args []interface{}) (message string) { if ln := len(args); ln == 0 { message = "" } else if ln == 1 { message = fmt.Sprint(args[0]) } else { message = fmt.Sprintln(args...) // clear last "\n" message = message[:len(message)-1] } return }
[ "\"MSYSTEM\"", "\"TERM\"", "\"ConEmuANSI\"", "\"ANSICON\"", "\"TERM\"", "\"COLORTERM\"" ]
[]
[ "TERM", "ANSICON", "MSYSTEM", "ConEmuANSI", "COLORTERM" ]
[]
["TERM", "ANSICON", "MSYSTEM", "ConEmuANSI", "COLORTERM"]
go
5
0
contrib/gitian-build.py
#!/usr/bin/env python3 import argparse import os import subprocess import sys def setup(): global args, workdir programs = ['ruby', 'git', 'apt-cacher-ng', 'make', 'wget'] if args.kvm: programs += ['python-vm-builder', 'qemu-kvm', 'qemu-utils'] elif args.docker: dockers = ['docker.io', 'docker-ce'] for i in dockers: return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i]) if return_code == 0: break if return_code != 0: print('Cannot find any way to install docker', file=sys.stderr) exit(1) else: programs += ['lxc', 'debootstrap'] subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs) if not os.path.isdir('gitian.sigs.ltc'): subprocess.check_call(['git', 'clone', 'https://github.com/LiteNoteProject/gitian.sigs.ltc.git']) if not os.path.isdir('litenote-detached-sigs'): subprocess.check_call(['git', 'clone', 'https://github.com/LiteNoteProject/litenote-detached-sigs.git']) if not os.path.isdir('gitian-builder'): subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git']) if not os.path.isdir('litenote'): subprocess.check_call(['git', 'clone', 'https://github.com/LiteNoteProject/litenote.git']) os.chdir('gitian-builder') make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64'] if args.docker: make_image_prog += ['--docker'] elif not args.kvm: make_image_prog += ['--lxc'] subprocess.check_call(make_image_prog) os.chdir(workdir) if args.is_bionic and not args.kvm and not args.docker: subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net']) print('Reboot is required') exit(0) def build(): global args, workdir os.makedirs('litenote-binaries/' + args.version, exist_ok=True) print('\nBuilding Dependencies\n') os.chdir('gitian-builder') os.makedirs('inputs', exist_ok=True) subprocess.check_call(['wget', '-N', '-P', 'inputs', 'http://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz']) subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch']) subprocess.check_call(['make', '-C', '../litenote/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common']) if args.linux: print('\nCompiling ' + args.version + ' Linux') subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'litenote='+args.commit, '--url', 'litenote='+args.url, '../litenote/contrib/gitian-descriptors/gitian-linux.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs.ltc/', '../litenote/contrib/gitian-descriptors/gitian-linux.yml']) subprocess.check_call('mv build/out/litenote-*.tar.gz build/out/src/litenote-*.tar.gz ../litenote-binaries/'+args.version, shell=True) if args.windows: print('\nCompiling ' + args.version + ' Windows') subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'litenote='+args.commit, '--url', 'litenote='+args.url, '../litenote/contrib/gitian-descriptors/gitian-win.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs.ltc/', '../litenote/contrib/gitian-descriptors/gitian-win.yml']) subprocess.check_call('mv build/out/litenote-*-win-unsigned.tar.gz inputs/litenote-win-unsigned.tar.gz', shell=True) subprocess.check_call('mv build/out/litenote-*.zip build/out/litenote-*.exe ../litenote-binaries/'+args.version, shell=True) 
if args.macos: print('\nCompiling ' + args.version + ' MacOS') subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'litenote='+args.commit, '--url', 'litenote='+args.url, '../litenote/contrib/gitian-descriptors/gitian-osx.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs.ltc/', '../litenote/contrib/gitian-descriptors/gitian-osx.yml']) subprocess.check_call('mv build/out/litenote-*-osx-unsigned.tar.gz inputs/litenote-osx-unsigned.tar.gz', shell=True) subprocess.check_call('mv build/out/litenote-*.tar.gz build/out/litenote-*.dmg ../litenote-binaries/'+args.version, shell=True) os.chdir(workdir) if args.commit_files: print('\nCommitting '+args.version+' Unsigned Sigs\n') os.chdir('gitian.sigs.ltc') subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer]) subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer]) subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer]) subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer]) os.chdir(workdir) def sign(): global args, workdir os.chdir('gitian-builder') if args.windows: print('\nSigning ' + args.version + ' Windows') subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../litenote/contrib/gitian-descriptors/gitian-win-signer.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs.ltc/', '../litenote/contrib/gitian-descriptors/gitian-win-signer.yml']) subprocess.check_call('mv build/out/litenote-*win64-setup.exe ../litenote-binaries/'+args.version, shell=True) subprocess.check_call('mv build/out/litenote-*win32-setup.exe ../litenote-binaries/'+args.version, shell=True) if args.macos: print('\nSigning ' + args.version + ' MacOS') subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../litenote/contrib/gitian-descriptors/gitian-osx-signer.yml']) subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs.ltc/', '../litenote/contrib/gitian-descriptors/gitian-osx-signer.yml']) subprocess.check_call('mv build/out/litenote-osx-signed.dmg ../litenote-binaries/'+args.version+'/litenote-'+args.version+'-osx.dmg', shell=True) os.chdir(workdir) if args.commit_files: print('\nCommitting '+args.version+' Signed Sigs\n') os.chdir('gitian.sigs.ltc') subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer]) subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer]) subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer]) os.chdir(workdir) def verify(): global args, workdir os.chdir('gitian-builder') print('\nVerifying v'+args.version+' Linux\n') subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs.ltc/', '-r', args.version+'-linux', '../litenote/contrib/gitian-descriptors/gitian-linux.yml']) print('\nVerifying v'+args.version+' Windows\n') subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs.ltc/', '-r', args.version+'-win-unsigned', '../litenote/contrib/gitian-descriptors/gitian-win.yml']) print('\nVerifying v'+args.version+' MacOS\n') subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs.ltc/', '-r', 
args.version+'-osx-unsigned', '../litenote/contrib/gitian-descriptors/gitian-osx.yml']) print('\nVerifying v'+args.version+' Signed Windows\n') subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs.ltc/', '-r', args.version+'-win-signed', '../litenote/contrib/gitian-descriptors/gitian-win-signer.yml']) print('\nVerifying v'+args.version+' Signed MacOS\n') subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs.ltc/', '-r', args.version+'-osx-signed', '../litenote/contrib/gitian-descriptors/gitian-osx-signer.yml']) os.chdir(workdir) def main(): global args, workdir parser = argparse.ArgumentParser(usage='%(prog)s [options] signer version') parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch') parser.add_argument('-u', '--url', dest='url', default='https://github.com/LiteNoteProject/litenote', help='Specify the URL of the repository. Default is %(default)s') parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build') parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build') parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS') parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries') parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS') parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s') parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s') parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC') parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC') parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Uses LXC. If you want to use KVM, use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)') parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.') parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git') parser.add_argument('signer', help='GPG signer to sign each build assert file') parser.add_argument('version', help='Version number, commit, or branch to build. 
If building a commit or branch, the -c option must be specified') args = parser.parse_args() workdir = os.getcwd() args.linux = 'l' in args.os args.windows = 'w' in args.os args.macos = 'm' in args.os args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs']) if args.buildsign: args.build=True args.sign=True if args.kvm and args.docker: raise Exception('Error: cannot have both kvm and docker') args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign' # Set enviroment variable USE_LXC or USE_DOCKER, let gitian-builder know that we use lxc or docker if args.docker: os.environ['USE_DOCKER'] = '1' elif not args.kvm: os.environ['USE_LXC'] = '1' if not 'GITIAN_HOST_IP' in os.environ.keys(): os.environ['GITIAN_HOST_IP'] = '10.0.3.1' if not 'LXC_GUEST_IP' in os.environ.keys(): os.environ['LXC_GUEST_IP'] = '10.0.3.5' # Disable for MacOS if no SDK found if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'): print('Cannot build for MacOS, SDK does not exist. Will build for other OSes') args.macos = False script_name = os.path.basename(sys.argv[0]) # Signer and version shouldn't be empty if args.signer == '': print(script_name+': Missing signer.') print('Try '+script_name+' --help for more information') exit(1) if args.version == '': print(script_name+': Missing version.') print('Try '+script_name+' --help for more information') exit(1) # Add leading 'v' for tags args.commit = ('' if args.commit else 'v') + args.version print(args.commit) if args.setup: setup() os.chdir('litenote') subprocess.check_call(['git', 'fetch']) subprocess.check_call(['git', 'checkout', args.commit]) os.chdir(workdir) if args.build: build() if args.sign: sign() if args.verify: verify() if __name__ == '__main__': main()
[]
[]
[ "USE_DOCKER", "GITIAN_HOST_IP", "USE_LXC", "LXC_GUEST_IP" ]
[]
["USE_DOCKER", "GITIAN_HOST_IP", "USE_LXC", "LXC_GUEST_IP"]
python
4
0
docs/conf.py
# -*- coding: utf-8 -*- # # flake8: noqa # Disable Flake8 because of all the sphinx imports # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # Airflow documentation build configuration file, created by # sphinx-quickstart on Thu Oct 9 20:50:01 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys import airflow autodoc_mock_imports = [ 'MySQLdb', 'adal', 'analytics', 'azure', 'azure.cosmos', 'azure.datalake', 'azure.mgmt', 'boto3', 'botocore', 'bson', 'cassandra', 'celery', 'cloudant', 'cryptography', 'cx_Oracle', 'datadog', 'distributed', 'docker', 'google', 'google_auth_httplib2', 'googleapiclient', 'grpc', 'hdfs', 'httplib2', 'jaydebeapi', 'jenkins', 'jira', 'kubernetes', 'msrestazure', 'pandas', 'pandas_gbq', 'paramiko', 'pinotdb', 'psycopg2', 'pydruid', 'pyhive', 'pyhive', 'pymongo', 'pymssql', 'pysftp', 'qds_sdk', 'redis', 'simple_salesforce', 'slackclient', 'smbclient', 'snowflake', 'sshtunnel', 'tenacity', 'vertica_python', 'winrm', 'zdesk', ] # Hack to allow changing for piece of the code to behave differently while # the docs are being built. The main objective was to alter the # behavior of the utils.apply_default that was hiding function headers os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE' # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.join(os.path.dirname(__file__), 'exts')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinxarg.ext', 'sphinxcontrib.httpdomain', 'sphinx.ext.intersphinx', 'autoapi.extension', 'exampleinclude', 'docroles' ] autodoc_default_options = { 'show-inheritance': True, 'members': True } viewcode_follow_imported_members = True # Add any paths that contain templates here, relative to this directory. templates_path = ['templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. 
project = 'Airflow' # copyright = '' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. # version = '1.0.0' version = airflow.__version__ # The full version, including alpha/beta/rc tags. # release = '1.0.0' release = airflow.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [ '_api/airflow/_vendor', '_api/airflow/api', '_api/airflow/bin', '_api/airflow/config_templates', '_api/airflow/configuration', '_api/airflow/contrib/auth', '_api/airflow/contrib/example_dags', '_api/airflow/contrib/index.rst', '_api/airflow/contrib/kubernetes', '_api/airflow/contrib/task_runner', '_api/airflow/contrib/utils', '_api/airflow/dag', '_api/airflow/default_login', '_api/airflow/example_dags', '_api/airflow/exceptions', '_api/airflow/index.rst', '_api/airflow/jobs', '_api/airflow/lineage', '_api/airflow/logging_config', '_api/airflow/macros', '_api/airflow/migrations', '_api/airflow/plugins_manager', '_api/airflow/security', '_api/airflow/settings', '_api/airflow/stats', '_api/airflow/task', '_api/airflow/ti_deps', '_api/airflow/utils', '_api/airflow/version', '_api/airflow/www', '_api/main', 'autoapi_templates', 'howto/operator/gcp/_partials', ] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. keep_warnings = True intersphinx_mapping = { 'boto3': ('https://boto3.amazonaws.com/v1/documentation/api/latest/', None), 'google-cloud-python': ( 'https://googleapis.github.io/google-cloud-python/latest/', None), 'mongodb': ('https://api.mongodb.com/python/current/', None), 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None), 'python': ('https://docs.python.org/3/', None), 'requests': ('http://docs.python-requests.org/en/master/', None), 'sqlalchemy': ('https://docs.sqlalchemy.org/en/latest/', None), 'hdfs': ('https://hdfscli.readthedocs.io/en/latest/', None), } # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. 
# html_theme_path = [] import sphinx_rtd_theme html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = "Airflow Documentation" # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = "" # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = False # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Airflowdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'Airflow.tex', 'Airflow Documentation', 'Apache Airflow', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. 
# latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'airflow', 'Airflow Documentation', ['Apache Airflow'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [( 'index', 'Airflow', 'Airflow Documentation', 'Apache Airflow', 'Airflow', 'Airflow is a system to programmaticaly author, schedule and monitor data pipelines.', 'Miscellaneous' ), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # sphinx-autoapi configuration # See: # https://sphinx-autoapi.readthedocs.io/en/latest/config.html # Paths (relative or absolute) to the source code that you wish to generate # your API documentation from. autoapi_dirs = [ os.path.abspath('../airflow'), ] # A directory that has user-defined templates to override our default templates. autoapi_template_dir = 'autoapi_templates' # A list of patterns to ignore when finding files autoapi_ignore = [ # These modules are backcompat shims, don't build docs for them '*/airflow/contrib/operators/s3_to_gcs_transfer_operator.py', '*/airflow/contrib/operators/gcs_to_gcs_transfer_operator.py', '*/airflow/contrib/operators/gcs_to_gcs_transfer_operator.py', '*/node_modules/*', '*/migrations/*', ] # Keep the AutoAPI generated files on the filesystem after the run. # Useful for debugging. autoapi_keep_files = False # Relative path to output the AutoAPI files into. This can also be used to place the generated documentation # anywhere in your documentation hierarchy. autoapi_root = '_api' # -- Options for example include ------------------------------------------ exampleinclude_sourceroot = os.path.abspath('..')
[]
[]
[ "BUILDING_AIRFLOW_DOCS" ]
[]
["BUILDING_AIRFLOW_DOCS"]
python
1
0
providers/shopify/session.go
package shopify import ( "crypto/hmac" "crypto/sha256" "encoding/hex" "encoding/json" "errors" "fmt" "os" "regexp" "strings" "time" "github.com/a93h/goth" ) const ( shopifyHostnameRegex = `^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$` ) // Session stores data during the auth process with Shopify. type Session struct { AuthURL string AccessToken string Hostname string HMAC string ExpiresAt time.Time } var _ goth.Session = &Session{} // GetAuthURL will return the URL set by calling the `BeginAuth` function on the Shopify provider. func (s Session) GetAuthURL() (string, error) { if s.AuthURL == "" { return "", errors.New(goth.NoAuthUrlErrorMessage) } return s.AuthURL, nil } // Authorize the session with Shopify and return the access token to be stored for future use. func (s *Session) Authorize(provider goth.Provider, params goth.Params) (string, error) { // Validate the incoming HMAC is valid. // See: https://help.shopify.com/en/api/getting-started/authentication/oauth#verification digest := fmt.Sprintf( "code=%s&shop=%s&state=%s&timestamp=%s", params.Get("code"), params.Get("shop"), params.Get("state"), params.Get("timestamp"), ) h := hmac.New(sha256.New, []byte(os.Getenv("SHOPIFY_SECRET"))) h.Write([]byte(digest)) sha := hex.EncodeToString(h.Sum(nil)) // Ensure our HMAC hash's match. if sha != params.Get("hmac") { return "", errors.New("Invalid HMAC received") } // Validate the hostname matches what we're expecting. // See: https://help.shopify.com/en/api/getting-started/authentication/oauth#step-3-confirm-installation re := regexp.MustCompile(shopifyHostnameRegex) if !re.MatchString(params.Get("shop")) { return "", errors.New("Invalid hostname received") } // Make the exchange for an access token. p := provider.(*Provider) token, err := p.config.Exchange(goth.ContextForClient(p.Client()), params.Get("code")) if err != nil { return "", err } // Ensure it's valid. if !token.Valid() { return "", errors.New("Invalid token received from provider") } s.AccessToken = token.AccessToken s.Hostname = params.Get("hostname") s.HMAC = params.Get("hmac") return token.AccessToken, err } // Marshal the session into a string func (s Session) Marshal() string { b, _ := json.Marshal(s) return string(b) } func (s Session) String() string { return s.Marshal() } // UnmarshalSession wil unmarshal a JSON string into a session. func (p *Provider) UnmarshalSession(data string) (goth.Session, error) { s := &Session{} err := json.NewDecoder(strings.NewReader(data)).Decode(s) return s, err }
[ "\"SHOPIFY_SECRET\"" ]
[]
[ "SHOPIFY_SECRET" ]
[]
["SHOPIFY_SECRET"]
go
1
0
src/java_tools/junitrunner/java/com/google/testing/junit/runner/junit4/JUnit4InstanceModules.java
// Copyright 2012 The Bazel Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.testing.junit.runner.junit4; import java.util.Arrays; import java.util.List; import javax.inject.Singleton; /** * Dagger modules which hold state or are, for testing purposes, implemented with non-static * provider methods. These types are collected here so they can be cleanly named in the * component builder, but still be obvious in module includes and component declarations. */ public final class JUnit4InstanceModules { /** * A stateful dagger module that holds the supplied test suite class. */ public static final class SuiteClass { private final Class<?> suiteClass; public SuiteClass(Class<?> suiteClass) { this.suiteClass = suiteClass; } @TopLevelSuite Class<?> topLevelSuite() { return suiteClass; } @TopLevelSuite static String topLevelSuiteName(@TopLevelSuite Class<?> suite) { return suite.getCanonicalName(); } } /** * A module which supplies a JUnit4Config object, which can be overridden at test-time. */ public static final class Config { private final List<String> args; /** * Creates a module that can provide a {@link JUnit4Config} from supplied command-line * arguments */ public Config(String... args) { this.args = Arrays.asList(args); } @Singleton JUnit4Options options() { return JUnit4Options.parse(System.getenv(), args); } @Singleton static JUnit4Config config(JUnit4Options options) { return new JUnit4Config(options.getTestIncludeFilter(), options.getTestExcludeFilter()); } } private JUnit4InstanceModules() {} }
[]
[]
[]
[]
[]
java
0
0
src/cmd/dist/test.go
// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "bytes" "errors" "flag" "fmt" "io/ioutil" "log" "os" "os/exec" "path/filepath" "reflect" "regexp" "runtime" "strconv" "strings" "sync" "time" ) func cmdtest() { gogcflags = os.Getenv("GO_GCFLAGS") var t tester var noRebuild bool flag.BoolVar(&t.listMode, "list", false, "list available tests") flag.BoolVar(&t.rebuild, "rebuild", false, "rebuild everything first") flag.BoolVar(&noRebuild, "no-rebuild", false, "overrides -rebuild (historical dreg)") flag.BoolVar(&t.keepGoing, "k", false, "keep going even when error occurred") flag.BoolVar(&t.race, "race", false, "run in race builder mode (different set of tests)") flag.BoolVar(&t.compileOnly, "compile-only", false, "compile tests, but don't run them. This is for some builders. Not all dist tests respect this flag, but most do.") flag.StringVar(&t.banner, "banner", "##### ", "banner prefix; blank means no section banners") flag.StringVar(&t.runRxStr, "run", os.Getenv("GOTESTONLY"), "run only those tests matching the regular expression; empty means to run all. "+ "Special exception: if the string begins with '!', the match is inverted.") xflagparse(-1) // any number of args if noRebuild { t.rebuild = false } t.run() } // tester executes cmdtest. type tester struct { race bool listMode bool rebuild bool failed bool keepGoing bool compileOnly bool // just try to compile all tests, but no need to run runRxStr string runRx *regexp.Regexp runRxWant bool // want runRx to match (true) or not match (false) runNames []string // tests to run, exclusive with runRx; empty means all banner string // prefix, or "" for none lastHeading string // last dir heading printed cgoEnabled bool partial bool haveTime bool // the 'time' binary is available tests []distTest timeoutScale int worklist []*work } type work struct { dt *distTest cmd *exec.Cmd start chan bool out []byte err error end chan bool } // A distTest is a test run by dist test. // Each test has a unique name and belongs to a group (heading) type distTest struct { name string // unique test name; may be filtered with -run flag heading string // group section; this header is printed before the test is run. fn func(*distTest) error } func (t *tester) run() { timelog("start", "dist test") var exeSuffix string if goos == "windows" { exeSuffix = ".exe" } if _, err := os.Stat(filepath.Join(gobin, "go"+exeSuffix)); err == nil { os.Setenv("PATH", fmt.Sprintf("%s%c%s", gobin, os.PathListSeparator, os.Getenv("PATH"))) } slurp, err := exec.Command("go", "env", "CGO_ENABLED").Output() if err != nil { log.Fatalf("Error running go env CGO_ENABLED: %v", err) } t.cgoEnabled, _ = strconv.ParseBool(strings.TrimSpace(string(slurp))) if flag.NArg() > 0 && t.runRxStr != "" { log.Fatalf("the -run regular expression flag is mutually exclusive with test name arguments") } t.runNames = flag.Args() if t.hasBash() { if _, err := exec.LookPath("time"); err == nil { t.haveTime = true } } if t.rebuild { t.out("Building packages and commands.") // Force rebuild the whole toolchain. goInstall("go", append([]string{"-a", "-i"}, toolchain...)...) } // Complete rebuild bootstrap, even with -no-rebuild. // If everything is up-to-date, this is a no-op. // If everything is not up-to-date, the first checkNotStale // during the test process will kill the tests, so we might // as well install the world. 
// Now that for example "go install cmd/compile" does not // also install runtime (you need "go install -i cmd/compile" // for that), it's easy for previous workflows like // "rebuild the compiler and then run run.bash" // to break if we don't automatically refresh things here. // Rebuilding is a shortened bootstrap. // See cmdbootstrap for a description of the overall process. // // But don't do this if we're running in the Go build system, // where cmd/dist is invoked many times. This just slows that // down (Issue 24300). if !t.listMode && os.Getenv("GO_BUILDER_NAME") == "" { goInstall("go", append([]string{"-i"}, toolchain...)...) goInstall("go", append([]string{"-i"}, toolchain...)...) goInstall("go", "std", "cmd") checkNotStale("go", "std", "cmd") } t.timeoutScale = 1 switch goarch { case "arm": t.timeoutScale = 2 case "mips", "mipsle", "mips64", "mips64le": t.timeoutScale = 4 } if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" { t.timeoutScale, err = strconv.Atoi(s) if err != nil { log.Fatalf("failed to parse $GO_TEST_TIMEOUT_SCALE = %q as integer: %v", s, err) } } if t.runRxStr != "" { if t.runRxStr[0] == '!' { t.runRxWant = false t.runRxStr = t.runRxStr[1:] } else { t.runRxWant = true } t.runRx = regexp.MustCompile(t.runRxStr) } t.registerTests() if t.listMode { for _, tt := range t.tests { fmt.Println(tt.name) } return } // We must unset GOROOT_FINAL before tests, because runtime/debug requires // correct access to source code, so if we have GOROOT_FINAL in effect, // at least runtime/debug test will fail. // If GOROOT_FINAL was set before, then now all the commands will appear stale. // Nothing we can do about that other than not checking them below. // (We call checkNotStale but only with "std" not "cmd".) os.Setenv("GOROOT_FINAL_OLD", os.Getenv("GOROOT_FINAL")) // for cmd/link test os.Unsetenv("GOROOT_FINAL") for _, name := range t.runNames { if !t.isRegisteredTestName(name) { log.Fatalf("unknown test %q", name) } } for _, dt := range t.tests { if !t.shouldRunTest(dt.name) { t.partial = true continue } dt := dt // dt used in background after this iteration if err := dt.fn(&dt); err != nil { t.runPending(&dt) // in case that hasn't been done yet t.failed = true if t.keepGoing { log.Printf("Failed: %v", err) } else { log.Fatalf("Failed: %v", err) } } } t.runPending(nil) timelog("end", "dist test") if t.failed { fmt.Println("\nFAILED") os.Exit(1) } else if t.partial { fmt.Println("\nALL TESTS PASSED (some were excluded)") } else { fmt.Println("\nALL TESTS PASSED") } } func (t *tester) shouldRunTest(name string) bool { if t.runRx != nil { return t.runRx.MatchString(name) == t.runRxWant } if len(t.runNames) == 0 { return true } for _, runName := range t.runNames { if runName == name { return true } } return false } // short returns a -short flag to pass to 'go test'. // It returns "-short", unless the environment variable // GO_TEST_SHORT is set to a non-empty, false-ish string. // // This environment variable is meant to be an internal // detail between the Go build system and cmd/dist // and is not intended for use by users. func short() string { if v := os.Getenv("GO_TEST_SHORT"); v != "" { short, err := strconv.ParseBool(v) if err != nil { log.Fatalf("invalid GO_TEST_SHORT %q: %v", v, err) } if !short { return "-short=false" } } return "-short" } // goTest returns the beginning of the go test command line. // Callers should use goTest and then pass flags overriding these // defaults as later arguments in the command line. 
func (t *tester) goTest() []string { return []string{ "go", "test", short(), "-count=1", t.tags(), t.runFlag(""), } } func (t *tester) tags() string { if t.iOS() { return "-tags=lldb" } return "-tags=" } func (t *tester) timeout(sec int) string { return "-timeout=" + fmt.Sprint(time.Duration(sec)*time.Second*time.Duration(t.timeoutScale)) } // ranGoTest and stdMatches are state closed over by the stdlib // testing func in registerStdTest below. The tests are run // sequentially, so there's no need for locks. // // ranGoBench and benchMatches are the same, but are only used // in -race mode. var ( ranGoTest bool stdMatches []string ranGoBench bool benchMatches []string ) func (t *tester) registerStdTest(pkg string) { testName := "go_test:" + pkg if t.runRx == nil || t.runRx.MatchString(testName) == t.runRxWant { stdMatches = append(stdMatches, pkg) } t.tests = append(t.tests, distTest{ name: testName, heading: "Testing packages.", fn: func(dt *distTest) error { if ranGoTest { return nil } t.runPending(dt) timelog("start", dt.name) defer timelog("end", dt.name) ranGoTest = true timeoutSec := 180 for _, pkg := range stdMatches { if pkg == "cmd/go" { timeoutSec *= 2 break } } args := []string{ "test", short(), t.tags(), t.timeout(timeoutSec), "-gcflags=all=" + gogcflags, } if t.race { args = append(args, "-race") } if t.compileOnly { args = append(args, "-run=^$") } args = append(args, stdMatches...) cmd := exec.Command("go", args...) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr return cmd.Run() }, }) } func (t *tester) registerRaceBenchTest(pkg string) { testName := "go_test_bench:" + pkg if t.runRx == nil || t.runRx.MatchString(testName) == t.runRxWant { benchMatches = append(benchMatches, pkg) } t.tests = append(t.tests, distTest{ name: testName, heading: "Running benchmarks briefly.", fn: func(dt *distTest) error { if ranGoBench { return nil } t.runPending(dt) timelog("start", dt.name) defer timelog("end", dt.name) ranGoBench = true args := []string{ "test", short(), "-race", t.timeout(1200), // longer timeout for race with benchmarks "-run=^$", // nothing. only benchmarks. "-benchtime=.1s", "-cpu=4", } if !t.compileOnly { args = append(args, "-bench=.*") } args = append(args, benchMatches...) cmd := exec.Command("go", args...) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr return cmd.Run() }, }) } // stdOutErrAreTerminals is defined in test_linux.go, to report // whether stdout & stderr are terminals. var stdOutErrAreTerminals func() bool func (t *tester) registerTests() { if strings.HasSuffix(os.Getenv("GO_BUILDER_NAME"), "-vetall") { // Run vet over std and cmd and call it quits. for k := range cgoEnabled { osarch := k t.tests = append(t.tests, distTest{ name: "vet/" + osarch, heading: "cmd/vet/all", fn: func(dt *distTest) error { t.addCmd(dt, "src/cmd/vet/all", "go", "run", "main.go", "-p="+osarch) return nil }, }) } return } // Fast path to avoid the ~1 second of `go list std cmd` when // the caller lists specific tests to run. (as the continuous // build coordinator does). if len(t.runNames) > 0 { for _, name := range t.runNames { if strings.HasPrefix(name, "go_test:") { t.registerStdTest(strings.TrimPrefix(name, "go_test:")) } if strings.HasPrefix(name, "go_test_bench:") { t.registerRaceBenchTest(strings.TrimPrefix(name, "go_test_bench:")) } } } else { // Use a format string to only list packages and commands that have tests. 
const format = "{{if (or .TestGoFiles .XTestGoFiles)}}{{.ImportPath}}{{end}}" cmd := exec.Command("go", "list", "-f", format) if t.race { cmd.Args = append(cmd.Args, "-tags=race") } cmd.Args = append(cmd.Args, "std") if !t.race { cmd.Args = append(cmd.Args, "cmd") } all, err := cmd.Output() if err != nil { log.Fatalf("Error running go list std cmd: %v, %s", err, all) } pkgs := strings.Fields(string(all)) for _, pkg := range pkgs { t.registerStdTest(pkg) } if t.race { for _, pkg := range pkgs { if t.packageHasBenchmarks(pkg) { t.registerRaceBenchTest(pkg) } } } } // Test the os/user package in the pure-Go mode too. if !t.compileOnly { t.tests = append(t.tests, distTest{ name: "osusergo", heading: "os/user with tag osusergo", fn: func(dt *distTest) error { t.addCmd(dt, "src", t.goTest(), t.timeout(300), "-tags=osusergo", "os/user") return nil }, }) } if t.race { return } // Runtime CPU tests. if !t.compileOnly { testName := "runtime:cpu124" t.tests = append(t.tests, distTest{ name: testName, heading: "GOMAXPROCS=2 runtime -cpu=1,2,4 -quick", fn: func(dt *distTest) error { cmd := t.addCmd(dt, "src", t.goTest(), t.timeout(300), "runtime", "-cpu=1,2,4", "-quick") // We set GOMAXPROCS=2 in addition to -cpu=1,2,4 in order to test runtime bootstrap code, // creation of first goroutines and first garbage collections in the parallel setting. cmd.Env = append(os.Environ(), "GOMAXPROCS=2") return nil }, }) } // This test needs its stdout/stderr to be terminals, so we don't run it from cmd/go's tests. // See issue 18153. if goos == "linux" { t.tests = append(t.tests, distTest{ name: "cmd_go_test_terminal", heading: "cmd/go terminal test", fn: func(dt *distTest) error { t.runPending(dt) timelog("start", dt.name) defer timelog("end", dt.name) if !stdOutErrAreTerminals() { fmt.Println("skipping terminal test; stdout/stderr not terminals") return nil } cmd := exec.Command("go", "test") cmd.Dir = filepath.Join(os.Getenv("GOROOT"), "src/cmd/go/testdata/testterminal18153") cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr return cmd.Run() }, }) } // On the builders only, test that a moved GOROOT still works. // Fails on iOS because CC_FOR_TARGET refers to clangwrap.sh // in the unmoved GOROOT. // Fails on Android with an exec format error. // Fails on plan9 with "cannot find GOROOT" (issue #21016). if os.Getenv("GO_BUILDER_NAME") != "" && goos != "android" && !t.iOS() && goos != "plan9" { t.tests = append(t.tests, distTest{ name: "moved_goroot", heading: "moved GOROOT", fn: func(dt *distTest) error { t.runPending(dt) timelog("start", dt.name) defer timelog("end", dt.name) moved := goroot + "-moved" if err := os.Rename(goroot, moved); err != nil { if goos == "windows" { // Fails on Windows (with "Access is denied") if a process // or binary is in this directory. For instance, using all.bat // when run from c:\workdir\go\src fails here // if GO_BUILDER_NAME is set. Our builders invoke tests // a different way which happens to work when sharding // tests, but we should be tolerant of the non-sharded // all.bat case. log.Printf("skipping test on Windows") return nil } return err } // Run `go test fmt` in the moved GOROOT. // Disable GOCACHE because it points back at the old GOROOT. cmd := exec.Command(filepath.Join(moved, "bin", "go"), "test", "fmt") cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr // Don't set GOROOT in the environment. 
for _, e := range os.Environ() { if !strings.HasPrefix(e, "GOROOT=") && !strings.HasPrefix(e, "GOCACHE=") { cmd.Env = append(cmd.Env, e) } } cmd.Env = append(cmd.Env, "GOCACHE=off") err := cmd.Run() if rerr := os.Rename(moved, goroot); rerr != nil { log.Fatalf("failed to restore GOROOT: %v", rerr) } return err }, }) } // Test that internal linking of standard packages does not // require libgcc. This ensures that we can install a Go // release on a system that does not have a C compiler // installed and still build Go programs (that don't use cgo). for _, pkg := range cgoPackages { if !t.internalLink() { break } // ARM libgcc may be Thumb, which internal linking does not support. if goarch == "arm" { break } pkg := pkg var run string if pkg == "net" { run = "TestTCPStress" } t.tests = append(t.tests, distTest{ name: "nolibgcc:" + pkg, heading: "Testing without libgcc.", fn: func(dt *distTest) error { t.addCmd(dt, "src", t.goTest(), "-ldflags=-linkmode=internal -libgcc=none", pkg, t.runFlag(run)) return nil }, }) } // Test internal linking of PIE binaries where it is supported. if goos == "linux" && goarch == "amd64" && !isAlpineLinux() { // Issue 18243: We don't have a way to set the default // dynamic linker used in internal linking mode. So // this test is skipped on Alpine. t.tests = append(t.tests, distTest{ name: "pie_internal", heading: "internal linking of -buildmode=pie", fn: func(dt *distTest) error { t.addCmd(dt, "src", t.goTest(), "reflect", "-buildmode=pie", "-ldflags=-linkmode=internal", t.timeout(60)) return nil }, }) } // sync tests t.tests = append(t.tests, distTest{ name: "sync_cpu", heading: "sync -cpu=10", fn: func(dt *distTest) error { t.addCmd(dt, "src", t.goTest(), "sync", t.timeout(120), "-cpu=10", t.runFlag("")) return nil }, }) if t.raceDetectorSupported() { t.tests = append(t.tests, distTest{ name: "race", heading: "Testing race detector", fn: t.raceTest, }) } if t.cgoEnabled && !t.iOS() { // Disabled on iOS. 
golang.org/issue/15919 t.tests = append(t.tests, distTest{ name: "cgo_stdio", heading: "../misc/cgo/stdio", fn: func(dt *distTest) error { t.addCmd(dt, "misc/cgo/stdio", "go", "run", filepath.Join(os.Getenv("GOROOT"), "test/run.go"), "-", ".") return nil }, }) t.tests = append(t.tests, distTest{ name: "cgo_life", heading: "../misc/cgo/life", fn: func(dt *distTest) error { t.addCmd(dt, "misc/cgo/life", "go", "run", filepath.Join(os.Getenv("GOROOT"), "test/run.go"), "-", ".") return nil }, }) fortran := os.Getenv("FC") if fortran == "" { fortran, _ = exec.LookPath("gfortran") } if t.hasBash() && fortran != "" { t.tests = append(t.tests, distTest{ name: "cgo_fortran", heading: "../misc/cgo/fortran", fn: func(dt *distTest) error { t.addCmd(dt, "misc/cgo/fortran", "./test.bash", fortran) return nil }, }) } if t.hasSwig() && goos != "android" { t.tests = append(t.tests, distTest{ name: "swig_stdio", heading: "../misc/swig/stdio", fn: func(dt *distTest) error { t.addCmd(dt, "misc/swig/stdio", t.goTest()) return nil }, }) if cxx, _ := exec.LookPath(compilerEnvLookup(defaultcxx, goos, goarch)); cxx != "" { t.tests = append(t.tests, distTest{ name: "swig_callback", heading: "../misc/swig/callback", fn: func(dt *distTest) error { t.addCmd(dt, "misc/swig/callback", t.goTest()) return nil }, }) } } } if t.cgoEnabled { t.tests = append(t.tests, distTest{ name: "cgo_test", heading: "../misc/cgo/test", fn: t.cgoTest, }) } if t.hasBash() && t.cgoEnabled && goos != "android" && goos != "darwin" { t.registerTest("testgodefs", "../misc/cgo/testgodefs", "./test.bash") } // Don't run these tests with $GO_GCFLAGS because most of them // assume that they can run "go install" with no -gcflags and not // recompile the entire standard library. If make.bash ran with // special -gcflags, that's not true. if t.cgoEnabled && gogcflags == "" { if t.cgoTestSOSupported() { t.tests = append(t.tests, distTest{ name: "testso", heading: "../misc/cgo/testso", fn: func(dt *distTest) error { return t.cgoTestSO(dt, "misc/cgo/testso") }, }) t.tests = append(t.tests, distTest{ name: "testsovar", heading: "../misc/cgo/testsovar", fn: func(dt *distTest) error { return t.cgoTestSO(dt, "misc/cgo/testsovar") }, }) } if t.supportedBuildmode("c-archive") { t.registerHostTest("testcarchive", "../misc/cgo/testcarchive", "misc/cgo/testcarchive", "carchive_test.go") } if t.supportedBuildmode("c-shared") { t.registerHostTest("testcshared", "../misc/cgo/testcshared", "misc/cgo/testcshared", "cshared_test.go") } if t.supportedBuildmode("shared") { t.registerTest("testshared", "../misc/cgo/testshared", t.goTest(), t.timeout(600)) } if t.supportedBuildmode("plugin") { t.registerTest("testplugin", "../misc/cgo/testplugin", "./test.bash") } if gohostos == "linux" && goarch == "amd64" { t.registerTest("testasan", "../misc/cgo/testasan", "go", "run", "main.go") } if goos == "linux" && (goarch == "amd64" || goarch == "arm64") { t.registerHostTest("testsanitizers/msan", "../misc/cgo/testsanitizers", "misc/cgo/testsanitizers", ".") } if t.hasBash() && goos != "android" && !t.iOS() && gohostos != "windows" { t.registerHostTest("cgo_errors", "../misc/cgo/errors", "misc/cgo/errors", ".") } if gohostos == "linux" && t.extLink() { t.registerTest("testsigfwd", "../misc/cgo/testsigfwd", "go", "run", "main.go") } } // Doc tests only run on builders. // They find problems approximately never. 
if t.hasBash() && goos != "nacl" && goos != "android" && !t.iOS() && os.Getenv("GO_BUILDER_NAME") != "" { t.registerTest("doc_progs", "../doc/progs", "time", "go", "run", "run.go") t.registerTest("wiki", "../doc/articles/wiki", "./test.bash") t.registerTest("codewalk", "../doc/codewalk", "time", "./run") } if goos != "android" && !t.iOS() { t.registerTest("bench_go1", "../test/bench/go1", t.goTest(), t.timeout(600)) } if goos != "android" && !t.iOS() { // Only start multiple test dir shards on builders, // where they get distributed to multiple machines. // See issue 20141. nShards := 1 if os.Getenv("GO_BUILDER_NAME") != "" { nShards = 10 } for shard := 0; shard < nShards; shard++ { shard := shard t.tests = append(t.tests, distTest{ name: fmt.Sprintf("test:%d_%d", shard, nShards), heading: "../test", fn: func(dt *distTest) error { return t.testDirTest(dt, shard, nShards) }, }) } } if goos != "nacl" && goos != "android" && !t.iOS() { t.tests = append(t.tests, distTest{ name: "api", heading: "API check", fn: func(dt *distTest) error { if t.compileOnly { t.addCmd(dt, "src", "go", "build", filepath.Join(goroot, "src/cmd/api/run.go")) return nil } t.addCmd(dt, "src", "go", "run", filepath.Join(goroot, "src/cmd/api/run.go")) return nil }, }) } } // isRegisteredTestName reports whether a test named testName has already // been registered. func (t *tester) isRegisteredTestName(testName string) bool { for _, tt := range t.tests { if tt.name == testName { return true } } return false } func (t *tester) registerTest1(seq bool, name, dirBanner string, cmdline ...interface{}) { bin, args := flattenCmdline(cmdline) if bin == "time" && !t.haveTime { bin, args = args[0], args[1:] } if t.isRegisteredTestName(name) { panic("duplicate registered test name " + name) } t.tests = append(t.tests, distTest{ name: name, heading: dirBanner, fn: func(dt *distTest) error { if seq { t.runPending(dt) timelog("start", name) defer timelog("end", name) return t.dirCmd(filepath.Join(goroot, "src", dirBanner), bin, args).Run() } t.addCmd(dt, filepath.Join(goroot, "src", dirBanner), bin, args) return nil }, }) } func (t *tester) registerTest(name, dirBanner string, cmdline ...interface{}) { t.registerTest1(false, name, dirBanner, cmdline...) } func (t *tester) registerSeqTest(name, dirBanner string, cmdline ...interface{}) { t.registerTest1(true, name, dirBanner, cmdline...) } func (t *tester) bgDirCmd(dir, bin string, args ...string) *exec.Cmd { cmd := exec.Command(bin, args...) if filepath.IsAbs(dir) { cmd.Dir = dir } else { cmd.Dir = filepath.Join(goroot, dir) } return cmd } func (t *tester) dirCmd(dir string, cmdline ...interface{}) *exec.Cmd { bin, args := flattenCmdline(cmdline) cmd := t.bgDirCmd(dir, bin, args...) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if vflag > 1 { errprintf("%s\n", strings.Join(cmd.Args, " ")) } return cmd } // flattenCmdline flattens a mixture of string and []string as single list // and then interprets it as a command line: first element is binary, then args. func flattenCmdline(cmdline []interface{}) (bin string, args []string) { var list []string for _, x := range cmdline { switch x := x.(type) { case string: list = append(list, x) case []string: list = append(list, x...) default: panic("invalid addCmd argument type: " + reflect.TypeOf(x).String()) } } // The go command is too picky about duplicated flags. // Drop all but the last of the allowed duplicated flags. 
drop := make([]bool, len(list)) have := map[string]int{} for i := 1; i < len(list); i++ { j := strings.Index(list[i], "=") if j < 0 { continue } flag := list[i][:j] switch flag { case "-run", "-tags": if have[flag] != 0 { drop[have[flag]] = true } have[flag] = i } } out := list[:0] for i, x := range list { if !drop[i] { out = append(out, x) } } list = out return list[0], list[1:] } func (t *tester) addCmd(dt *distTest, dir string, cmdline ...interface{}) *exec.Cmd { bin, args := flattenCmdline(cmdline) w := &work{ dt: dt, cmd: t.bgDirCmd(dir, bin, args...), } t.worklist = append(t.worklist, w) return w.cmd } func (t *tester) iOS() bool { return goos == "darwin" && (goarch == "arm" || goarch == "arm64") } func (t *tester) out(v string) { if t.banner == "" { return } fmt.Println("\n" + t.banner + v) } func (t *tester) extLink() bool { pair := gohostos + "-" + goarch switch pair { case "android-arm", "darwin-386", "darwin-amd64", "darwin-arm", "darwin-arm64", "dragonfly-amd64", "freebsd-386", "freebsd-amd64", "freebsd-arm", "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-mips64", "linux-mips64le", "linux-mips", "linux-mipsle", "linux-s390x", "netbsd-386", "netbsd-amd64", "openbsd-386", "openbsd-amd64", "windows-386", "windows-amd64": return true } return false } func (t *tester) internalLink() bool { if gohostos == "dragonfly" { // linkmode=internal fails on dragonfly since errno is a TLS relocation. return false } if gohostarch == "ppc64le" { // linkmode=internal fails on ppc64le because cmd/link doesn't // handle the TOC correctly (issue 15409). return false } if goos == "android" { return false } if goos == "darwin" && (goarch == "arm" || goarch == "arm64") { return false } // Internally linking cgo is incomplete on some architectures. // https://golang.org/issue/10373 // https://golang.org/issue/14449 if goarch == "arm64" || goarch == "mips64" || goarch == "mips64le" || goarch == "mips" || goarch == "mipsle" { return false } if isAlpineLinux() { // Issue 18243. 
return false } return true } func (t *tester) supportedBuildmode(mode string) bool { pair := goos + "-" + goarch switch mode { case "c-archive": if !t.extLink() { return false } switch pair { case "darwin-386", "darwin-amd64", "darwin-arm", "darwin-arm64", "linux-amd64", "linux-386", "linux-ppc64le", "linux-s390x", "freebsd-amd64", "windows-amd64", "windows-386": return true } return false case "c-shared": switch pair { case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-s390x", "darwin-amd64", "darwin-386", "freebsd-amd64", "android-arm", "android-arm64", "android-386", "windows-amd64", "windows-386": return true } return false case "shared": switch pair { case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-s390x": return true } return false case "plugin": // linux-arm64 is missing because it causes the external linker // to crash, see https://golang.org/issue/17138 switch pair { case "linux-386", "linux-amd64", "linux-arm", "linux-s390x", "linux-ppc64le": return true case "darwin-amd64": return true } return false case "pie": switch pair { case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-s390x", "android-amd64", "android-arm", "android-arm64", "android-386": return true case "darwin-amd64": return true } return false default: log.Fatalf("internal error: unknown buildmode %s", mode) return false } } func (t *tester) registerHostTest(name, heading, dir, pkg string) { t.tests = append(t.tests, distTest{ name: name, heading: heading, fn: func(dt *distTest) error { t.runPending(dt) timelog("start", name) defer timelog("end", name) return t.runHostTest(dir, pkg) }, }) } func (t *tester) runHostTest(dir, pkg string) error { defer os.Remove(filepath.Join(goroot, dir, "test.test")) cmd := t.dirCmd(dir, t.goTest(), "-c", "-o", "test.test", pkg) cmd.Env = append(os.Environ(), "GOARCH="+gohostarch, "GOOS="+gohostos) if err := cmd.Run(); err != nil { return err } return t.dirCmd(dir, "./test.test").Run() } func (t *tester) cgoTest(dt *distTest) error { t.addCmd(dt, "misc/cgo/test", t.goTest(), "-ldflags", "-linkmode=auto") if t.internalLink() { t.addCmd(dt, "misc/cgo/test", t.goTest(), "-tags=internal", "-ldflags", "-linkmode=internal") } pair := gohostos + "-" + goarch switch pair { case "darwin-386", "darwin-amd64", "openbsd-386", "openbsd-amd64", "windows-386", "windows-amd64": // test linkmode=external, but __thread not supported, so skip testtls. if !t.extLink() { break } t.addCmd(dt, "misc/cgo/test", t.goTest(), "-ldflags", "-linkmode=external") t.addCmd(dt, "misc/cgo/test", t.goTest(), "-ldflags", "-linkmode=external -s") case "android-arm", "dragonfly-amd64", "freebsd-386", "freebsd-amd64", "freebsd-arm", "linux-386", "linux-amd64", "linux-arm", "linux-ppc64le", "linux-s390x", "netbsd-386", "netbsd-amd64": t.addCmd(dt, "misc/cgo/test", t.goTest(), "-ldflags", "-linkmode=external") t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-ldflags", "-linkmode=auto") t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-ldflags", "-linkmode=external") switch pair { case "netbsd-386", "netbsd-amd64": // no static linking case "freebsd-arm": // -fPIC compiled tls code will use __tls_get_addr instead // of __aeabi_read_tp, however, on FreeBSD/ARM, __tls_get_addr // is implemented in rtld-elf, so -fPIC isn't compatible with // static linking on FreeBSD/ARM with clang. (cgo depends on // -fPIC fundamentally.) 
default: cmd := t.dirCmd("misc/cgo/test", compilerEnvLookup(defaultcc, goos, goarch), "-xc", "-o", "/dev/null", "-static", "-") cmd.Stdin = strings.NewReader("int main() {}") if err := cmd.Run(); err != nil { fmt.Println("No support for static linking found (lacks libc.a?), skip cgo static linking test.") } else { if goos != "android" { t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-ldflags", `-linkmode=external -extldflags "-static -pthread"`) } t.addCmd(dt, "misc/cgo/nocgo", t.goTest()) t.addCmd(dt, "misc/cgo/nocgo", t.goTest(), "-ldflags", `-linkmode=external`) if goos != "android" { t.addCmd(dt, "misc/cgo/nocgo", t.goTest(), "-ldflags", `-linkmode=external -extldflags "-static -pthread"`) } } if t.supportedBuildmode("pie") { t.addCmd(dt, "misc/cgo/test", t.goTest(), "-buildmode=pie") t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-buildmode=pie") t.addCmd(dt, "misc/cgo/nocgo", t.goTest(), "-buildmode=pie") } } } return nil } // run pending test commands, in parallel, emitting headers as appropriate. // When finished, emit header for nextTest, which is going to run after the // pending commands are done (and runPending returns). // A test should call runPending if it wants to make sure that it is not // running in parallel with earlier tests, or if it has some other reason // for needing the earlier tests to be done. func (t *tester) runPending(nextTest *distTest) { checkNotStale("go", "std") worklist := t.worklist t.worklist = nil for _, w := range worklist { w.start = make(chan bool) w.end = make(chan bool) go func(w *work) { if !<-w.start { timelog("skip", w.dt.name) w.out = []byte(fmt.Sprintf("skipped due to earlier error\n")) } else { timelog("start", w.dt.name) w.out, w.err = w.cmd.CombinedOutput() } timelog("end", w.dt.name) w.end <- true }(w) } started := 0 ended := 0 var last *distTest for ended < len(worklist) { for started < len(worklist) && started-ended < maxbg { //println("start", started) w := worklist[started] started++ w.start <- !t.failed || t.keepGoing } w := worklist[ended] dt := w.dt if dt.heading != "" && t.lastHeading != dt.heading { t.lastHeading = dt.heading t.out(dt.heading) } if dt != last { // Assumes all the entries for a single dt are in one worklist. last = w.dt if vflag > 0 { fmt.Printf("# go tool dist test -run=^%s$\n", dt.name) } } if vflag > 1 { errprintf("%s\n", strings.Join(w.cmd.Args, " ")) } //println("wait", ended) ended++ <-w.end os.Stdout.Write(w.out) if w.err != nil { log.Printf("Failed: %v", w.err) t.failed = true } checkNotStale("go", "std") } if t.failed && !t.keepGoing { log.Fatal("FAILED") } if dt := nextTest; dt != nil { if dt.heading != "" && t.lastHeading != dt.heading { t.lastHeading = dt.heading t.out(dt.heading) } if vflag > 0 { fmt.Printf("# go tool dist test -run=^%s$\n", dt.name) } } } func (t *tester) cgoTestSOSupported() bool { if goos == "android" || t.iOS() { // No exec facility on Android or iOS. return false } if goarch == "ppc64" { // External linking not implemented on ppc64 (issue #8912). return false } if goarch == "mips64le" || goarch == "mips64" { // External linking not implemented on mips64. 
return false } return true } func (t *tester) cgoTestSO(dt *distTest, testpath string) error { t.runPending(dt) timelog("start", dt.name) defer timelog("end", dt.name) dir := filepath.Join(goroot, testpath) // build shared object output, err := exec.Command("go", "env", "CC").Output() if err != nil { return fmt.Errorf("Error running go env CC: %v", err) } cc := strings.TrimSuffix(string(output), "\n") if cc == "" { return errors.New("CC environment variable (go env CC) cannot be empty") } output, err = exec.Command("go", "env", "GOGCCFLAGS").Output() if err != nil { return fmt.Errorf("Error running go env GOGCCFLAGS: %v", err) } gogccflags := strings.Split(strings.TrimSuffix(string(output), "\n"), " ") ext := "so" args := append(gogccflags, "-shared") switch goos { case "darwin": ext = "dylib" args = append(args, "-undefined", "suppress", "-flat_namespace") case "windows": ext = "dll" args = append(args, "-DEXPORT_DLL") } sofname := "libcgosotest." + ext args = append(args, "-o", sofname, "cgoso_c.c") if err := t.dirCmd(dir, cc, args).Run(); err != nil { return err } defer os.Remove(filepath.Join(dir, sofname)) if err := t.dirCmd(dir, "go", "build", "-o", "main.exe", "main.go").Run(); err != nil { return err } defer os.Remove(filepath.Join(dir, "main.exe")) cmd := t.dirCmd(dir, "./main.exe") if goos != "windows" { s := "LD_LIBRARY_PATH" if goos == "darwin" { s = "DYLD_LIBRARY_PATH" } cmd.Env = append(os.Environ(), s+"=.") // On FreeBSD 64-bit architectures, the 32-bit linker looks for // different environment variables. if goos == "freebsd" && gohostarch == "386" { cmd.Env = append(cmd.Env, "LD_32_LIBRARY_PATH=.") } } return cmd.Run() } func (t *tester) hasBash() bool { switch gohostos { case "windows", "plan9": return false } return true } func (t *tester) hasSwig() bool { swig, err := exec.LookPath("swig") if err != nil { return false } // Check that swig was installed with Go support by checking // that a go directory exists inside the swiglib directory. // See https://golang.org/issue/23469. output, err := exec.Command(swig, "-go", "-swiglib").Output() if err != nil { return false } swigDir := strings.TrimSpace(string(output)) _, err = os.Stat(filepath.Join(swigDir, "go")) if err != nil { return false } // Check that swig has a new enough version. // See https://golang.org/issue/22858. out, err := exec.Command(swig, "-version").CombinedOutput() if err != nil { return false } re := regexp.MustCompile(`[vV]ersion +([\d]+)([.][\d]+)?([.][\d]+)?`) matches := re.FindSubmatch(out) if matches == nil { // Can't find version number; hope for the best. return true } major, err := strconv.Atoi(string(matches[1])) if err != nil { // Can't find version number; hope for the best. return true } if major < 3 { return false } if major > 3 { // 4.0 or later return true } // We have SWIG version 3.x. if len(matches[2]) > 0 { minor, err := strconv.Atoi(string(matches[2][1:])) if err != nil { return true } if minor > 0 { // 3.1 or later return true } } // We have SWIG version 3.0.x. if len(matches[3]) > 0 { patch, err := strconv.Atoi(string(matches[3][1:])) if err != nil { return true } if patch < 6 { // Before 3.0.6. 
return false } } return true } func (t *tester) raceDetectorSupported() bool { switch gohostos { case "linux", "darwin", "freebsd", "windows": // The race detector doesn't work on Alpine Linux: // golang.org/issue/14481 return t.cgoEnabled && (goarch == "amd64" || goarch == "ppc64le") && gohostos == goos && !isAlpineLinux() } return false } func isAlpineLinux() bool { if runtime.GOOS != "linux" { return false } fi, err := os.Lstat("/etc/alpine-release") return err == nil && fi.Mode().IsRegular() } func (t *tester) runFlag(rx string) string { if t.compileOnly { return "-run=^$" } return "-run=" + rx } func (t *tester) raceTest(dt *distTest) error { t.addCmd(dt, "src", t.goTest(), "-race", "-i", "runtime/race", "flag", "os", "os/exec") t.addCmd(dt, "src", t.goTest(), "-race", t.runFlag("Output"), "runtime/race") t.addCmd(dt, "src", t.goTest(), "-race", t.runFlag("TestParse|TestEcho|TestStdinCloseRace|TestClosedPipeRace|TestTypeRace"), "flag", "os", "os/exec", "encoding/gob") // We don't want the following line, because it // slows down all.bash (by 10 seconds on my laptop). // The race builder should catch any error here, but doesn't. // TODO(iant): Figure out how to catch this. // t.addCmd(dt, "src", t.goTest(), "-race", "-run=TestParallelTest", "cmd/go") if t.cgoEnabled { cmd := t.addCmd(dt, "misc/cgo/test", t.goTest(), "-race") cmd.Env = append(os.Environ(), "GOTRACEBACK=2") } if t.extLink() { // Test with external linking; see issue 9133. t.addCmd(dt, "src", t.goTest(), "-race", "-ldflags=-linkmode=external", t.runFlag("TestParse|TestEcho|TestStdinCloseRace"), "flag", "os/exec") } return nil } var runtest struct { sync.Once exe string err error } func (t *tester) testDirTest(dt *distTest, shard, shards int) error { runtest.Do(func() { const exe = "runtest.exe" // named exe for Windows, but harmless elsewhere cmd := t.dirCmd("test", "go", "build", "-o", exe, "run.go") cmd.Env = append(os.Environ(), "GOOS="+gohostos, "GOARCH="+gohostarch) runtest.exe = filepath.Join(cmd.Dir, exe) if err := cmd.Run(); err != nil { runtest.err = err return } xatexit(func() { os.Remove(runtest.exe) }) }) if runtest.err != nil { return runtest.err } if t.compileOnly { return nil } t.addCmd(dt, "test", runtest.exe, fmt.Sprintf("--shard=%d", shard), fmt.Sprintf("--shards=%d", shards), ) return nil } // cgoPackages is the standard packages that use cgo. var cgoPackages = []string{ "crypto/x509", "net", "os/user", } var funcBenchmark = []byte("\nfunc Benchmark") // packageHasBenchmarks reports whether pkg has benchmarks. // On any error, it conservatively returns true. // // This exists just to eliminate work on the builders, since compiling // a test in race mode just to discover it has no benchmarks costs a // second or two per package, and this function returns false for // about 100 packages. func (t *tester) packageHasBenchmarks(pkg string) bool { pkgDir := filepath.Join(goroot, "src", pkg) d, err := os.Open(pkgDir) if err != nil { return true // conservatively } defer d.Close() names, err := d.Readdirnames(-1) if err != nil { return true // conservatively } for _, name := range names { if !strings.HasSuffix(name, "_test.go") { continue } slurp, err := ioutil.ReadFile(filepath.Join(pkgDir, name)) if err != nil { return true // conservatively } if bytes.Contains(slurp, funcBenchmark) { return true } } return false }
[ "\"GO_GCFLAGS\"", "\"GOTESTONLY\"", "\"PATH\"", "\"GO_BUILDER_NAME\"", "\"GO_TEST_TIMEOUT_SCALE\"", "\"GOROOT_FINAL\"", "\"GO_TEST_SHORT\"", "\"GO_BUILDER_NAME\"", "\"GOROOT\"", "\"GO_BUILDER_NAME\"", "\"GOROOT\"", "\"GOROOT\"", "\"FC\"", "\"GO_BUILDER_NAME\"", "\"GO_BUILDER_NAME\"" ]
[]
[ "GO_GCFLAGS", "FC", "GOTESTONLY", "GO_BUILDER_NAME", "GO_TEST_TIMEOUT_SCALE", "GOROOT_FINAL", "GO_TEST_SHORT", "GOROOT", "PATH" ]
[]
["GO_GCFLAGS", "FC", "GOTESTONLY", "GO_BUILDER_NAME", "GO_TEST_TIMEOUT_SCALE", "GOROOT_FINAL", "GO_TEST_SHORT", "GOROOT", "PATH"]
go
9
0
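The test.go row above centers on flattenCmdline, which flattens mixed string/[]string arguments and then drops duplicated -run/-tags flags, keeping only the last occurrence. Below is a minimal standalone sketch of that dedup step only; dropAllButLast is a hypothetical helper name and the program is illustrative, not part of cmd/dist.

package main

import (
	"fmt"
	"strings"
)

// dropAllButLast keeps only the last occurrence of each flag listed in dedup,
// mirroring the duplicate-flag handling in flattenCmdline above (the go
// command rejects some duplicated flags, so earlier copies are discarded).
func dropAllButLast(list []string, dedup map[string]bool) []string {
	drop := make([]bool, len(list))
	have := map[string]int{} // flag name -> index of the most recent occurrence
	for i := 1; i < len(list); i++ {
		j := strings.Index(list[i], "=")
		if j < 0 {
			continue // not a -flag=value argument
		}
		flag := list[i][:j]
		if !dedup[flag] {
			continue
		}
		if prev, ok := have[flag]; ok {
			drop[prev] = true // an earlier duplicate loses to this one
		}
		have[flag] = i
	}
	var out []string
	for i, x := range list {
		if !drop[i] {
			out = append(out, x)
		}
	}
	return out
}

func main() {
	cmd := []string{"go", "test", "-run=TestA", "-tags=foo", "-run=TestB"}
	fmt.Println(dropAllButLast(cmd, map[string]bool{"-run": true, "-tags": true}))
	// Prints: [go test -tags=foo -run=TestB]
}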
evio_unix.go
// Copyright 2018 Joshua J Baker. All rights reserved. // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. // +build darwin netbsd freebsd openbsd dragonfly linux package evio import ( "fmt" "io" "net" "os" "runtime" "sync" "sync/atomic" "syscall" "time" reuseport "github.com/kavu/go_reuseport" "github.com/zhujintao/evio/internal" ) type conn struct { fd int // file descriptor lnidx int // listener index in the server lns list flidx string // client id flag out []byte // write buffer sa syscall.Sockaddr // remote socket address reuse bool // should reuse input buffer opened bool // connection opened event fired action Action // next user action ctx interface{} // user-defined context addrIndex int // index of listening address localAddr net.Addr // local addre remoteAddr net.Addr // remote addr loop *loop // connected loop } func (c *conn) Context() interface{} { return c.ctx } func (c *conn) SetContext(ctx interface{}) { c.ctx = ctx } func (c *conn) AddrIndex() int { return c.addrIndex } func (c *conn) LocalAddr() net.Addr { return c.localAddr } func (c *conn) RemoteAddr() net.Addr { return c.remoteAddr } func (c *conn) Wake() { if c.loop != nil { c.loop.poll.Trigger(c) } } type server struct { events Events // user events loops []*loop // all the loops lns []*listener // all the listeners wg sync.WaitGroup // loop close waitgroup cond *sync.Cond // shutdown signaler balance LoadBalance // load balancing method accepted uintptr // accept counter tch chan time.Duration // ticker channel clients map[string]*conn wx *sync.RWMutex //ticktm time.Time // next tick time } type loop struct { idx int // loop index in the server loops list poll *internal.Poll // epoll or kqueue packet []byte // read packet buffer fdconns map[int]*conn // loop connections fd -> conn count int32 // connection count } // waitForShutdown waits for a signal to shutdown func (s *server) waitForShutdown() { s.cond.L.Lock() s.cond.Wait() s.cond.L.Unlock() } // signalShutdown signals a shutdown an begins server closing func (s *server) signalShutdown() { s.cond.L.Lock() s.cond.Signal() s.cond.L.Unlock() } func serve(events Events, listeners []*listener) error { // figure out the correct number of loops/goroutines to use. numLoops := events.NumLoops if numLoops <= 0 { if numLoops == 0 { numLoops = 1 } else { numLoops = runtime.NumCPU() } } s := &server{} s.events = events s.lns = listeners s.cond = sync.NewCond(&sync.Mutex{}) s.balance = events.LoadBalance s.tch = make(chan time.Duration) s.clients = make(map[string]*conn) s.wx = &sync.RWMutex{} //s.events.Sender.MsgChan = make(chan *[]byte) //s.events.Sender.ToChan = make(chan *string) //println("-- server starting") if s.events.Serving != nil { var svr Server svr.NumLoops = numLoops svr.Addrs = make([]net.Addr, len(listeners)) for i, ln := range listeners { svr.Addrs[i] = ln.lnaddr } action := s.events.Serving(svr) switch action { case None: case Shutdown: return nil } } defer func() { // wait on a signal for shutdown s.waitForShutdown() // notify all loops to close by closing all listeners for _, l := range s.loops { l.poll.Trigger(errClosing) } // wait on all loops to complete reading events s.wg.Wait() // close loops and all outstanding connections for _, l := range s.loops { for _, c := range l.fdconns { loopCloseConn(s, l, c, nil) } l.poll.Close() } //println("-- server stopped") }() // create loops locally and bind the listeners. 
for i := 0; i < numLoops; i++ { l := &loop{ idx: i, poll: internal.OpenPoll(), packet: make([]byte, 0xFFFF), fdconns: make(map[int]*conn), } for _, ln := range listeners { l.poll.AddRead(ln.fd) } s.loops = append(s.loops, l) } // start loops in background s.wg.Add(len(s.loops)) for _, l := range s.loops { go loopRun(s, l) go loopSendConn(s, l) } return nil } func loopSendConn(s *server, l *loop) { for { flag := <-s.events.Sender.ToChan msg := <-s.events.Sender.MsgChan if *flag == "toall" { for _, l := range s.loops { for _, c := range l.fdconns { syscall.Write(c.fd, *msg) } } } else { s.wx.RLock() c, ok := s.clients[*flag] s.wx.RUnlock() if ok { syscall.Write(c.fd, *msg) } } } } func loopCloseConn(s *server, l *loop, c *conn, err error) error { atomic.AddInt32(&l.count, -1) delete(l.fdconns, c.fd) s.wx.Lock() delete(s.clients, c.flidx) s.wx.Unlock() syscall.Close(c.fd) if s.events.Closed != nil { switch s.events.Closed(c, c.flidx, err) { case None: case Shutdown: return errClosing } } return nil } func loopDetachConn(s *server, l *loop, c *conn, err error) error { if s.events.Detached == nil { return loopCloseConn(s, l, c, err) } l.poll.ModDetach(c.fd) atomic.AddInt32(&l.count, -1) delete(l.fdconns, c.fd) if err := syscall.SetNonblock(c.fd, false); err != nil { return err } switch s.events.Detached(c, &detachedConn{fd: c.fd}) { case None: case Shutdown: return errClosing } return nil } func loopNote(s *server, l *loop, note interface{}) error { var err error switch v := note.(type) { case time.Duration: delay, action := s.events.Tick() switch action { case None: case Shutdown: err = errClosing } s.tch <- delay case error: // shutdown err = v case *conn: // Wake called for connection if l.fdconns[v.fd] != v { return nil // ignore stale wakes } return loopWake(s, l, v) } return err } func loopRun(s *server, l *loop) { defer func() { //fmt.Println("-- loop stopped --", l.idx) s.signalShutdown() s.wg.Done() }() if l.idx == 0 && s.events.Tick != nil { go loopTicker(s, l) } //fmt.Println("-- loop started --", l.idx) l.poll.Wait(func(fd int, note interface{}) error { if fd == 0 { return loopNote(s, l, note) } c := l.fdconns[fd] switch { case c == nil: return loopAccept(s, l, fd) case !c.opened: return loopOpened(s, l, c) case len(c.out) > 0: return loopWrite(s, l, c) case c.action != None: return loopAction(s, l, c) default: return loopRead(s, l, c) } }) } func loopTicker(s *server, l *loop) { for { if err := l.poll.Trigger(time.Duration(0)); err != nil { break } time.Sleep(<-s.tch) } } func loopAccept(s *server, l *loop, fd int) error { for i, ln := range s.lns { if ln.fd == fd { if len(s.loops) > 1 { switch s.balance { case LeastConnections: n := atomic.LoadInt32(&l.count) for _, lp := range s.loops { if lp.idx != l.idx { if atomic.LoadInt32(&lp.count) < n { return nil // do not accept } } } case RoundRobin: idx := int(atomic.LoadUintptr(&s.accepted)) % len(s.loops) if idx != l.idx { return nil // do not accept } atomic.AddUintptr(&s.accepted, 1) } } if ln.pconn != nil { return loopUDPRead(s, l, i, fd) } nfd, sa, err := syscall.Accept(fd) if err != nil { if err == syscall.EAGAIN { return nil } return err } if err := syscall.SetNonblock(nfd, true); err != nil { return err } c := &conn{fd: nfd, sa: sa, lnidx: i, loop: l} l.fdconns[c.fd] = c l.poll.AddReadWrite(c.fd) atomic.AddInt32(&l.count, 1) break } } return nil } func loopUDPRead(s *server, l *loop, lnidx, fd int) error { n, sa, err := syscall.Recvfrom(fd, l.packet, 0) if err != nil || n == 0 { return nil } if s.events.Data != nil { var sa6 
syscall.SockaddrInet6 switch sa := sa.(type) { case *syscall.SockaddrInet4: sa6.ZoneId = 0 sa6.Port = sa.Port for i := 0; i < 12; i++ { sa6.Addr[i] = 0 } sa6.Addr[12] = sa.Addr[0] sa6.Addr[13] = sa.Addr[1] sa6.Addr[14] = sa.Addr[2] sa6.Addr[15] = sa.Addr[3] case *syscall.SockaddrInet6: sa6 = *sa } c := &conn{} c.addrIndex = lnidx c.localAddr = s.lns[lnidx].lnaddr c.remoteAddr = internal.SockaddrToAddr(&sa6) in := append([]byte{}, l.packet[:n]...) out, action := s.events.Data(c, in) if len(out) > 0 { if s.events.PreWrite != nil { s.events.PreWrite() } syscall.Sendto(fd, out, 0, sa) } switch action { case Shutdown: return errClosing } } return nil } func loopOpened(s *server, l *loop, c *conn) error { c.opened = true c.addrIndex = c.lnidx c.localAddr = s.lns[c.lnidx].lnaddr c.remoteAddr = internal.SockaddrToAddr(c.sa) if s.events.Opened != nil { out, opts, action := s.events.Opened(c) if len(out) > 0 { c.out = append([]byte{}, out...) } c.action = action c.reuse = opts.ReuseInputBuffer if opts.TCPKeepAlive > 0 { if _, ok := s.lns[c.lnidx].ln.(*net.TCPListener); ok { internal.SetKeepAlive(c.fd, int(opts.TCPKeepAlive/time.Second)) } } } if len(c.out) == 0 && c.action == None { l.poll.ModRead(c.fd) } return nil } func loopWrite(s *server, l *loop, c *conn) error { if s.events.PreWrite != nil { s.events.PreWrite() } n, err := syscall.Write(c.fd, c.out) if err != nil { if err == syscall.EAGAIN { return nil } return loopCloseConn(s, l, c, err) } if n == len(c.out) { c.out = nil } else { //fmt.Println(n, string(c.out)) c.out = c.out[n:] } if len(c.out) == 0 && c.action == None { l.poll.ModRead(c.fd) } return nil } func loopAction(s *server, l *loop, c *conn) error { switch c.action { default: c.action = None case Close: return loopCloseConn(s, l, c, nil) case Shutdown: return errClosing case Detach: return loopDetachConn(s, l, c, nil) } if len(c.out) == 0 && c.action == None { l.poll.ModRead(c.fd) } return nil } func loopWake(s *server, l *loop, c *conn) error { if s.events.Data == nil { return nil } out, action := s.events.Data(c, nil) c.action = action if len(out) > 0 { c.out = append([]byte{}, out...) } if len(c.out) != 0 || c.action != None { l.poll.ModReadWrite(c.fd) } return nil } func loopRead(s *server, l *loop, c *conn) error { var in []byte n, err := syscall.Read(c.fd, l.packet) if n == 0 || err != nil { if err == syscall.EAGAIN { return nil } return loopCloseConn(s, l, c, err) } in = l.packet[:n] if !c.reuse { in = append([]byte{}, in...) } if s.events.Make != nil { flag := s.events.Make(c, in) if flag != "" { s.wx.Lock() s.clients[flag] = c c.flidx = flag s.wx.Unlock() } } if s.events.Unpack != nil { ctx, flag, action := s.events.Unpack(c, in) c.action = action if flag != "" { s.wx.Lock() s.clients[flag] = c c.flidx = flag s.wx.Unlock() } if ctx != nil { s.events.Ctx <- &ctx } fmt.Println("[evio] s.clients MAP length Total: ", len(s.clients)) } if s.events.Data != nil { out, action := s.events.Data(c, in) c.action = action if len(out) > 0 { c.out = append([]byte{}, out...) 
} } if len(c.out) != 0 || c.action != None { l.poll.ModReadWrite(c.fd) } return nil } type detachedConn struct { fd int } func (c *detachedConn) Close() error { err := syscall.Close(c.fd) if err != nil { return err } c.fd = -1 return nil } func (c *detachedConn) Read(p []byte) (n int, err error) { n, err = syscall.Read(c.fd, p) if err != nil { return n, err } if n == 0 { if len(p) == 0 { return 0, nil } return 0, io.EOF } return n, nil } func (c *detachedConn) Write(p []byte) (n int, err error) { n = len(p) for len(p) > 0 { nn, err := syscall.Write(c.fd, p) if err != nil { return n, err } p = p[nn:] } return n, nil } func (ln *listener) close() { if ln.fd != 0 { syscall.Close(ln.fd) } if ln.f != nil { ln.f.Close() } if ln.ln != nil { ln.ln.Close() } if ln.pconn != nil { ln.pconn.Close() } if ln.network == "unix" { os.RemoveAll(ln.addr) } } // system takes the net listener and detaches it from it's parent // event loop, grabs the file descriptor, and makes it non-blocking. func (ln *listener) system() error { var err error switch netln := ln.ln.(type) { case nil: switch pconn := ln.pconn.(type) { case *net.UDPConn: ln.f, err = pconn.File() } case *net.TCPListener: ln.f, err = netln.File() case *net.UnixListener: ln.f, err = netln.File() } if err != nil { ln.close() return err } ln.fd = int(ln.f.Fd()) return syscall.SetNonblock(ln.fd, true) } func reuseportListenPacket(proto, addr string) (l net.PacketConn, err error) { return reuseport.ListenPacket(proto, addr) } func reuseportListen(proto, addr string) (l net.Listener, err error) { return reuseport.Listen(proto, addr) }
[]
[]
[]
[]
[]
go
null
null
null
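The evio_unix.go row above shows the internal event loop calling user callbacks such as Events.Data from loopRead and flushing any returned bytes in loopWrite. A minimal echo-server sketch of how a caller might wire that callback follows; it assumes this fork (github.com/zhujintao/evio, inferred from the internal import path) keeps the upstream evio public API (evio.Serve, evio.Events, evio.Conn, evio.Action), which is not shown in the excerpt.

package main

import (
	"log"

	"github.com/zhujintao/evio" // assumed to expose the upstream evio API
)

func main() {
	var events evio.Events
	events.NumLoops = 1 // serve() above maps 0 to one loop, negative to runtime.NumCPU()

	// Data is invoked from loopRead with the bytes just read; because c.reuse
	// is false by default, the input slice is already a private copy, so it is
	// safe to return it directly. A non-empty out buffer is written back to
	// the client by loopWrite.
	events.Data = func(c evio.Conn, in []byte) (out []byte, action evio.Action) {
		out = in // echo
		return
	}

	if err := evio.Serve(events, "tcp://127.0.0.1:5000"); err != nil {
		log.Fatal(err)
	}
}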
tensorflow/compiler/xla/python/xla_client.py
# Lint as: python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """An XLA client in Python.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import collections import enum # pylint: disable=g-bad-import-order import inspect import itertools import os from absl import logging import numpy as np # Note this module does *not* depend on any Python protocol buffers. The XLA # Python bindings are currently packaged both as part of jaxlib and as part # of TensorFlow. If we use protocol buffers here, then importing both jaxlib # and TensorFlow may fail with duplicate protocol buffer message definitions. from tensorflow.compiler.xla.python import xla_extension as _xla from tensorflow.compiler.xla.python.xla_extension import ops # Most functions are snake_case for consistency with other modules, whereas # method names of ComputationBuilder and Computation are CamelCase for # consistency with XLA. # pylint: disable=invalid-name class Backend(object, metaclass=abc.ABCMeta): """Abstract base class for XLA backends.""" def __init__(self, platform): """Creates a new Backend. Args: platform: A string naming the platform; for example 'gpu'. """ self.platform = platform @abc.abstractmethod def device_count(self): """Returns the number of devices known to the backend.""" @abc.abstractmethod def local_device_count(self): """Returns the number of devices local to this host.""" @abc.abstractmethod def devices(self): """Returns a list of `device_count()` Device subclasses.""" @abc.abstractmethod def host_id(self): """Returns the integer ID of this host.""" @abc.abstractmethod def buffer_from_pyval(self, pyval, device=None): """Allocates a fresh buffer and populates it with `pyval`.""" @abc.abstractmethod def make_tuple(self, c_buffers, device): """Makes a tuple from a sequence of backend buffer objects.""" @abc.abstractmethod def compile(self, computation, compile_options): """Compiles a computation. Returns an executable.""" @abc.abstractmethod def get_default_device_assignment(self, num_replicas): """Returns the default device assignment that `compile` would use. If `compile_options.device_assignment` isn't set, `compile` will pick a deterministic device assignment based on the number of replicas, possibly optimizing for device locality. This method returns that assignment, which is useful for e.g. manually replicating a value before passing it to a compiled executable. Args: num_replicas: the number of replicas needed. Returns: A list of Devices of length `num_replicas` indexed by replica ID. """ class LocalBackend(Backend): """XLA backend implemented using the in-process xla::LocalClient API.""" def __init__(self, platform, client): """Creates a new LocalBackend. Args: platform: A string; the user-visible platform name, e.g. 'gpu'. client: An _xla.PyLocalClient object. 
""" super(LocalBackend, self).__init__(platform) self.client = client def device_count(self): return self.client.device_count() def local_device_count(self): return self.client.local_device_count() def devices(self): return self.client.devices() def local_devices(self): return self.client.local_devices() def host_id(self): return self.client.host_id() def buffer_from_pyval(self, pyval, device=None): if device is None: device = self.local_devices()[0] return _xla.PyLocalBuffer.from_python(pyval, self.client, device) def make_tuple(self, c_buffers, device): return _xla.PyLocalBuffer.make_tuple(c_buffers, self.client, device) def compile(self, c_computation, compile_options): options = _xla.ExecutableBuildOptions() options.num_replicas = compile_options.num_replicas if compile_options.result_layout: options.result_layout = compile_options.result_layout options.debug_options.xla_cpu_fast_math_honor_infs = True options.debug_options.xla_cpu_fast_math_honor_nans = True options.debug_options.xla_cpu_fast_math_honor_division = True options.debug_options.xla_cpu_fast_math_honor_functions = True options.debug_options.xla_gpu_enable_fast_min_max = False return _xla.LocalExecutable.Compile(c_computation, compile_options.argument_layouts, options, self.client, compile_options.device_assignment) def get_default_device_assignment(self, num_replicas): return self.client.GetDefaultDeviceAssignment(num_replicas) def serialize(self, executable): return self.client.SerializeExecutable(executable) def deserialize(self, serialized_executable): return self.client.DeserializeExecutable(serialized_executable, self.client) xla_platform_names = { 'cpu': 'Host', 'gpu': 'CUDA', } def _cpu_backend_factory(): client = _xla.LocalClient.Get( platform='cpu', xla_platform_id=xla_platform_names['cpu'], asynchronous=True) return LocalBackend(platform='cpu', client=client) def _gpu_backend_factory(): """Returns a GPU backend. BFC allocator is used by default.""" allocator = os.getenv('XLA_PYTHON_CLIENT_ALLOCATOR', 'default').lower() memory_fraction = os.getenv('XLA_PYTHON_CLIENT_MEM_FRACTION') preallocate = os.getenv('XLA_PYTHON_CLIENT_PREALLOCATE') if allocator not in ('default', 'platform', 'bfc'): raise ValueError( 'XLA_PYTHON_CLIENT_ALLOCATOR env var must be "default", "platform", or ' '"bfc", got "%s"' % allocator) config = _xla.AllocatorConfig() if allocator == 'default': config.kind = _xla.AllocatorConfig.Kind.DEFAULT if allocator == 'platform': config.kind = _xla.AllocatorConfig.Kind.PLATFORM if allocator == 'bfc': config.kind = _xla.AllocatorConfig.Kind.BFC if memory_fraction: config.memory_fraction = float(memory_fraction) config.preallocate = preallocate not in ('0', 'false', 'False') client = _xla.LocalClient.Get( platform='gpu', xla_platform_id=xla_platform_names['gpu'], asynchronous=True, allocator_config=config) return LocalBackend(platform='gpu', client=client) # Backend factories, keyed by user-visible name, in increasing priority order. 
_local_backend_factories = collections.OrderedDict([ ('cpu', _cpu_backend_factory), ('gpu', _gpu_backend_factory), ]) def register_local_backend_factory(name, factory): _local_backend_factories[name] = factory _local_backends = None def _get_local_backends(): """Instantiates all known local backends.""" global _local_backends if _local_backends is not None: return _local_backends _local_backends = collections.OrderedDict() for name, factory in _local_backend_factories.items(): logging.vlog(2, "Initializing backend '%s'" % name) try: backend = factory() except RuntimeError: if name == 'cpu': # We always expect CPU to initialize successfully. raise else: # If the backend isn't built into the binary, or if it has no devices, # we expect a RuntimeError. continue _local_backends[name] = backend return _local_backends def get_local_backend(name=None): """Returns a local backend. Args: name: the backend name. If `None`, a default local backend is returned, typically `gpu` if one is present, or `cpu` if not. If a string, the named backend is returned or an exception raised. Returns: A LocalBackend object. """ backends = _get_local_backends() if name is not None: try: return backends[name] except KeyError: raise RuntimeError('Unknown backend {}'.format(name)) return list(backends.values())[-1] class OpMetadata(object): """Python representation of a xla.OpMetadata protobuf.""" __slots__ = ('op_type', 'op_name', 'source_file', 'source_line') def __init__(self, op_type='', op_name='', source_file='', source_line=0): self.op_type = op_type self.op_name = op_name self.source_file = source_file self.source_line = source_line def CurrentSourceInfoMetadata(op_type=None, op_name=None, skip_frames=1): """Helper for use in source mapping that returns an OpMetadata object.""" full_filename, lineno = inspect.stack()[skip_frames][1:3] filename = os.path.basename(full_filename) return OpMetadata( op_type=op_type, op_name=op_name, source_file=filename, source_line=lineno) PrimitiveType = _xla.PrimitiveType bfloat16 = _xla.bfloat16_dtype() XLA_ELEMENT_TYPE_TO_DTYPE = { PrimitiveType.PRED: np.dtype('bool'), PrimitiveType.S8: np.dtype('int8'), PrimitiveType.S16: np.dtype('int16'), PrimitiveType.S32: np.dtype('int32'), PrimitiveType.S64: np.dtype('int64'), PrimitiveType.U8: np.dtype('uint8'), PrimitiveType.U16: np.dtype('uint16'), PrimitiveType.U32: np.dtype('uint32'), PrimitiveType.U64: np.dtype('uint64'), PrimitiveType.BF16: np.dtype(bfloat16), PrimitiveType.F16: np.dtype('float16'), PrimitiveType.F32: np.dtype('float32'), PrimitiveType.F64: np.dtype('float64'), PrimitiveType.C64: np.dtype('complex64'), PrimitiveType.C128: np.dtype('complex128'), PrimitiveType.TUPLE: np.dtype(np.object), PrimitiveType.TOKEN: np.dtype(np.object), } # Note the conversion on the key. Numpy has a known issue wherein dtype hashing # doesn't work as expected (https://github.com/numpy/numpy/issues/7242). Thus, # when keying by dtype in this dict, we use the string form of dtypes. DTYPE_TO_XLA_ELEMENT_TYPE = { str(dt): et for et, dt in XLA_ELEMENT_TYPE_TO_DTYPE.items() } def dtype_to_etype(dtype): """Convenience function for reading DTYPE_TO_XLA_ELEMENT_TYPE.""" return DTYPE_TO_XLA_ELEMENT_TYPE[str(np.dtype(dtype))] Shape = _xla.Shape Shape.__doc__ = """ A Shape is an object defined in C++ that duck types like the following class: class Shape(object): '''Represents an XLA shape. 
A shape is either an array shape, having rank-many integer dimensions and an element type (represented by a Numpy dtype), or it is a tuple shape, having a shape for every tuple component: type shape = TupleShape of shape list | ArrayShape of { dimensions: int list; element_type: dtype } ''' @staticmethod def tuple_shape(tuple_shapes) -> Shape: "Construct a tuple shape." @staticmethod def array_shape(element_type, dimensions, minor_to_major=None) -> Shape: @staticmethod def from_pyval(pyval) -> Shape: "Returns a Shape that describes a tuple-tree of Numpy arrays." def __init__(self, str) -> Shape: "Parses a shape string." def __eq__(self, other: Shape) -> bool: def __ne__(self, other: Shape) -> bool: def __hash__(self): def __repr__(self): def is_tuple(self) -> bool: def is_array(self) -> bool: def tuple_shapes(self) -> [Shape]: def numpy_dtype(self) -> np.dtype: "Like element_type(), but returns dtype('O') for a tuple shape." def xla_element_type(self) -> PrimitiveType: def element_type(self) -> np.dtype: def dimensions(self) -> (int, int, ...): def rank(self) -> int: def with_major_to_minor_layout_if_absent(self) -> Shape: "Returns a copy with missing layouts set to major-to-minor." def to_serialized_proto(self) -> bytes: "Returns 'shape' as a serialized proto." """ ProgramShape = _xla.ProgramShape ProgramShape.__doc__ = """ A ProgramShape is a C++ object that duck types like the following class. class ProgramShape(object): def __init__(self, parameter_shapes, result_shape): def parameter_shapes(self) -> [Shape]: def result_shape(self) -> Shape: def __repr__(self): """ class Buffer(object): """Represents a handle to data owned by XLA. The referent is ready for use in executing a local, compiled Computation. On XLA platforms involving a device (e.g. GPU), this means the referent is in device memory. """ @staticmethod def from_pyval(pyval, device=None, backend=None): """Copies the `pyval` to a freshly allocated on-device buffer.""" backend = backend or get_local_backend() return backend.buffer_from_pyval(pyval, device) @staticmethod def make_tuple(buffers, device, backend=None): backend = backend or get_local_backend() return backend.make_tuple(buffers, device) # Buffer is not an instantiable type and exists only for its static methods. # The underlying buffer objects are C++ object with the following # API: # def shape(self) -> Shape: # def device(self) -> int: # def delete(self): # def destructure(self) -> [Buffer] # def is_deleted(self) -> bool: # def block_host_until_ready(self): # """Blocks the calling thread until the buffer is ready on device.""" # def copy_to_host_async(self): # """Requests a copy of the buffer to the host. # # Does not block waiting for the copy. Values fetched are available via # `to_py()`; the purpose of `copy_to_host_async` is to prefetch values # for subsequent `to_py()` calls, especially when requesting many values # at once. # """ # def to_py(self): # """Returns the value of the buffer as a Python tuple tree of ndarrays.""" # # TODO(phawkins): remove Buffer and its static methods completely, have # clients call methods on Backend to create buffers. # TODO(phawkins): Alias for backward compatibility. Remove after JAX drops # compatibility with Jaxlib versions older than 0.1.13. 
LocalBuffer = Buffer def shape_from_pyval(pyval): """Returns a Shape that describes a tuple-tree of Numpy arrays.""" def convert(pyval): if isinstance(pyval, tuple): return Shape.tuple_shape(tuple(convert(elt) for elt in pyval)) else: return Shape.array_shape(pyval.dtype, np.shape(pyval)) return convert(pyval) def transfer_to_infeed(value, device=None): """Transfers the given value into the XLA infeed queue. XLA's infeed queue is a single queue that feeds the "XLA virtual machine" with a totally ordered stream of values. This is dequeued from XLA computations via the Infeed() operation. Args: value: the value that the caller would like to enqueue into the XLA infeed queue device: the device to infeed the value to. Each device has a distinct infeed queue. """ # TODO(phawkins): support non-default backends. backend = get_local_backend() device = device or backend.local_devices()[0] backend.client.TransferToInfeed(value, device) def transfer_from_outfeed(shape, device=None): """Transfers a literal of the given shape from `device`'s outfeed. Args: shape: The shape of the value to transfer from outfeed. device: The device from which to transfer the outfeed value. Each device has a distinct outfeed queue.. Returns: The literal value that is produced from the outfeed queue. """ # TODO(phawkins): support non-default backends. backend = get_local_backend() device = device or backend.local_devices()[0] return backend.client.TransferFromOutfeed( shape.with_major_to_minor_layout_if_absent(), device) DeviceAssignment = _xla.DeviceAssignment DeviceAssignment.__doc__ = """ A DeviceAssignment is a C++ object with the following signature. def create(assignment): '''Builds a device assignment. Args: assignment: a 2D numpy array of device ordinal integers, indexed by [replica][computation_in_replica]. Returns: A device assignment. ''' def replica_count(): '''Returns the number of replicas.''' def computation_count(): '''Returns the number of computations per replica.''' """ Device = _xla.Device class CompileOptions(object): """Python object for XLA compile options. These options can be passed to the 'compile' step when using a local XLA client. """ def __init__(self): self.xla_dump_to = None self.dump_hlo_pass_re = None self.dump_hlo_module_re = None self.dump_hlo_as_text = None self.dump_hlo_as_proto = None self.hlo_profile = None self.num_replicas = 1 self.argument_layouts = None self.result_layout = None self.device_assignment = None class Computation(object): """Python wrapper for an XLA Computation. A Computation can be compiled to form an Executable, or used as a subcomputation in ComputationBuilder methods. """ def __init__(self, c_computation, backend=None): self._c_computation = c_computation # The backend argument is deprecated. Pass a backend to Compile() instead. self._backend = backend @property def computation(self): return self._c_computation def GetSerializedProto(self): """Gets the serialized HloModuleProto proto object in this computation. Returns: A string containing a serialized HloModuleProto proto containing the computation and its dependencies. """ return self.computation.GetSerializedProto() def GetHloText(self): """Get the textual HLO representation of this computation. Returns: A string containing the textual HLO. """ return self.computation.GetHloText() def GetHloDotGraph(self): """Get a Graphviz Dot representation of this computation. Returns: A string containing the graphviz dot graph. 
""" return self.computation.GetHloDotGraph() def Compile(self, argument_shapes=None, compile_options=None, backend=None): """Compiles a computation. Computations are the result of a "ComputationBuild'ing" process. Arguments: argument_shapes: Deprecated. Use compile_options.argument_layouts instead. compile_options: options to use for compilation, includes an optional laid out result shape for the computation. backend: a `Backend` for which an executable should be generated. Returns: A Executable instance. """ backend = backend or self._backend or get_local_backend() compile_options = compile_options or CompileOptions() if argument_shapes: compile_options.argument_layouts = argument_shapes return backend.compile(self.computation, compile_options) def GetProgramShape(self): return self._c_computation.GetProgramShape() def GetReturnValueShape(self): return self._c_computation.GetProgramShape().result_shape() def Hash(self): return self._c_computation.Hash() # An Executable is a C++ class that duck types with the following API: # class Executable(object): # def local_devices(self) -> [Device]: # def Execute(self, arguments : [Buffer]) -> Buffer: # """Execute on one replica with Buffer arguments and return value.""" # # def SizeOfGeneratedCodeInBytes(self) -> int: # """Return generated binary size, or -1 if not known.""" # # def ExecutePerReplica(self, arguments: [[Buffer]]) -> [Buffer]: # """Execute on many replicas with Buffer arguments and return value. # # Args: # arguments: A sequence of sequences of Buffers. The i'th inner sequence # comprises the arguments for execution on the i'th replica. # # Returns: # A list of the computation's outputs for each replica, as a Buffer. If # a shallow sequence of arguments was passed in for `arguments`, then the # sole, zero'th replica's output is returned instead, as a Buffer. # """ # # There are different implementations of Executable for different backends. def execute_with_python_values(executable, arguments=(), backend=None): """Execute on one replica with Python values as arguments and output.""" backend = backend or get_local_backend() def put(arg): return Buffer.from_pyval( arg, device=executable.local_devices()[0], backend=backend) arguments = [put(arg) for arg in arguments] return executable.Execute(arguments).to_py() def execute_with_python_values_replicated(executable, arguments, backend=None): """Execute on many replicas with Python values as arguments and output. Arguments: executable: the program to run. arguments: a list of lists of Python values indexed by `[replica][arg_num]` to pass as inputs. backend: the backend we are targeting. Returns: A list of python values, one per replica. 
""" backend = backend or get_local_backend() devices = executable.local_devices() # pylint: disable=g-complex-comprehension flat_args = [(arg, devices[replica]) for replica, replica_args in enumerate(arguments) for arg in replica_args] flat_arg_buffers = [ backend.buffer_from_pyval(pyval, device) for pyval, device in flat_args ] arg_buffers = [] for replica_args in arguments: arg_buffers.append(flat_arg_buffers[:len(replica_args)]) flat_arg_buffers = flat_arg_buffers[len(replica_args):] return [out.to_py() for out in executable.ExecutePerReplica(arg_buffers)] class PaddingType(enum.Enum): VALID = 1 SAME = 2 def _convert_padding_type_to_pad_values(padding_type, lhs_dims, rhs_dims, window_strides): """Maps PaddingType or string to pad values (list of pairs of ints).""" if not isinstance(padding_type, (str, PaddingType)): msg = 'padding_type must be str or PaddingType, got {}.' raise TypeError(msg.format(type(padding_type))) if isinstance(padding_type, str): if padding_type.upper() == 'VALID': padding_type = PaddingType.VALID elif padding_type.upper() == 'SAME': padding_type = PaddingType.SAME else: msg = 'Unknown padding type string: expected "VALID" or "SAME", got {}.' raise ValueError(msg.format(padding_type)) if padding_type == PaddingType.VALID: return [(0, 0)] * len(window_strides) elif padding_type == PaddingType.SAME: out_shape = np.ceil(np.true_divide(lhs_dims, window_strides)).astype(int) pad_sizes = [ max((out_size - 1) * stride + filter_size - in_size, 0) for out_size, stride, filter_size, in_size in zip( out_shape, window_strides, rhs_dims, lhs_dims) ] return [(pad_size // 2, pad_size - pad_size // 2) for pad_size in pad_sizes] else: msg = 'Unexpected PaddingType value: {}' raise ValueError(msg.format(padding_type)) class ComputationBuilder(object): """XLA computation builder. Enqueues XLA ops in sequence and in order to build a Computation, which in turn can be compiled into a LocalExecutable, which in turn can be locally executed. """ # The methods of this class map 1-to-1 onto the XLA C++ # computation builder API. Therefore, there's no need to laboriously list # arguments and return values for every method, especially where it's obvious. # # pylint: disable=g-doc-return-or-yield # pylint: disable=g-doc-args def __init__(self, name): self._builder = _xla.XlaBuilder(name) self._parameter_numbering = itertools.count() def Build(self, root=None, backend=None): """Builds a `Computation` from the contents of the builder. Args: root: if not None, the operator containing the return value of the computation. Returns: A `Computation`. """ if root is not None: return Computation(self._builder.Build(root), backend=backend) else: return Computation(self._builder.Build(), backend=backend) def GetShape(self, operand): return self._builder.GetShape(operand) def SetOpMetadata(self, op_metadata): """Set metadata for operations that are about to be enqueued.""" self._builder.SetOpMetadata(op_metadata) def ClearOpMetadata(self): """Clear metadata for operations that are about to be enqueued.""" self._builder.ClearOpMetadata() def SetSharding(self, sharding): """Set sharding that will be attached to all instructions until cleared.""" self._builder.SetSharding(sharding) def ClearSharding(self): """Clears the sharding. Ops will be shared according to the default placement policy. """ self._builder.ClearSharding() def CreateToken(self): """Enqueues a CreateToken op onto the computation. Returns: An XlaOp, representing a fresh token. 
""" return ops.CreateToken(self._builder) def AfterAll(self, tokens): """Enqueues a after-all op onto the computation. `AfterAll` takes a variadic number of tokens and produces a single token. Args: tokens: a list of `XlaOp` values representing predecessor tokens. Returns: An `XlaOp`. """ return ops.AfterAll(self._builder, tokens) def Infeed(self, shape, token=None): """Enqueues an infeed op onto the computation. Infeed operations dequeue data of the given shape from the device's infeed queue for subsequent use in the computation. Args: shape: a `Shape` describing the shape of the infed value. token: an optional `XlaOp` representing a token after which the infeed effect should be sequenced. Returns: An XlaOp, representing a (value, token) pair. """ if token is None: token = ops.CreateToken(self._builder) return ops.InfeedWithToken(token, shape.with_major_to_minor_layout_if_absent()) def Outfeed(self, operand, token=None): """Enqueues an outfeed op onto the computation. Outfeed operations enqueue data, using the given operand, onto the XLA outfeed queue for subsequent dequeue via the client API. Args: operand: an `XlaOp` representing the data to outfeed. token: an `XlaOp` representing a token after which the outfeed should be sequenced. Returns: An `XlaOp` representing a token. """ if token is None: token = ops.CreateToken(self._builder) return ops.OutfeedWithToken(operand, token, self._builder.GetShape(operand), '') def Constant(self, value): """Enqueues a constant op onto the computation. Args: value: value for the constant, as a np.array with an explicit dtype set to one of the supported types. Returns: An XlaOp. """ return ops.ConstantLiteral(self._builder, value) def ConstantF32Scalar(self, value): """Convenience method to enqueue a scalar F32 constant op. Args: value: a floating-point number. Returns: An XlaOp. """ return self.Constant(np.array(value, dtype=np.float32)) def ConstantF64Scalar(self, value): """Convenience method to enqueue a scalar F32 constant op. Args: value: a floating-point number. Returns: An XlaOp. """ return self.Constant(np.array(value, dtype=np.float64)) def ConstantS32Scalar(self, value): """Convenience method to enqueue a scalar S32 constant op. Args: value: a floating-point number. Returns: An XlaOp. """ return self.Constant(np.array(value, dtype=np.int32)) def ConstantS64Scalar(self, value): """Convenience method to enqueue a scalar S64 constant op. Args: value: a floating-point number. Returns: An XlaOp. """ return self.Constant(np.array(value, dtype=np.int64)) def ConstantPredScalar(self, value): """Convenience method to enqueue a scalar PRED constant op. Args: value: a boolean value. Returns: An XlaOp. """ return self.Constant(np.array(value, dtype=np.bool)) def ParameterWithShape(self, shape, name=None, parameter_num=None): """Enqueues a Parameter op onto the computation, given a shape. Args: shape: the parameter's shape as a Shape object. name: optional string name for the parameter. parameter_num: parameter number in the computation function. If None, the next linear parameter number is used. The default value capability can be used for auto-numbering. If you're using auto-numbering for some parameters, use it for *all* parameters to avoid clashes. Returns: An XlaOp. 
""" if name is None: name = '' if parameter_num is None: parameter_num = next(self._parameter_numbering) return ops.Parameter(self._builder, parameter_num, shape.with_major_to_minor_layout_if_absent(), name.encode('utf8')) def ParameterFromNumpy(self, value, name=None, parameter_num=None): """Enqueues a Parameter op onto the computation. Args: value: a Numpy array, or a nested tuple thereof, from which the shape is inferred. name: as in ParameterWithShape. parameter_num: as in ParameterWithShape. Returns: An XlaOp. """ return self.ParameterWithShape( shape_from_pyval(value), name=name, parameter_num=parameter_num) def Iota(self, dtype, size): """Enqueues an iota constant onto the computation. Args: dtype: expected numpy dtype of the output. size: integer, the number of elements in the array. Returns: An XlaOp representing the added iota constant. """ element_type = DTYPE_TO_XLA_ELEMENT_TYPE[str(np.dtype(dtype))] return ops.Iota(self._builder, element_type, size) def BroadcastedIota(self, dtype, shape, dimension): """Enqueues a broadcasted iota constant onto the computation. Args: dtype: expected numpy dtype of the output. shape: tuple of integers, the expected output shape (dimensions). dimension: positive integer, dimension along which to increment values. Returns: An XlaOp representing the added broadcasted iota constant. """ element_type = DTYPE_TO_XLA_ELEMENT_TYPE[str(np.dtype(dtype))] xla_shape = _xla.Shape.array_shape(element_type, shape, None) return ops.Iota(self._builder, xla_shape, dimension) def Concatenate(self, operands, dimension): """Enqueues a concatenate operation onto the computation. Args: operands: the operands to concatenate. dimension: the dimension in which to perform the concatenation. Returns: An XlaOp representing the added concatenate op. """ return ops.ConcatInDim(self._builder, list(operands), dimension) def ReplicaId(self): """Enqueues a ReplicaId operation onto the computation. Returns: A LocalOp representing the replica id. """ return _xla.ops.ReplicaId(self._builder) def Pad(self, operand, padding_value, padding_config): """Enqueues a Pad operation onto the computation. Args: operand: XlaOp representing the array to pad. padding_value: XlaOp representing the scalar pad value. padding_config: either a PaddingConfig or a list of integer triples (edge_padding_low, edge_padding_high, interior_padding) representing the configuration of the padding operation. Returns: An XlaOp representing the added Pad op. """ if isinstance(padding_config, tuple) or isinstance(padding_config, list): padding_config = GetPaddingConfigFromTriples(padding_config) return ops.Pad(operand, padding_value, padding_config) def Reshape(self, operand, dimensions, new_sizes): """Enqueues a reshape op onto the computation. Args: operand: XlaOp representing the array to be reshaped. dimensions: sequence of integers encoding the order in which dimensions are collapsed or None, in which case dimensions are flattened in order. new_sizes: sequence of integers encoding the new dimension sizes (shape). Returns: An XlaOp representing the added Reshape op. """ if dimensions is None: ndim = len(self.GetShape(operand).dimensions()) dimensions = tuple(range(ndim)) return ops.Reshape(operand, dimensions, new_sizes) def AllReduce(self, operand, computation, replica_groups=None): """AllReduce op. Args: operand: XlaOp representing the input array computation: a Computation object - binary reduction function. 
replica_groups: optional, list of lists of ints encoding a partition of the set {0, 1, ..., num_replicas} into equally-sized replica groups within which the all-to-all is performed. If not supplied or None (the default), all replicas belong to the same group. Returns: An XlaOp that represents the all-reduced result. """ replica_groups_protos = _get_replica_groups_protos(replica_groups) return ops.AllReduce(operand, computation.computation, replica_groups_protos, None, None) def AllToAll(self, operand, split_dimension, concat_dimension, replica_groups=None): """AllToAll op. Args: operand: XlaOp representing the input array split_dimension: the dimension along which the operand is split concat_dimension: the dimension along which the split blocks are concatenated replica_groups: optional, list of lists of ints encoding a partition of the set {0, 1, ..., num_replicas} into equally-sized replica groups within which the all-to-all is performed. If not supplied or None (the default), all replicas belong to the same group. Returns: An XlaOp that represents the all-to-all concatenation. """ replica_groups_protos = _get_replica_groups_protos(replica_groups) if not replica_groups: split_count = 1 else: split_count = len(replica_groups[0]) if not all(split_count == len(g) for g in replica_groups): raise ValueError('Replica groups must be equally sized') return ops.AllToAll(operand, split_dimension, concat_dimension, split_count, replica_groups_protos) def CrossReplicaSum(self, operand, replica_groups=None): """CrossReplicaSum op. Args: operand: the operand to sum across replica instances. replica_groups: optional, list of lists of ints encoding a partition of the set {0, 1, ..., num_replicas} into equally-sized replica groups within which the cross-replica sum is performed. If not supplied or None (the default), all replicas belong to the same group. Returns: An XlaOp that represents on each replica the sum of its group's values. """ replica_groups_protos = _get_replica_groups_protos(replica_groups) return ops.CrossReplicaSum(operand, replica_groups_protos) def Trans(self, operand): """Specialized matrix transpose op.""" return ops.Transpose(operand, [1, 0]) def Transpose(self, operand, permutation): """Transpose op.""" return ops.Transpose(operand, permutation) def SelectAndScatter(self, operand, select, window_dimensions, window_strides, padding, source, init_value, scatter): """Select and scatter op, used by the gradient of ReduceWindow. Args: operand: XlaOp for array of dimension N and type T over which the windows slide. select: Computation of type (T, T) -> Pred to apply to the elements of each window to indicate which element is selected. window_dimensions: sequence of N integers for dimensions of the window. window_strides: sequence of N integers for the strides of the window. padding: PaddingType representing either 'SAME' or 'VALID ' padding. source: XlaOp for array of type T with values to scatter. init_value: XlaOp of scalar type T for initial out value. scatter: Computation of type (T, T) -> T to apply to each scatter source element with its destination element. Returns: An XlaOp representing the added SelectAndScatter op. 
""" pads = _convert_padding_type_to_pad_values( padding, self.GetShape(operand).dimensions(), window_dimensions, window_strides) return ops.SelectAndScatterWithGeneralPadding(operand, select.computation, window_dimensions, window_strides, pads, source, init_value, scatter.computation) def Slice(self, operand, start_indices, limit_indices, strides=None): """Enqueues a slice operation onto the computation. Args: operand: XlaOp for the N dimensional array to be sliced. start_indices: iterable of N integers containing the starting indices of the slice for each dimension. limit_indices: iterable of N integers containing the ending indices (exclusive) of the slice for each dimension. strides: optional iterable of N integers containing the stride sizes for each dimension. Returns: An XlaOp representing the added Slice op. """ if strides is None: start_indices = list(start_indices) strides = [1] * len(start_indices) return ops.Slice(operand, start_indices, limit_indices, strides) def DynamicSlice(self, operand, start_indices, slice_sizes): """Enqueues a slice op with dynamic start indices onto the computation. Args: operand: XlaOp for the N dimensional array to be sliced. start_indices: XlaOp for the 1D array of N integers containing the starting indices of the slice. slice_sizes: iterable of N integers containing the slice sizes in each dimension. Returns: An XlaOp representing the added DynamicSlice op. """ slice_sizes = list(slice_sizes) if isinstance(start_indices, _xla.XlaOp): start_indices = [ ops.Reshape(ops.Slice(start_indices, [i], [i + 1], [1]), []) for i in range(len(slice_sizes)) ] return ops.DynamicSlice(operand, list(start_indices), slice_sizes) def DynamicUpdateSlice(self, operand, update, start_indices): """Enqueues a dynamic update slice operation onto the computation. Args: operand: XlaOp for the N dimensional array to be updated. update: N dimensional array comprising the slice update. start_indices: Rank-1 array of N integers comprising the starting indices of the slice along each dimension. Returns: An XlaOp representing the added DynamicUpdateSlice op. """ if isinstance(start_indices, _xla.XlaOp): ndims = self._builder.GetShape(start_indices).dimensions()[0] start_indices = [ ops.Reshape(ops.Slice(start_indices, [i], [i + 1], [1]), []) for i in range(ndims) ] return ops.DynamicUpdateSlice(operand, update, list(start_indices)) def Tuple(self, *elems): """Enqueues a tuple operation onto the computation. Args: elems: a sequence of tuple operands (each a XlaOp). Returns: An XlaOp representing the added Tuple op. """ return ops.Tuple(self._builder, list(elems)) def Call(self, computation_to_apply, operands): """Enqueues a call operation onto the computation. Args: computation_to_apply: a Computation object. operands: an iterable of XlaOp. The number and types of operands must match the arity of computation_to_apply. Returns: An XlaOp representing the added call op. """ return ops.Call(self._builder, computation_to_apply.computation, list(operands)) def CustomCall(self, call_target_name, operands, shape_with_layout, operand_shapes_with_layout, opaque=None): """Enqueues a custom call operation onto the computation. Args: call_target_name: the name of the function to call. operands: an iterable of XlaOp. The number and types of operands must match the arity of `operand_shapes_with_layout`. shape_with_layout: the shape of the operator's output, with layout. operand_shapes_with_layout: the shapes of `operands`, including the expected layouts. 
opaque: an opaque string passed to the backend. Returns: An XlaOp representing the added custom call op. """ opaque = opaque or b'' return ops.CustomCall(self._builder, call_target_name, list(operands), shape_with_layout, list(operand_shapes_with_layout), opaque) def Map(self, operands, computation_to_apply, dimensions): """Enqueues a map operation onto the computation. Args: operands: an iterable of XlaOp. computation_to_apply: a Computation object. dimensions: dimensions over which to apply map the function. Returns: An XlaOp representing the added Map op. """ return ops.Map(self._builder, list(operands), computation_to_apply.computation, dimensions, []) def Reduce(self, operand, init_value, computation_to_apply, dimensions): """Enqueues a reduction operation onto the computation. Args: operand: reduction operand (XlaOp). init_value: reduction initial value (XlaOp). computation_to_apply: a Computation object - binary reduction function. dimensions: sequence of dimensions (integers) to reduce on. Returns: An XlaOp representing the added Reduce op. """ return ops.Reduce(self._builder, [operand], [init_value], computation_to_apply.computation, dimensions) def ReduceWindow(self, operand, init_value, computation_to_apply, window_dimensions, window_strides, padding): """Enqueues a windowed reduction operation onto the computation. Args: operand: reduction operand (XlaOp). init_value: reduction initial value (XlaOp). computation_to_apply: a binary reduction function (Computation). window_dimensions: dimensions of window (sequence of integers). window_strides: strides for window (sequence of integers). padding: PaddingType representing either 'SAME' or 'VALID' padding. Returns: An XlaOp representing the added ReduceWindow op. """ pads = _convert_padding_type_to_pad_values( padding, self.GetShape(operand).dimensions(), window_dimensions, window_strides) return ops.ReduceWindowWithGeneralPadding(operand, init_value, computation_to_apply.computation, window_dimensions, window_strides, (), (), pads) def ReduceWindowWithGeneralPadding(self, operand, init_value, computation_to_apply, window_dimensions, window_strides, base_dilations, window_dilations, padding): """Enqueues a windowed reduction operation onto the computation. Args: operand: reduction operand (XlaOp). init_value: reduction initial value (XlaOp). computation_to_apply: a binary reduction function (Computation). window_dimensions: dimensions of window (sequence of integers). window_strides: strides for window (sequence of integers). base_dilations: dilations for the base (sequence of integers). window_dilations: dilations for window (sequence of integers). padding: length-N array-like of pairs of integers of (low, high) padding. Returns: An XlaOp representing the added ReduceWindow op. """ return ops.ReduceWindowWithGeneralPadding(operand, init_value, computation_to_apply.computation, window_dimensions, window_strides, base_dilations, window_dilations, padding) def RngNormal(self, mu, sigma, dims): """Enqueues an RngNormal operation onto the computation. Args: mu: An XlaOp to an F32 scalar specifying the mean. sigma: An XlaOp to an F32 scalar specifying the standard deviation. dims: A 1D array-like of nonnegative integers specifying the dimensions. Returns: a XlaOp to the generated array of F32 values. """ shape = _xla.Shape.array_shape(self.GetShape(mu).xla_element_type(), dims) return ops.RngNormal(mu, sigma, shape) def RngUniform(self, a, b, dims): """Enqueues an RngUniform operation onto the computation. 
Args: a: a XlaOp to an F32, S32, or U32 scalar (consistent with the type of b) specifying the low end of the interval [a, b) over which values are generated. b: a XlaOp to an F32, S32, or U32 scalar (consistent with the type of a) specifying the high end of the interval [a, b) over which values are generated. dims: A 1D array-like of nonnegative integers specifying the dimensions. Returns: a XlaOp to the generated array of values with the same numeric type (F32, S32, or U32) as the arguments a and b. """ shape = _xla.Shape.array_shape(self.GetShape(a).xla_element_type(), dims) return ops.RngUniform(a, b, shape) def While(self, cond, body, init): """Enqueues a While operation onto the computation. Args: cond: a Computation for the loop condition, which has type T -> PRED body: a Computation for the loop body, which has type T -> T init: a XlaOp for the initial parameter, which has type T Returns: a XlaOp representing the While operation. """ return ops.While(cond.computation, body.computation, init) def Conditional(self, pred, true_operand, true_computation, false_operand, false_computation): """Enqueues a Conditional operation onto the computation. Args: predicate: a XlaOp to test, which has scalar type PRED true_operand: a XlaOp of type T_0 true_computation: a Computation to apply to true_operand, type T_0 -> S false_operand: a ComputationDatahandle of type T_1 false_computation: a Computation to apply to false_operand, type T_1 -> S Returns: a XlaOp representing the Conditional operation. """ return ops.Conditional(pred, true_operand, true_computation.computation, false_operand, false_computation.computation) def IsConstant(self, operand): """Checks whether the given operand is a compile-time constant. Args: operand: a ComputationDataHandle to test. Returns: bool indicating whether `operand` is a compile-time constant, meaning its value does not depend on any parametersor, or on stateful operators such as `RngNormal` or `Infeed`. """ return self._builder.IsConstant(operand) def BuildConstantSubGraph(self, operand): """Builds a constant sub graph. Args: operand: a XlaOp to test. Returns: a Computation that is rooted on the given `operand` which is a compile-time constant. """ return ops.BuildConstantSubGraph(operand) def DotGeneral(self, lhs, rhs, dimension_numbers, precision_config=None): """Enqueues a general dot operation onto the computation. Args: lhs: XlaOp for the left-hand-side array. rhs: XlaOp for the right-hand-side array. dimension_numbers: either a DotDimensionNumbers or a nested tuple ((lhs_contract, rhs_contract), (lhs_batch, rhs_batch)) of lists of integers representing the dimensions to treat as contracting dimensions and batch dimensions on each input operand. Returns: a XlaOp representing the DotGeneral operation. """ if isinstance(dimension_numbers, tuple): dimension_numbers = GetDotDimensionsFromLists(dimension_numbers) return ops.DotGeneral( lhs, rhs, dimension_numbers, precision_config=precision_config) def Conv(self, lhs, rhs, window_strides, padding, feature_group_count=1, batch_group_count=1, precision_config=None): """Enqueues a Conv operation onto the computation. Args: lhs: XlaOp for the rank N+2 array of inputs. rhs: XlaOp for the rank N+2 array of kernel weights. window_strides: length-N array-like of integer kernel strides. padding: PaddingType representing either 'SAME' or 'VALID' padding. feature_group_count: number of feature groups for grouped convolution. batch_group_count: number of batch groups for grouped convolution. 
Returns: a XlaOp representing the Conv operation. """ pads = _convert_padding_type_to_pad_values( padding, self.GetShape(lhs).dimensions()[2:], self.GetShape(rhs).dimensions()[2:], window_strides) return self.ConvGeneralDilated( lhs, rhs, window_strides, pads, [], [], dimension_numbers=None, feature_group_count=feature_group_count, batch_group_count=batch_group_count, precision_config=precision_config) def ConvWithGeneralPadding(self, lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, feature_group_count=1, batch_group_count=1, precision_config=None): """Enqueues a ConvWithGeneralPadding operation onto the computation. Args: lhs: XlaOp for the rank N+2 array of inputs. rhs: XlaOp for the rank N+2 array of kernel weights. window_strides: length-N array-like of kernel strides. padding: length-N array-like of pairs of integers of (low, high) padding. lhs_dilation: length-N array-like of dilation factors. rhs_dilation: length-N array-like of dilation factors. feature_group_count: number of feature groups for grouped convolution. batch_group_count: number of batch groups for grouped convolution. Returns: A ComputationdataHandle representing the added ConvWithGeneralPadding op. """ return self.ConvGeneralDilated( lhs, rhs, list(window_strides), list(padding), list(lhs_dilation), list(rhs_dilation), dimension_numbers=None, feature_group_count=feature_group_count, batch_group_count=batch_group_count, precision_config=precision_config) def _GetConvDimensionNumbers(self, num_spatial_dims): """Create ConvolutionDimensionNumbers proto for convolutions.""" nd = num_spatial_dims dimension_numbers = ConvolutionDimensionNumbers() dimension_numbers.input_batch_dimension = 0 dimension_numbers.input_feature_dimension = 1 dimension_numbers.output_batch_dimension = 0 dimension_numbers.output_feature_dimension = 1 dimension_numbers.kernel_output_feature_dimension = 0 dimension_numbers.kernel_input_feature_dimension = 1 dimension_numbers.input_spatial_dimensions.extend(range(2, 2 + nd)) dimension_numbers.kernel_spatial_dimensions.extend(range(2, 2 + nd)) dimension_numbers.output_spatial_dimensions.extend(range(2, 2 + nd)) return dimension_numbers def ConvGeneralDilated(self, lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, dimension_numbers=None, feature_group_count=1, batch_group_count=1, precision_config=None): """Enqueues a ConvGeneralDilated operation onto the computation. Args: lhs: XlaOp for the rank N+2 array of inputs. rhs: XlaOp for the rank N+2 array of kernel weights. window_strides: length-N array-like of integer kernel strides. padding: length-N array-like of pairs of integers of (low, high) padding. lhs_dilation: length-N array-like of integer dilation factors. rhs_dilation: length-N array-like of integer dilation factors. dimension_numbers: optional, either a ConvolutionDimensionNumbers object or a tuple (lhs_spec, rhs_spec, out_spec). Each element is a string of length N+2 identifying by position: (1) batch dimensions in lhs, rhs, and the output with the character 'N', (2) feature dimensions in lhs and the output with the character 'C', (3) input and output feature dimensions in rhs with the characters 'I' and 'O' respectively, and (4) spatial dimension correspondences between lhs, rhs, and the output using any distinct characters. For example, to indicate dimension numbers consistent with the Conv operation with two spatial dimensions, one could use ('NCHW', 'OIHW', 'NCHW'). 
As another example, to indicate dimension numbers consistent with the TensorFlow Conv2D operation, one could use ('NHWC', 'HWIO', 'NHWC'). When using the latter form of convolution dimension specification, window strides are associated with spatial dimension character labels according to the order in which the labels appear in the rhs_spec string, so that window_strides[0] is matched with the dimension corresponding to the first character appearing in rhs_spec that is not 'I' or 'O'. By default, use the same dimension numbering as Conv and ConvWithGeneralPadding. feature_group_count: number of feature groups for grouped convolution. batch_group_count: number of batch groups for grouped convolution. Returns: a XlaOp representing the ConvGeneralDilated operation. """ if dimension_numbers is None: dimension_numbers = self._GetConvDimensionNumbers(len(window_strides)) elif isinstance(dimension_numbers, tuple): lhs_spec, rhs_spec, out_spec = dimension_numbers dimension_numbers = ConvolutionDimensionNumbers() dimension_numbers.input_batch_dimension = lhs_spec.index('N') dimension_numbers.input_feature_dimension = lhs_spec.index('C') dimension_numbers.output_batch_dimension = out_spec.index('N') dimension_numbers.output_feature_dimension = out_spec.index('C') dimension_numbers.kernel_output_feature_dimension = rhs_spec.index('O') dimension_numbers.kernel_input_feature_dimension = rhs_spec.index('I') dimension_numbers.kernel_spatial_dimensions.extend( i for i, c in enumerate(rhs_spec) if c not in {'I', 'O'}) dimension_numbers.input_spatial_dimensions.extend( sorted((i for i, c in enumerate(lhs_spec) if c not in {'N', 'C'}), key=lambda i: rhs_spec.index(lhs_spec[i]))) dimension_numbers.output_spatial_dimensions.extend( sorted((i for i, c in enumerate(out_spec) if c not in {'N', 'C'}), key=lambda i: rhs_spec.index(out_spec[i]))) return ops.ConvGeneralDilated( lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count, batch_group_count, precision_config=precision_config) def Sort(self, operands, dimension=-1, comparator=None): """Enqueues a sort operation onto the computation. Args: operands: either an XlaOp or a sequence of XlaOps to sort. All operands must be arrays with the same dimensions. dimension: the array dimension over which to sort. comparator: a comparator XlaComputation. See the XLA operation semantics for details. Returns: Either an XlaOp or a tuple of XlaOps (if `operands` was an XlaOp or a tuple of XlaOps, respectively.) """ operands = ( list(operands) if isinstance(operands, collections.Sequence) else [operands]) return ops.Sort(self._builder, operands, dimension, comparator.computation if comparator else None) def SortKeyVal(self, keys, values, dimension=-1): """Enqueues a key-value sort operation onto the computation. Deprecated. Use `Sort` instead. 
""" return ops.Sort(self._builder, [keys, values], dimension) def QR(self, a, full_matrices=True): """Enqueues a QR decomposition onto the computation.""" return self.Tuple(*ops.QR(a, full_matrices)) def TriangularSolve(self, a, b, left_side=False, lower=False, transpose_a=False, conjugate_a=False, unit_diagonal=False): """Enqueues a triangular-solve operation onto the computation.""" if not transpose_a: transpose = _xla.TriangularSolveOptions_Transpose.NO_TRANSPOSE if conjugate_a: a = self.Conj(a) else: transpose = ( _xla.TriangularSolveOptions_Transpose.ADJOINT if conjugate_a else _xla.TriangularSolveOptions_Transpose.TRANSPOSE) return ops.TriangularSolve(a, b, left_side, lower, unit_diagonal, transpose) def Eigh(self, a, full_matrices=True): """Enqueues a symmetric/Hermitian eigendecomposition.""" return self.Tuple(*ops.Eigh(a, full_matrices)) def SVD(self, a): """Enqueues a singular value decomposition.""" return self.Tuple(*ops.SVD(a)) def Gather(self, a, start_indices, dimension_numbers, slice_sizes, indices_are_sorted=False): """Enqueues a Gather operation onto the computation.""" return ops.Gather(a, start_indices, dimension_numbers, slice_sizes, indices_are_sorted) def Scatter(self, a, scatter_indices, updates, update_computation, dimension_numbers, indices_are_sorted=False, unique_indices=False): """Enqueues a Scatter operation onto the computation.""" return ops.Scatter(a, scatter_indices, updates, update_computation.computation, dimension_numbers, indices_are_sorted, unique_indices) def Fft(self, operand, fft_type, fft_lengths): """Enqueues a FFT operation onto the computation.""" return ops.Fft(operand, fft_type, fft_lengths) FftType = _xla.FftType _UNARY_OPS = [ 'Not', 'Clz', 'Abs', 'Exp', 'Expm1', 'Floor', 'Round', 'Ceil', 'Log', 'Log1p', 'Sign', 'Cos', 'Sin', 'Tanh', 'IsFinite', 'Sqrt', 'Rsqrt', 'Square', 'Reciprocal', 'Neg', 'Erf', 'Erfc', 'ErfInv', 'Lgamma', 'Digamma', 'BesselI0e', 'BesselI1e', 'Acos', 'Asin', 'Atan', 'Tan', 'Acosh', 'Asinh', 'Atanh', 'Cosh', 'Sinh', 'Real', 'Imag', 'Conj', ] _BINARY_OPS = [ 'Eq', 'Ne', 'Ge', 'Gt', 'Lt', 'Le', 'Add', 'Sub', 'Mul', 'Div', 'Rem', 'Max', 'Min', 'And', 'Or', 'Xor', 'Pow', 'ShiftLeft', 'ShiftRightArithmetic', 'ShiftRightLogical', 'Atan2', 'Complex', 'NextAfter', ] _OTHER_OPS = [ 'BitcastConvertType', 'Broadcast', 'BroadcastInDim', 'Cholesky', 'Clamp', 'Collapse', 'CollectivePermute', 'ConvertElementType', 'Dot', 'GetTupleElement', 'ReducePrecision', 'RegularizedIncompleteBeta', 'Rev', 'Select', 'SliceInDim', ] def _forward_methods_to_local_builder(): """Forward remaining ComputationBuilder methods to the C API. Set up methods, corresponding to XLA operations, whose calls are forwarded in a boilerplate manner to the underlying _xla.ops API. """ def forward_op(target_method): def forward(builder, *args, **kwargs): del builder return target_method(*args, **kwargs) return forward for method_name in itertools.chain(_UNARY_OPS, _BINARY_OPS, _OTHER_OPS): forward = forward_op(getattr(ops, method_name)) forward.__name__ = method_name setattr(ComputationBuilder, method_name, forward) _forward_methods_to_local_builder() def register_custom_call_target(name, fn, platform='cpu'): """Registers a custom call target. Args: name: bytes containing the name of the function. fn: a PyCapsule object containing the function pointer. platform: the target platform. """ _xla.RegisterCustomCallTarget(name, fn, xla_platform_names[platform]) # Deprecated. Use register_custom_call_target instead. 
register_cpu_custom_call_target = register_custom_call_target class PaddingConfigDimension(object): """Python representation of a xla.PaddingConfigDimension protobuf.""" __slots__ = ('edge_padding_low', 'edge_padding_high', 'interior_padding') def __init__(self): self.edge_padding_low = 0 self.edge_padding_high = 0 self.interior_padding = 0 class PaddingConfig(object): """Python representation of a xla.PaddingConfig protobuf.""" __slots__ = ('dimensions',) def __init__(self): self.dimensions = [] def GetPaddingConfigFromTriples(triples): """Create PaddingConfig proto from list of triples of integers.""" padding_config = PaddingConfig() for lo, hi, interior in triples: dimension = PaddingConfigDimension() dimension.edge_padding_low = lo dimension.edge_padding_high = hi dimension.interior_padding = interior padding_config.dimensions.append(dimension) return padding_config class DotDimensionNumbers(object): """Python representation of a xla.DotDimensionNumbers protobuf.""" __slots__ = ('lhs_contracting_dimensions', 'rhs_contracting_dimensions', 'lhs_batch_dimensions', 'rhs_batch_dimensions') def __init__(self): self.lhs_contracting_dimensions = [] self.rhs_contracting_dimensions = [] self.lhs_batch_dimensions = [] self.rhs_batch_dimensions = [] def GetDotDimensionsFromLists(dimension_numbers): (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers dot_dims_proto = DotDimensionNumbers() dot_dims_proto.lhs_contracting_dimensions.extend(lhs_contract) dot_dims_proto.rhs_contracting_dimensions.extend(rhs_contract) dot_dims_proto.lhs_batch_dimensions.extend(lhs_batch) dot_dims_proto.rhs_batch_dimensions.extend(rhs_batch) return dot_dims_proto class ConvolutionDimensionNumbers(object): """Python representation of a xla.ConvolutionDimensionNumbers protobuf.""" __slots__ = ('input_batch_dimension', 'input_feature_dimension', 'input_spatial_dimensions', 'kernel_input_feature_dimension', 'kernel_output_feature_dimension', 'kernel_spatial_dimensions', 'output_batch_dimension', 'output_feature_dimension', 'output_spatial_dimensions') def __init__(self): self.input_batch_dimension = 0 self.input_feature_dimension = 0 self.input_spatial_dimensions = [] self.kernel_input_feature_dimension = 0 self.kernel_output_feature_dimension = 0 self.kernel_spatial_dimensions = [] self.output_batch_dimension = 0 self.output_feature_dimension = 0 self.output_spatial_dimensions = [] class OpSharding(object): """Python representation of a xla.OpSharding protobuf.""" __slots__ = ('type', 'tile_assignment_dimensions', 'tile_assignment_devices', 'tuple_shardings') Type = _xla.OpSharding_Type def __init__(self): self.type = self.Type.REPLICATED self.tile_assignment_dimensions = [] self.tile_assignment_devices = [] self.tuple_shardings = [] class PrecisionConfig(object): """Python representation of a xla.PrecisionConfig protobuf.""" __slots__ = ('operand_precision',) Precision = _xla.PrecisionConfig_Precision def __init__(self): self.operand_precision = [] class GatherDimensionNumbers(object): """Python representation of a xla.GatherDimensionNumbers protobuf.""" __slots__ = ('offset_dims', 'collapsed_slice_dims', 'start_index_map', 'index_vector_dim') def __init__(self): self.offset_dims = [] self.collapsed_slice_dims = [] self.start_index_map = [] self.index_vector_dim = 0 class ScatterDimensionNumbers(object): """Python representation of a xla.ScatterDimensionNumbers protobuf.""" __slots__ = ('update_window_dims', 'inserted_window_dims', 'scatter_dims_to_operand_dims', 'index_vector_dim') def __init__(self): 
self.update_window_dims = [] self.inserted_window_dims = [] self.scatter_dims_to_operand_dims = [] self.index_vector_dim = 0 class ReplicaGroup(object): """Python representation of a xla.ReplicaGroup protobuf.""" __slots__ = ('replica_ids',) def __init__(self): self.replica_ids = [] def _make_replica_group_proto(replica_group): replica_group_proto = ReplicaGroup() replica_group_proto.replica_ids.extend(replica_group) return replica_group_proto def _get_replica_groups_protos(replica_groups): if replica_groups is None: replica_groups_protos = [] # special value for XLA API else: replica_groups = list(replica_groups) replica_groups_protos = [ _make_replica_group_proto(group) for group in replica_groups ] return replica_groups_protos
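A minimal end-to-end sketch of how the builder above is typically driven, using only methods defined in this file; the computation name, shapes, and input values are illustrative, and a usable local backend is assumed.

import numpy as np

builder = ComputationBuilder("add_two_vectors")   # illustrative name
x = builder.ParameterFromNumpy(np.zeros((4,), dtype=np.float32))
y = builder.ParameterFromNumpy(np.zeros((4,), dtype=np.float32))
builder.Add(x, y)                    # forwarded binary op; the last enqueued op becomes the root
computation = builder.Build()
executable = computation.Compile()   # compiles against the default local backend
result = execute_with_python_values(
    executable,
    arguments=(np.arange(4, dtype=np.float32),
               np.ones(4, dtype=np.float32)))
print(result)                        # expected: [1. 2. 3. 4.]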
[]
[]
[ "XLA_PYTHON_CLIENT_ALLOCATOR", "XLA_PYTHON_CLIENT_MEM_FRACTION", "XLA_PYTHON_CLIENT_PREALLOCATE" ]
[]
["XLA_PYTHON_CLIENT_ALLOCATOR", "XLA_PYTHON_CLIENT_MEM_FRACTION", "XLA_PYTHON_CLIENT_PREALLOCATE"]
python
3
0
samples/snippets/translate_v3_batch_translate_text_with_glossary_test.py
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import uuid import backoff from google.api_core.exceptions import DeadlineExceeded, GoogleAPICallError from google.cloud import storage from google.cloud.exceptions import NotFound import pytest import translate_v3_batch_translate_text_with_glossary import translate_v3_create_glossary import translate_v3_delete_glossary PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"] GLOSSARY_INPUT_URI = "gs://cloud-samples-data/translation/glossary_ja.csv" @pytest.fixture(scope="session") def glossary(): """Get the ID of a glossary available to session (do not mutate/delete).""" glossary_id = "test-{}".format(uuid.uuid4()) translate_v3_create_glossary.create_glossary( PROJECT_ID, GLOSSARY_INPUT_URI, glossary_id ) yield glossary_id # cleanup @backoff.on_exception( backoff.expo, (DeadlineExceeded, GoogleAPICallError), max_time=60 ) def delete_glossary(): try: translate_v3_delete_glossary.delete_glossary(PROJECT_ID, glossary_id) except NotFound as e: # Ignoring this case. print("Got NotFound, detail: {}".format(str(e))) delete_glossary() @pytest.fixture(scope="function") def bucket(): """Create a temporary bucket to store annotation output.""" bucket_name = f"tmp-{uuid.uuid4().hex}" storage_client = storage.Client() bucket = storage_client.create_bucket(bucket_name) yield bucket bucket.delete(force=True) @pytest.mark.flaky(max_runs=3, min_passes=1) def test_batch_translate_text_with_glossary(capsys, bucket, glossary): translate_v3_batch_translate_text_with_glossary.batch_translate_text_with_glossary( "gs://cloud-samples-data/translation/text_with_glossary.txt", "gs://{}/translation/BATCH_TRANSLATION_GLOS_OUTPUT/".format(bucket.name), PROJECT_ID, glossary, 320, ) out, _ = capsys.readouterr() assert "Total Characters: 9" in out
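A small sketch of the environment this test assumes before it can run; only GOOGLE_CLOUD_PROJECT comes from the test itself, and "my-gcp-project" is a placeholder, not a real project.

import os

# Fail fast with a readable message when the required variable is missing.
if "GOOGLE_CLOUD_PROJECT" not in os.environ:
    raise RuntimeError(
        "export GOOGLE_CLOUD_PROJECT=my-gcp-project before running "
        "translate_v3_batch_translate_text_with_glossary_test.py")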
[]
[]
[ "GOOGLE_CLOUD_PROJECT" ]
[]
["GOOGLE_CLOUD_PROJECT"]
python
1
0
src/HCTool-sg-1.py
# coding: utf-8 from huaweicloudsdkcore.auth.credentials import BasicCredentials from huaweicloudsdkcore.exceptions import exceptions from huaweicloudsdkcore.http.http_config import HttpConfig """ # 导入指定云服务的库 huaweicloudsdk{service} """ from huaweicloudsdkvpc.v2 import * from huaweicloudsdkvpc.v2.region.vpc_region import VpcRegion """ # 导入其它依赖库 """ from urllib.request import urlopen from json import load, loads from Crypto.Cipher import AES import time, os, base64, sys, getopt """ # 导入IPy # --(Class and tools for handling of IPv4 and IPv6 addresses and networks) #用于判断当前公网IP地址是IPv4 or IPv6 """ import IPy aes_key_from_cli = '' ip_from_cli = '' """ # 从命令行获取解密秘钥、指定的IP地址等信息 """ def start(argv): if not argv: print('Get usage info by # HCTool-XXX.py -h') sys.exit(2) try: opts, args = getopt.getopt(argv, "hk:i:", ["help", "key=", "ip="]) except getopt.GetoptError: print('Get usage info by # HCTool-XXX.py -h') sys.exit(2) for opt, arg in opts: if opt in ("-h", "--help"): print('# HCTool-XXX.py -k <aes_key> -i <ip_addr> OR \n# HCTool-XXX.py --key=<aes_key> --ip=<ip_addr>') sys.exit() elif opt in ("-k", "--key"): global aes_key_from_cli aes_key_from_cli = arg if aes_key_from_cli == '': print({'create_security_group_rule_tool: error@start()': 'ERROR: key must not be NULL!'}) sys.exit(2) else: print({'create_security_group_rule_tool: message@start()': 'key is: ' + aes_key_from_cli}) elif opt in ("-i", "--ip"): global ip_from_cli ip_from_cli = arg if ip_from_cli != '': print({'create_security_group_rule_tool: message@start()': 'ip addr is: ' + ip_from_cli}) else: print({'create_security_group_rule_tool: error@start()': 'ERROR: ip is NULL!'}) sys.exit(2) """ # en_val为经过base64编码后的密文string """ def decrypt_env(en_val): (aes_key, aes_iv, aes_mode) = (aes_key_from_cli, 'knx5FQtE4XOQ', AES.MODE_GCM) if aes_key_from_cli == '': print({'create_security_group_rule_tool: error@decrypt_env()': 'ERROR: key must not be NULL!'}) sys.exit(2) aes_de_instance = AES.new(aes_key.encode('utf-8'), aes_mode, aes_iv.encode('utf-8')) plain_val = aes_de_instance.decrypt(base64.b64decode(en_val.encode('utf-8'))).decode('utf-8') return plain_val """ # 获取个人云环境配置 # en_cred_dict = {'EN_AK':' ','EN_SK':' ','EN_ProjectID':' ','Region':' '} """ def get_cred_config(): en_env_data = os.getenv('EN_CRED_JSON_STR') en_cred_dict = loads(en_env_data) en_ak = en_cred_dict['EN_AK'] en_sk = en_cred_dict['EN_SK'] en_project_id = en_cred_dict['EN_ProjectID'] ak = decrypt_env(en_ak) sk = decrypt_env(en_sk) project_id = decrypt_env(en_project_id) region = en_cred_dict['Region'] security_group_id = en_cred_dict['SecurityGroupID'] endpoint = "https://" + "vpc." 
+ region + ".myhwclouds.com" print({'create_security_group_rule_tool: message@get_cred_config()': 'current endpoint is: ' + endpoint}) return ak, sk, project_id, region, endpoint, security_group_id """ # demo 列出所有VPC """ def list_vpc(client): try: request = ListVpcsRequest() response = client.list_vpcs(request) print(response) except exceptions.ClientRequestException as e: print(e.status_code) print(e.request_id) print(e.error_code) print(e.error_msg) """ # demo 列出所有SecurityGroupRules """ def list_sg(client): try: request = ListSecurityGroupRulesRequest() response = client.list_security_group_rules(request) print(response) except exceptions.ClientRequestException as e: print(e.status_code) print(e.request_id) print(e.error_code) print(e.error_msg) """ # 创建放通通当前工具所在主机公网IP的安全组 """ def get_pub_ip_from_inet(): ip_from_inet = '' for num in range(1, 3): if num == 1: ip_from_inet = load(urlopen('https://httpbin.org/ip'))['origin'] elif num == 2: ip_from_inet = load(urlopen('https://api.ipify.org/?format=json'))['ip'] else: ip_from_inet = load(urlopen('https://jsonip.com'))['ip'] if IPy.IP(ip_from_inet).version() == 4: break return ip_from_inet """ # 创建放通通当前工具所在主机公网IP的安全组 """ def create_sg(client, security_group_id): global ip_from_cli cur_ip = ip_from_cli if cur_ip == '': cur_ip = get_pub_ip_from_inet() print({'create_security_group_rule_tool: message@create_sg()': 'current public network IP is: ' + cur_ip}) try: if IPy.IP(cur_ip).version() == 6: ethertype = 'IPv6' remote_ip_prefix = cur_ip elif IPy.IP(cur_ip).version() == 4: ethertype = 'IPv4' remote_ip_prefix = cur_ip else: print({'create_security_group_rule_tool: error@create_sg()': 'not IPv4 nor IPv6: ' + cur_ip}) sys.exit(2) except ValueError: print({'create_security_group_rule_tool: error@create_sg()': 'invaild IP addr: ' + cur_ip}) sys.exit(2) loca_ltime = time.asctime(time.localtime(time.time())) try: rule = CreateSecurityGroupRuleOption(security_group_id, description=loca_ltime, direction="ingress", ethertype=ethertype, remote_ip_prefix=remote_ip_prefix) body = CreateSecurityGroupRuleRequestBody(rule) request = CreateSecurityGroupRuleRequest(body) response = client.create_security_group_rule(request) print(response) except exceptions.ClientRequestException as e: print(e.status_code) print(e.request_id) print(e.error_code) print(e.error_msg) if __name__ == "__main__": start(sys.argv[1:]) (ak, sk, project_id, region, endpoint, security_group_id) = get_cred_config() config = HttpConfig.get_default_config() config.ignore_ssl_verification = False credentials = BasicCredentials(ak, sk, project_id) vpc_client = VpcClient.new_builder(VpcClient) \ .with_http_config(config) \ .with_credentials(credentials) \ .with_region(VpcRegion.value_of(region)) \ .build() # list_vpc(vpc_client) # list_sg(vpc_client) create_sg(vpc_client, security_group_id)
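A sketch of the credential document that get_cred_config() above expects to find in the EN_CRED_JSON_STR environment variable. Every value is a placeholder and the region string is only an example; the key names (EN_AK, EN_SK, EN_ProjectID, Region, SecurityGroupID) are the ones the script actually reads, with the first three holding base64-encoded AES-GCM ciphertext that decrypt_env() unwraps using the key passed via --key.

import json, os

example_cred = {
    "EN_AK": "<base64 AES-GCM ciphertext>",
    "EN_SK": "<base64 AES-GCM ciphertext>",
    "EN_ProjectID": "<base64 AES-GCM ciphertext>",
    "Region": "cn-north-4",                  # example region string
    "SecurityGroupID": "<security group id>",
}
os.environ["EN_CRED_JSON_STR"] = json.dumps(example_cred)

# The tool is then driven from the command line, e.g.:
#   python HCTool-sg-1.py --key=<aes_key> --ip=203.0.113.7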
[]
[]
[ "EN_CRED_JSON_STR" ]
[]
["EN_CRED_JSON_STR"]
python
1
0
qpm/core/context.go
package core import ( "fmt" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "log" "os" msg "qpm.io/common/messages" "runtime" ) var ( Version = "0.X.x" Build = "master" ) const ( PackageFile = "qpm.json" SignatureFile = "qpm.asc" Vendor = "vendor" Address = "pkg.qpm.io:7000" LicenseFile = "LICENSE" ) var UA = fmt.Sprintf("qpm/%v (%s; %s)", Version, runtime.GOOS, runtime.GOARCH) type Context struct { Log *log.Logger Client msg.QpmClient } func NewContext() *Context { log := log.New(os.Stderr, "QPM: ", log.LstdFlags) address := os.Getenv("SERVER") if address == "" { address = Address } noTls := os.Getenv("NO_TLS") == "1" var tlsOption grpc.DialOption if noTls { tlsOption = grpc.WithInsecure() } else { tlsOption = grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")) } conn, err := grpc.Dial(address, tlsOption, grpc.WithUserAgent(UA)) if err != nil { log.Fatalf("did not connect: %v", err) } return &Context{ Log: log, Client: msg.NewQpmClient(conn), } }
[ "\"SERVER\"", "\"NO_TLS\"" ]
[]
[ "NO_TLS", "SERVER" ]
[]
["NO_TLS", "SERVER"]
go
2
0
tests/unit/test_errors.py
"""Test the presentation style of exceptions.""" import textwrap from io import StringIO from typing import Literal, Optional import pytest import rich import rich.console import rich.text from sphinx_theme_builder._internal.errors import DiagnosticError class TestDiagnosticErrorInitialisation: def test_fails_without_reference(self) -> None: class DerivedError(DiagnosticError): pass with pytest.raises(AssertionError) as exc_info: DerivedError(message="", context=None, hint_stmt=None) assert str(exc_info.value) == "error reference not provided!" def test_can_fetch_reference_from_subclass(self) -> None: class DerivedError(DiagnosticError): reference = "subclass-reference" obj = DerivedError(message="", context=None, hint_stmt=None) assert obj.reference == "subclass-reference" def test_can_fetch_reference_from_arguments(self) -> None: class DerivedError(DiagnosticError): pass obj = DerivedError( message="", context=None, hint_stmt=None, reference="subclass-reference" ) assert obj.reference == "subclass-reference" @pytest.mark.parametrize( "name", [ "BADNAME", "BadName", "bad_name", "BAD_NAME", "_bad", "bad-name-", "bad--name", "-bad-name", "bad-name-due-to-1-number", ], ) def test_rejects_non_kebab_case_names(self, name: str) -> None: class DerivedError(DiagnosticError): reference = name with pytest.raises(AssertionError) as exc_info: DerivedError(message="", context=None, hint_stmt=None) assert str(exc_info.value) == "error reference must be kebab-case!" def assert_presentation_matches( error: DiagnosticError, expected: str, *, color_system: Optional[Literal["auto", "standard", "256", "truecolor", "windows"]] ) -> None: expected_output = textwrap.dedent(expected) stream = StringIO() console = rich.console.Console(file=stream, color_system=color_system) console.print(error) assert stream.getvalue() == expected_output class TestDiagnosticPipErrorPresentation: def test_complete_string(self) -> None: # GIVEN error = DiagnosticError( reference="ooops-an-error-occured", message=( "This is an error message describing the issues." "\nIt can have multiple lines." ), context=( "This is some context associated with that error." "\nAny relevant additional details are mentioned here." ), hint_stmt=( "This is a hint, that will help you figure this out." "\nAnd the hint can have multiple lines." ), note_stmt=( "This is to draw your [b]attention[/] toward about something important." "\nAnd this can also have multiple lines." ), ) # WHEN / THEN assert str(error) == ( "<DiagnosticError: " "https://sphinx-theme-builder.rtfd.io/errors/#ooops-an-error-occured" ">" ) assert repr(error) == ( "<DiagnosticError(" "reference='ooops-an-error-occured', " "message='This is an error message describing the issues.\\n" "It can have multiple lines.', " "context='This is some context associated with that error.\\n" "Any relevant additional details are mentioned here.', " "note_stmt='This is to draw your [b]attention[/] toward about something important.\\n" "And this can also have multiple lines.', " "hint_stmt='This is a hint, that will help you figure this out.\\n" "And the hint can have multiple lines.'" ")>" ) def test_complete(self) -> None: assert_presentation_matches( DiagnosticError( reference="ooops-an-error-occured", message=( "This is an error message describing the issues." "\nIt can have multiple lines." ), context=( "This is some context associated with that error." "\nAny relevant additional details are mentioned here." ), hint_stmt=( "This is a hint, that will help you figure this out." 
"\nAnd the hint can have multiple lines." ), note_stmt=( "This is to draw your [b]attention[/] toward about something important." "\nAnd this can also have multiple lines." ), ), """\ error: ooops-an-error-occured × This is an error message describing the issues. │ It can have multiple lines. ╰─> This is some context associated with that error. Any relevant additional details are mentioned here. note: This is to draw your attention toward about something important. And this can also have multiple lines. hint: This is a hint, that will help you figure this out. And the hint can have multiple lines. Link: https://sphinx-theme-builder.rtfd.io/errors/#ooops-an-error-occured """, color_system=None, ) def test_complete_colors(self) -> None: assert_presentation_matches( DiagnosticError( reference="ooops-an-error-occured", message=( "This is an error message describing the issues." "\nIt can have multiple lines." ), context=( "This is some context associated with that error." "\nAny relevant additional details are mentioned here." ), hint_stmt=rich.text.Text( "This is a hint, that will help you figure this out." "\nAnd the [b]hint[/] can have multiple lines." ), note_stmt=( "This is to draw your [b]attention[/] toward about something important." "\nAnd this can also have multiple lines." ), ), # Yes, I know this is dumb. """\ \x1b[1;31merror\x1b[0m: \x1b[1mooops-an-error-occured\x1b[0m \x1b[31m×\x1b[0m This is an error message describing the issues. \x1b[31m│\x1b[0m It can have multiple lines. \x1b[31m╰─>\x1b[0m This is some context associated with that error. \x1b[31m \x1b[0m Any relevant additional details are mentioned here. \x1b[1;35mnote\x1b[0m: This is to draw your \x1b[1mattention\x1b[0m toward about something important. And this can also have multiple lines. \x1b[1;36mhint\x1b[0m: This is a hint, that will help you figure this out. And the [b]hint[/] can have multiple lines. Link: \x1b[4;94mhttps://sphinx-theme-builder.rtfd.io/errors/#ooops-an-error-occured\x1b[0m """, color_system="256", ) def test_no_note_no_hint_no_context(self) -> None: # GIVEN assert_presentation_matches( DiagnosticError( reference="ooops-an-error-occured", message=( "This is an error message describing the issues." "\nIt can have multiple lines." ), context=None, hint_stmt=None, ), """\ error: ooops-an-error-occured × This is an error message describing the issues. It can have multiple lines. Link: https://sphinx-theme-builder.rtfd.io/errors/#ooops-an-error-occured """, color_system=None, ) def test_no_note_no_hint(self) -> None: # GIVEN assert_presentation_matches( DiagnosticError( reference="ooops-an-error-occured", message=( "This is an error message describing the issues." "\nIt can have multiple lines." ), context=( "This is some context associated with that error." "\nAny relevant additional details are mentioned here." ), hint_stmt=None, ), """\ error: ooops-an-error-occured × This is an error message describing the issues. │ It can have multiple lines. ╰─> This is some context associated with that error. Any relevant additional details are mentioned here. Link: https://sphinx-theme-builder.rtfd.io/errors/#ooops-an-error-occured """, color_system=None, ) def test_no_note(self) -> None: # GIVEN assert_presentation_matches( DiagnosticError( reference="ooops-an-error-occured", message=( "This is an error message describing the issues." "\nIt can have multiple lines." ), context=( "This is some context associated with that error." "\nAny relevant additional details are mentioned here." 
), hint_stmt=( "This is a hint, that will help you figure this out." "\nAnd the hint can have multiple lines." ), ), """\ error: ooops-an-error-occured × This is an error message describing the issues. │ It can have multiple lines. ╰─> This is some context associated with that error. Any relevant additional details are mentioned here. hint: This is a hint, that will help you figure this out. And the hint can have multiple lines. Link: https://sphinx-theme-builder.rtfd.io/errors/#ooops-an-error-occured """, color_system=None, ) def test_no_hint(self) -> None: # GIVEN assert_presentation_matches( DiagnosticError( reference="ooops-an-error-occured", message=( "This is an error message describing the issues." "\nIt can have multiple lines." ), context=( "This is some context associated with that error." "\nAny relevant additional details are mentioned here." ), hint_stmt=None, note_stmt=( "This is to draw your [b]attention[/] toward about something important." "\nAnd this can also have multiple lines." ), ), """\ error: ooops-an-error-occured × This is an error message describing the issues. │ It can have multiple lines. ╰─> This is some context associated with that error. Any relevant additional details are mentioned here. note: This is to draw your attention toward about something important. And this can also have multiple lines. Link: https://sphinx-theme-builder.rtfd.io/errors/#ooops-an-error-occured """, color_system=None, )
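A short sketch of the construction pattern these tests exercise; the subclass name and all message strings are invented, and only the reference attribute plus the message/context/hint_stmt keyword arguments come from the tests above.

class ThemeBuildError(DiagnosticError):
    reference = "theme-build-failed"     # must be kebab-case, per the tests above

error = ThemeBuildError(
    message="The theme could not be built.",
    context="The asset bundler exited with a non-zero status.",
    hint_stmt="Check that the required toolchain is installed and on PATH.",
)
rich.console.Console().print(error)      # renders the formatted "error: ..." output shown above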
[]
[]
[]
[]
[]
python
null
null
null
SW_pressure.py
# -*- coding: utf-8 -*- """ Created on Fri Sep 18 14:23:25 2020 A script that pulls all available 1 minute OMNI SW data, calculates thermal and magnetic pressure, and plots a neat historgam of dynamic, magnetic, and thermal pressure sources. Takes a while to download all that data for sure but boy is it worth it! @author: connor o'brien """ import os os.environ["CDF_LIB"] = "c:\CDF_Distribution\cdf37_1-dist" from spacepy import pycdf import datetime as dt import numpy as np import matplotlib.pyplot as plt import bisect import webbrowser import time def GetOMNIData(Date,DataPath,DownloadPath): """Downloads OMNI data for date string YYYYMMDD. Courtesy Emil Atz""" Year = Date[0]+Date[1]+Date[2]+Date[3] #Splitting the year Month = Date[4]+Date[5] #Splitting the month if (int(Year) >= 1981): CDFFile = 'omni_hro_1min_'+Year+Month+'01_v01.cdf' #Building the file name else: if (int(Month) < 7): CDFFile = 'omni2_h0_mrg1hr_'+Year+'0101_v01.cdf' else: CDFFile = 'omni2_h0_mrg1hr_'+Year+'0701_v01.cdf' if os.path.isfile(DataPath+CDFFile): #Checking if you already have it downloaded print('The CDF of this month is already downloaded') return CDFFile #This breaks you out of the function # PYTHON INTERNET SCRAPPING # DANGER: If you do this method and are too 'fast' with your internet requests, # the web page can block you because it may think you're hacking. Which you are. # So dont be flooding web pages with download requests from a script! Thays why # there is a time.sleep(10) in there. if (int(Year) >= 1981): webbrowser.open('https://cdaweb.gsfc.nasa.gov/pub/data/omni/omni_cdaweb/hro_1min/'+Year+'/'+CDFFile) print('Downloading 1 min OMNI CDF file ' + Date) else: webbrowser.open('https://cdaweb.gsfc.nasa.gov/pub/data/omni/omni_cdaweb/hourly/'+Year+'/'+CDFFile) print('Downloading Hourly OMNI CDF file ' + Date) time.sleep(20) os.rename(DownloadPath+CDFFile, DataPath+CDFFile) return CDFFile def ModifyOMNIData(Date, CDFFile, DataPath): """Creates small cdf from monthly OMNI data (string CDFFile) for date string YYYYMMDD. 
Courtesy Emil Atz""" os.chdir(DataPath) FullCDF = pycdf.CDF(CDFFile) #Getting the file into Python Start = dt.datetime.strptime(Date, '%Y%m%d') Stop = dt.datetime.strptime(Date, '%Y%m%d') Stop = Stop+dt.timedelta(days = 1) # Finding the indicies with which to cut the file start_ind = bisect.bisect_left(FullCDF['Epoch'], Start) stop_ind = bisect.bisect_left(FullCDF['Epoch'], Stop) # Aplying the indicies to the file variables of interest time = FullCDF['Epoch'][start_ind:stop_ind] BzGSE = FullCDF['BZ_GSE'][start_ind:stop_ind] BxGSE = FullCDF['BX_GSE'][start_ind:stop_ind] BzGSM = FullCDF['BZ_GSM'][start_ind:stop_ind] ByGSM = FullCDF['BY_GSM'][start_ind:stop_ind] Pdyn = FullCDF['Pressure'][start_ind:stop_ind] Vx = FullCDF['Vx'][start_ind:stop_ind] Vy = FullCDF['Vy'][start_ind:stop_ind] Vz = FullCDF['Vz'][start_ind:stop_ind] os.chdir(DataPath) # Saving the cut file to a new file CDFName = Date+'.cdf' if os.path.isfile(DataPath+CDFName): print('The CDF of these days is already generated') return CDFName Shortcdf = pycdf.CDF(CDFName, '') Shortcdf['Epoch'] = time Shortcdf['BZ_GSE'] = BzGSE Shortcdf['BZ_GSM'] = BzGSM Shortcdf['BX_GSE'] = BxGSE Shortcdf['BY_GSM'] = ByGSM Shortcdf['Pressure'] = Pdyn Shortcdf['Vx'] = Vx Shortcdf['Vy'] = Vy Shortcdf['Vz'] = Vz return CDFName storage_path = '/Users/conno/Documents/Data Storage/OMNI/' #Where the data is to be stored downloads_path = '/Users/conno/Downloads/' #Your downloads folder home_path = "/Users/conno/Documents/Magnetopause Modeling/Data/" #Where the script is run from cdf_arr = np.asarray([]) #Make the array of cdf names for year in np.arange(1981, 2020): for month in np.arange(1,13): date = str(year*100 + month) cdf_arr = np.append(cdf_arr, GetOMNIData(date, storage_path, downloads_path)) #Stage the condition arrays imf = np.asarray([]) pdyn = np.asarray([]) proton_n = np.asarray([]) proton_t = np.asarray([]) #Gotta look in the right place! os.chdir(storage_path) #Download the data, cut the incomplete parts, and put in the staged arrays for filename in cdf_arr: print('Appending '+ filename) data = pycdf.CDF(filename) temp_imf = np.sqrt(np.asarray(data['BX_GSE'])**2 + np.asarray(data['BY_GSE'])**2 + np.asarray(data['BZ_GSE'])**2) temp_pdyn = np.asarray(data['Pressure']) temp_n = np.asarray(data['proton_density']) temp_t = np.asarray(data['T']) temp_imf = temp_imf[(np.asarray(data['BX_GSE']) < 9999.99) & (np.asarray(data['BY_GSE']) < 9999.99) & (np.asarray(data['BZ_GSE']) < 9999.99)] temp_pdyn = temp_pdyn[temp_pdyn < 99.99] therm_bool = (temp_n < 999.99) & (temp_t < 9999999.) temp_n = temp_n[therm_bool] temp_t = temp_t[therm_bool] imf = np.append(imf, temp_imf) pdyn = np.append(pdyn, temp_pdyn) proton_n = np.append(proton_n, temp_n) proton_t = np.append(proton_t, temp_t) #Calculate the derived pressures in nPa pmag = (imf ** 2) * (3.97887 * 10**(-4)) ptherm = proton_n * proton_t * (1.381 * 10**(-8)) #Make a log x scale for the histogram to make it legible logbins = np.geomspace(np.min(pmag), np.max(pdyn), 75) #Plot it up! 
pdyn_n, pdyn_bins, pdyn_patch = plt.hist(pdyn, bins = logbins, color = '#ddaa33', histtype = 'stepfilled', alpha = 0.5) pmag_n, pmag_bins, pmag_patch = plt.hist(pmag, bins = logbins, color = '#004488', histtype = 'stepfilled', alpha = 0.5) ptherm_n, ptherm_bins, ptherm_patch = plt.hist(ptherm, bins = logbins, color = '#bb5566', histtype = 'stepfilled', alpha = 0.5) plt.xscale('log') plt.xlim(10**(-4), 10**2) plt.xlabel('Pressure (nPa)') plt.legend(['$P_{dyn}$', '$P_{mag}$', '$P_{therm}$'], loc = 'upper left', frameon = False) plt.title('Solar Wind Pressure Sources') plt.savefig('sw_pressure_trans.png') plt.savefig('sw_pressure_trans.pdf', format = 'pdf')
[]
[]
[ "CDF_LIB" ]
[]
["CDF_LIB"]
python
1
0
tfx/components/pusher/executor_test.py
# Lint as: python2, python3 # Copyright 2019 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tfx.components.pusher.executor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tensorflow as tf from google.protobuf import json_format from tfx.components.pusher import executor from tfx.proto import pusher_pb2 from tfx.types import standard_artifacts class ExecutorTest(tf.test.TestCase): def setUp(self): super(ExecutorTest, self).setUp() self._source_data_dir = os.path.join( os.path.dirname(os.path.dirname(__file__)), 'testdata') self._output_data_dir = os.path.join( os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()), self._testMethodName) tf.io.gfile.makedirs(self._output_data_dir) self._model_export = standard_artifacts.Model() self._model_export.uri = os.path.join(self._source_data_dir, 'trainer/current/') self._model_blessing = standard_artifacts.ModelBlessing() self._input_dict = { 'model_export': [self._model_export], 'model_blessing': [self._model_blessing], } self._model_push = standard_artifacts.PushedModel() self._model_push.uri = os.path.join(self._output_data_dir, 'model_push') tf.io.gfile.makedirs(self._model_push.uri) self._output_dict = { 'model_push': [self._model_push], } self._serving_model_dir = os.path.join(self._output_data_dir, 'serving_model_dir') tf.io.gfile.makedirs(self._serving_model_dir) self._exec_properties = { 'push_destination': json_format.MessageToJson( pusher_pb2.PushDestination( filesystem=pusher_pb2.PushDestination.Filesystem( base_directory=self._serving_model_dir)), preserving_proto_field_name=True), } self._executor = executor.Executor() def testDoBlessed(self): self._model_blessing.uri = os.path.join(self._source_data_dir, 'model_validator/blessed/') self._model_blessing.set_int_custom_property('blessed', 1) self._executor.Do(self._input_dict, self._output_dict, self._exec_properties) self.assertNotEqual(0, len(tf.io.gfile.listdir(self._serving_model_dir))) self.assertNotEqual(0, len(tf.io.gfile.listdir(self._model_push.uri))) self.assertEqual( 1, self._model_push.artifact.custom_properties['pushed'].int_value) def testDoNotBlessed(self): self._model_blessing.uri = os.path.join(self._source_data_dir, 'model_validator/not_blessed/') self._model_blessing.set_int_custom_property('blessed', 0) self._executor.Do(self._input_dict, self._output_dict, self._exec_properties) self.assertEqual(0, len(tf.io.gfile.listdir(self._serving_model_dir))) self.assertEqual(0, len(tf.io.gfile.listdir(self._model_push.uri))) self.assertEqual( 0, self._model_push.artifact.custom_properties['pushed'].int_value) if __name__ == '__main__': tf.test.main()
[]
[]
[ "TEST_UNDECLARED_OUTPUTS_DIR" ]
[]
["TEST_UNDECLARED_OUTPUTS_DIR"]
python
1
0
backend-python/aiqa_geo_django/wsgi.py
""" WSGI config for aiqa_geo_django project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'aiqa_geo_django.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
dependency/dependency/wsgi.py
""" WSGI config for native project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dependency.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
sdcflows/conftest.py
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # # Copyright 2021 The NiPreps Developers <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # We support and encourage derived works from this project, please read # about our expectations at # # https://www.nipreps.org/community/licensing/ # """py.test configuration.""" import os from pathlib import Path import numpy import nibabel import pytest from bids.layout import BIDSLayout test_data_env = os.getenv("TEST_DATA_HOME", str(Path.home() / "sdcflows-tests")) test_output_dir = os.getenv("TEST_OUTPUT_DIR") test_workdir = os.getenv("TEST_WORK_DIR") _sloppy_mode = os.getenv("TEST_PRODUCTION", "off").lower() not in ("on", "1", "true", "yes", "y") layouts = { p.name: BIDSLayout(str(p), validate=False, derivatives=True) for p in Path(test_data_env).glob("*") if p.is_dir() } data_dir = Path(__file__).parent / "tests" / "data" layouts.update({ folder.name: BIDSLayout(folder, validate=False, derivatives=False) for folder in data_dir.glob("ds*") if folder.is_dir() }) def pytest_report_header(config): return f"""\ TEST_DATA_HOME={test_data_env} -> Available datasets: {', '.join(layouts.keys())}. TEST_OUTPUT_DIR={test_output_dir or '<unset> (output files will be discarded)'}. TEST_WORK_DIR={test_workdir or '<unset> (intermediate files will be discarded)'}. """ @pytest.fixture(autouse=True) def add_np(doctest_namespace): doctest_namespace["np"] = numpy doctest_namespace["nb"] = nibabel doctest_namespace["os"] = os doctest_namespace["Path"] = Path doctest_namespace["layouts"] = layouts for key, val in list(layouts.items()): doctest_namespace[key] = Path(val.root) doctest_namespace["dsA_dir"] = data_dir / "dsA" doctest_namespace["dsB_dir"] = data_dir / "dsB" doctest_namespace["dsC_dir"] = data_dir / "dsC" @pytest.fixture def workdir(): return None if test_workdir is None else Path(test_workdir) @pytest.fixture def outdir(): return None if test_output_dir is None else Path(test_output_dir) @pytest.fixture def bids_layouts(): if layouts: return layouts pytest.skip() @pytest.fixture def datadir(): return Path(test_data_env) @pytest.fixture def testdata_dir(): return data_dir @pytest.fixture def dsA_dir(): return data_dir / "dsA" @pytest.fixture def sloppy_mode(): return _sloppy_mode
[]
[]
[ "TEST_WORK_DIR", "TEST_OUTPUT_DIR", "TEST_DATA_HOME", "TEST_PRODUCTION" ]
[]
["TEST_WORK_DIR", "TEST_OUTPUT_DIR", "TEST_DATA_HOME", "TEST_PRODUCTION"]
python
4
0
dataplane/linux/bpf_ep_mgr.go
// +build !windows // Copyright (c) 2020-2021 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package intdataplane import ( "bytes" "context" "encoding/binary" "encoding/json" "errors" "fmt" "net" "os" "os/exec" "reflect" "regexp" "runtime" "sort" "strings" "sync" "time" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "golang.org/x/sync/semaphore" "golang.org/x/sys/unix" "github.com/projectcalico/libcalico-go/lib/set" "github.com/projectcalico/felix/bpf" "github.com/projectcalico/felix/bpf/polprog" "github.com/projectcalico/felix/bpf/tc" "github.com/projectcalico/felix/idalloc" "github.com/projectcalico/felix/ifacemonitor" "github.com/projectcalico/felix/iptables" "github.com/projectcalico/felix/proto" "github.com/projectcalico/felix/ratelimited" ) const jumpMapCleanupInterval = 10 * time.Second var ( bpfEndpointsGauge = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "felix_bpf_dataplane_endpoints", Help: "Number of BPF endpoints managed in the dataplane.", }) bpfDirtyEndpointsGauge = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "felix_bpf_dirty_dataplane_endpoints", Help: "Number of BPF endpoints managed in the dataplane that are left dirty after a failure.", }) bpfHappyEndpointsGauge = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "felix_bpf_happy_dataplane_endpoints", Help: "Number of BPF endpoints that are successfully programmed.", }) ) func init() { prometheus.MustRegister(bpfEndpointsGauge) prometheus.MustRegister(bpfDirtyEndpointsGauge) prometheus.MustRegister(bpfHappyEndpointsGauge) } type bpfDataplane interface { ensureStarted() ensureProgramAttached(ap *tc.AttachPoint, polDirection PolDirection) (bpf.MapFD, error) ensureQdisc(iface string) error updatePolicyProgram(jumpMapFD bpf.MapFD, rules polprog.Rules) error removePolicyProgram(jumpMapFD bpf.MapFD) error setAcceptLocal(iface string, val bool) error } type bpfInterface struct { // info contains the information about the interface sent to us from external sources. For example, // the ID of the controlling workload interface and our current expectation of its "oper state". // When the info changes, we mark the interface dirty and refresh its dataplane state. info bpfInterfaceInfo // dpState contains the dataplane state that we've derived locally. It caches the result of updating // the interface (so changes to dpState don't cause the interface to be marked dirty). dpState bpfInterfaceState } type bpfInterfaceInfo struct { ifaceIsUp bool endpointID *proto.WorkloadEndpointID } type bpfInterfaceState struct { jumpMapFDs [2]bpf.MapFD } type bpfEndpointManager struct { // Main store of information about interfaces; indexed on interface name. 
ifacesLock sync.Mutex nameToIface map[string]bpfInterface allWEPs map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint happyWEPs map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint happyWEPsDirty bool policies map[proto.PolicyID]*proto.Policy profiles map[proto.ProfileID]*proto.Profile // Indexes policiesToWorkloads map[proto.PolicyID]set.Set /*proto.WorkloadEndpointID*/ profilesToWorkloads map[proto.ProfileID]set.Set /*proto.WorkloadEndpointID*/ dirtyIfaceNames set.Set bpfLogLevel string hostname string hostIP net.IP fibLookupEnabled bool dataIfaceRegex *regexp.Regexp workloadIfaceRegex *regexp.Regexp ipSetIDAlloc *idalloc.IDAllocator epToHostAction string vxlanMTU int vxlanPort uint16 dsrEnabled bool bpfExtToServiceConnmark int ipSetMap bpf.Map stateMap bpf.Map ruleRenderer bpfAllowChainRenderer iptablesFilterTable iptablesTable startupOnce sync.Once mapCleanupRunner *ratelimited.Runner // onStillAlive is called from loops to reset the watchdog. onStillAlive func() // HEP processing. hostIfaceToEpMap map[string]proto.HostEndpoint wildcardHostEndpoint proto.HostEndpoint wildcardExists bool // UT-able BPF dataplane interface. dp bpfDataplane ifaceToIpMap map[string]net.IP } type bpfAllowChainRenderer interface { WorkloadInterfaceAllowChains(endpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint) []*iptables.Chain } func newBPFEndpointManager( bpfLogLevel string, hostname string, fibLookupEnabled bool, epToHostAction string, dataIfaceRegex *regexp.Regexp, workloadIfaceRegex *regexp.Regexp, ipSetIDAlloc *idalloc.IDAllocator, vxlanMTU int, vxlanPort uint16, dsrEnabled bool, bpfExtToServiceConnmark int, ipSetMap bpf.Map, stateMap bpf.Map, iptablesRuleRenderer bpfAllowChainRenderer, iptablesFilterTable iptablesTable, livenessCallback func(), ) *bpfEndpointManager { if livenessCallback == nil { livenessCallback = func() {} } m := &bpfEndpointManager{ allWEPs: map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{}, happyWEPs: map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{}, happyWEPsDirty: true, policies: map[proto.PolicyID]*proto.Policy{}, profiles: map[proto.ProfileID]*proto.Profile{}, nameToIface: map[string]bpfInterface{}, policiesToWorkloads: map[proto.PolicyID]set.Set{}, profilesToWorkloads: map[proto.ProfileID]set.Set{}, dirtyIfaceNames: set.New(), bpfLogLevel: bpfLogLevel, hostname: hostname, fibLookupEnabled: fibLookupEnabled, dataIfaceRegex: dataIfaceRegex, workloadIfaceRegex: workloadIfaceRegex, ipSetIDAlloc: ipSetIDAlloc, epToHostAction: epToHostAction, vxlanMTU: vxlanMTU, vxlanPort: vxlanPort, dsrEnabled: dsrEnabled, bpfExtToServiceConnmark: bpfExtToServiceConnmark, ipSetMap: ipSetMap, stateMap: stateMap, ruleRenderer: iptablesRuleRenderer, iptablesFilterTable: iptablesFilterTable, mapCleanupRunner: ratelimited.NewRunner(jumpMapCleanupInterval, func(ctx context.Context) { log.Debug("Jump map cleanup triggered.") tc.CleanUpJumpMaps() }), onStillAlive: livenessCallback, hostIfaceToEpMap: map[string]proto.HostEndpoint{}, ifaceToIpMap: map[string]net.IP{}, } // Normally this endpoint manager uses its own dataplane implementation, but we have an // indirection here so that UT can simulate the dataplane and test how it's called. m.dp = m return m } // withIface handles the bookkeeping for working with a particular bpfInterface value. It // * creates the value if needed // * calls the giving callback with the value so it can be edited // * if the bpfInterface's info field changes, it marks it as dirty // * if the bpfInterface is now empty (no info or state), it cleans it up. 
func (m *bpfEndpointManager) withIface(ifaceName string, fn func(iface *bpfInterface) (forceDirty bool)) { iface := m.nameToIface[ifaceName] ifaceCopy := iface dirty := fn(&iface) logCtx := log.WithField("name", ifaceName) var zeroIface bpfInterface if iface == zeroIface { logCtx.Debug("Interface info is now empty.") delete(m.nameToIface, ifaceName) } else { // Always store the result (rather than checking the dirty flag) because dirty only covers the info.. m.nameToIface[ifaceName] = iface } dirty = dirty || iface.info != ifaceCopy.info if !dirty { return } logCtx.Debug("Marking iface dirty.") m.dirtyIfaceNames.Add(ifaceName) } func (m *bpfEndpointManager) OnUpdate(msg interface{}) { switch msg := msg.(type) { // Updates from the dataplane: // Interface updates. case *ifaceUpdate: m.onInterfaceUpdate(msg) case *ifaceAddrsUpdate: m.onInterfaceAddrsUpdate(msg) // Updates from the datamodel: // Workloads. case *proto.WorkloadEndpointUpdate: m.onWorkloadEndpointUpdate(msg) case *proto.WorkloadEndpointRemove: m.onWorkloadEnpdointRemove(msg) // Policies. case *proto.ActivePolicyUpdate: m.onPolicyUpdate(msg) case *proto.ActivePolicyRemove: m.onPolicyRemove(msg) // Profiles. case *proto.ActiveProfileUpdate: m.onProfileUpdate(msg) case *proto.ActiveProfileRemove: m.onProfileRemove(msg) case *proto.HostMetadataUpdate: if msg.Hostname == m.hostname { log.WithField("HostMetadataUpdate", msg).Info("Host IP changed") ip := net.ParseIP(msg.Ipv4Addr) if ip != nil { m.hostIP = ip // Should be safe without the lock since there shouldn't be any active background threads // but taking it now makes us robust to refactoring. m.ifacesLock.Lock() for ifaceName := range m.nameToIface { m.dirtyIfaceNames.Add(ifaceName) } m.ifacesLock.Unlock() } else { log.WithField("HostMetadataUpdate", msg).Warn("Cannot parse IP, no change applied") } } } } func (m *bpfEndpointManager) onInterfaceAddrsUpdate(update *ifaceAddrsUpdate) { var ipAddrs []net.IP m.ifacesLock.Lock() defer m.ifacesLock.Unlock() if update.Addrs != nil && update.Addrs.Len() > 0 { log.Debugf("Interface %+v received address update %+v", update.Name, update.Addrs) update.Addrs.Iter(func(item interface{}) error { ip := net.ParseIP(item.(string)) if ip.To4() != nil { ipAddrs = append(ipAddrs, ip) } return nil }) sort.Slice(ipAddrs, func(i, j int) bool { return bytes.Compare(ipAddrs[i], ipAddrs[j]) < 0 }) if len(ipAddrs) > 0 { ip, ok := m.ifaceToIpMap[update.Name] if !ok || !ip.Equal(ipAddrs[0]) { m.ifaceToIpMap[update.Name] = ipAddrs[0] m.dirtyIfaceNames.Add(update.Name) } } } else { _, ok := m.ifaceToIpMap[update.Name] if ok { delete(m.ifaceToIpMap, update.Name) m.dirtyIfaceNames.Add(update.Name) } } } func (m *bpfEndpointManager) onInterfaceUpdate(update *ifaceUpdate) { log.Debugf("Interface update for %v, state %v", update.Name, update.State) // Should be safe without the lock since there shouldn't be any active background threads // but taking it now makes us robust to refactoring. m.ifacesLock.Lock() defer m.ifacesLock.Unlock() if !m.isDataIface(update.Name) && !m.isWorkloadIface(update.Name) { log.WithField("update", update).Debug("Ignoring interface that's neither data nor workload.") return } m.withIface(update.Name, func(iface *bpfInterface) bool { iface.info.ifaceIsUp = update.State == ifacemonitor.StateUp // Note, only need to handle the mapping and unmapping of the host-* endpoint here. // For specific host endpoints OnHEPUpdate doesn't depend on iface state, and has // already stored and mapped as needed. 
if iface.info.ifaceIsUp { if _, hostEpConfigured := m.hostIfaceToEpMap[update.Name]; m.wildcardExists && !hostEpConfigured { log.Debugf("Map host-* endpoint for %v", update.Name) m.addHEPToIndexes(update.Name, &m.wildcardHostEndpoint) m.hostIfaceToEpMap[update.Name] = m.wildcardHostEndpoint } } else { if m.wildcardExists && reflect.DeepEqual(m.hostIfaceToEpMap[update.Name], m.wildcardHostEndpoint) { log.Debugf("Unmap host-* endpoint for %v", update.Name) m.removeHEPFromIndexes(update.Name, &m.wildcardHostEndpoint) delete(m.hostIfaceToEpMap, update.Name) } } return true // Force interface to be marked dirty in case we missed a transition during a resync. }) } // onWorkloadEndpointUpdate adds/updates the workload in the cache along with the index from active policy to // workloads using that policy. func (m *bpfEndpointManager) onWorkloadEndpointUpdate(msg *proto.WorkloadEndpointUpdate) { log.WithField("wep", msg.Endpoint).Debug("Workload endpoint update") wlID := *msg.Id oldWEP := m.allWEPs[wlID] m.removeWEPFromIndexes(wlID, oldWEP) wl := msg.Endpoint m.allWEPs[wlID] = wl m.addWEPToIndexes(wlID, wl) m.withIface(wl.Name, func(iface *bpfInterface) bool { iface.info.endpointID = &wlID return true // Force interface to be marked dirty in case policies changed. }) } // onWorkloadEndpointRemove removes the workload from the cache and the index, which maps from policy to workload. func (m *bpfEndpointManager) onWorkloadEnpdointRemove(msg *proto.WorkloadEndpointRemove) { wlID := *msg.Id log.WithField("id", wlID).Debug("Workload endpoint removed") oldWEP := m.allWEPs[wlID] m.removeWEPFromIndexes(wlID, oldWEP) delete(m.allWEPs, wlID) if m.happyWEPs[wlID] != nil { delete(m.happyWEPs, wlID) m.happyWEPsDirty = true } m.withIface(oldWEP.Name, func(iface *bpfInterface) bool { iface.info.endpointID = nil return false }) } // onPolicyUpdate stores the policy in the cache and marks any endpoints using it dirty. func (m *bpfEndpointManager) onPolicyUpdate(msg *proto.ActivePolicyUpdate) { polID := *msg.Id log.WithField("id", polID).Debug("Policy update") m.policies[polID] = msg.Policy m.markEndpointsDirty(m.policiesToWorkloads[polID], "policy") } // onPolicyRemove removes the policy from the cache and marks any endpoints using it dirty. // The latter should be a no-op due to the ordering guarantees of the calc graph. func (m *bpfEndpointManager) onPolicyRemove(msg *proto.ActivePolicyRemove) { polID := *msg.Id log.WithField("id", polID).Debug("Policy removed") m.markEndpointsDirty(m.policiesToWorkloads[polID], "policy") delete(m.policies, polID) delete(m.policiesToWorkloads, polID) } // onProfileUpdate stores the profile in the cache and marks any endpoints that use it as dirty. func (m *bpfEndpointManager) onProfileUpdate(msg *proto.ActiveProfileUpdate) { profID := *msg.Id log.WithField("id", profID).Debug("Profile update") m.profiles[profID] = msg.Profile m.markEndpointsDirty(m.profilesToWorkloads[profID], "profile") } // onProfileRemove removes the profile from the cache and marks any endpoints that were using it as dirty. // The latter should be a no-op due to the ordering guarantees of the calc graph. 
func (m *bpfEndpointManager) onProfileRemove(msg *proto.ActiveProfileRemove) { profID := *msg.Id log.WithField("id", profID).Debug("Profile removed") m.markEndpointsDirty(m.profilesToWorkloads[profID], "profile") delete(m.profiles, profID) delete(m.profilesToWorkloads, profID) } func (m *bpfEndpointManager) markEndpointsDirty(ids set.Set, kind string) { if ids == nil { // Hear about the policy/profile before the endpoint. return } ids.Iter(func(item interface{}) error { switch id := item.(type) { case proto.WorkloadEndpointID: m.markExistingWEPDirty(id, kind) case string: if id == allInterfaces { for ifaceName := range m.nameToIface { if m.isWorkloadIface(ifaceName) { log.Debugf("Mark WEP iface dirty, for host-* endpoint %v change", kind) m.dirtyIfaceNames.Add(ifaceName) } } } else { log.Debugf("Mark host iface dirty, for host %v change", kind) m.dirtyIfaceNames.Add(id) } } return nil }) } func (m *bpfEndpointManager) markExistingWEPDirty(wlID proto.WorkloadEndpointID, mapping string) { wep := m.allWEPs[wlID] if wep == nil { log.WithField("wlID", wlID).Panicf( "BUG: %s mapping points to unknown workload.", mapping) } else { m.dirtyIfaceNames.Add(wep.Name) } } func (m *bpfEndpointManager) CompleteDeferredWork() error { // Do one-off initialisation. m.dp.ensureStarted() m.applyProgramsToDirtyDataInterfaces() m.updateWEPsInDataplane() bpfEndpointsGauge.Set(float64(len(m.nameToIface))) bpfDirtyEndpointsGauge.Set(float64(m.dirtyIfaceNames.Len())) if m.happyWEPsDirty { chains := m.ruleRenderer.WorkloadInterfaceAllowChains(m.happyWEPs) m.iptablesFilterTable.UpdateChains(chains) m.happyWEPsDirty = false } bpfHappyEndpointsGauge.Set(float64(len(m.happyWEPs))) return nil } func (m *bpfEndpointManager) applyProgramsToDirtyDataInterfaces() { var mutex sync.Mutex errs := map[string]error{} var wg sync.WaitGroup m.dirtyIfaceNames.Iter(func(item interface{}) error { iface := item.(string) if !m.isDataIface(iface) { log.WithField("iface", iface).Debug( "Ignoring interface that doesn't match the host data interface regex") return nil } if !m.ifaceIsUp(iface) { log.WithField("iface", iface).Debug("Ignoring interface that is down") return set.RemoveItem } wg.Add(1) go func() { defer wg.Done() // Attach the qdisc first; it is shared between the directions. err := m.dp.ensureQdisc(iface) if err != nil { mutex.Lock() errs[iface] = err mutex.Unlock() return } var hepPtr *proto.HostEndpoint if hep, hepExists := m.hostIfaceToEpMap[iface]; hepExists { hepPtr = &hep } var ingressWG sync.WaitGroup var ingressErr error ingressWG.Add(1) go func() { defer ingressWG.Done() ingressErr = m.attachDataIfaceProgram(iface, hepPtr, PolDirnIngress) }() err = m.attachDataIfaceProgram(iface, hepPtr, PolDirnEgress) ingressWG.Wait() if err == nil { err = ingressErr } if err == nil { // This is required to allow NodePort forwarding with // encapsulation with the host's IP as the source address err = m.dp.setAcceptLocal(iface, true) } mutex.Lock() errs[iface] = err mutex.Unlock() }() return nil }) wg.Wait() m.dirtyIfaceNames.Iter(func(item interface{}) error { iface := item.(string) if !m.isDataIface(iface) { log.WithField("iface", iface).Debug( "Ignoring interface that doesn't match the host data interface regex") return nil } err := errs[iface] if err == nil { log.WithField("id", iface).Info("Applied program to host interface") return set.RemoveItem } if errors.Is(err, tc.ErrDeviceNotFound) { log.WithField("iface", iface).Debug( "Tried to apply BPF program to interface but the interface wasn't present. 
" + "Will retry if it shows up.") return set.RemoveItem } log.WithError(err).Warn("Failed to apply policy to interface") return nil }) } func (m *bpfEndpointManager) updateWEPsInDataplane() { var mutex sync.Mutex errs := map[string]error{} var wg sync.WaitGroup // Limit the number of parallel workers. Without this, all the workers vie for CPU and complete slowly. // On a constrained system, we can end up taking too long and going non-ready. maxWorkers := runtime.GOMAXPROCS(0) sem := semaphore.NewWeighted(int64(maxWorkers)) m.dirtyIfaceNames.Iter(func(item interface{}) error { ifaceName := item.(string) if !m.isWorkloadIface(ifaceName) { return nil } if err := sem.Acquire(context.Background(), 1); err != nil { // Should only happen if the context finishes. log.WithError(err).Panic("Failed to acquire semaphore") } m.onStillAlive() wg.Add(1) go func(ifaceName string) { defer wg.Done() defer sem.Release(1) err := m.applyPolicy(ifaceName) mutex.Lock() errs[ifaceName] = err mutex.Unlock() }(ifaceName) return nil }) wg.Wait() if m.dirtyIfaceNames.Len() > 0 { // Clean up any left-over jump maps in the background... m.mapCleanupRunner.Trigger() } m.dirtyIfaceNames.Iter(func(item interface{}) error { ifaceName := item.(string) if !m.isWorkloadIface(ifaceName) { return nil } err := errs[ifaceName] wlID := m.nameToIface[ifaceName].info.endpointID if err == nil { log.WithField("iface", ifaceName).Info("Updated workload interface.") if wlID != nil && m.allWEPs[*wlID] != nil { if m.happyWEPs[*wlID] == nil { log.WithField("id", wlID).Info("Adding workload interface to iptables allow list.") m.happyWEPsDirty = true } m.happyWEPs[*wlID] = m.allWEPs[*wlID] } return set.RemoveItem } else { if wlID != nil && m.happyWEPs[*wlID] != nil { if !errors.Is(err, tc.ErrDeviceNotFound) { log.WithField("id", *wlID).WithError(err).Warning( "Failed to add policy to workload, removing from iptables allow list") } delete(m.happyWEPs, *wlID) m.happyWEPsDirty = true } } if errors.Is(err, tc.ErrDeviceNotFound) { log.WithField("wep", wlID).Debug( "Tried to apply BPF program to interface but the interface wasn't present. " + "Will retry if it shows up.") return set.RemoveItem } log.WithError(err).WithFields(log.Fields{ "wepID": wlID, "name": ifaceName, }).Warn("Failed to apply policy to endpoint, leaving it dirty") return nil }) } // applyPolicy actually applies the policy to the given workload. func (m *bpfEndpointManager) applyPolicy(ifaceName string) error { startTime := time.Now() // Other threads might be filling in jump map FDs in the map so take the lock. m.ifacesLock.Lock() var endpointID *proto.WorkloadEndpointID var ifaceUp bool m.withIface(ifaceName, func(iface *bpfInterface) (forceDirty bool) { ifaceUp = iface.info.ifaceIsUp endpointID = iface.info.endpointID if !ifaceUp { log.WithField("iface", ifaceName).Debug("Interface is down/gone, closing jump maps.") for i := range iface.dpState.jumpMapFDs { if iface.dpState.jumpMapFDs[i] > 0 { err := iface.dpState.jumpMapFDs[i].Close() if err != nil { log.WithError(err).Error("Failed to close jump map.") } iface.dpState.jumpMapFDs[i] = 0 } } } return false }) m.ifacesLock.Unlock() if !ifaceUp { // Interface is gone, nothing to do. log.WithField("ifaceName", ifaceName).Debug( "Ignoring request to program interface that is not present.") return nil } // Otherwise, the interface appears to be present but we may or may not have an endpoint from the // datastore. 
If we don't have an endpoint then we'll attach a program to block traffic and we'll // get the jump map ready to insert the policy if the endpoint shows up. // Attach the qdisc first; it is shared between the directions. err := m.dp.ensureQdisc(ifaceName) if err != nil { if errors.Is(err, tc.ErrDeviceNotFound) { // Interface is gone, nothing to do. log.WithField("ifaceName", ifaceName).Debug( "Ignoring request to program interface that is not present.") return nil } return err } var ingressErr, egressErr error var wg sync.WaitGroup var wep *proto.WorkloadEndpoint if endpointID != nil { wep = m.allWEPs[*endpointID] } wg.Add(2) go func() { defer wg.Done() ingressErr = m.attachWorkloadProgram(ifaceName, wep, PolDirnIngress) }() go func() { defer wg.Done() egressErr = m.attachWorkloadProgram(ifaceName, wep, PolDirnEgress) }() wg.Wait() if ingressErr != nil { return ingressErr } if egressErr != nil { return egressErr } applyTime := time.Since(startTime) log.WithField("timeTaken", applyTime).Info("Finished applying BPF programs for workload") return nil } var calicoRouterIP = net.IPv4(169, 254, 1, 1).To4() func (m *bpfEndpointManager) attachWorkloadProgram(ifaceName string, endpoint *proto.WorkloadEndpoint, polDirection PolDirection) error { ap := m.calculateTCAttachPoint(polDirection, ifaceName) // Host side of the veth is always configured as 169.254.1.1. ap.HostIP = calicoRouterIP // * VXLAN MTU should be the host ifaces MTU -50, in order to allow space for VXLAN. // * We also expect that to be the MTU used on veths. // * We do encap on the veths, and there's a bogus kernel MTU check in the BPF helper // for resizing the packet, so we have to reduce the apparent MTU by another 50 bytes // when we cannot encap the packet - non-GSO & too close to veth MTU ap.TunnelMTU = uint16(m.vxlanMTU - 50) ap.IntfIP = calicoRouterIP ap.ExtToServiceConnmark = uint32(m.bpfExtToServiceConnmark) jumpMapFD, err := m.dp.ensureProgramAttached(&ap, polDirection) if err != nil { return err } var profileIDs []string var tier *proto.TierInfo if endpoint != nil { profileIDs = endpoint.ProfileIds if len(endpoint.Tiers) != 0 { tier = endpoint.Tiers[0] } } else { log.WithField("name", ifaceName).Debug( "Workload interface with no endpoint in datastore, installing default-drop program.") } // If tier or profileIDs is nil, this will return an empty set of rules but updatePolicyProgram appends a // drop rule, giving us default drop behaviour in that case. rules := m.extractRules(tier, profileIDs, polDirection) // If host-* endpoint is configured, add in its policy. if m.wildcardExists { m.addHostPolicy(&rules, &m.wildcardHostEndpoint, polDirection.Inverse()) } // If workload egress and DefaultEndpointToHostAction is ACCEPT or DROP, suppress the normal // host-* endpoint policy. if polDirection == PolDirnEgress && m.epToHostAction != "RETURN" { rules.SuppressNormalHostPolicy = true } // If host -> workload, always suppress the normal host-* endpoint policy. if polDirection == PolDirnIngress { rules.SuppressNormalHostPolicy = true } return m.dp.updatePolicyProgram(jumpMapFD, rules) } func (m *bpfEndpointManager) addHostPolicy(rules *polprog.Rules, hostEndpoint *proto.HostEndpoint, polDirection PolDirection) { // When there is applicable pre-DNAT policy that does not explicitly Allow or Deny traffic, // we continue on to subsequent tiers and normal or AoF policy. 
if len(hostEndpoint.PreDnatTiers) == 1 { rules.HostPreDnatTiers = m.extractTiers(hostEndpoint.PreDnatTiers[0], polDirection, NoEndTierDrop) } // When there is applicable apply-on-forward policy that does not explicitly Allow or Deny // traffic, traffic is dropped. if len(hostEndpoint.ForwardTiers) == 1 { rules.HostForwardTiers = m.extractTiers(hostEndpoint.ForwardTiers[0], polDirection, EndTierDrop) } // When there is applicable normal policy that does not explicitly Allow or Deny traffic, // traffic is dropped. if len(hostEndpoint.Tiers) == 1 { rules.HostNormalTiers = m.extractTiers(hostEndpoint.Tiers[0], polDirection, EndTierDrop) } rules.HostProfiles = m.extractProfiles(hostEndpoint.ProfileIds, polDirection) } func (m *bpfEndpointManager) ifaceIsUp(ifaceName string) (up bool) { m.ifacesLock.Lock() defer m.ifacesLock.Unlock() m.withIface(ifaceName, func(iface *bpfInterface) bool { up = iface.info.ifaceIsUp return false }) return } func (m *bpfEndpointManager) attachDataIfaceProgram(ifaceName string, ep *proto.HostEndpoint, polDirection PolDirection) error { ap := m.calculateTCAttachPoint(polDirection, ifaceName) ap.HostIP = m.hostIP ap.TunnelMTU = uint16(m.vxlanMTU) ap.ExtToServiceConnmark = uint32(m.bpfExtToServiceConnmark) ip, err := m.getInterfaceIP(ifaceName) if err != nil { log.Debugf("Error getting IP for interface %+v: %+v", ifaceName, err) ap.IntfIP = m.hostIP } else { ap.IntfIP = *ip } jumpMapFD, err := m.dp.ensureProgramAttached(&ap, polDirection) if err != nil { return err } if ep != nil { rules := polprog.Rules{ ForHostInterface: true, } m.addHostPolicy(&rules, ep, polDirection) return m.dp.updatePolicyProgram(jumpMapFD, rules) } return m.dp.removePolicyProgram(jumpMapFD) } // PolDirection is the Calico datamodel direction of policy. On a host endpoint, ingress is towards the host. // On a workload endpoint, ingress is towards the workload. type PolDirection int const ( PolDirnIngress PolDirection = iota PolDirnEgress ) func (polDirection PolDirection) Inverse() PolDirection { if polDirection == PolDirnIngress { return PolDirnEgress } return PolDirnIngress } func (m *bpfEndpointManager) calculateTCAttachPoint(policyDirection PolDirection, ifaceName string) tc.AttachPoint { var ap tc.AttachPoint var endpointType tc.EndpointType // Determine endpoint type. if m.isWorkloadIface(ifaceName) { endpointType = tc.EpTypeWorkload } else if ifaceName == "tunl0" { endpointType = tc.EpTypeTunnel } else if ifaceName == "wireguard.cali" { endpointType = tc.EpTypeWireguard } else if m.isDataIface(ifaceName) { endpointType = tc.EpTypeHost } else { log.Panicf("Unsupported ifaceName %v", ifaceName) } if endpointType == tc.EpTypeWorkload { // Policy direction is relative to the workload so, from the host namespace it's flipped. if policyDirection == PolDirnIngress { ap.Hook = tc.HookEgress } else { ap.Hook = tc.HookIngress } } else { // Host endpoints have the natural relationship between policy direction and hook. 
if policyDirection == PolDirnIngress { ap.Hook = tc.HookIngress } else { ap.Hook = tc.HookEgress } } var toOrFrom tc.ToOrFromEp if ap.Hook == tc.HookIngress { toOrFrom = tc.FromEp } else { toOrFrom = tc.ToEp } ap.Iface = ifaceName ap.Type = endpointType ap.ToOrFrom = toOrFrom ap.ToHostDrop = (m.epToHostAction == "DROP") ap.FIB = m.fibLookupEnabled ap.DSR = m.dsrEnabled ap.LogLevel = m.bpfLogLevel ap.VXLANPort = m.vxlanPort return ap } const EndTierDrop = true const NoEndTierDrop = false func (m *bpfEndpointManager) extractTiers(tier *proto.TierInfo, direction PolDirection, endTierDrop bool) (rTiers []polprog.Tier) { if tier == nil { return } directionalPols := tier.IngressPolicies if direction == PolDirnEgress { directionalPols = tier.EgressPolicies } if len(directionalPols) > 0 { polTier := polprog.Tier{ Name: tier.Name, Policies: make([]polprog.Policy, len(directionalPols)), } for i, polName := range directionalPols { pol := m.policies[proto.PolicyID{Tier: tier.Name, Name: polName}] var prules []*proto.Rule if direction == PolDirnIngress { prules = pol.InboundRules } else { prules = pol.OutboundRules } policy := polprog.Policy{ Name: polName, Rules: make([]polprog.Rule, len(prules)), } for ri, r := range prules { policy.Rules[ri] = polprog.Rule{ Rule: r, } } polTier.Policies[i] = policy } if endTierDrop { polTier.EndAction = polprog.TierEndDeny } else { polTier.EndAction = polprog.TierEndPass } rTiers = append(rTiers, polTier) } return } func (m *bpfEndpointManager) extractProfiles(profileNames []string, direction PolDirection) (rProfiles []polprog.Profile) { if count := len(profileNames); count > 0 { rProfiles = make([]polprog.Profile, count) for i, profName := range profileNames { prof := m.profiles[proto.ProfileID{Name: profName}] var prules []*proto.Rule if direction == PolDirnIngress { prules = prof.InboundRules } else { prules = prof.OutboundRules } profile := polprog.Profile{ Name: profName, Rules: make([]polprog.Rule, len(prules)), } for ri, r := range prules { profile.Rules[ri] = polprog.Rule{ Rule: r, } } rProfiles[i] = profile } } return } func (m *bpfEndpointManager) extractRules(tier *proto.TierInfo, profileNames []string, direction PolDirection) polprog.Rules { var r polprog.Rules // When there is applicable normal policy that does not explicitly Allow or Deny traffic, // traffic is dropped. 
r.Tiers = m.extractTiers(tier, direction, EndTierDrop) r.Profiles = m.extractProfiles(profileNames, direction) return r } func (m *bpfEndpointManager) isWorkloadIface(iface string) bool { return m.workloadIfaceRegex.MatchString(iface) } func (m *bpfEndpointManager) isDataIface(iface string) bool { return m.dataIfaceRegex.MatchString(iface) } func (m *bpfEndpointManager) addWEPToIndexes(wlID proto.WorkloadEndpointID, wl *proto.WorkloadEndpoint) { for _, t := range wl.Tiers { m.addPolicyToEPMappings(t.IngressPolicies, wlID) m.addPolicyToEPMappings(t.EgressPolicies, wlID) } m.addProfileToEPMappings(wl.ProfileIds, wlID) } func (m *bpfEndpointManager) addPolicyToEPMappings(polNames []string, id interface{}) { for _, pol := range polNames { polID := proto.PolicyID{ Tier: "default", Name: pol, } if m.policiesToWorkloads[polID] == nil { m.policiesToWorkloads[polID] = set.New() } m.policiesToWorkloads[polID].Add(id) } } func (m *bpfEndpointManager) addProfileToEPMappings(profileIds []string, id interface{}) { for _, profName := range profileIds { profID := proto.ProfileID{Name: profName} profSet := m.profilesToWorkloads[profID] if profSet == nil { profSet = set.New() m.profilesToWorkloads[profID] = profSet } profSet.Add(id) } } func (m *bpfEndpointManager) removeWEPFromIndexes(wlID proto.WorkloadEndpointID, wep *proto.WorkloadEndpoint) { if wep == nil { return } for _, t := range wep.Tiers { m.removePolicyToEPMappings(t.IngressPolicies, wlID) m.removePolicyToEPMappings(t.EgressPolicies, wlID) } m.removeProfileToEPMappings(wep.ProfileIds, wlID) m.withIface(wep.Name, func(iface *bpfInterface) bool { iface.info.endpointID = nil return false }) } func (m *bpfEndpointManager) removePolicyToEPMappings(polNames []string, id interface{}) { for _, pol := range polNames { polID := proto.PolicyID{ Tier: "default", Name: pol, } polSet := m.policiesToWorkloads[polID] if polSet == nil { continue } polSet.Discard(id) if polSet.Len() == 0 { // Defensive; we also clean up when the profile is removed. delete(m.policiesToWorkloads, polID) } } } func (m *bpfEndpointManager) removeProfileToEPMappings(profileIds []string, id interface{}) { for _, profName := range profileIds { profID := proto.ProfileID{Name: profName} profSet := m.profilesToWorkloads[profID] if profSet == nil { continue } profSet.Discard(id) if profSet.Len() == 0 { // Defensive; we also clean up when the policy is removed. delete(m.profilesToWorkloads, profID) } } } func (m *bpfEndpointManager) OnHEPUpdate(hostIfaceToEpMap map[string]proto.HostEndpoint) { if m == nil { return } log.Debugf("HEP update from generic endpoint manager: %v", hostIfaceToEpMap) // Pre-process the map for the host-* endpoint: if there is a host-* endpoint, any host // interface without its own HEP should use the host-* endpoint's policy. wildcardHostEndpoint, wildcardExists := hostIfaceToEpMap[allInterfaces] if wildcardExists { log.Info("Host-* endpoint is configured") for ifaceName := range m.nameToIface { if _, specificExists := hostIfaceToEpMap[ifaceName]; m.isDataIface(ifaceName) && !specificExists { log.Infof("Use host-* endpoint policy for %v", ifaceName) hostIfaceToEpMap[ifaceName] = wildcardHostEndpoint } } delete(hostIfaceToEpMap, allInterfaces) } // If there are parts of proto.HostEndpoint that do not affect us, we could mask those out // here so that they can't cause spurious updates - at the cost of having different // proto.HostEndpoint data here than elsewhere. For example, the ExpectedIpv4Addrs and // ExpectedIpv6Addrs fields. 
But currently there are no fields that are sufficiently likely // to change as to make this worthwhile. // If the host-* endpoint is changing, mark all workload interfaces as dirty. if (wildcardExists != m.wildcardExists) || !reflect.DeepEqual(wildcardHostEndpoint, m.wildcardHostEndpoint) { log.Infof("Host-* endpoint is changing; was %v, now %v", m.wildcardHostEndpoint, wildcardHostEndpoint) m.removeHEPFromIndexes(allInterfaces, &m.wildcardHostEndpoint) m.wildcardHostEndpoint = wildcardHostEndpoint m.wildcardExists = wildcardExists m.addHEPToIndexes(allInterfaces, &wildcardHostEndpoint) for ifaceName := range m.nameToIface { if m.isWorkloadIface(ifaceName) { log.Info("Mark WEP iface dirty, for host-* endpoint change") m.dirtyIfaceNames.Add(ifaceName) } } } // Loop through existing host endpoints, in case they are changing or disappearing. for ifaceName, existingEp := range m.hostIfaceToEpMap { newEp, stillExists := hostIfaceToEpMap[ifaceName] if stillExists && reflect.DeepEqual(newEp, existingEp) { log.Debugf("No change to host endpoint for ifaceName=%v", ifaceName) } else { m.removeHEPFromIndexes(ifaceName, &existingEp) if stillExists { log.Infof("Host endpoint changing for ifaceName=%v", ifaceName) m.addHEPToIndexes(ifaceName, &newEp) m.hostIfaceToEpMap[ifaceName] = newEp } else { log.Infof("Host endpoint deleted for ifaceName=%v", ifaceName) delete(m.hostIfaceToEpMap, ifaceName) } m.dirtyIfaceNames.Add(ifaceName) } delete(hostIfaceToEpMap, ifaceName) } // Now anything remaining in hostIfaceToEpMap must be a new host endpoint. for ifaceName, newEp := range hostIfaceToEpMap { if !m.isDataIface(ifaceName) { log.Warningf("Host endpoint configured for ifaceName=%v, but that doesn't match BPFDataIfacePattern; ignoring", ifaceName) continue } log.Infof("Host endpoint added for ifaceName=%v", ifaceName) m.addHEPToIndexes(ifaceName, &newEp) m.hostIfaceToEpMap[ifaceName] = newEp m.dirtyIfaceNames.Add(ifaceName) } } func (m *bpfEndpointManager) addHEPToIndexes(ifaceName string, ep *proto.HostEndpoint) { for _, tiers := range [][]*proto.TierInfo{ep.Tiers, ep.UntrackedTiers, ep.PreDnatTiers, ep.ForwardTiers} { for _, t := range tiers { m.addPolicyToEPMappings(t.IngressPolicies, ifaceName) m.addPolicyToEPMappings(t.EgressPolicies, ifaceName) } } m.addProfileToEPMappings(ep.ProfileIds, ifaceName) } func (m *bpfEndpointManager) removeHEPFromIndexes(ifaceName string, ep *proto.HostEndpoint) { for _, tiers := range [][]*proto.TierInfo{ep.Tiers, ep.UntrackedTiers, ep.PreDnatTiers, ep.ForwardTiers} { for _, t := range tiers { m.removePolicyToEPMappings(t.IngressPolicies, ifaceName) m.removePolicyToEPMappings(t.EgressPolicies, ifaceName) } } m.removeProfileToEPMappings(ep.ProfileIds, ifaceName) } // Dataplane code. // // We don't yet have an enforced dividing line between the "manager" and "dataplane" parts of the // BPF endpoint manager. But we do have an indirection (the `dp` field) that allows us to UT the // "manager" logic on its own, and it's useful to keep a separation in mind so that we can continue // to UT in that way. // // As a small help for that, all of the "dataplane" code comes after this point in the file, and all // of the "manager" code above. 
func (m *bpfEndpointManager) setAcceptLocal(iface string, val bool) error { numval := "0" if val { numval = "1" } path := fmt.Sprintf("/proc/sys/net/ipv4/conf/%s/accept_local", iface) err := writeProcSys(path, numval) if err != nil { log.WithField("err", err).Errorf("Failed to set %s to %s", path, numval) return err } log.Infof("%s set to %s", path, numval) return nil } func (m *bpfEndpointManager) ensureStarted() { m.startupOnce.Do(func() { log.Info("Starting map cleanup runner.") m.mapCleanupRunner.Start(context.Background()) }) } func (m *bpfEndpointManager) ensureQdisc(iface string) error { return tc.EnsureQdisc(iface) } // Ensure TC program is attached to the specified interface and return its jump map FD. func (m *bpfEndpointManager) ensureProgramAttached(ap *tc.AttachPoint, polDirection PolDirection) (bpf.MapFD, error) { jumpMapFD := m.getJumpMapFD(ap.Iface, polDirection) if jumpMapFD != 0 { if attached, err := ap.IsAttached(); err != nil { return jumpMapFD, fmt.Errorf("failed to check if interface %s had BPF program; %w", ap.Iface, err) } else if !attached { // BPF program is missing; maybe we missed a notification of the interface being recreated? // Close the now-defunct jump map. log.WithField("iface", ap.Iface).Info( "Detected that BPF program no longer attached to interface.") err := jumpMapFD.Close() if err != nil { log.WithError(err).Warn("Failed to close jump map FD. Ignoring.") } m.setJumpMapFD(ap.Iface, polDirection, 0) jumpMapFD = 0 // Trigger program to be re-added below. } } if jumpMapFD == 0 { // We don't have a program attached to this interface yet, attach one now. err := ap.AttachProgram() if err != nil { return 0, err } jumpMapFD, err = FindJumpMap(ap) if err != nil { return 0, fmt.Errorf("failed to look up jump map: %w", err) } m.setJumpMapFD(ap.Iface, polDirection, jumpMapFD) } return jumpMapFD, nil } func (m *bpfEndpointManager) getJumpMapFD(ifaceName string, direction PolDirection) (fd bpf.MapFD) { m.ifacesLock.Lock() defer m.ifacesLock.Unlock() m.withIface(ifaceName, func(iface *bpfInterface) bool { fd = iface.dpState.jumpMapFDs[direction] return false }) return } func (m *bpfEndpointManager) setJumpMapFD(name string, direction PolDirection, fd bpf.MapFD) { m.ifacesLock.Lock() defer m.ifacesLock.Unlock() m.withIface(name, func(iface *bpfInterface) bool { iface.dpState.jumpMapFDs[direction] = fd return false }) } func (m *bpfEndpointManager) updatePolicyProgram(jumpMapFD bpf.MapFD, rules polprog.Rules) error { pg := polprog.NewBuilder(m.ipSetIDAlloc, m.ipSetMap.MapFD(), m.stateMap.MapFD(), jumpMapFD) insns, err := pg.Instructions(rules) if err != nil { return fmt.Errorf("failed to generate policy bytecode: %w", err) } progFD, err := bpf.LoadBPFProgramFromInsns(insns, "Apache-2.0") if err != nil { return fmt.Errorf("failed to load BPF policy program: %w", err) } defer func() { // Once we've put the program in the map, we don't need its FD any more. 
err := progFD.Close() if err != nil { log.WithError(err).Panic("Failed to close program FD.") } }() k := make([]byte, 4) v := make([]byte, 4) binary.LittleEndian.PutUint32(v, uint32(progFD)) err = bpf.UpdateMapEntry(jumpMapFD, k, v) if err != nil { return fmt.Errorf("failed to update jump map: %w", err) } return nil } func (m *bpfEndpointManager) removePolicyProgram(jumpMapFD bpf.MapFD) error { k := make([]byte, 4) err := bpf.DeleteMapEntryIfExists(jumpMapFD, k, 4) if err != nil { return fmt.Errorf("failed to update jump map: %w", err) } return nil } func FindJumpMap(ap *tc.AttachPoint) (mapFD bpf.MapFD, err error) { logCtx := log.WithField("iface", ap.Iface) logCtx.Debug("Looking up jump map.") out, err := tc.ExecTC("filter", "show", "dev", ap.Iface, string(ap.Hook)) if err != nil { return 0, fmt.Errorf("failed to find TC filter for interface %v: %w", ap.Iface, err) } progName := ap.ProgramName() for _, line := range strings.Split(out, "\n") { if strings.Contains(line, progName) { re := regexp.MustCompile(`id (\d+)`) m := re.FindStringSubmatch(line) if len(m) > 0 { progIDStr := m[1] bpftool := exec.Command("bpftool", "prog", "show", "id", progIDStr, "--json") output, err := bpftool.Output() if err != nil { // We can hit this case if the interface was deleted underneath us; check that it's still there. if _, err := os.Stat(fmt.Sprintf("/proc/sys/net/ipv4/conf/%s", ap.Iface)); os.IsNotExist(err) { return 0, tc.ErrDeviceNotFound } return 0, fmt.Errorf("failed to get map metadata: %w", err) } var prog struct { MapIDs []int `json:"map_ids"` } err = json.Unmarshal(output, &prog) if err != nil { return 0, fmt.Errorf("failed to parse bpftool output: %w", err) } for _, mapID := range prog.MapIDs { mapFD, err := bpf.GetMapFDByID(mapID) if err != nil { return 0, fmt.Errorf("failed to get map FD from ID: %w", err) } mapInfo, err := bpf.GetMapInfo(mapFD) if err != nil { err = mapFD.Close() if err != nil { log.WithError(err).Panic("Failed to close FD.") } return 0, fmt.Errorf("failed to get map info: %w", err) } if mapInfo.Type == unix.BPF_MAP_TYPE_PROG_ARRAY { logCtx.WithField("fd", mapFD).Debug("Found jump map") return mapFD, nil } err = mapFD.Close() if err != nil { log.WithError(err).Panic("Failed to close FD.") } } } return 0, errors.New("failed to find map") } } return 0, errors.New("failed to find TC program") } func (m *bpfEndpointManager) getInterfaceIP(ifaceName string) (*net.IP, error) { var ipAddrs []net.IP if ip, ok := m.ifaceToIpMap[ifaceName]; ok { return &ip, nil } intf, err := net.InterfaceByName(ifaceName) if err != nil { return nil, err } addrs, err := intf.Addrs() if err != nil { return nil, err } for _, addr := range addrs { switch t := addr.(type) { case *net.IPNet: if t.IP.To4() != nil { ipAddrs = append(ipAddrs, t.IP) } } } sort.Slice(ipAddrs, func(i, j int) bool { return bytes.Compare(ipAddrs[i], ipAddrs[j]) < 0 }) if len(ipAddrs) > 0 { return &ipAddrs[0], nil } return nil, errors.New("interface ip address not found") }
[]
[]
[]
[]
[]
go
null
null
null
test/testdata/baseTest/sample_test.go
package main import ( "fmt" "os" "testing" ) func hello() { fmt.Println("Hello") } // TestMe func TestMe(t *testing.T) { if os.Getenv("dummyEnvVar") != "dummyEnvValue" { t.Errorf("Oops! Value for the variable is %q", os.Getenv("dummyEnvVar")) } }
[ "\"dummyEnvVar\"", "\"dummyEnvVar\"" ]
[]
[ "dummyEnvVar" ]
[]
["dummyEnvVar"]
go
1
0
vendor/src/github.com/sendgrid/sendgrid-go/examples/mailboxproviders/mailboxproviders.go
package main import ( "fmt" "github.com/sendgrid/sendgrid-go" "log" "os" ) /////////////////////////////////////////////////// // Retrieve email statistics by mailbox provider. // GET /mailbox_providers/stats func Retrieveemailstatisticsbymailboxprovider() { apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") host := "https://api.sendgrid.com" request := sendgrid.GetRequest(apiKey, "/v3/mailbox_providers/stats", host) request.Method = "GET" queryParams := make(map[string]string) queryParams["end_date"] = "2016-04-01" queryParams["mailbox_providers"] = "test_string" queryParams["aggregated_by"] = "day" queryParams["limit"] = "1" queryParams["offset"] = "1" queryParams["start_date"] = "2016-01-01" request.QueryParams = queryParams response, err := sendgrid.API(request) if err != nil { log.Println(err) } else { fmt.Println(response.StatusCode) fmt.Println(response.Body) fmt.Println(response.Headers) } } func main() { // add your function calls here }
[ "\"YOUR_SENDGRID_APIKEY\"" ]
[]
[ "YOUR_SENDGRID_APIKEY" ]
[]
["YOUR_SENDGRID_APIKEY"]
go
1
0
scripts/process_all.py
import os import subprocess import sys environments = [ {"NAME": "latest", "PYTHON_VERSION": "3.9"}, {"NAME": "python3.9-alpine3.13", "PYTHON_VERSION": "3.9"}, {"NAME": "python3.9-slim", "PYTHON_VERSION": "3.9"}, {"NAME": "python3.9", "PYTHON_VERSION": "3.9"}, {"NAME": "mambaforge", "PYTHON_VERSION": "Mambaforge 3.9"}, {"NAME": "miniforge3", "PYTHON_VERSION": "Miniforge3 3.9"}, ] start_with = os.environ.get("START_WITH") build_push = os.environ.get("BUILD_PUSH") def process_tag(*, env: dict): use_env = {**os.environ, **env} script = "scripts/test.sh" if build_push: script = "scripts/build-push.sh" return_code = subprocess.call(["bash", script], env=use_env) if return_code != 0: sys.exit(return_code) def print_version_envs(): env_lines = [] for env in environments: env_vars = [] for key, value in env.items(): env_vars.append(f"{key}='{value}'") env_lines.append(" ".join(env_vars)) for line in env_lines: print(line) def main(): start_at = 0 if start_with: start_at = [ i for i, env in enumerate((environments)) if env["NAME"] == start_with ][0] for i, env in enumerate(environments[start_at:]): print(f"Processing tag: {env['NAME']}") process_tag(env=env) if __name__ == "__main__": if len(sys.argv) > 1: print_version_envs() else: main()
[]
[]
[ "BUILD_PUSH", "START_WITH" ]
[]
["BUILD_PUSH", "START_WITH"]
python
2
0
todo/manage.py
#../env/bin python """Django's command-line utility for administrative tasks.""" import os import sys def main(): """Run administrative tasks.""" os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'togo.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
[]
[]
[]
[]
[]
python
0
0
testing/kfctl/kf_is_ready_test.py
import datetime import logging import os import subprocess import tempfile import uuid from retrying import retry import pytest from kubeflow.testing import util from testing import deploy_utils def test_kf_is_ready(namespace, use_basic_auth, use_istio): """Test that Kubeflow was successfully deployed. Args: namespace: The namespace Kubeflow is deployed to. """ logging.info("Using namespace %s", namespace) # Need to activate account for scopes. if os.getenv("GOOGLE_APPLICATION_CREDENTIALS"): util.run(["gcloud", "auth", "activate-service-account", "--key-file=" + os.environ["GOOGLE_APPLICATION_CREDENTIALS"]]) api_client = deploy_utils.create_k8s_client() util.load_kube_config() # Verify that components are actually deployed. # TODO(jlewi): We need to parameterize this list based on whether # we are using IAP or basic auth. deployment_names = [ "argo-ui", "centraldashboard", "cloud-endpoints-controller", "jupyter-web-app-deployment", "metadata-db", "metadata-deployment", "metadata-ui", "ml-pipeline", "ml-pipeline-scheduledworkflow", "ml-pipeline-ui", "notebook-controller-deployment", "tf-job-operator", "pytorch-operator", "katib-controller", "workflow-controller", ] ingress_related_deployments = [] stateful_sets = [] if use_basic_auth: deployment_names.extend(["basic-auth-login"]) stateful_sets.extend(["backend-updater"]) else: ingress_related_deployments.extend(["iap-enabler"]) stateful_sets.extend(["backend-updater"]) # TODO(jlewi): Might want to parallelize this. for deployment_name in deployment_names: logging.info("Verifying that deployment %s started...", deployment_name) util.wait_for_deployment(api_client, namespace, deployment_name, 10) ingress_namespace = "istio-system" if use_istio else namespace for deployment_name in ingress_related_deployments: logging.info("Verifying that deployment %s started...", deployment_name) util.wait_for_deployment(api_client, ingress_namespace, deployment_name, 10) for name in stateful_sets: logging.info("Verifying that statefulset %s started...", name) util.wait_for_statefulset(api_client, ingress_namespace, name) # TODO(jlewi): We should verify that the ingress is created and healthy. if __name__ == "__main__": logging.basicConfig(level=logging.INFO, format=('%(levelname)s|%(asctime)s' '|%(pathname)s|%(lineno)d| %(message)s'), datefmt='%Y-%m-%dT%H:%M:%S', ) logging.getLogger().setLevel(logging.INFO) pytest.main()
[]
[]
[ "GOOGLE_APPLICATION_CREDENTIALS" ]
[]
["GOOGLE_APPLICATION_CREDENTIALS"]
python
1
0
resample/fpcalc.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2013 pyReScene # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following # conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. import subprocess import inspect import os import sys import tempfile from distutils.spawn import find_executable from rescene.utility import fsunicode from resample.mp3 import Mp3Reader MSG_NOTFOUND = "The fpcalc executable isn't found." fpcalc_executable = "" class ExecutableNotFound(Exception): """The fpcalc.exe executable isn't found.""" def fingerprint(file_name, temp_dir=None, recursive=0): """Calculates the fingerprint of the given file. temp_dir: optional temporary directory to use recursive: local parameter to prevent endless loop after stripping tags""" duration = fp = b"" bad = False fpcalc = find_fpcalc_executable() temp_cleanup = False try: file_name.encode('ascii') except: # file has special characters # I don't know how to pass those to fpcalc # => create a temporary file for these rare cases # test release: VA-Tony_Hawks_Pro_Skater_4-Soundtrack-2003-RARNeT # copy the file with a default name and create the fp for that file print("Non-ASCII characters detected: creating temporary file.") temp_cleanup = True name_suffix = make_temp_suffix(file_name) (fd, tmpname) = tempfile.mkstemp(name_suffix, dir=temp_dir) os.close(fd) # we won't use it with open(file_name, "rb") as music_file: with open(tmpname, "wb") as tmpf: tmpf.write(music_file.read()) file_name = tmpname # Set fingerprint length to 120 seconds # older fpcalc versions default to 60 seconds fprint = custom_popen([fpcalc, '-length', '120', file_name]) stdout, _stderr = fprint.communicate() lines = stdout.split(os.linesep.encode("ascii")) for line in lines: if line.startswith(b"DURATION="): duration = line[len(b"DURATION="):] elif line.startswith(b"FINGERPRINT="): fp = line[len(b"FINGERPRINT="):] # ERROR: couldn't open the file # ERROR: unable to calculate fingerprint for file elif line.startswith(b"ERROR: couldn't open the file"): bad = True # ERROR: couldn't find stream information in the file # ERROR: unable to calculate fingerprint for file X.srs, skipping elif line.startswith(b"ERROR: couldn't find stream"): bad = True if not duration or not fp: bad = True if bad: # strip any recognized tags from the music file and try again # (ID3v2 tag around RIFF file) # e.g. 
(angelmoon)-hes_all_i_want_cd_pg2k-bmi # ERROR: couldn't find stream information in the file # ERROR: unable to calculate fingerprint for file x.mp3, skipping if recursive > 1: # tags have been stripped before already raise ValueError("Fingerprinting failed.") else: recursive += 1 print("Stripping recognized tags for better fpcalc detection.") name_suffix = make_temp_suffix(file_name) (fd, stripped) = tempfile.mkstemp(name_suffix, dir=temp_dir) os.close(fd) # we won't use it try: if recursive < 2: with open(stripped, "wb") as tmpf: mr = Mp3Reader(file_name) for block in mr.read(): if block.type in ("MP3", "fLaC"): # main music data read = 0 to_read = 65536 while read < block.size: if read + to_read > block.size: to_read = block.size - read tmpf.write(mr.read_part(to_read, read)) read += to_read break # exit for: music data copied mr.close() else: # no double tagging: try to strip away the crap # Yano2d-Der_Bunte_Hund_Im_Untergrund-WEB-DE-2014-CUSTODES_INT # has Adobe crap and something that looks like ascii art, # but in a hex editor with open(file_name, "rb") as orig: string_index = -1 current = 0 # 1) find real mp3 data based on certain strings while True: orig.seek(current, os.SEEK_SET) # +3 for border cases overlap bytespart = orig.read(0x10000 + 3) if not len(bytespart): break m1 = bytespart.find(b"Xing") m2 = bytespart.find(b"LAME") matches = [x for x in [m1, m2] if x >= 0] if len(matches): string_index = current + min(matches) break current += 0x10000 # 64KiB batches if string_index < 0: raise ValueError("Fingerprinting failed: " "no MP3 string found.") # 2) find last MP3 sync block before found string # 256 bytes: random amount that seems enough orig.seek(string_index - 0x100, os.SEEK_SET) stack = orig.read(0x100) sync_index = stack[:-1].rfind(b"\xFF") while sync_index > -1: next_byte = ord(stack[sync_index:sync_index + 1]) if next_byte & 0xE0 == 0xE0: break sync_index = stack.rfind(b"\xFF", 0, sync_index) # 3) write out the cleaned music data to fingerprint on with open(stripped, "wb") as tmpf: sync_start = string_index - (0x100 + sync_index) orig.seek(sync_start, os.SEEK_SET) tmpf.write(orig.read()) duration, fp = fingerprint(stripped, temp_dir, recursive) bad = False # it succeeded (exception otherwise) except: if recursive == 2: print("----------------------------------------------------") print("Tell me if the .sfv matches the music file!") print("Otherwise your file is most likely totally corrupt.") print("----------------------------------------------------") # Alpha_Blondy_and_The_Wailers-Jerusalem-1986-YARD track 3 # this would be a very rare case: # double bad tagging or just bad data? 
raise finally: # cleanup temporary stripped file print("Removing %s" % stripped) os.remove(stripped) if temp_cleanup: print("Removing %s" % tmpname) os.remove(tmpname) if bad: raise ValueError("Fingerprinting failed.") return duration, fp def find_fpcalc_executable(): # if we already located it before global fpcalc_executable if fpcalc_executable: return fpcalc_executable # see if it's in the path + other predefined locations # when running from source: check current directory # when running from source: check bin directory script_dir = os.path.dirname(os.path.abspath( inspect.getfile(inspect.currentframe()))) bin_dir = os.path.join(script_dir, "..", "bin") path = os.pathsep.join([script_dir, bin_dir, module_path(), os.getenv('PATH', "")]) result = find_executable("fpcalc", path=path) result = check_fpcalc_validity(result) if result: print(result) fpcalc_executable = result return fpcalc_executable else: raise ExecutableNotFound(MSG_NOTFOUND) def check_fpcalc_validity(potential_fpcalc_executable): """It tries to run the executable to check viability. Windows: (empty fpcalc.exe file in path) [Error 193] %1 is not a valid Win32 application Linux: [Errno 2] No such file or directory """ # fpcalc was not found if potential_fpcalc_executable is None: return None # something is wrong with the executable try: custom_popen([potential_fpcalc_executable]) except (OSError, IOError) as err: msg = None # Windows help messages try: if err.winerror == 216: # errno 8 msg = "fpcalc.exe has the wrong architecture" elif err.winerror == 193: # errno 22 msg = "fpcalc.exe is not an executable" except: pass # *nix help messages if not msg: try: if err.errno == 13: # Permission denied msg = "fpcalc has no execution rights" elif err.errno == 8: # Exec format error msg = "fpcalc has the wrong architecture" except: pass if msg: print(msg) return None except Exception as ex: print("Tell me about this unexpected error below!") print(ex) # any other exception should not happen return None # the executable ran just fine return potential_fpcalc_executable def make_temp_suffix(file_name): nm = "-pyReScene_fpcalc" if file_name.endswith(".flac"): nm += ".flac" else: nm += file_name[-4:] return nm # http://www.py2exe.org/index.cgi/WhereAmI def we_are_frozen(): """Returns whether we are frozen via py2exe. This will affect how we find out where we are located.""" return hasattr(sys, "frozen") def module_path(): """ This will get us the program's directory, even if we are frozen using py2exe""" if we_are_frozen(): return os.path.dirname(fsunicode(sys.executable)) return os.path.dirname(fsunicode(__file__)) def custom_popen(cmd): """disconnect cmd from parent fds, read only from stdout""" # needed for py2exe creationflags = 0 if sys.platform == 'win32': creationflags = 0x08000000 # CREATE_NO_WINDOW # run command return subprocess.Popen(cmd, bufsize=0, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT, creationflags=creationflags)
[]
[]
[ "PATH" ]
[]
["PATH"]
python
1
0
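A minimal usage sketch for the resample/fpcalc.py entry above, assuming the resample package is importable, an fpcalc binary can be located (on PATH or beside the module), and song.mp3 is a hypothetical input file; duration and fp come back as bytes.

from resample.fpcalc import ExecutableNotFound, fingerprint

try:
    duration, fp = fingerprint("song.mp3")   # hypothetical input file
    print(duration, fp[:32])                 # both values are bytes
except ExecutableNotFound:
    print("fpcalc executable was not found")
except ValueError:
    print("fingerprinting failed")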
main.go
/* Copyright 2020 The actions-runner-controller authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "flag" "fmt" "os" "strconv" "time" actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1" "github.com/summerwind/actions-runner-controller/controllers" "github.com/summerwind/actions-runner-controller/github" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/log/zap" // +kubebuilder:scaffold:imports ) const ( defaultRunnerImage = "summerwind/actions-runner:latest" defaultDockerImage = "docker:dind" ) var ( scheme = runtime.NewScheme() setupLog = ctrl.Log.WithName("setup") ) func init() { _ = clientgoscheme.AddToScheme(scheme) _ = actionsv1alpha1.AddToScheme(scheme) // +kubebuilder:scaffold:scheme } func main() { var ( err error ghClient *github.Client metricsAddr string enableLeaderElection bool syncPeriod time.Duration runnerImage string dockerImage string ghToken string ghAppID int64 ghAppInstallationID int64 ghAppPrivateKey string ) flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") flag.StringVar(&runnerImage, "runner-image", defaultRunnerImage, "The image name of self-hosted runner container.") flag.StringVar(&dockerImage, "docker-image", defaultDockerImage, "The image name of docker sidecar container.") flag.StringVar(&ghToken, "github-token", "", "The personal access token of GitHub.") flag.Int64Var(&ghAppID, "github-app-id", 0, "The application ID of GitHub App.") flag.Int64Var(&ghAppInstallationID, "github-app-installation-id", 0, "The installation ID of GitHub App.") flag.StringVar(&ghAppPrivateKey, "github-app-private-key", "", "The path of a private key file to authenticate as a GitHub App") flag.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled. 
When you use autoscaling, set to a lower value like 10 minute, because this corresponds to the minimum time to react on demand change") flag.Parse() if ghToken == "" { ghToken = os.Getenv("GITHUB_TOKEN") } if ghAppID == 0 { appID, err := strconv.ParseInt(os.Getenv("GITHUB_APP_ID"), 10, 64) if err == nil { ghAppID = appID } } if ghAppInstallationID == 0 { appInstallationID, err := strconv.ParseInt(os.Getenv("GITHUB_APP_INSTALLATION_ID"), 10, 64) if err == nil { ghAppInstallationID = appInstallationID } } if ghAppPrivateKey == "" { ghAppPrivateKey = os.Getenv("GITHUB_APP_PRIVATE_KEY") } if ghAppID != 0 { if ghAppInstallationID == 0 { fmt.Fprintln(os.Stderr, "Error: The installation ID must be specified.") os.Exit(1) } if ghAppPrivateKey == "" { fmt.Fprintln(os.Stderr, "Error: The path of a private key file must be specified.") os.Exit(1) } ghClient, err = github.NewClient(ghAppID, ghAppInstallationID, ghAppPrivateKey) if err != nil { fmt.Fprintf(os.Stderr, "Error: Failed to create GitHub client: %v\n", err) os.Exit(1) } } else if ghToken != "" { ghClient, err = github.NewClientWithAccessToken(ghToken) if err != nil { fmt.Fprintf(os.Stderr, "Error: Failed to create GitHub client: %v\n", err) os.Exit(1) } } else { fmt.Fprintln(os.Stderr, "Error: GitHub App credentials or personal access token must be specified.") os.Exit(1) } ctrl.SetLogger(zap.New(func(o *zap.Options) { o.Development = true })) mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, MetricsBindAddress: metricsAddr, LeaderElection: enableLeaderElection, Port: 9443, SyncPeriod: &syncPeriod, }) if err != nil { setupLog.Error(err, "unable to start manager") os.Exit(1) } runnerReconciler := &controllers.RunnerReconciler{ Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("Runner"), Scheme: mgr.GetScheme(), GitHubClient: ghClient, RunnerImage: runnerImage, DockerImage: dockerImage, } if err = runnerReconciler.SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Runner") os.Exit(1) } runnerSetReconciler := &controllers.RunnerReplicaSetReconciler{ Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("RunnerReplicaSet"), Scheme: mgr.GetScheme(), GitHubClient: ghClient, } if err = runnerSetReconciler.SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "RunnerReplicaSet") os.Exit(1) } runnerDeploymentReconciler := &controllers.RunnerDeploymentReconciler{ Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("RunnerDeployment"), Scheme: mgr.GetScheme(), } if err = runnerDeploymentReconciler.SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "RunnerDeployment") os.Exit(1) } horizontalRunnerAutoscaler := &controllers.HorizontalRunnerAutoscalerReconciler{ Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("HorizontalRunnerAutoscaler"), Scheme: mgr.GetScheme(), GitHubClient: ghClient, } if err = horizontalRunnerAutoscaler.SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "HorizontalRunnerAutoscaler") os.Exit(1) } if err = (&actionsv1alpha1.Runner{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "Runner") os.Exit(1) } if err = (&actionsv1alpha1.RunnerDeployment{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "RunnerDeployment") 
os.Exit(1) } if err = (&actionsv1alpha1.RunnerReplicaSet{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "RunnerReplicaSet") os.Exit(1) } // +kubebuilder:scaffold:builder setupLog.Info("starting manager") if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { setupLog.Error(err, "problem running manager") os.Exit(1) } }
[ "\"GITHUB_TOKEN\"", "\"GITHUB_APP_ID\"", "\"GITHUB_APP_INSTALLATION_ID\"", "\"GITHUB_APP_PRIVATE_KEY\"" ]
[]
[ "GITHUB_APP_INSTALLATION_ID", "GITHUB_TOKEN", "GITHUB_APP_PRIVATE_KEY", "GITHUB_APP_ID" ]
[]
["GITHUB_APP_INSTALLATION_ID", "GITHUB_TOKEN", "GITHUB_APP_PRIVATE_KEY", "GITHUB_APP_ID"]
go
4
0
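main.go above falls back to GITHUB_TOKEN or the GITHUB_APP_* environment variables when the corresponding flags are omitted. A hedged sketch of launching a built controller binary that way; the binary name ./manager, the key path, and the ID values are assumptions for illustration, while --sync-period is a flag defined in the file.

import os
import subprocess

env = dict(
    os.environ,
    GITHUB_APP_ID="12345",                                   # hypothetical App ID
    GITHUB_APP_INSTALLATION_ID="67890",                      # hypothetical installation ID
    GITHUB_APP_PRIVATE_KEY="/etc/actions/private-key.pem",   # hypothetical key path
)
subprocess.run(["./manager", "--sync-period", "5m"], env=env, check=True)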
eval_scrubber/__main__.py
# -*- coding: utf-8 -*- import os import sys import logging from . import find_infected, remove_infected if __name__ == '__main__': args = sys.argv if (len(args) < 3): raise ValueError('Please supply the action (find or remove) and the directory ex: python -m eval_scrubber find /home/username') action = args[1] directory = args[2] debug = os.getenv("DEBUG", False) if debug: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) if not os.path.isdir(directory): raise IOError('Directory does not exist') if action == 'find': find_infected(directory) elif action == 'remove': remove_infected(directory) else: raise ValueError('Action must be either "find" or "remove"')
[]
[]
[ "DEBUG" ]
[]
["DEBUG"]
python
1
0
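The eval_scrubber entry above reads DEBUG from the environment to switch on debug logging. A small sketch that drives it the way its own usage string suggests; the target directory is taken from that usage string and is only illustrative, and the package is assumed to be installed or on PYTHONPATH.

import os
import subprocess
import sys

env = dict(os.environ, DEBUG="1")   # any non-empty value enables debug logging
subprocess.run(
    [sys.executable, "-m", "eval_scrubber", "find", "/home/username"],
    env=env,
    check=True,
)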
tests/suite_test.go
/* Copyright © 2021 The MayaData Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package tests import ( "flag" "fmt" "net" "time" "os" "testing" "github.com/ghodss/yaml" "github.com/mayadata-io/volume-events-exporter/tests/nfs" "github.com/mayadata-io/volume-events-exporter/tests/server" "github.com/mayadata-io/volume-events-exporter/tests/server/rest" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" // auth plugins _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) var ( // CLI options kubeConfigPath string ipAddress string port int serverType string serverIface server.ServerInterface // Artifacts required configuration applicationNamespace = "event-exporter-tests-ns" nfsProvisionerName = "openebs-nfs-provisioner" nfsProvisionerLabelSelector = "openebs.io/component-name=openebs-nfs-provisioner" OpenEBSNamespace = "openebs" nfsHookConfigName = "hook-config" nfsHookConfigDataKey = "hook-config" //KeyPVNFSServerType defines if the NFS PV should be launched // using kernel or ganesha KeyPVNFSServerType = "NFSServerType" //KeyPVBackendStorageClass defines default provisioner to be used // to create the data(export) directory for NFS server KeyPVBackendStorageClass = "BackendStorageClass" // integrationTestFinalizer will be configured only on backend PVC. // This finalizer is required for test to ensure whether volume events // (create/delete) are exported to server, once the server receives a volume // event will add received `metadata.name` as an annotation on backend PVC, // Since the finalizer exist test will be able to verify annotations // of occurred events and if everything is good, test will remove finalizer // manually integrationTestFinalizer = "it.nfs.openebs.io/test-protection" ) func TestSource(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Test application deployment") } func init() { flag.StringVar(&kubeConfigPath, "kubeconfig", os.Getenv("KUBECONFIG"), "path to kubeconfig to invoke kubernetes API calls") flag.StringVar(&ipAddress, "address", "", "address on which server(event listener) will start. Defaults to machine IP Address") flag.IntVar(&port, "port", 9090, "port on which server will listen. Defaults to 9090") flag.StringVar(&serverType, "type", "rest", "type of the server to serve service. 
Supported only REST") } var _ = BeforeSuite(func() { var err error flag.Parse() if err := initK8sClient(kubeConfigPath); err != nil { panic(fmt.Sprintf("failed to initialize k8s client err=%s", err)) } if ipAddress == "" { ipAddress, err = externalIP() if err != nil { panic(fmt.Sprintf("failed to get externalIP address, err: %s", err)) } } serverIface, err = newServer(ipAddress, port, serverType) Expect(err).To(BeNil(), "while instantiating the new server") err = serverIface.Start() Expect(err).To(BeNil(), "while starting the server") By("waiting for openebs-nfs-provisioner pod to come into running state") err = Client.waitForPods(OpenEBSNamespace, nfsProvisionerLabelSelector, corev1.PodRunning, 1) Expect(err).To(BeNil(), "while waiting for nfs deployment to be ready") err = updateNFSHookConfig(OpenEBSNamespace, nfsHookConfigName) Expect(err).To(BeNil(), "while updating nfs hook configuration as required per test") err = addEventControllerSideCar(OpenEBSNamespace, nfsProvisionerName) Expect(err).To(BeNil(), "while adding volume-event-exporter sidecar") By("building a namespace") err = Client.createNamespace(applicationNamespace) Expect(err).To(BeNil(), "while creating namespace {%s}", applicationNamespace) }) var _ = AfterSuite(func() { if Client != nil { By("deleting namespace") err := Client.destroyNamespace(applicationNamespace) Expect(err).To(BeNil(), "while deleting namespace {%s}", applicationNamespace) } if serverIface != nil { err := serverIface.Stop() Expect(err).To(BeNil(), "while stopping the server") } }) func newServer(address string, port int, serverType string) (server.ServerInterface, error) { switch serverType { case "rest": return rest.NewRestServer(rest.ServerConfig{ IPAddress: address, Port: port, SecretKey: "mayadata-io-secret", TLSTimeout: 2 * time.Hour, Clientset: Client.Interface, EventsReceiver: &nfs.NFS{ Clientset: Client.Interface, }, }) } return nil, errors.Errorf("Unsupported server type %s", serverType) } // addOrUpdateEventControllerSidecar will add volume-event-controller side car only // if container doesn't exist else updates the CALLBACK_URL and CALLBACK_TOKEN func addEventControllerSideCar(deploymentNamespace, deploymentName string) error { deployObj, err := Client.getDeployment(deploymentNamespace, deploymentName) if err != nil { return err } var isVolumeEventsCollectorExist bool volumeEventsCollector := corev1.Container{ Name: "volume-events-collector", Image: "mayadataio/volume-events-exporter:ci", Args: []string{ "--leader-election=false", "--generate-k8s-events=true", }, Env: []corev1.EnvVar{ { Name: "OPENEBS_IO_NFS_SERVER_NS", Value: OpenEBSNamespace, }, { Name: "CALLBACK_URL", Value: serverIface.GetEventsReceiverEndpoint(), }, { Name: "CALLBACK_TOKEN", Value: serverIface.GetToken(), }, }, } for idx, container := range deployObj.Spec.Template.Spec.Containers { if container.Name == volumeEventsCollector.Name { deployObj.Spec.Template.Spec.Containers[idx] = volumeEventsCollector isVolumeEventsCollectorExist = true break } } if !isVolumeEventsCollectorExist { deployObj.Spec.Template.Spec.Containers = append(deployObj.Spec.Template.Spec.Containers, volumeEventsCollector) } updatedDeployObj, err := Client.updateDeployment(deployObj) if err != nil { return err } return Client.waitForDeploymentRollout(updatedDeployObj.Namespace, updatedDeployObj.Name) } func removeEventsCollectorSidecar(deploymentNamespace, deploymentName string) error { var isVolumeEventsCollectorExist bool var index int deployObj, err := Client.getDeployment(deploymentNamespace, 
deploymentName) if err != nil { return err } for idx, container := range deployObj.Spec.Template.Spec.Containers { if container.Name == "volume-events-collector" { index = idx isVolumeEventsCollectorExist = true } } // Remove volume events collector sidecar if !isVolumeEventsCollectorExist { return nil } deployObj.Spec.Template.Spec.Containers = append(deployObj.Spec.Template.Spec.Containers[:index], deployObj.Spec.Template.Spec.Containers[index+1:]...) updatedDeployObj, err := Client.updateDeployment(deployObj) if err != nil { return err } return Client.waitForDeploymentRollout(updatedDeployObj.Namespace, updatedDeployObj.Name) } // updateNFSHookConfig will update the NFS hook configuration as // per test details func updateNFSHookConfig(namespace, name string) error { hookConfigMap, err := Client.getConfigMap(namespace, name) if err != nil { return errors.Wrapf(err, "failed to get configmap %s/%s", namespace, name) } var hook Hook hookData, isConfigExist := hookConfigMap.Data[nfsHookConfigDataKey] if !isConfigExist { return errors.Errorf("hook configmap=%s/%s doesn't have data field=%s", namespace, name, nfsHookConfigDataKey) } err = yaml.Unmarshal([]byte(hookData), &hook) if err != nil { return err } addHookConfig, isAddExist := hook.Config[ActionAddOnCreateVolumeEvent] if !isAddExist { return errors.Errorf("%s configuration doesn't exist in hook %s/%s", ActionAddOnCreateVolumeEvent, namespace, name) } addHookConfig.BackendPVCConfig.Finalizers = append(addHookConfig.BackendPVCConfig.Finalizers, integrationTestFinalizer) hook.Config[ActionAddOnCreateVolumeEvent] = addHookConfig updatedHookConfigInBytes, err := yaml.Marshal(hook) if err != nil { return err } hookConfigMap.Data[nfsHookConfigDataKey] = string(updatedHookConfigInBytes) _, err = Client.updateConfigMap(hookConfigMap) return err } // externalIP will fetch the IP from ifconfig func externalIP() (string, error) { ifaces, err := net.Interfaces() if err != nil { return "", err } for _, iface := range ifaces { if iface.Flags&net.FlagUp == 0 { continue // interface down } if iface.Flags&net.FlagLoopback != 0 { continue // loopback interface } addrs, err := iface.Addrs() if err != nil { return "", err } for _, addr := range addrs { var ip net.IP switch v := addr.(type) { case *net.IPNet: ip = v.IP case *net.IPAddr: ip = v.IP } if ip == nil || ip.IsLoopback() { continue } ip = ip.To4() if ip == nil { continue // not an ipv4 address } return ip.String(), nil } } return "", errors.New("are you connected to the network?") }
[ "\"KUBECONFIG\"" ]
[]
[ "KUBECONFIG" ]
[]
["KUBECONFIG"]
go
1
0
cmd/repl/repl.go
// Copyright 2020 The SQLFlow Authors. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "bufio" "bytes" "image" _ "image/png" "time" "encoding/base64" "flag" "fmt" "io" "log" "net/url" "os" "os/exec" "regexp" "strings" "syscall" "github.com/mattn/go-sixel" "github.com/olekukonko/tablewriter" "golang.org/x/crypto/ssh/terminal" "sqlflow.org/sqlflow/pkg/database" pb "sqlflow.org/sqlflow/pkg/proto" "sqlflow.org/sqlflow/pkg/sql" "sqlflow.org/sqlflow/pkg/sql/codegen/attribute" ) const tablePageSize = 1000 func isSpace(c byte) bool { return len(bytes.TrimSpace([]byte{c})) == 0 } // addLineToStmt scans lines into statements, the last four parameters are both input/output. // A user must initialize `inQuotedString` and `isSingleQuoted` to false and `statements` to [] // at the first call func addLineToStmt(line string, inQuotedString, isSingleQuoted *bool, statements *[]string) bool { if len(*statements) == 0 { // First line of the statements *statements = append(*statements, "") line = strings.TrimLeft(line, "\t ") } else { (*statements)[len(*statements)-1] += "\n" } var isEscape bool // Escaping in quoted string cannot cross lines var start, i int for i = 0; i < len(line); i++ { if isEscape { isEscape = false continue } switch line[i] { case '\\': if *inQuotedString { isEscape = true } case '"', '\'': if *inQuotedString { if *isSingleQuoted == (line[i] == '\'') { *inQuotedString = false // We found the end of a quoted string } } else { // The start of a quoted string *inQuotedString = true *isSingleQuoted = (line[i] == '\'') } case ';': if !*inQuotedString { // We found a statement if i-start != 1 { // Ignore empty statement that has only a ';' (*statements)[len(*statements)-1] += line[start : i+1] } for i+1 < len(line) && isSpace(line[i+1]) { i++ // Ignore leading whitespaces of the next statement } start = i + 1 if start == len(line) { return true // All done, the last character in the line is the end of a statement } *statements = append(*statements, "") // Prepare for searching the next statement } case '-': if !*inQuotedString { if i+1 < len(line) && line[i+1] == '-' { if i+2 == len(line) || isSpace(line[i+2]) { // We found a line comment // Note: `--` comment doesn't interfere with quoted-string and `;` (*statements)[len(*statements)-1] += strings.TrimSpace(line[start:i]) if len(*statements) == 1 && (*statements)[0] == "" { *statements = []string{} return true // The whole line is an empty statement that has only a `-- comment`, } return false } } } } } (*statements)[len(*statements)-1] += line[start:] return false } // readStmt reads a SQL statement from the scanner. A statement could have // multiple lines and ends at a semicolon at the end of the last line. 
func readStmt(scn *bufio.Scanner) ([]string, error) { stmt := []string{} var inQuotedString, isSingleQuoted bool for scn.Scan() { if addLineToStmt(scn.Text(), &inQuotedString, &isSingleQuoted, &stmt) { return stmt, nil } } // If the the file doesn't ends with ';', we consider the remaining content as a statement if scn.Err() == nil { return stmt, io.EOF } return stmt, scn.Err() } func header(head map[string]interface{}) ([]string, error) { cn, ok := head["columnNames"] if !ok { return nil, fmt.Errorf("can't find field columnNames in head") } cols, ok := cn.([]string) if !ok { return nil, fmt.Errorf("invalid header type") } return cols, nil } func isHTMLSnippet(s string) bool { // TODO(shendiaomo): more accurate checks later return strings.HasPrefix(s, "<div") } func printAsDataURL(s string) { fmt.Println("data:text/html,", s) fmt.Println() fmt.Println("To view the content, paste the above data url to a web browser.") } func getBase64EncodedImage(s string) ([]byte, error) { match := regexp.MustCompile(`base64,(.*)'`).FindStringSubmatch(s) if len(match) == 2 { return base64.StdEncoding.DecodeString(match[1]) } return []byte{}, fmt.Errorf("no images in the HTML") } func imageCat(imageBytes []byte) error { img, _, err := image.Decode(bytes.NewReader(imageBytes)) if err != nil { return err } err = sixel.NewEncoder(os.Stdout).Encode(img) if err != nil { return err } fmt.Println() return nil } var it2Check = false func render(rsp interface{}, table *tablewriter.Table, isTerminal bool) (bool, error) { switch s := rsp.(type) { case map[string]interface{}: // table header cols, e := header(s) if e == nil { table.SetHeader(cols) } return true, nil case []interface{}: // row row := make([]string, len(s)) for i, v := range s { row[i] = fmt.Sprint(v) } table.Append(row) return true, nil case error: if os.Getenv("SQLFLOW_log_dir") != "" { // To avoid printing duplicated error message to console log.New(os.Stderr, "", 0).Printf("ERROR: %v\n", s) } if !isTerminal { os.Exit(1) } return false, s case sql.EndOfExecution: case sql.Figures: if isHTMLSnippet(s.Image) { if !isTerminal { printAsDataURL(s.Image) break } if image, e := getBase64EncodedImage(s.Image); e != nil { printAsDataURL(s.Image) } else if !it2Check { printAsDataURL(s.Image) fmt.Println("Or use iTerm2 as your terminal to view images.") fmt.Println(s.Text) } else if e = imageCat(image); e != nil { log.New(os.Stderr, "", 0).Printf("ERROR: %v\n", e) printAsDataURL(s.Image) fmt.Println(s.Text) } } else { fmt.Println(s) } case string: fmt.Println(s) default: log.Fatalf("unrecognized response type: %v", s) } return false, nil } func flagPassed(name ...string) bool { found := false for _, n := range name { flag.Visit(func(f *flag.Flag) { if f.Name == n { found = true } }) } return found } func runStmt(stmt string, isTerminal bool, modelDir string, ds string) error { startTime := time.Now().UnixNano() if !isTerminal { fmt.Println("sqlflow>", stmt) } tableRendered := false table := tablewriter.NewWriter(os.Stdout) sess := makeSessionFromEnv() sess.DbConnStr = getDataSource(ds, currentDB) parts := strings.Fields(strings.ReplaceAll(stmt, ";", "")) if len(parts) == 2 && strings.ToUpper(parts[0]) == "USE" { return switchDatabase(parts[1], sess) } stream := sql.RunSQLProgram(stmt, modelDir, sess) var isTable bool var err error for rsp := range stream.ReadAll() { // pagination. 
avoid exceed memory isTable, err = render(rsp, table, isTerminal) if err != nil { break } if isTable && table.NumLines() == tablePageSize { table.Render() tableRendered = true table.ClearRows() } } if table.NumLines() > 0 && !tableRendered { table.Render() } if err == nil { if isTable { fmt.Printf("%d rows in set ", table.NumLines()) } fmt.Printf("(%.2f sec)\n", float64(time.Now().UnixNano()-startTime)/1e9) fmt.Println() } return nil } func assertConnectable(ds string) { db, err := database.OpenAndConnectDB(ds) if err != nil { log.Fatal(err) } defer db.Close() } func repl(scanner *bufio.Scanner, modelDir string, ds string) { for { statements, err := readStmt(scanner) if err == io.EOF && len(statements) == 0 { return } for _, stmt := range statements { if err := runStmt(stmt, false, modelDir, ds); err != nil { log.Fatalf("run SQL statement failed: %v", err) } } } } func makeSessionFromEnv() *pb.Session { return &pb.Session{ Token: os.Getenv("SQLFLOW_USER_TOKEN"), DbConnStr: os.Getenv("SQLFLOW_DATASOURCE"), ExitOnSubmit: strings.ToLower(os.Getenv("SQLFLOW_EXIT_ON_SUBMIT")) == "true", UserId: os.Getenv("SQLFLOW_USER_ID"), HiveLocation: os.Getenv("SQLFLOW_HIVE_LOCATION"), HdfsNamenodeAddr: os.Getenv("SQLFLOW_HDFS_NAMENODE_ADDR"), HdfsUser: os.Getenv("SQLFLOW_HADOOP_USER"), HdfsPass: os.Getenv("SQLFLOW_HADOOP_PASS"), Submitter: os.Getenv("SQLFLOW_submitter")} } func switchDatabase(db string, session *pb.Session) error { stream := sql.RunSQLProgram("USE "+db, "", session) r := <-stream.ReadAll() switch r.(type) { case string: session.DbConnStr = getDataSource(session.DbConnStr, db) fmt.Println("Database changed to", db) currentDB = db case error: fmt.Println(r) } return nil } func getDatabaseName(datasource string) string { driver, other, e := database.ParseURL(datasource) if e != nil { log.Fatalf("unrecognized data source '%s'", datasource) } // The data source string of MySQL and Hive have similar patterns // with the database name as a pathname under root. 
For example: // mysql://root:root@tcp(127.0.0.1:3306)/iris?maxAllowedPacket=0 // hive://root:[email protected]:10000/iris?auth=NOSASL re := regexp.MustCompile(`[^/]*/(\w*).*`) // Extract the database name of MySQL and Hive switch driver { case "maxcompute": // The database name in data source string of MaxCompute is the argument to parameter // `curr_project` re = regexp.MustCompile(`[^/].*/api[?].*curr_project=(\w*).*`) case "mysql": case "hive": case "alisa": // TODO(yaney1989): using go drivers to parse the database default: log.Fatalf("unknown database '%s' in data source'%s'", driver, datasource) } if group := re.FindStringSubmatch(other); group != nil { return group[1] } return "" } // getDataSource generates a data source string that is using database `db` from the original dataSource func getDataSource(dataSource, db string) string { driver, other, e := database.ParseURL(dataSource) if e != nil { log.Fatalf("unrecognized data source '%s'", dataSource) } pieces := strings.Split(other, "?") switch driver { case "maxcompute", "alisa": var v url.Values = url.Values{} if len(pieces) == 2 { v, e = url.ParseQuery(pieces[1]) if e != nil { log.Fatalf("unrecognized data source '%s'", dataSource) } } v["curr_project"] = []string{db} return fmt.Sprintf("maxcompute://%s?%s", pieces[0], v.Encode()) case "mysql": fallthrough case "hive": pieces[0] = strings.Split(pieces[0], "/")[0] + "/" + db return fmt.Sprintf("%s://%s", driver, strings.Join(pieces, "?")) } log.Fatalf("unknown database '%s' in data source'%s'", driver, dataSource) return "" } var currentDB string func main() { ds := flag.String("datasource", "", "database connect string") modelDir := flag.String("model_dir", "", "model would be saved on the local dir, otherwise upload to the table.") cliStmt := flag.String("execute", "", "execute SQLFlow from command line. e.g. --execute 'select * from table1'") flag.StringVar(cliStmt, "e", "", "execute SQLFlow from command line, short for --execute") sqlFileName := flag.String("file", "", "execute SQLFlow from file. e.g. --file '~/iris_dnn.sql'") flag.StringVar(sqlFileName, "f", "", "execute SQLFlow from file, short for --file") noAutoCompletion := flag.Bool("A", false, "No auto completion for sqlflow models. This gives a quicker start.") flag.Parse() assertConnectable(*ds) // Fast fail if we can't connect to the datasource currentDB = getDatabaseName(*ds) if *modelDir != "" { if _, derr := os.Stat(*modelDir); derr != nil { os.Mkdir(*modelDir, os.ModePerm) } } isTerminal := !flagPassed("execute", "e", "file", "f") && terminal.IsTerminal(syscall.Stdin) sqlFile := os.Stdin var err error if flagPassed("file", "f") { sqlFile, err = os.Open(*sqlFileName) if err != nil { log.Fatal(err) } defer sqlFile.Close() } var reader io.Reader = sqlFile // Override stdin and file when the `-e|-execute' options are present. if flagPassed("execute", "e") { reader = strings.NewReader(*cliStmt) } scanner := bufio.NewScanner(reader) if isTerminal { if !it2Check { fmt.Println("The terminal doesn't support sixel, explanation statements will show ASCII figures.") } if !*noAutoCompletion { attribute.ExtractDocStringsOnce() } runPrompt(func(stmt string) { runStmt(stmt, true, *modelDir, *ds) }) } else { repl(scanner, *modelDir, *ds) } } func init() { // `it2check` and `go-prompt` both set terminal to raw mode, we has to call `it2check` only once cmd := exec.Command("it2check") cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout if cmd.Run() == nil { it2Check = true } }
[ "\"SQLFLOW_log_dir\"", "\"SQLFLOW_USER_TOKEN\"", "\"SQLFLOW_DATASOURCE\"", "\"SQLFLOW_EXIT_ON_SUBMIT\"", "\"SQLFLOW_USER_ID\"", "\"SQLFLOW_HIVE_LOCATION\"", "\"SQLFLOW_HDFS_NAMENODE_ADDR\"", "\"SQLFLOW_HADOOP_USER\"", "\"SQLFLOW_HADOOP_PASS\"", "\"SQLFLOW_submitter\"" ]
[]
[ "SQLFLOW_USER_ID", "SQLFLOW_HADOOP_USER", "SQLFLOW_USER_TOKEN", "SQLFLOW_HADOOP_PASS", "SQLFLOW_EXIT_ON_SUBMIT", "SQLFLOW_HDFS_NAMENODE_ADDR", "SQLFLOW_log_dir", "SQLFLOW_submitter", "SQLFLOW_HIVE_LOCATION", "SQLFLOW_DATASOURCE" ]
[]
["SQLFLOW_USER_ID", "SQLFLOW_HADOOP_USER", "SQLFLOW_USER_TOKEN", "SQLFLOW_HADOOP_PASS", "SQLFLOW_EXIT_ON_SUBMIT", "SQLFLOW_HDFS_NAMENODE_ADDR", "SQLFLOW_log_dir", "SQLFLOW_submitter", "SQLFLOW_HIVE_LOCATION", "SQLFLOW_DATASOURCE"]
go
10
0
selfdrive/car/interfaces.py
import os import time from typing import Dict from cereal import car from common.kalman.simple_kalman import KF1D from common.realtime import DT_CTRL from selfdrive.car import gen_empty_fingerprint from selfdrive.config import Conversions as CV from selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX from selfdrive.controls.lib.events import Events from selfdrive.controls.lib.vehicle_model import VehicleModel GearShifter = car.CarState.GearShifter EventName = car.CarEvent.EventName # WARNING: this value was determined based on the model's training distribution, # model predictions above this speed can be unpredictable MAX_CTRL_SPEED = (V_CRUISE_MAX + 4) * CV.KPH_TO_MS # 135 + 4 = 86 mph ACCEL_MAX = 2.0 ACCEL_MIN = -3.5 # generic car and radar interfaces class CarInterfaceBase(): def __init__(self, CP, CarController, CarState): self.CP = CP self.VM = VehicleModel(CP) self.frame = 0 self.steer_warning = 0 self.steering_unpressed = 0 self.low_speed_alert = False if CarState is not None: self.CS = CarState(CP) self.cp = self.CS.get_can_parser(CP) self.cp_cam = self.CS.get_cam_can_parser(CP) self.cp_body = self.CS.get_body_can_parser(CP) self.CC = None if CarController is not None: self.CC = CarController(self.cp.dbc_name, CP, self.VM) @staticmethod def get_pid_accel_limits(CP, current_speed, cruise_speed): return ACCEL_MIN, ACCEL_MAX @staticmethod def calc_accel_override(a_ego, a_target, v_ego, v_target): return 1. @staticmethod def get_params(candidate, fingerprint=gen_empty_fingerprint(), car_fw=None): raise NotImplementedError @staticmethod def init(CP, logcan, sendcan): pass # returns a set of default params to avoid repetition in car specific params @staticmethod def get_std_params(candidate, fingerprint): ret = car.CarParams.new_message() ret.carFingerprint = candidate # standard ALC params ret.steerControlType = car.CarParams.SteerControlType.torque ret.steerMaxBP = [0.] ret.steerMaxV = [1.] ret.minSteerSpeed = 0. ret.pcmCruise = True # openpilot's state is tied to the PCM's cruise state on most cars ret.minEnableSpeed = -1. # enable is done by stock ACC, so ignore this ret.steerRatioRear = 0. # no rear steering, at least on the listed cars aboveA ret.openpilotLongitudinalControl = False ret.startAccel = 0.0 ret.minSpeedCan = 0.3 ret.stoppingDecelRate = 0.8 # brake_travel/s while trying to stop ret.startingAccelRate = 3.2 # brake_travel/s while releasing on restart ret.stoppingControl = True ret.longitudinalTuning.deadzoneBP = [0.] ret.longitudinalTuning.deadzoneV = [0.] ret.longitudinalTuning.kpBP = [0.] ret.longitudinalTuning.kpV = [1.] ret.longitudinalTuning.kiBP = [0.] ret.longitudinalTuning.kiV = [1.] 
ret.longitudinalActuatorDelay = 0.15 return ret # returns a car.CarState, pass in car.CarControl def update(self, c, can_strings): raise NotImplementedError # return sendcan, pass in a car.CarControl def apply(self, c): raise NotImplementedError def create_common_events(self, cs_out, extra_gears=None, gas_resume_speed=-1, pcm_enable=True): events = Events() if cs_out.doorOpen: events.add(EventName.doorOpen) if cs_out.seatbeltUnlatched: events.add(EventName.seatbeltNotLatched) if cs_out.gearShifter != GearShifter.drive and (extra_gears is None or cs_out.gearShifter not in extra_gears): events.add(EventName.wrongGear) if cs_out.gearShifter == GearShifter.reverse: events.add(EventName.reverseGear) if not cs_out.cruiseState.available: events.add(EventName.wrongCarMode) if cs_out.espDisabled: events.add(EventName.espDisabled) if cs_out.gasPressed: events.add(EventName.gasPressed) if cs_out.stockFcw: events.add(EventName.stockFcw) if cs_out.stockAeb: events.add(EventName.stockAeb) if cs_out.vEgo > MAX_CTRL_SPEED: events.add(EventName.speedTooHigh) if cs_out.cruiseState.nonAdaptive: events.add(EventName.wrongCruiseMode) self.steer_warning = self.steer_warning + 1 if cs_out.steerWarning else 0 self.steering_unpressed = 0 if cs_out.steeringPressed else self.steering_unpressed + 1 # Handle permanent and temporary steering faults if cs_out.steerError: events.add(EventName.steerUnavailable) elif cs_out.steerWarning: # only escalate to the harsher alert after the condition has # persisted for 0.5s and we're certain that the user isn't overriding if self.steering_unpressed > int(0.5/DT_CTRL) and self.steer_warning > int(0.5/DT_CTRL): events.add(EventName.steerTempUnavailable) else: events.add(EventName.steerTempUnavailableSilent) # Disable on rising edge of gas or brake. Also disable on brake when speed > 0. # Optionally allow to press gas at zero speed to resume. # e.g. Chrysler does not spam the resume button yet, so resuming with gas is handy. FIXME! 
if (cs_out.gasPressed and (not self.CS.out.gasPressed) and cs_out.vEgo > gas_resume_speed) or \ (cs_out.brakePressed and (not self.CS.out.brakePressed or not cs_out.standstill)): events.add(EventName.pedalPressed) # we engage when pcm is active (rising edge) if pcm_enable: if cs_out.cruiseState.enabled and not self.CS.out.cruiseState.enabled: events.add(EventName.pcmEnable) elif not cs_out.cruiseState.enabled: events.add(EventName.pcmDisable) return events class RadarInterfaceBase(): def __init__(self, CP): self.pts = {} self.delay = 0 self.radar_ts = CP.radarTimeStep self.no_radar_sleep = 'NO_RADAR_SLEEP' in os.environ def update(self, can_strings): ret = car.RadarData.new_message() if not self.no_radar_sleep: time.sleep(self.radar_ts) # radard runs on RI updates return ret class CarStateBase: def __init__(self, CP): self.CP = CP self.car_fingerprint = CP.carFingerprint self.out = car.CarState.new_message() self.cruise_buttons = 0 self.left_blinker_cnt = 0 self.right_blinker_cnt = 0 self.left_blinker_prev = False self.right_blinker_prev = False # Q = np.matrix([[10.0, 0.0], [0.0, 100.0]]) # R = 1e3 self.v_ego_kf = KF1D(x0=[[0.0], [0.0]], A=[[1.0, DT_CTRL], [0.0, 1.0]], C=[1.0, 0.0], K=[[0.12287673], [0.29666309]]) def update_speed_kf(self, v_ego_raw): if abs(v_ego_raw - self.v_ego_kf.x[0][0]) > 2.0: # Prevent large accelerations when car starts at non zero speed self.v_ego_kf.x = [[v_ego_raw], [0.0]] v_ego_x = self.v_ego_kf.update(v_ego_raw) return float(v_ego_x[0]), float(v_ego_x[1]) def update_blinker_from_lamp(self, blinker_time: int, left_blinker_lamp: bool, right_blinker_lamp: bool): """Update blinkers from lights. Enable output when light was seen within the last `blinker_time` iterations""" # TODO: Handle case when switching direction. Now both blinkers can be on at the same time self.left_blinker_cnt = blinker_time if left_blinker_lamp else max(self.left_blinker_cnt - 1, 0) self.right_blinker_cnt = blinker_time if right_blinker_lamp else max(self.right_blinker_cnt - 1, 0) return self.left_blinker_cnt > 0, self.right_blinker_cnt > 0 def update_blinker_from_stalk(self, blinker_time: int, left_blinker_stalk: bool, right_blinker_stalk: bool): """Update blinkers from stalk position. When stalk is seen the blinker will be on for at least blinker_time, or until the stalk is turned off, whichever is longer. If the opposite stalk direction is seen the blinker is forced to the other side. On a rising edge of the stalk the timeout is reset.""" if left_blinker_stalk: self.right_blinker_cnt = 0 if not self.left_blinker_prev: self.left_blinker_cnt = blinker_time if right_blinker_stalk: self.left_blinker_cnt = 0 if not self.right_blinker_prev: self.right_blinker_cnt = blinker_time self.left_blinker_cnt = max(self.left_blinker_cnt - 1, 0) self.right_blinker_cnt = max(self.right_blinker_cnt - 1, 0) self.left_blinker_prev = left_blinker_stalk self.right_blinker_prev = right_blinker_stalk return bool(left_blinker_stalk or self.left_blinker_cnt > 0), bool(right_blinker_stalk or self.right_blinker_cnt > 0) @staticmethod def parse_gear_shifter(gear: str) -> car.CarState.GearShifter: d: Dict[str, car.CarState.GearShifter] = { 'P': GearShifter.park, 'R': GearShifter.reverse, 'N': GearShifter.neutral, 'E': GearShifter.eco, 'T': GearShifter.manumatic, 'D': GearShifter.drive, 'S': GearShifter.sport, 'L': GearShifter.low, 'B': GearShifter.brake } return d.get(gear, GearShifter.unknown) @staticmethod def get_cam_can_parser(CP): return None @staticmethod def get_body_can_parser(CP): return None
[]
[]
[]
[]
[]
python
0
0
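In the interfaces.py entry above, RadarInterfaceBase.update sleeps for CP.radarTimeStep on every call unless NO_RADAR_SLEEP is present in the environment. A hedged sketch of that behaviour, assuming an openpilot checkout is importable; the SimpleNamespace stands in for a real car.CarParams and is purely illustrative.

import os
import types

os.environ["NO_RADAR_SLEEP"] = "1"   # presence of the variable is what matters, not its value

from selfdrive.car.interfaces import RadarInterfaceBase

CP = types.SimpleNamespace(radarTimeStep=0.05)   # hypothetical stand-in for car.CarParams
ri = RadarInterfaceBase(CP)
ret = ri.update(can_strings=None)                # returns an empty car.RadarData message without sleeping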
icrawler/builtin/flickr.py
# -*- coding: utf-8 -*- import datetime import json import math import os from six.moves.urllib.parse import urlencode from icrawler import Crawler, Feeder, Parser, ImageDownloader class FlickrFeeder(Feeder): def feed(self, apikey, max_num=4000, **kwargs): if max_num > 4000: max_num = 4000 self.logger.warning( 'max_num exceeds 4000, set it to 4000 automatically.') base_url = 'https://api.flickr.com/services/rest/?' params = { 'method': 'flickr.photos.search', 'api_key': apikey, 'format': 'json', 'nojsoncallback': 1 } for key in kwargs: if key in ['user_id', 'tags', 'tag_mode', 'text', 'license', 'sort', 'privacy_filter', 'accuracy', 'safe_search', 'content_type', 'machine_tags', 'machine_tag_mode', 'group_id', 'contacts', 'woe_id', 'place_id', 'has_geo', 'geo_context', 'lat', 'lon', 'radius', 'radius_units', 'is_commons', 'in_gallery', 'is_getty', 'extras', 'per_page', 'page', 'color_codes', 'styles', 'orientation']: # yapf: disable params[key] = kwargs[key] elif key in ['min_upload_date', 'max_upload_date', 'min_taken_date', 'max_taken_date']: # yapf: disable val = kwargs[key] if isinstance(val, datetime.date): params[key] = val.strftime('%Y-%m-%d') elif isinstance(val, (int, str)): params[key] = val else: self.logger.error('%s is invalid', key) else: self.logger.error('Unrecognized search param: %s', key) url = base_url + urlencode(params) per_page = params.get('per_page', 100) page = params.get('page', 1) page_max = int(math.ceil(4000.0 / per_page)) for i in range(page, page + page_max): if self.signal.get('reach_max_num'): break complete_url = '{}&page={}'.format(url, i) while True: try: self.output(complete_url, block=False) except: if self.signal.get('reach_max_num'): break else: break self.logger.debug('put url to url_queue: {}'.format(complete_url)) class FlickrParser(Parser): def parse(self, response, apikey, size_preference=None): content = json.loads(response.content.decode('utf-8', 'ignore')) if content['stat'] != 'ok': return photos = content['photos']['photo'] for photo in photos: photo_id = photo['id'] base_url = 'https://api.flickr.com/services/rest/?' 
params = { 'method': 'flickr.photos.getSizes', 'api_key': apikey, 'photo_id': photo_id, 'format': 'json', 'nojsoncallback': 1 } try: ret = self.session.get(base_url + urlencode(params)) info = json.loads(ret.content.decode()) except: continue else: if info['stat'] == 'ok': urls = { item['label'].lower(): item['source'] for item in info['sizes']['size'] } else: continue for sz in size_preference: if sz in urls: yield dict(file_url=urls[sz], meta=photo) break class FlickrImageCrawler(Crawler): def __init__(self, apikey=None, feeder_cls=FlickrFeeder, parser_cls=FlickrParser, downloader_cls=ImageDownloader, *args, **kwargs): if apikey is None: apikey = os.getenv('FLICKR_APIKEY') if not apikey: self.logger.error('apikey is not specified') return self.apikey = apikey super(FlickrImageCrawler, self).__init__( feeder_cls, parser_cls, downloader_cls, *args, **kwargs) def crawl(self, max_num=1000, size_preference=None, min_size=None, max_size=None, file_idx_offset=0, **kwargs): kwargs['apikey'] = self.apikey default_order = [ 'original', 'large 2048', 'large 1600', 'large', 'medium 800', 'medium 640', 'medium', 'small 320', 'small', 'thumbnail', 'large Square', 'square' ] if size_preference is None: size_preference = default_order elif isinstance(size_preference, str): assert size_preference in default_order size_preference = [size_preference] else: for sz in size_preference: assert sz in default_order super(FlickrImageCrawler, self).crawl( feeder_kwargs=kwargs, parser_kwargs=dict( apikey=self.apikey, size_preference=size_preference), downloader_kwargs=dict( max_num=max_num, min_size=min_size, max_size=max_size, file_idx_offset=file_idx_offset))
[]
[]
[ "FLICKR_APIKEY" ]
[]
["FLICKR_APIKEY"]
python
1
0
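A minimal usage sketch for the flickr.py entry above, assuming a valid key is exported as FLICKR_APIKEY, network access is available, and FlickrImageCrawler is re-exported from icrawler.builtin; the output directory and search tag are illustrative.

import os

from icrawler.builtin import FlickrImageCrawler

crawler = FlickrImageCrawler(
    os.getenv("FLICKR_APIKEY"),             # omitting apikey falls back to this env var anyway
    storage={"root_dir": "flickr-images"},  # hypothetical download directory
)
crawler.crawl(max_num=50, tags="sunset", size_preference="medium")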
app.py
from flask import Flask
from flask_restful import Api
from flask_jwt import JWT
import os

from security import authenticate, identity
from resources.user import UserRegister
from resources.campsite import Campsite, CampsiteList, CampsiteByZipList
from resources.zipcode import Zipcode, ZipcodeList
from resources.travel_time import TravelTime, TravelTimeList, TravelTimeByZipList
from resources.weather_forecast import WeatherForecastList, WeatherForecastForCampsite
from db import db

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = os.environ.get(
    "DATABASE_URL", "sqlite:///data.db"
)
# disable Flask-SQLAlchemy's own modification tracking; SQLAlchemy itself still tracks changes
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.secret_key = "stuart"
api = Api(app)

jwt = JWT(
    app, authenticate, identity
)  # JWT creates the /auth endpoint, which returns a JWT token

api.add_resource(Zipcode, "/zipcode/<string:zipcode>")
api.add_resource(Campsite, "/campsite")
api.add_resource(CampsiteList, "/campsites/all")
api.add_resource(ZipcodeList, "/zipcodes")
api.add_resource(TravelTime, "/traveltime")
api.add_resource(TravelTimeList, "/traveltimes")
api.add_resource(TravelTimeByZipList, "/traveltimes/<string:zipcode>")
api.add_resource(CampsiteByZipList, "/campsites/<string:zipcode>")
api.add_resource(UserRegister, "/register")
api.add_resource(WeatherForecastList, "/forecasts/all")
api.add_resource(WeatherForecastForCampsite, "/forecast/<int:campsite_id>")

# this guard ensures the app only runs when app.py is executed directly,
# not when it is imported by another module (only the file you run directly is __main__)
if __name__ == "__main__":
    db.init_app(app)
    app.run(port=5000, debug=True)
[]
[]
[ "DATABASE_URL" ]
[]
["DATABASE_URL"]
python
1
0
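app.py above reads DATABASE_URL once at import time, so it has to be set before the module is imported. A small sketch of that, assuming the project's modules (security, resources, db) are importable; the sqlite value shown is just the file's own fallback.

import os

os.environ.setdefault("DATABASE_URL", "sqlite:///data.db")   # must happen before the import

from app import app

print(app.config["SQLALCHEMY_DATABASE_URI"])   # reflects DATABASE_URL or the sqlite fallback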
puppeteer-tests/bazel/extract_puppeteer_screenshots/extract_puppeteer_screenshots.go
// extract_puppeteer_screenshots extracts Puppeteer screenshots into a user-specified directory. // // Usage: // // $ bazel run //:extract_puppeteer_screenshots -- --output_dir=<output directory> // // Under Bazel, Puppeteer tests save screenshots inside $TEST_UNDECLARED_OUTPUTS_DIR, which is set // by the "bazel test" command. Screenshots, and any other undeclared outputs of a test, can be // found under //_bazel_testlogs bundled as a single .zip file per test target. // // For example, if we run a Puppeteer test with "bazel test //my_app:puppeteer_test", then any // screenshots will be found inside //_bazel_testlogs/my_app/puppeteer_test/test.outputs/outputs.zip. // // See https://docs.bazel.build/versions/master/test-encyclopedia.html#initial-conditions to learn // more about undeclared test outputs. package main import ( "archive/zip" "flag" "fmt" "io" "os" "path/filepath" "strings" "go.skia.org/infra/go/skerr" "go.skia.org/infra/go/sklog" "go.skia.org/infra/go/util" ) var ( outputDir = flag.String("output_dir", "", "Directory inside which to extract screenshots.") outputDirAbsPath string ) func main() { flag.Parse() if *outputDir == "" { failf("Flag --output_dir is required.\n") } // If running via "bazel run", change into the directory where Bazel was invoked. This is // necessary to correctly compute the absolute path of the output directory. if os.Getenv("BUILD_WORKING_DIRECTORY") != "" { if err := os.Chdir(os.Getenv("BUILD_WORKING_DIRECTORY")); err != nil { sklog.Fatal(err) } } // Validate and compute the absolute path of the output directory. var err error if outputDirAbsPath, err = filepath.Abs(*outputDir); err != nil { failf("Invalid path: \"%s\"\n", *outputDir) } if _, err := os.Stat(*outputDir); os.IsNotExist(err) { failf("Directory \"%s\" does not exist.\n", *outputDir) } // If running via "bazel run", change into the workspace root directory (i.e. where the WORKSPACE // file is located). If not, we assume that the current working directory is the workspace root. if os.Getenv("BUILD_WORKSPACE_DIRECTORY") != "" { if err := os.Chdir(os.Getenv("BUILD_WORKSPACE_DIRECTORY")); err != nil { sklog.Fatal(err) } } // Resolve the //_bazel_testlogs symlink. Necessary because filepath.Walk() ignores symlinks. bazelTestlogsDir, err := filepath.EvalSymlinks("_bazel_testlogs") if err != nil { sklog.Fatal(err) } // Find all outputs.zip files under //_bazel_testlogs, which contain the undeclared outputs // produced by all tests. var allOutputsZipPaths []string if err := filepath.Walk(bazelTestlogsDir, func(path string, info os.FileInfo, err error) error { if err != nil { return skerr.Wrap(err) } if strings.HasSuffix(path, "/test.outputs/outputs.zip") { allOutputsZipPaths = append(allOutputsZipPaths, path) } return nil }); err != nil { sklog.Fatal(err) } // Inspect each outputs.zip file for Puppeteer screenshots. Extract them into the output directory // if any are found. for _, path := range allOutputsZipPaths { if err := extractPuppeteerScreenshotsFromOutputsZip(path); err != nil { sklog.Fatal(err) } } } // failf prints a message to sterr and exits with a non-zero exit code. func failf(msg string, args ...interface{}) { if _, err := fmt.Fprintf(os.Stderr, msg, args...); err != nil { sklog.Fatal(err) } os.Exit(1) } // extractPuppeteerScreenshotsFromOutputsZip inspects an outputs.zip file looking for screenshots // taken by a Puppeteer test, and extracts the screenshots inside the output directory if any are // found. 
// // This function makes the following assumptions: // // - All screenshots produced by a Puppeteer tests will be found inside a // "puppeteer-test-screenshots" directory within the test's outputs.zip file. // - All screenshots are PNG files (*.png) // - Puppeteer tests are the only tests in our codebase that produce undeclared outputs following // the above conventions. // // An alternative approach is to find all Puppeteer tests via a Bazel query (e.g. // "bazel query 'attr(generator_function, sk_element_puppeteer_test, //...)'"), but this can be // slow. Inspecting all outputs.zip files inside the //_bazel_testlogs directory is much faster. func extractPuppeteerScreenshotsFromOutputsZip(zipFilePath string) error { // Open the ZIP archive. zipFile, err := zip.OpenReader(zipFilePath) if err != nil { return skerr.Wrap(err) } defer util.Close(zipFile) // Iterate over all files inside the ZIP archive. for _, file := range zipFile.File { // Skip if the file is not a Puppeteer screenshot. dir, screenshotFileName := filepath.Split(file.Name) if dir != "puppeteer-test-screenshots/" || !strings.HasSuffix(screenshotFileName, ".png") { continue } // Extract screenshot into the output directory. outputFileName := filepath.Join(outputDirAbsPath, screenshotFileName) if err := extractFileFromZipArchive(file, outputFileName); err != nil { return skerr.Wrap(err) } fmt.Printf("Extracted screenshot: %s\n", outputFileName) } return nil } // extractFileFromZipArchive extracts a file inside a ZIP archive, and saves it to the outputPath. func extractFileFromZipArchive(zippedFile *zip.File, outputPath string) error { // Open the file inside the ZIP archive. zippedFileReader, err := zippedFile.Open() if err != nil { return skerr.Wrap(err) } defer util.Close(zippedFileReader) // Save it to disk. if err := util.WithWriteFile(outputPath, func(w io.Writer) error { if _, err := io.Copy(w, zippedFileReader); err != nil { return skerr.Wrap(err) } return nil }); err != nil { return skerr.Wrap(err) } return nil }
[ "\"BUILD_WORKING_DIRECTORY\"", "\"BUILD_WORKING_DIRECTORY\"", "\"BUILD_WORKSPACE_DIRECTORY\"", "\"BUILD_WORKSPACE_DIRECTORY\"" ]
[]
[ "BUILD_WORKING_DIRECTORY", "BUILD_WORKSPACE_DIRECTORY" ]
[]
["BUILD_WORKING_DIRECTORY", "BUILD_WORKSPACE_DIRECTORY"]
go
2
0
xcode.py
#!/usr/bin/env python import os import shlex import subprocess from datetime import datetime import wrapper wrapper_command = "java -jar /Users/ccampbell/Veracode/Source/vosp-api-wrappers-java-19.2.5.6.jar -action uploadandscan -appname verademo-swift -createprofile false -version '{}' -filepath '{}'" def build_bca(): archive_file = os.environ["ARCHIVE_PATH"] try: output = subprocess.check_output(["vcxcodepkg", "--noui", "-a", archive_file]) print(output) except subprocess.CalledProcessError as e: print(e.output) else: output_split = output.rsplit(" Path: ", 1) if len(output_split) == 2: bca_file = output_split[1][:-1] date = datetime.utcnow().strftime("%-d %b %Y %H:%M") command = shlex.split(wrapper_command.format(date, bca_file)) wrapper.run_wrapper(command) if __name__ == "__main__": build_bca()
[]
[]
[ "ARCHIVE_PATH" ]
[]
["ARCHIVE_PATH"]
python
1
0
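xcode.py above expects ARCHIVE_PATH, which Xcode normally provides to a build phase. A hedged sketch of calling it by hand, assuming vcxcodepkg and the Veracode wrapper jar are installed; the archive path is hypothetical.

import os

import xcode

os.environ["ARCHIVE_PATH"] = "/tmp/VerademoSwift.xcarchive"   # hypothetical archive produced by Xcode
xcode.build_bca()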
python/pyarrow/tests/test_parquet.py
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from collections import OrderedDict
import datetime
import decimal
import io
import json
import os
import six
import pickle
import pytest

import numpy as np

import pyarrow as pa
from pyarrow.compat import guid, u, BytesIO, unichar, PY2
from pyarrow.pandas_compat import _pandas_api
from pyarrow.tests import util
from pyarrow.filesystem import LocalFileSystem, FileSystem

try:
    import pyarrow.parquet as pq
except ImportError:
    pq = None

try:
    import pandas as pd
    import pandas.util.testing as tm
    from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
except ImportError:
    pd = tm = None


# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not parquet'
pytestmark = pytest.mark.parquet


@pytest.fixture(scope='module')
def datadir(datadir):
    return datadir / 'parquet'


def _write_table(table, path, **kwargs):
    # So we see the ImportError somewhere
    import pyarrow.parquet as pq

    if _pandas_api.is_data_frame(table):
        table = pa.Table.from_pandas(table)

    pq.write_table(table, path, **kwargs)
    return table


def _read_table(*args, **kwargs):
    return pq.read_table(*args, **kwargs)


def _roundtrip_table(table, read_table_kwargs=None,
                     write_table_kwargs=None):
    read_table_kwargs = read_table_kwargs or {}
    write_table_kwargs = write_table_kwargs or {}

    buf = io.BytesIO()
    _write_table(table, buf, **write_table_kwargs)
    buf.seek(0)
    return _read_table(buf, **read_table_kwargs)


def _check_roundtrip(table, expected=None, read_table_kwargs=None,
                     **write_table_kwargs):
    if expected is None:
        expected = table

    read_table_kwargs = read_table_kwargs or {}

    # intentionally check twice
    result = _roundtrip_table(table, read_table_kwargs=read_table_kwargs,
                              write_table_kwargs=write_table_kwargs)
    assert result.equals(expected)
    result = _roundtrip_table(result, read_table_kwargs=read_table_kwargs,
                              write_table_kwargs=write_table_kwargs)
    assert result.equals(expected)


def _roundtrip_pandas_dataframe(df, write_kwargs):
    table = pa.Table.from_pandas(df)

    buf = io.BytesIO()
    _write_table(table, buf, **write_kwargs)

    buf.seek(0)
    table1 = _read_table(buf)
    return table1.to_pandas()


@pytest.mark.parametrize('dtype', [int, float])
def test_single_pylist_column_roundtrip(tempdir, dtype):
    filename = tempdir / 'single_{}_column.parquet'.format(dtype.__name__)
    data = [pa.array(list(map(dtype, range(5))))]
    table = pa.Table.from_arrays(data, names=['a'])
    _write_table(table, filename)
    table_read = _read_table(filename)
    for i in range(table.num_columns):
        col_written = table[i]
        col_read = table_read[i]
        assert table.field(i).name == table_read.field(i).name
        assert col_read.num_chunks == 1
        data_written = col_written.chunk(0)
        data_read = col_read.chunk(0)
        assert data_written.equals(data_read)


def alltypes_sample(size=10000, seed=0, categorical=False):
    np.random.seed(seed)
    arrays = {
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16),
        'uint32': np.arange(size, dtype=np.uint32),
        'uint64': np.arange(size, dtype=np.uint64),
        'int8': np.arange(size, dtype=np.int16),
        'int16': np.arange(size, dtype=np.int16),
        'int32': np.arange(size, dtype=np.int32),
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        # TODO(wesm): Test other timestamp resolutions now that arrow supports
        # them
        'datetime': np.arange("2016-01-01T00:00:00.001", size,
                              dtype='datetime64[ms]'),
        'str': pd.Series([str(x) for x in range(size)]),
        'empty_str': [''] * size,
        'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
        'null': [None] * size,
        'null_list': [None] * 2 + [[None] * (x % 4) for x in range(size - 2)],
    }
    if categorical:
        arrays['str_category'] = arrays['str'].astype('category')
    return pd.DataFrame(arrays)


@pytest.mark.pandas
@pytest.mark.parametrize('chunk_size', [None, 1000])
def test_pandas_parquet_2_0_roundtrip(tempdir, chunk_size):
    df = alltypes_sample(size=10000, categorical=True)

    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    assert arrow_table.schema.pandas_metadata is not None

    _write_table(arrow_table, filename, version="2.0",
                 coerce_timestamps='ms', chunk_size=chunk_size)
    table_read = pq.read_pandas(filename)
    assert table_read.schema.pandas_metadata is not None

    assert arrow_table.schema.metadata == table_read.schema.metadata

    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)


def test_set_data_page_size():
    arr = pa.array([1, 2, 3] * 1000000)
    t = pa.Table.from_arrays([arr], names=['f0'])

    # 128K, 256K, 512K
    page_sizes = [2 << 16, 2 << 17, 2 << 18]
    for target_page_size in page_sizes:
        _check_roundtrip(t, data_page_size=target_page_size)


@pytest.mark.pandas
def test_chunked_table_write():
    # ARROW-232
    df = alltypes_sample(size=10)

    batch = pa.RecordBatch.from_pandas(df)
    table = pa.Table.from_batches([batch] * 3)
    _check_roundtrip(table, version='2.0')

    df, _ = dataframe_with_lists()
    batch = pa.RecordBatch.from_pandas(df)
    table = pa.Table.from_batches([batch] * 3)
    _check_roundtrip(table, version='2.0')


@pytest.mark.pandas
def test_no_memory_map(tempdir):
    df = alltypes_sample(size=10)

    table = pa.Table.from_pandas(df)
    _check_roundtrip(table, read_table_kwargs={'memory_map': False},
                     version='2.0')

    filename = str(tempdir / 'tmp_file')
    with open(filename, 'wb') as f:
        _write_table(table, f, version='2.0')
    table_read = pq.read_pandas(filename, memory_map=False)
    assert table_read.equals(table)


def test_special_chars_filename(tempdir):
    table = pa.Table.from_arrays([pa.array([42])], ["ints"])
    filename = "foo # bar"
    path = tempdir / filename
    assert not path.exists()
    _write_table(table, str(path))
    assert path.exists()
    table_read = _read_table(str(path))
    assert table_read.equals(table)


@pytest.mark.pandas
def test_empty_table_roundtrip():
    df = alltypes_sample(size=10)

    # Create a non-empty table to infer the types correctly, then slice to 0
    table = pa.Table.from_pandas(df)
    table = pa.Table.from_arrays(
        [col.chunk(0)[:0] for col in table.itercolumns()],
        names=table.schema.names)

    assert table.schema.field('null').type == pa.null()
    assert table.schema.field('null_list').type == pa.list_(pa.null())
    _check_roundtrip(table, version='2.0')


@pytest.mark.pandas
def test_empty_table_no_columns():
    df = pd.DataFrame()
    empty = pa.Table.from_pandas(df, preserve_index=False)
    _check_roundtrip(empty)


def test_empty_lists_table_roundtrip():
    # ARROW-2744: Shouldn't crash when writing an array of empty lists
    arr = pa.array([[], []], type=pa.list_(pa.int32()))
    table = pa.Table.from_arrays([arr], ["A"])
    _check_roundtrip(table)


@pytest.mark.pandas
def test_pandas_parquet_datetime_tz():
    s = pd.Series([datetime.datetime(2017, 9, 6)])
    s = s.dt.tz_localize('utc')

    s.index = s

    # Both a column and an index to hit both use cases
    df = pd.DataFrame({'tz_aware': s,
                       'tz_eastern': s.dt.tz_convert('US/Eastern')},
                      index=s)

    f = BytesIO()

    arrow_table = pa.Table.from_pandas(df)

    _write_table(arrow_table, f, coerce_timestamps='ms')
    f.seek(0)

    table_read = pq.read_pandas(f)

    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)


@pytest.mark.pandas
@pytest.mark.skipif(six.PY2, reason='datetime.timezone is available since '
                                    'python version 3.2')
def test_datetime_timezone_tzinfo():
    value = datetime.datetime(2018, 1, 1, 1, 23, 45,
                              tzinfo=datetime.timezone.utc)
    df = pd.DataFrame({'foo': [value]})

    _roundtrip_pandas_dataframe(df, write_kwargs={})


@pytest.mark.pandas
def test_pandas_parquet_custom_metadata(tempdir):
    df = alltypes_sample(size=10000)

    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    assert b'pandas' in arrow_table.schema.metadata

    _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')

    metadata = pq.read_metadata(filename).metadata
    assert b'pandas' in metadata

    js = json.loads(metadata[b'pandas'].decode('utf8'))
    assert js['index_columns'] == [{'kind': 'range',
                                    'name': None,
                                    'start': 0, 'stop': 10000,
                                    'step': 1}]


@pytest.mark.pandas
def test_pandas_parquet_column_multiindex(tempdir):
    df = alltypes_sample(size=10)
    df.columns = pd.MultiIndex.from_tuples(
        list(zip(df.columns, df.columns[::-1])),
        names=['level_1', 'level_2']
    )

    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    assert arrow_table.schema.pandas_metadata is not None

    _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')

    table_read = pq.read_pandas(filename)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)


@pytest.mark.pandas
def test_pandas_parquet_2_0_roundtrip_read_pandas_no_index_written(tempdir):
    df = alltypes_sample(size=10000)

    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    js = arrow_table.schema.pandas_metadata
    assert not js['index_columns']
    # ARROW-2170
    # While index_columns should be empty, columns needs to be filled still.
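    # For reference, `js` here is the decoded b'pandas' metadata blob that
    # pyarrow embeds in the Arrow schema. Its rough shape (illustrative, not
    # exact) when no index is preserved looks like:
    #     {'index_columns': [],
    #      'columns': [{'name': 'uint8', 'pandas_type': 'uint8',
    #                   'numpy_type': 'uint8', 'metadata': None}, ...],
    #      'pandas_version': '...'}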
assert js['columns'] _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms') table_read = pq.read_pandas(filename) js = table_read.schema.pandas_metadata assert not js['index_columns'] assert arrow_table.schema.metadata == table_read.schema.metadata df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) @pytest.mark.pandas def test_pandas_parquet_1_0_roundtrip(tempdir): size = 10000 np.random.seed(0) df = pd.DataFrame({ 'uint8': np.arange(size, dtype=np.uint8), 'uint16': np.arange(size, dtype=np.uint16), 'uint32': np.arange(size, dtype=np.uint32), 'uint64': np.arange(size, dtype=np.uint64), 'int8': np.arange(size, dtype=np.int16), 'int16': np.arange(size, dtype=np.int16), 'int32': np.arange(size, dtype=np.int32), 'int64': np.arange(size, dtype=np.int64), 'float32': np.arange(size, dtype=np.float32), 'float64': np.arange(size, dtype=np.float64), 'bool': np.random.randn(size) > 0, 'str': [str(x) for x in range(size)], 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None], 'empty_str': [''] * size }) filename = tempdir / 'pandas_roundtrip.parquet' arrow_table = pa.Table.from_pandas(df) _write_table(arrow_table, filename, version='1.0') table_read = _read_table(filename) df_read = table_read.to_pandas() # We pass uint32_t as int64_t if we write Parquet version 1.0 df['uint32'] = df['uint32'].values.astype(np.int64) tm.assert_frame_equal(df, df_read) @pytest.mark.pandas def test_multiple_path_types(tempdir): # Test compatibility with PEP 519 path-like objects path = tempdir / 'zzz.parquet' df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)}) _write_table(df, path) table_read = _read_table(path) df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) # Test compatibility with plain string paths path = str(tempdir) + 'zzz.parquet' df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)}) _write_table(df, path) table_read = _read_table(path) df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) @pytest.mark.pandas def test_pandas_column_selection(tempdir): size = 10000 np.random.seed(0) df = pd.DataFrame({ 'uint8': np.arange(size, dtype=np.uint8), 'uint16': np.arange(size, dtype=np.uint16) }) filename = tempdir / 'pandas_roundtrip.parquet' arrow_table = pa.Table.from_pandas(df) _write_table(arrow_table, filename) table_read = _read_table(filename, columns=['uint8']) df_read = table_read.to_pandas() tm.assert_frame_equal(df[['uint8']], df_read) # ARROW-4267: Selection of duplicate columns still leads to these columns # being read uniquely. 
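    # The same projection is available through the public API; a minimal
    # sketch (the path below is a placeholder, not a file created by this
    # test):
    #     pq.read_table('example.parquet', columns=['uint8', 'uint16'])
    # Passing a duplicated name, as in the call that follows, still returns
    # the column only once.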
table_read = _read_table(filename, columns=['uint8', 'uint8']) df_read = table_read.to_pandas() tm.assert_frame_equal(df[['uint8']], df_read) def _random_integers(size, dtype): # We do not generate integers outside the int64 range platform_int_info = np.iinfo('int_') iinfo = np.iinfo(dtype) return np.random.randint(max(iinfo.min, platform_int_info.min), min(iinfo.max, platform_int_info.max), size=size).astype(dtype) def _test_dataframe(size=10000, seed=0): np.random.seed(seed) df = pd.DataFrame({ 'uint8': _random_integers(size, np.uint8), 'uint16': _random_integers(size, np.uint16), 'uint32': _random_integers(size, np.uint32), 'uint64': _random_integers(size, np.uint64), 'int8': _random_integers(size, np.int8), 'int16': _random_integers(size, np.int16), 'int32': _random_integers(size, np.int32), 'int64': _random_integers(size, np.int64), 'float32': np.random.randn(size).astype(np.float32), 'float64': np.arange(size, dtype=np.float64), 'bool': np.random.randn(size) > 0, 'strings': [tm.rands(10) for i in range(size)], 'all_none': [None] * size, 'all_none_category': [None] * size }) # TODO(PARQUET-1015) # df['all_none_category'] = df['all_none_category'].astype('category') return df @pytest.mark.pandas def test_pandas_parquet_native_file_roundtrip(tempdir): df = _test_dataframe(10000) arrow_table = pa.Table.from_pandas(df) imos = pa.BufferOutputStream() _write_table(arrow_table, imos, version="2.0") buf = imos.getvalue() reader = pa.BufferReader(buf) df_read = _read_table(reader).to_pandas() tm.assert_frame_equal(df, df_read) @pytest.mark.pandas def test_parquet_incremental_file_build(tempdir): df = _test_dataframe(100) df['unique_id'] = 0 arrow_table = pa.Table.from_pandas(df, preserve_index=False) out = pa.BufferOutputStream() writer = pq.ParquetWriter(out, arrow_table.schema, version='2.0') frames = [] for i in range(10): df['unique_id'] = i arrow_table = pa.Table.from_pandas(df, preserve_index=False) writer.write_table(arrow_table) frames.append(df.copy()) writer.close() buf = out.getvalue() result = _read_table(pa.BufferReader(buf)) expected = pd.concat(frames, ignore_index=True) tm.assert_frame_equal(result.to_pandas(), expected) @pytest.mark.pandas def test_read_pandas_column_subset(tempdir): df = _test_dataframe(10000) arrow_table = pa.Table.from_pandas(df) imos = pa.BufferOutputStream() _write_table(arrow_table, imos, version="2.0") buf = imos.getvalue() reader = pa.BufferReader(buf) df_read = pq.read_pandas(reader, columns=['strings', 'uint8']).to_pandas() tm.assert_frame_equal(df[['strings', 'uint8']], df_read) @pytest.mark.pandas def test_pandas_parquet_empty_roundtrip(tempdir): df = _test_dataframe(0) arrow_table = pa.Table.from_pandas(df) imos = pa.BufferOutputStream() _write_table(arrow_table, imos, version="2.0") buf = imos.getvalue() reader = pa.BufferReader(buf) df_read = _read_table(reader).to_pandas() tm.assert_frame_equal(df, df_read) @pytest.mark.pandas def test_pandas_parquet_pyfile_roundtrip(tempdir): filename = tempdir / 'pandas_pyfile_roundtrip.parquet' size = 5 df = pd.DataFrame({ 'int64': np.arange(size, dtype=np.int64), 'float32': np.arange(size, dtype=np.float32), 'float64': np.arange(size, dtype=np.float64), 'bool': np.random.randn(size) > 0, 'strings': ['foo', 'bar', None, 'baz', 'qux'] }) arrow_table = pa.Table.from_pandas(df) with filename.open('wb') as f: _write_table(arrow_table, f, version="1.0") data = io.BytesIO(filename.read_bytes()) table_read = _read_table(data) df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) @pytest.mark.pandas 
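# The test below cycles through several pyarrow.parquet writer options. For
# reference, the same options go straight through the public API; a minimal
# sketch (the path is a placeholder):
#     pq.write_table(arrow_table, 'example.parquet', version='2.0',
#                    use_dictionary=False, write_statistics=True,
#                    compression='SNAPPY')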
def test_pandas_parquet_configuration_options(tempdir): size = 10000 np.random.seed(0) df = pd.DataFrame({ 'uint8': np.arange(size, dtype=np.uint8), 'uint16': np.arange(size, dtype=np.uint16), 'uint32': np.arange(size, dtype=np.uint32), 'uint64': np.arange(size, dtype=np.uint64), 'int8': np.arange(size, dtype=np.int16), 'int16': np.arange(size, dtype=np.int16), 'int32': np.arange(size, dtype=np.int32), 'int64': np.arange(size, dtype=np.int64), 'float32': np.arange(size, dtype=np.float32), 'float64': np.arange(size, dtype=np.float64), 'bool': np.random.randn(size) > 0 }) filename = tempdir / 'pandas_roundtrip.parquet' arrow_table = pa.Table.from_pandas(df) for use_dictionary in [True, False]: _write_table(arrow_table, filename, version='2.0', use_dictionary=use_dictionary) table_read = _read_table(filename) df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) for write_statistics in [True, False]: _write_table(arrow_table, filename, version='2.0', write_statistics=write_statistics) table_read = _read_table(filename) df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) for compression in ['NONE', 'SNAPPY', 'GZIP', 'LZ4', 'ZSTD']: _write_table(arrow_table, filename, version='2.0', compression=compression) table_read = _read_table(filename) df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) def make_sample_file(table_or_df): if isinstance(table_or_df, pa.Table): a_table = table_or_df else: a_table = pa.Table.from_pandas(table_or_df) buf = io.BytesIO() _write_table(a_table, buf, compression='SNAPPY', version='2.0', coerce_timestamps='ms') buf.seek(0) return pq.ParquetFile(buf) @pytest.mark.pandas def test_parquet_metadata_api(): df = alltypes_sample(size=10000) df = df.reindex(columns=sorted(df.columns)) df.index = np.random.randint(0, 1000000, size=len(df)) fileh = make_sample_file(df) ncols = len(df.columns) # Series of sniff tests meta = fileh.metadata repr(meta) assert meta.num_rows == len(df) assert meta.num_columns == ncols + 1 # +1 for index assert meta.num_row_groups == 1 assert meta.format_version == '2.0' assert 'parquet-cpp' in meta.created_by assert isinstance(meta.serialized_size, int) assert isinstance(meta.metadata, dict) # Schema schema = fileh.schema assert meta.schema is schema assert len(schema) == ncols + 1 # +1 for index repr(schema) col = schema[0] repr(col) assert col.name == df.columns[0] assert col.max_definition_level == 1 assert col.max_repetition_level == 0 assert col.max_repetition_level == 0 assert col.physical_type == 'BOOLEAN' assert col.converted_type == 'NONE' with pytest.raises(IndexError): schema[ncols + 1] # +1 for index with pytest.raises(IndexError): schema[-1] # Row group for rg in range(meta.num_row_groups): rg_meta = meta.row_group(rg) assert isinstance(rg_meta, pq.RowGroupMetaData) repr(rg_meta) for col in range(rg_meta.num_columns): col_meta = rg_meta.column(col) assert isinstance(col_meta, pq.ColumnChunkMetaData) repr(col_meta) with pytest.raises(IndexError): meta.row_group(-1) with pytest.raises(IndexError): meta.row_group(meta.num_row_groups + 1) rg_meta = meta.row_group(0) assert rg_meta.num_rows == len(df) assert rg_meta.num_columns == ncols + 1 # +1 for index assert rg_meta.total_byte_size > 0 with pytest.raises(IndexError): col_meta = rg_meta.column(-1) with pytest.raises(IndexError): col_meta = rg_meta.column(ncols + 2) col_meta = rg_meta.column(0) assert col_meta.file_offset > 0 assert col_meta.file_path == '' # created from BytesIO assert col_meta.physical_type == 'BOOLEAN' assert 
col_meta.num_values == 10000 assert col_meta.path_in_schema == 'bool' assert col_meta.is_stats_set is True assert isinstance(col_meta.statistics, pq.Statistics) assert col_meta.compression == 'SNAPPY' assert col_meta.encodings == ('PLAIN', 'RLE') assert col_meta.has_dictionary_page is False assert col_meta.dictionary_page_offset is None assert col_meta.data_page_offset > 0 assert col_meta.total_compressed_size > 0 assert col_meta.total_uncompressed_size > 0 with pytest.raises(NotImplementedError): col_meta.has_index_page with pytest.raises(NotImplementedError): col_meta.index_page_offset @pytest.mark.pandas @pytest.mark.parametrize( ( 'data', 'type', 'physical_type', 'min_value', 'max_value', 'null_count', 'num_values', 'distinct_count' ), [ ([1, 2, 2, None, 4], pa.uint8(), 'INT32', 1, 4, 1, 4, 0), ([1, 2, 2, None, 4], pa.uint16(), 'INT32', 1, 4, 1, 4, 0), ([1, 2, 2, None, 4], pa.uint32(), 'INT32', 1, 4, 1, 4, 0), ([1, 2, 2, None, 4], pa.uint64(), 'INT64', 1, 4, 1, 4, 0), ([-1, 2, 2, None, 4], pa.int8(), 'INT32', -1, 4, 1, 4, 0), ([-1, 2, 2, None, 4], pa.int16(), 'INT32', -1, 4, 1, 4, 0), ([-1, 2, 2, None, 4], pa.int32(), 'INT32', -1, 4, 1, 4, 0), ([-1, 2, 2, None, 4], pa.int64(), 'INT64', -1, 4, 1, 4, 0), ( [-1.1, 2.2, 2.3, None, 4.4], pa.float32(), 'FLOAT', -1.1, 4.4, 1, 4, 0 ), ( [-1.1, 2.2, 2.3, None, 4.4], pa.float64(), 'DOUBLE', -1.1, 4.4, 1, 4, 0 ), ( [u'', u'b', unichar(1000), None, u'aaa'], pa.binary(), 'BYTE_ARRAY', b'', unichar(1000).encode('utf-8'), 1, 4, 0 ), ( [True, False, False, True, True], pa.bool_(), 'BOOLEAN', False, True, 0, 5, 0 ), ( [b'\x00', b'b', b'12', None, b'aaa'], pa.binary(), 'BYTE_ARRAY', b'\x00', b'b', 1, 4, 0 ), ] ) def test_parquet_column_statistics_api(data, type, physical_type, min_value, max_value, null_count, num_values, distinct_count): df = pd.DataFrame({'data': data}) schema = pa.schema([pa.field('data', type)]) table = pa.Table.from_pandas(df, schema=schema, safe=False) fileh = make_sample_file(table) meta = fileh.metadata rg_meta = meta.row_group(0) col_meta = rg_meta.column(0) stat = col_meta.statistics assert stat.has_min_max assert _close(type, stat.min, min_value) assert _close(type, stat.max, max_value) assert stat.null_count == null_count assert stat.num_values == num_values # TODO(kszucs) until parquet-cpp API doesn't expose HasDistinctCount # method, missing distinct_count is represented as zero instead of None assert stat.distinct_count == distinct_count assert stat.physical_type == physical_type def _close(type, left, right): if type == pa.float32(): return abs(left - right) < 1E-7 elif type == pa.float64(): return abs(left - right) < 1E-13 else: return left == right def test_statistics_convert_logical_types(tempdir): # ARROW-5166, ARROW-4139 # (min, max, type) cases = [(10, 11164359321221007157, pa.uint64()), (10, 4294967295, pa.uint32()), (u"ähnlich", u"öffentlich", pa.utf8()), (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000), pa.time32('ms')), (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000), pa.time64('us')), (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000), datetime.datetime(2019, 6, 25, 0, 0, 0, 1000), pa.timestamp('ms')), (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000), datetime.datetime(2019, 6, 25, 0, 0, 0, 1000), pa.timestamp('us'))] for i, (min_val, max_val, typ) in enumerate(cases): t = pa.Table.from_arrays([pa.array([min_val, max_val], type=typ)], ['col']) path = str(tempdir / ('example{}.parquet'.format(i))) pq.write_table(t, path, version='2.0') pf = pq.ParquetFile(path) stats = 
pf.metadata.row_group(0).column(0).statistics assert stats.min == min_val assert stats.max == max_val def test_parquet_write_disable_statistics(tempdir): table = pa.Table.from_pydict( {'a': pa.array([1, 2, 3]), 'b': pa.array(['a', 'b', 'c'])}) _write_table(table, tempdir / 'data.parquet') meta = pq.read_metadata(tempdir / 'data.parquet') for col in [0, 1]: cc = meta.row_group(0).column(col) assert cc.is_stats_set is True assert cc.statistics is not None _write_table(table, tempdir / 'data2.parquet', write_statistics=False) meta = pq.read_metadata(tempdir / 'data2.parquet') for col in [0, 1]: cc = meta.row_group(0).column(col) assert cc.is_stats_set is False assert cc.statistics is None _write_table(table, tempdir / 'data3.parquet', write_statistics=['a']) meta = pq.read_metadata(tempdir / 'data3.parquet') cc_a = meta.row_group(0).column(0) assert cc_a.is_stats_set is True assert cc_a.statistics is not None cc_b = meta.row_group(0).column(1) assert cc_b.is_stats_set is False assert cc_b.statistics is None @pytest.mark.pandas def test_compare_schemas(): df = alltypes_sample(size=10000) fileh = make_sample_file(df) fileh2 = make_sample_file(df) fileh3 = make_sample_file(df[df.columns[::2]]) # ParquetSchema assert isinstance(fileh.schema, pq.ParquetSchema) assert fileh.schema.equals(fileh.schema) assert fileh.schema == fileh.schema assert fileh.schema.equals(fileh2.schema) assert fileh.schema == fileh2.schema assert fileh.schema != 'arbitrary object' assert not fileh.schema.equals(fileh3.schema) assert fileh.schema != fileh3.schema # ColumnSchema assert isinstance(fileh.schema[0], pq.ColumnSchema) assert fileh.schema[0].equals(fileh.schema[0]) assert fileh.schema[0] == fileh.schema[0] assert not fileh.schema[0].equals(fileh.schema[1]) assert fileh.schema[0] != fileh.schema[1] assert fileh.schema[0] != 'arbitrary object' def test_validate_schema_write_table(tempdir): # ARROW-2926 simple_fields = [ pa.field('POS', pa.uint32()), pa.field('desc', pa.string()) ] simple_schema = pa.schema(simple_fields) # simple_table schema does not match simple_schema simple_from_array = [pa.array([1]), pa.array(['bla'])] simple_table = pa.Table.from_arrays(simple_from_array, ['POS', 'desc']) path = tempdir / 'simple_validate_schema.parquet' with pq.ParquetWriter(path, simple_schema, version='2.0', compression='snappy', flavor='spark') as w: with pytest.raises(ValueError): w.write_table(simple_table) @pytest.mark.pandas def test_column_of_arrays(tempdir): df, schema = dataframe_with_arrays() filename = tempdir / 'pandas_roundtrip.parquet' arrow_table = pa.Table.from_pandas(df, schema=schema) _write_table(arrow_table, filename, version="2.0", coerce_timestamps='ms') table_read = _read_table(filename) df_read = table_read.to_pandas() tm.assert_frame_equal(df, df_read) @pytest.mark.pandas def test_coerce_timestamps(tempdir): from collections import OrderedDict # ARROW-622 arrays = OrderedDict() fields = [pa.field('datetime64', pa.list_(pa.timestamp('ms')))] arrays['datetime64'] = [ np.array(['2007-07-13T01:23:34.123456789', None, '2010-08-13T05:46:57.437699912'], dtype='datetime64[ms]'), None, None, np.array(['2007-07-13T02', None, '2010-08-13T05:46:57.437699912'], dtype='datetime64[ms]'), ] df = pd.DataFrame(arrays) schema = pa.schema(fields) filename = tempdir / 'pandas_roundtrip.parquet' arrow_table = pa.Table.from_pandas(df, schema=schema) _write_table(arrow_table, filename, version="2.0", coerce_timestamps='us') table_read = _read_table(filename) df_read = table_read.to_pandas() df_expected = df.copy() for i, x 
in enumerate(df_expected['datetime64']): if isinstance(x, np.ndarray): df_expected['datetime64'][i] = x.astype('M8[us]') tm.assert_frame_equal(df_expected, df_read) with pytest.raises(ValueError): _write_table(arrow_table, filename, version='2.0', coerce_timestamps='unknown') @pytest.mark.pandas def test_coerce_timestamps_truncated(tempdir): """ ARROW-2555: Test that we can truncate timestamps when coercing if explicitly allowed. """ dt_us = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1, second=1, microsecond=1) dt_ms = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1, second=1) fields_us = [pa.field('datetime64', pa.timestamp('us'))] arrays_us = {'datetime64': [dt_us, dt_ms]} df_us = pd.DataFrame(arrays_us) schema_us = pa.schema(fields_us) filename = tempdir / 'pandas_truncated.parquet' table_us = pa.Table.from_pandas(df_us, schema=schema_us) _write_table(table_us, filename, version="2.0", coerce_timestamps='ms', allow_truncated_timestamps=True) table_ms = _read_table(filename) df_ms = table_ms.to_pandas() arrays_expected = {'datetime64': [dt_ms, dt_ms]} df_expected = pd.DataFrame(arrays_expected) tm.assert_frame_equal(df_expected, df_ms) @pytest.mark.pandas def test_column_of_lists(tempdir): df, schema = dataframe_with_lists(parquet_compatible=True) filename = tempdir / 'pandas_roundtrip.parquet' arrow_table = pa.Table.from_pandas(df, schema=schema) _write_table(arrow_table, filename, version='2.0') table_read = _read_table(filename) df_read = table_read.to_pandas() if PY2: # assert_frame_equal fails when comparing datetime.date and # np.datetime64, even with check_datetimelike_compat=True so # convert the values to np.datetime64 instead for col in ['date32[day]_list', 'date64[ms]_list']: df[col] = df[col].apply( lambda x: list(map(np.datetime64, x)) if x else x ) tm.assert_frame_equal(df, df_read) @pytest.mark.pandas def test_date_time_types(tempdir): t1 = pa.date32() data1 = np.array([17259, 17260, 17261], dtype='int32') a1 = pa.array(data1, type=t1) t2 = pa.date64() data2 = data1.astype('int64') * 86400000 a2 = pa.array(data2, type=t2) t3 = pa.timestamp('us') start = pd.Timestamp('2001-01-01').value / 1000 data3 = np.array([start, start + 1, start + 2], dtype='int64') a3 = pa.array(data3, type=t3) t4 = pa.time32('ms') data4 = np.arange(3, dtype='i4') a4 = pa.array(data4, type=t4) t5 = pa.time64('us') a5 = pa.array(data4.astype('int64'), type=t5) t6 = pa.time32('s') a6 = pa.array(data4, type=t6) ex_t6 = pa.time32('ms') ex_a6 = pa.array(data4 * 1000, type=ex_t6) t7 = pa.timestamp('ns') start = pd.Timestamp('2001-01-01').value data7 = np.array([start, start + 1000, start + 2000], dtype='int64') a7 = pa.array(data7, type=t7) table = pa.Table.from_arrays([a1, a2, a3, a4, a5, a6, a7], ['date32', 'date64', 'timestamp[us]', 'time32[s]', 'time64[us]', 'time32_from64[s]', 'timestamp[ns]']) # date64 as date32 # time32[s] to time32[ms] expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7], ['date32', 'date64', 'timestamp[us]', 'time32[s]', 'time64[us]', 'time32_from64[s]', 'timestamp[ns]']) _check_roundtrip(table, expected=expected, version='2.0') t0 = pa.timestamp('ms') data0 = np.arange(4, dtype='int64') a0 = pa.array(data0, type=t0) t1 = pa.timestamp('us') data1 = np.arange(4, dtype='int64') a1 = pa.array(data1, type=t1) t2 = pa.timestamp('ns') data2 = np.arange(4, dtype='int64') a2 = pa.array(data2, type=t2) table = pa.Table.from_arrays([a0, a1, a2], ['ts[ms]', 'ts[us]', 'ts[ns]']) expected = pa.Table.from_arrays([a0, a1, a2], ['ts[ms]', 'ts[us]', 
'ts[ns]']) # int64 for all timestamps supported by default filename = tempdir / 'int64_timestamps.parquet' _write_table(table, filename, version='2.0') parquet_schema = pq.ParquetFile(filename).schema for i in range(3): assert parquet_schema.column(i).physical_type == 'INT64' read_table = _read_table(filename) assert read_table.equals(expected) t0_ns = pa.timestamp('ns') data0_ns = np.array(data0 * 1000000, dtype='int64') a0_ns = pa.array(data0_ns, type=t0_ns) t1_ns = pa.timestamp('ns') data1_ns = np.array(data1 * 1000, dtype='int64') a1_ns = pa.array(data1_ns, type=t1_ns) expected = pa.Table.from_arrays([a0_ns, a1_ns, a2], ['ts[ms]', 'ts[us]', 'ts[ns]']) # int96 nanosecond timestamps produced upon request filename = tempdir / 'explicit_int96_timestamps.parquet' _write_table(table, filename, version='2.0', use_deprecated_int96_timestamps=True) parquet_schema = pq.ParquetFile(filename).schema for i in range(3): assert parquet_schema.column(i).physical_type == 'INT96' read_table = _read_table(filename) assert read_table.equals(expected) # int96 nanosecond timestamps implied by flavor 'spark' filename = tempdir / 'spark_int96_timestamps.parquet' _write_table(table, filename, version='2.0', flavor='spark') parquet_schema = pq.ParquetFile(filename).schema for i in range(3): assert parquet_schema.column(i).physical_type == 'INT96' read_table = _read_table(filename) assert read_table.equals(expected) def test_timestamp_restore_timezone(): # ARROW-5888, restore timezone from serialized metadata ty = pa.timestamp('ms', tz='America/New_York') arr = pa.array([1, 2, 3], type=ty) t = pa.table([arr], names=['f0']) _check_roundtrip(t) @pytest.mark.pandas def test_list_of_datetime_time_roundtrip(): # ARROW-4135 times = pd.to_datetime(['09:00', '09:30', '10:00', '10:30', '11:00', '11:30', '12:00']) df = pd.DataFrame({'time': [times.time]}) _roundtrip_pandas_dataframe(df, write_kwargs={}) @pytest.mark.pandas def test_parquet_version_timestamp_differences(): i_s = pd.Timestamp('2010-01-01').value / 1000000000 # := 1262304000 d_s = np.arange(i_s, i_s + 10, 1, dtype='int64') d_ms = d_s * 1000 d_us = d_ms * 1000 d_ns = d_us * 1000 a_s = pa.array(d_s, type=pa.timestamp('s')) a_ms = pa.array(d_ms, type=pa.timestamp('ms')) a_us = pa.array(d_us, type=pa.timestamp('us')) a_ns = pa.array(d_ns, type=pa.timestamp('ns')) names = ['ts:s', 'ts:ms', 'ts:us', 'ts:ns'] table = pa.Table.from_arrays([a_s, a_ms, a_us, a_ns], names) # Using Parquet version 1.0, seconds should be coerced to milliseconds # and nanoseconds should be coerced to microseconds by default expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_us], names) _check_roundtrip(table, expected) # Using Parquet version 2.0, seconds should be coerced to milliseconds # and nanoseconds should be retained by default expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_ns], names) _check_roundtrip(table, expected, version='2.0') # Using Parquet version 1.0, coercing to milliseconds or microseconds # is allowed expected = pa.Table.from_arrays([a_ms, a_ms, a_ms, a_ms], names) _check_roundtrip(table, expected, coerce_timestamps='ms') # Using Parquet version 2.0, coercing to milliseconds or microseconds # is allowed expected = pa.Table.from_arrays([a_us, a_us, a_us, a_us], names) _check_roundtrip(table, expected, version='2.0', coerce_timestamps='us') # TODO: after pyarrow allows coerce_timestamps='ns', tests like the # following should pass ... 
# Using Parquet version 1.0, coercing to nanoseconds is not allowed # expected = None # with pytest.raises(NotImplementedError): # _roundtrip_table(table, coerce_timestamps='ns') # Using Parquet version 2.0, coercing to nanoseconds is allowed # expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names) # _check_roundtrip(table, expected, version='2.0', coerce_timestamps='ns') # For either Parquet version, coercing to nanoseconds is allowed # if Int96 storage is used expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names) _check_roundtrip(table, expected, use_deprecated_int96_timestamps=True) _check_roundtrip(table, expected, version='2.0', use_deprecated_int96_timestamps=True) def test_large_list_records(): # This was fixed in PARQUET-1100 list_lengths = np.random.randint(0, 500, size=50) list_lengths[::10] = 0 list_values = [list(map(int, np.random.randint(0, 100, size=x))) if i % 8 else None for i, x in enumerate(list_lengths)] a1 = pa.array(list_values) table = pa.Table.from_arrays([a1], ['int_lists']) _check_roundtrip(table) def test_sanitized_spark_field_names(): a0 = pa.array([0, 1, 2, 3, 4]) name = 'prohib; ,\t{}' table = pa.Table.from_arrays([a0], [name]) result = _roundtrip_table(table, write_table_kwargs={'flavor': 'spark'}) expected_name = 'prohib______' assert result.schema[0].name == expected_name @pytest.mark.pandas def test_spark_flavor_preserves_pandas_metadata(): df = _test_dataframe(size=100) df.index = np.arange(0, 10 * len(df), 10) df.index.name = 'foo' result = _roundtrip_pandas_dataframe(df, {'version': '2.0', 'flavor': 'spark'}) tm.assert_frame_equal(result, df) def test_fixed_size_binary(): t0 = pa.binary(10) data = [b'fooooooooo', None, b'barooooooo', b'quxooooooo'] a0 = pa.array(data, type=t0) table = pa.Table.from_arrays([a0], ['binary[10]']) _check_roundtrip(table) @pytest.mark.pandas def test_multithreaded_read(): df = alltypes_sample(size=10000) table = pa.Table.from_pandas(df) buf = io.BytesIO() _write_table(table, buf, compression='SNAPPY', version='2.0') buf.seek(0) table1 = _read_table(buf, use_threads=True) buf.seek(0) table2 = _read_table(buf, use_threads=False) assert table1.equals(table2) @pytest.mark.pandas def test_min_chunksize(): data = pd.DataFrame([np.arange(4)], columns=['A', 'B', 'C', 'D']) table = pa.Table.from_pandas(data.reset_index()) buf = io.BytesIO() _write_table(table, buf, chunk_size=-1) buf.seek(0) result = _read_table(buf) assert result.equals(table) with pytest.raises(ValueError): _write_table(table, buf, chunk_size=0) @pytest.mark.pandas def test_pass_separate_metadata(): # ARROW-471 df = alltypes_sample(size=10000) a_table = pa.Table.from_pandas(df) buf = io.BytesIO() _write_table(a_table, buf, compression='snappy', version='2.0') buf.seek(0) metadata = pq.read_metadata(buf) buf.seek(0) fileh = pq.ParquetFile(buf, metadata=metadata) tm.assert_frame_equal(df, fileh.read().to_pandas()) @pytest.mark.pandas def test_read_single_row_group(): # ARROW-471 N, K = 10000, 4 df = alltypes_sample(size=N) a_table = pa.Table.from_pandas(df) buf = io.BytesIO() _write_table(a_table, buf, row_group_size=N / K, compression='snappy', version='2.0') buf.seek(0) pf = pq.ParquetFile(buf) assert pf.num_row_groups == K row_groups = [pf.read_row_group(i) for i in range(K)] result = pa.concat_tables(row_groups) tm.assert_frame_equal(df, result.to_pandas()) @pytest.mark.pandas def test_read_single_row_group_with_column_subset(): N, K = 10000, 4 df = alltypes_sample(size=N) a_table = pa.Table.from_pandas(df) buf = io.BytesIO() 
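    # row_group_size below splits the data into K row groups, which can then
    # be read back selectively. A minimal sketch of the reader-side API,
    # where `pf` is a pq.ParquetFile:
    #     pf.num_row_groups                        # -> K
    #     pf.read_row_group(0, columns=['uint8'])  # -> pyarrow.Table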
_write_table(a_table, buf, row_group_size=N / K, compression='snappy', version='2.0') buf.seek(0) pf = pq.ParquetFile(buf) cols = list(df.columns[:2]) row_groups = [pf.read_row_group(i, columns=cols) for i in range(K)] result = pa.concat_tables(row_groups) tm.assert_frame_equal(df[cols], result.to_pandas()) # ARROW-4267: Selection of duplicate columns still leads to these columns # being read uniquely. row_groups = [pf.read_row_group(i, columns=cols + cols) for i in range(K)] result = pa.concat_tables(row_groups) tm.assert_frame_equal(df[cols], result.to_pandas()) @pytest.mark.pandas def test_read_multiple_row_groups(): N, K = 10000, 4 df = alltypes_sample(size=N) a_table = pa.Table.from_pandas(df) buf = io.BytesIO() _write_table(a_table, buf, row_group_size=N / K, compression='snappy', version='2.0') buf.seek(0) pf = pq.ParquetFile(buf) assert pf.num_row_groups == K result = pf.read_row_groups(range(K)) tm.assert_frame_equal(df, result.to_pandas()) @pytest.mark.pandas def test_read_multiple_row_groups_with_column_subset(): N, K = 10000, 4 df = alltypes_sample(size=N) a_table = pa.Table.from_pandas(df) buf = io.BytesIO() _write_table(a_table, buf, row_group_size=N / K, compression='snappy', version='2.0') buf.seek(0) pf = pq.ParquetFile(buf) cols = list(df.columns[:2]) result = pf.read_row_groups(range(K), columns=cols) tm.assert_frame_equal(df[cols], result.to_pandas()) # ARROW-4267: Selection of duplicate columns still leads to these columns # being read uniquely. result = pf.read_row_groups(range(K), columns=cols + cols) tm.assert_frame_equal(df[cols], result.to_pandas()) @pytest.mark.pandas def test_scan_contents(): N, K = 10000, 4 df = alltypes_sample(size=N) a_table = pa.Table.from_pandas(df) buf = io.BytesIO() _write_table(a_table, buf, row_group_size=N / K, compression='snappy', version='2.0') buf.seek(0) pf = pq.ParquetFile(buf) assert pf.scan_contents() == 10000 assert pf.scan_contents(df.columns[:4]) == 10000 @pytest.mark.pandas def test_parquet_piece_read(tempdir): df = _test_dataframe(1000) table = pa.Table.from_pandas(df) path = tempdir / 'parquet_piece_read.parquet' _write_table(table, path, version='2.0') piece1 = pq.ParquetDatasetPiece(path) result = piece1.read() assert result.equals(table) @pytest.mark.pandas def test_parquet_piece_open_and_get_metadata(tempdir): df = _test_dataframe(100) table = pa.Table.from_pandas(df) path = tempdir / 'parquet_piece_read.parquet' _write_table(table, path, version='2.0') piece = pq.ParquetDatasetPiece(path) table1 = piece.read() assert isinstance(table1, pa.Table) meta1 = piece.get_metadata() assert isinstance(meta1, pq.FileMetaData) assert table == table1 def test_parquet_piece_basics(): path = '/baz.parq' piece1 = pq.ParquetDatasetPiece(path) piece2 = pq.ParquetDatasetPiece(path, row_group=1) piece3 = pq.ParquetDatasetPiece( path, row_group=1, partition_keys=[('foo', 0), ('bar', 1)]) assert str(piece1) == path assert str(piece2) == '/baz.parq | row_group=1' assert str(piece3) == 'partition[foo=0, bar=1] /baz.parq | row_group=1' assert piece1 == piece1 assert piece2 == piece2 assert piece3 == piece3 assert piece1 != piece3 def test_partition_set_dictionary_type(): set1 = pq.PartitionSet('key1', [u('foo'), u('bar'), u('baz')]) set2 = pq.PartitionSet('key2', [2007, 2008, 2009]) assert isinstance(set1.dictionary, pa.StringArray) assert isinstance(set2.dictionary, pa.IntegerArray) set3 = pq.PartitionSet('key2', [datetime.datetime(2007, 1, 1)]) with pytest.raises(TypeError): set3.dictionary @pytest.mark.pandas def 
test_read_partitioned_directory(tempdir): fs = LocalFileSystem.get_instance() _partition_test_for_filesystem(fs, tempdir) @pytest.mark.pandas def test_create_parquet_dataset_multi_threaded(tempdir): fs = LocalFileSystem.get_instance() base_path = tempdir _partition_test_for_filesystem(fs, base_path) manifest = pq.ParquetManifest(base_path, filesystem=fs, metadata_nthreads=1) dataset = pq.ParquetDataset(base_path, filesystem=fs, metadata_nthreads=16) assert len(dataset.pieces) > 0 partitions = dataset.partitions assert len(partitions.partition_names) > 0 assert partitions.partition_names == manifest.partitions.partition_names assert len(partitions.levels) == len(manifest.partitions.levels) @pytest.mark.pandas def test_equivalency(tempdir): fs = LocalFileSystem.get_instance() base_path = tempdir integer_keys = [0, 1] string_keys = ['a', 'b', 'c'] boolean_keys = [True, False] partition_spec = [ ['integer', integer_keys], ['string', string_keys], ['boolean', boolean_keys] ] df = pd.DataFrame({ 'integer': np.array(integer_keys, dtype='i4').repeat(15), 'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2), 'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5), 3), }, columns=['integer', 'string', 'boolean']) _generate_partition_directories(fs, base_path, partition_spec, df) # Old filters syntax: # integer == 1 AND string != b AND boolean == True dataset = pq.ParquetDataset( base_path, filesystem=fs, filters=[('integer', '=', 1), ('string', '!=', 'b'), ('boolean', '==', True)] ) table = dataset.read() result_df = (table.to_pandas().reset_index(drop=True)) assert 0 not in result_df['integer'].values assert 'b' not in result_df['string'].values assert False not in result_df['boolean'].values # filters in disjunctive normal form: # (integer == 1 AND string != b AND boolean == True) OR # (integer == 2 AND boolean == False) # TODO(ARROW-3388): boolean columns are reconstructed as string filters = [ [ ('integer', '=', 1), ('string', '!=', 'b'), ('boolean', '==', 'True') ], [('integer', '=', 0), ('boolean', '==', 'False')] ] dataset = pq.ParquetDataset(base_path, filesystem=fs, filters=filters) table = dataset.read() result_df = table.to_pandas().reset_index(drop=True) # Check that all rows in the DF fulfill the filter # Pandas 0.23.x has problems with indexing constant memoryviews in # categoricals. Thus we need to make an explicity copy here with np.array. df_filter_1 = (np.array(result_df['integer']) == 1) \ & (np.array(result_df['string']) != 'b') \ & (np.array(result_df['boolean']) == 'True') df_filter_2 = (np.array(result_df['integer']) == 0) \ & (np.array(result_df['boolean']) == 'False') assert df_filter_1.sum() > 0 assert df_filter_2.sum() > 0 assert result_df.shape[0] == (df_filter_1.sum() + df_filter_2.sum()) # Check for \0 in predicate values. Until they are correctly implemented # in ARROW-3391, they would otherwise lead to weird results with the # current code. 
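    # For reference, the `filters` argument above is in disjunctive normal
    # form: the outer list ORs together inner lists, and each inner list ANDs
    # its (column, op, value) tuples. Illustrative example (column names are
    # placeholders):
    #     [[('x', '=', 1), ('y', '<', 5)], [('x', '=', 2)]]
    #     # means (x == 1 AND y < 5) OR (x == 2)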
with pytest.raises(NotImplementedError): filters = [[('string', '==', b'1\0a')]] pq.ParquetDataset(base_path, filesystem=fs, filters=filters) with pytest.raises(NotImplementedError): filters = [[('string', '==', u'1\0a')]] pq.ParquetDataset(base_path, filesystem=fs, filters=filters) @pytest.mark.pandas def test_cutoff_exclusive_integer(tempdir): fs = LocalFileSystem.get_instance() base_path = tempdir integer_keys = [0, 1, 2, 3, 4] partition_spec = [ ['integers', integer_keys], ] N = 5 df = pd.DataFrame({ 'index': np.arange(N), 'integers': np.array(integer_keys, dtype='i4'), }, columns=['index', 'integers']) _generate_partition_directories(fs, base_path, partition_spec, df) dataset = pq.ParquetDataset( base_path, filesystem=fs, filters=[ ('integers', '<', 4), ('integers', '>', 1), ] ) table = dataset.read() result_df = (table.to_pandas() .sort_values(by='index') .reset_index(drop=True)) result_list = [x for x in map(int, result_df['integers'].values)] assert result_list == [2, 3] @pytest.mark.pandas @pytest.mark.xfail( raises=TypeError, reason='Loss of type information in creation of categoricals.' ) def test_cutoff_exclusive_datetime(tempdir): fs = LocalFileSystem.get_instance() base_path = tempdir date_keys = [ datetime.date(2018, 4, 9), datetime.date(2018, 4, 10), datetime.date(2018, 4, 11), datetime.date(2018, 4, 12), datetime.date(2018, 4, 13) ] partition_spec = [ ['dates', date_keys] ] N = 5 df = pd.DataFrame({ 'index': np.arange(N), 'dates': np.array(date_keys, dtype='datetime64'), }, columns=['index', 'dates']) _generate_partition_directories(fs, base_path, partition_spec, df) dataset = pq.ParquetDataset( base_path, filesystem=fs, filters=[ ('dates', '<', "2018-04-12"), ('dates', '>', "2018-04-10") ] ) table = dataset.read() result_df = (table.to_pandas() .sort_values(by='index') .reset_index(drop=True)) expected = pd.Categorical( np.array([datetime.date(2018, 4, 11)], dtype='datetime64'), categories=np.array(date_keys, dtype='datetime64')) assert result_df['dates'].values == expected @pytest.mark.pandas def test_inclusive_integer(tempdir): fs = LocalFileSystem.get_instance() base_path = tempdir integer_keys = [0, 1, 2, 3, 4] partition_spec = [ ['integers', integer_keys], ] N = 5 df = pd.DataFrame({ 'index': np.arange(N), 'integers': np.array(integer_keys, dtype='i4'), }, columns=['index', 'integers']) _generate_partition_directories(fs, base_path, partition_spec, df) dataset = pq.ParquetDataset( base_path, filesystem=fs, filters=[ ('integers', '<=', 3), ('integers', '>=', 2), ] ) table = dataset.read() result_df = (table.to_pandas() .sort_values(by='index') .reset_index(drop=True)) result_list = [int(x) for x in map(int, result_df['integers'].values)] assert result_list == [2, 3] @pytest.mark.pandas def test_inclusive_set(tempdir): fs = LocalFileSystem.get_instance() base_path = tempdir integer_keys = [0, 1] string_keys = ['a', 'b', 'c'] boolean_keys = [True, False] partition_spec = [ ['integer', integer_keys], ['string', string_keys], ['boolean', boolean_keys] ] df = pd.DataFrame({ 'integer': np.array(integer_keys, dtype='i4').repeat(15), 'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2), 'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5), 3), }, columns=['integer', 'string', 'boolean']) _generate_partition_directories(fs, base_path, partition_spec, df) dataset = pq.ParquetDataset( base_path, filesystem=fs, filters=[('integer', 'in', {1}), ('string', 'in', {'a', 'b'}), ('boolean', 'in', {True})] ) table = dataset.read() result_df = 
(table.to_pandas().reset_index(drop=True)) assert 0 not in result_df['integer'].values assert 'c' not in result_df['string'].values assert False not in result_df['boolean'].values @pytest.mark.pandas def test_invalid_pred_op(tempdir): fs = LocalFileSystem.get_instance() base_path = tempdir integer_keys = [0, 1, 2, 3, 4] partition_spec = [ ['integers', integer_keys], ] N = 5 df = pd.DataFrame({ 'index': np.arange(N), 'integers': np.array(integer_keys, dtype='i4'), }, columns=['index', 'integers']) _generate_partition_directories(fs, base_path, partition_spec, df) with pytest.raises(ValueError): pq.ParquetDataset(base_path, filesystem=fs, filters=[ ('integers', '=<', 3), ]) with pytest.raises(ValueError): pq.ParquetDataset(base_path, filesystem=fs, filters=[ ('integers', 'in', set()), ]) with pytest.raises(ValueError): pq.ParquetDataset(base_path, filesystem=fs, filters=[ ('integers', '!=', {3}), ]) @pytest.mark.pandas def test_filters_read_table(tempdir): # test that filters keyword is passed through in read_table fs = LocalFileSystem.get_instance() base_path = tempdir integer_keys = [0, 1, 2, 3, 4] partition_spec = [ ['integers', integer_keys], ] N = 5 df = pd.DataFrame({ 'index': np.arange(N), 'integers': np.array(integer_keys, dtype='i4'), }, columns=['index', 'integers']) _generate_partition_directories(fs, base_path, partition_spec, df) table = pq.read_table( base_path, filesystem=fs, filters=[('integers', '<', 3)]) assert table.num_rows == 3 table = pq.read_table( base_path, filesystem=fs, filters=[[('integers', '<', 3)]]) assert table.num_rows == 3 table = pq.read_pandas( base_path, filters=[('integers', '<', 3)]) assert table.num_rows == 3 @pytest.yield_fixture def s3_example(): access_key = os.environ['PYARROW_TEST_S3_ACCESS_KEY'] secret_key = os.environ['PYARROW_TEST_S3_SECRET_KEY'] bucket_name = os.environ['PYARROW_TEST_S3_BUCKET'] import s3fs fs = s3fs.S3FileSystem(key=access_key, secret=secret_key) test_dir = guid() bucket_uri = 's3://{0}/{1}'.format(bucket_name, test_dir) fs.mkdir(bucket_uri) yield fs, bucket_uri fs.rm(bucket_uri, recursive=True) @pytest.mark.pandas @pytest.mark.s3 def test_read_partitioned_directory_s3fs(s3_example): from pyarrow.filesystem import S3FSWrapper fs, bucket_uri = s3_example wrapper = S3FSWrapper(fs) _partition_test_for_filesystem(wrapper, bucket_uri) # Check that we can auto-wrap dataset = pq.ParquetDataset(bucket_uri, filesystem=fs) dataset.read() def _partition_test_for_filesystem(fs, base_path): foo_keys = [0, 1] bar_keys = ['a', 'b', 'c'] partition_spec = [ ['foo', foo_keys], ['bar', bar_keys] ] N = 30 df = pd.DataFrame({ 'index': np.arange(N), 'foo': np.array(foo_keys, dtype='i4').repeat(15), 'bar': np.tile(np.tile(np.array(bar_keys, dtype=object), 5), 2), 'values': np.random.randn(N) }, columns=['index', 'foo', 'bar', 'values']) _generate_partition_directories(fs, base_path, partition_spec, df) dataset = pq.ParquetDataset(base_path, filesystem=fs) table = dataset.read() result_df = (table.to_pandas() .sort_values(by='index') .reset_index(drop=True)) expected_df = (df.sort_values(by='index') .reset_index(drop=True) .reindex(columns=result_df.columns)) expected_df['foo'] = pd.Categorical(df['foo'], categories=foo_keys) expected_df['bar'] = pd.Categorical(df['bar'], categories=bar_keys) assert (result_df.columns == ['index', 'values', 'foo', 'bar']).all() tm.assert_frame_equal(result_df, expected_df) def _generate_partition_directories(fs, base_dir, partition_spec, df): # partition_spec : list of lists, e.g. 
[['foo', [0, 1, 2], # ['bar', ['a', 'b', 'c']] # part_table : a pyarrow.Table to write to each partition DEPTH = len(partition_spec) def _visit_level(base_dir, level, part_keys): name, values = partition_spec[level] for value in values: this_part_keys = part_keys + [(name, value)] level_dir = base_dir / '{0}={1}'.format(name, value) fs.mkdir(level_dir) if level == DEPTH - 1: # Generate example data file_path = level_dir / guid() filtered_df = _filter_partition(df, this_part_keys) part_table = pa.Table.from_pandas(filtered_df) with fs.open(file_path, 'wb') as f: _write_table(part_table, f) assert fs.exists(file_path) (level_dir / '_SUCCESS').touch() else: _visit_level(level_dir, level + 1, this_part_keys) (level_dir / '_SUCCESS').touch() _visit_level(base_dir, 0, []) def _test_read_common_metadata_files(fs, base_path): N = 100 df = pd.DataFrame({ 'index': np.arange(N), 'values': np.random.randn(N) }, columns=['index', 'values']) base_path = str(base_path) data_path = os.path.join(base_path, 'data.parquet') table = pa.Table.from_pandas(df) with fs.open(data_path, 'wb') as f: _write_table(table, f) metadata_path = os.path.join(base_path, '_common_metadata') with fs.open(metadata_path, 'wb') as f: pq.write_metadata(table.schema, f) dataset = pq.ParquetDataset(base_path, filesystem=fs) assert dataset.common_metadata_path == str(metadata_path) with fs.open(data_path) as f: common_schema = pq.read_metadata(f).schema assert dataset.schema.equals(common_schema) # handle list of one directory dataset2 = pq.ParquetDataset([base_path], filesystem=fs) assert dataset2.schema.equals(dataset.schema) @pytest.mark.pandas def test_read_common_metadata_files(tempdir): fs = LocalFileSystem.get_instance() _test_read_common_metadata_files(fs, tempdir) @pytest.mark.pandas def test_read_metadata_files(tempdir): fs = LocalFileSystem.get_instance() N = 100 df = pd.DataFrame({ 'index': np.arange(N), 'values': np.random.randn(N) }, columns=['index', 'values']) data_path = tempdir / 'data.parquet' table = pa.Table.from_pandas(df) with fs.open(data_path, 'wb') as f: _write_table(table, f) metadata_path = tempdir / '_metadata' with fs.open(metadata_path, 'wb') as f: pq.write_metadata(table.schema, f) dataset = pq.ParquetDataset(tempdir, filesystem=fs) assert dataset.metadata_path == str(metadata_path) with fs.open(data_path) as f: metadata_schema = pq.read_metadata(f).schema assert dataset.schema.equals(metadata_schema) @pytest.mark.pandas def test_read_schema(tempdir): N = 100 df = pd.DataFrame({ 'index': np.arange(N), 'values': np.random.randn(N) }, columns=['index', 'values']) data_path = tempdir / 'test.parquet' table = pa.Table.from_pandas(df) _write_table(table, data_path) read1 = pq.read_schema(data_path) read2 = pq.read_schema(data_path, memory_map=True) assert table.schema.equals(read1, check_metadata=False) assert table.schema.equals(read2, check_metadata=False) assert table.schema.metadata[b'pandas'] == read1.metadata[b'pandas'] def _filter_partition(df, part_keys): predicate = np.ones(len(df), dtype=bool) to_drop = [] for name, value in part_keys: to_drop.append(name) # to avoid pandas warning if isinstance(value, (datetime.date, datetime.datetime)): value = pd.Timestamp(value) predicate &= df[name] == value return df[predicate].drop(to_drop, axis=1) @pytest.mark.pandas def test_read_multiple_files(tempdir): nfiles = 10 size = 5 dirpath = tempdir / guid() dirpath.mkdir() test_data = [] paths = [] for i in range(nfiles): df = _test_dataframe(size, seed=i) # Hack so that we don't have a dtype cast in v1 files 
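        # Parquet format version 1.0 has no unsigned 32-bit logical type, so
        # pyarrow stores uint32 as INT64; casting up front keeps the frames
        # comparable with what is read back.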
df['uint32'] = df['uint32'].astype(np.int64) path = dirpath / '{}.parquet'.format(i) table = pa.Table.from_pandas(df) _write_table(table, path) test_data.append(table) paths.append(path) # Write a _SUCCESS.crc file (dirpath / '_SUCCESS.crc').touch() def read_multiple_files(paths, columns=None, use_threads=True, **kwargs): dataset = pq.ParquetDataset(paths, **kwargs) return dataset.read(columns=columns, use_threads=use_threads) result = read_multiple_files(paths) expected = pa.concat_tables(test_data) assert result.equals(expected) # Read with provided metadata metadata = pq.read_metadata(paths[0]) result2 = read_multiple_files(paths, metadata=metadata) assert result2.equals(expected) result3 = pa.localfs.read_parquet(dirpath, schema=metadata.schema) assert result3.equals(expected) # Read column subset to_read = [0, 2, 6, result.num_columns - 1] col_names = [result.field(i).name for i in to_read] out = pa.localfs.read_parquet(dirpath, columns=col_names) expected = pa.Table.from_arrays([result.column(i) for i in to_read], names=col_names, metadata=result.schema.metadata) assert out.equals(expected) # Read with multiple threads pa.localfs.read_parquet(dirpath, use_threads=True) # Test failure modes with non-uniform metadata bad_apple = _test_dataframe(size, seed=i).iloc[:, :4] bad_apple_path = tempdir / '{}.parquet'.format(guid()) t = pa.Table.from_pandas(bad_apple) _write_table(t, bad_apple_path) bad_meta = pq.read_metadata(bad_apple_path) with pytest.raises(ValueError): read_multiple_files(paths + [bad_apple_path]) with pytest.raises(ValueError): read_multiple_files(paths, metadata=bad_meta) mixed_paths = [bad_apple_path, paths[0]] with pytest.raises(ValueError): read_multiple_files(mixed_paths, schema=bad_meta.schema) with pytest.raises(ValueError): read_multiple_files(mixed_paths) @pytest.mark.pandas def test_dataset_read_pandas(tempdir): nfiles = 5 size = 5 dirpath = tempdir / guid() dirpath.mkdir() test_data = [] frames = [] paths = [] for i in range(nfiles): df = _test_dataframe(size, seed=i) df.index = np.arange(i * size, (i + 1) * size) df.index.name = 'index' path = dirpath / '{}.parquet'.format(i) table = pa.Table.from_pandas(df) _write_table(table, path) test_data.append(table) frames.append(df) paths.append(path) dataset = pq.ParquetDataset(dirpath) columns = ['uint8', 'strings'] result = dataset.read_pandas(columns=columns).to_pandas() expected = pd.concat([x[columns] for x in frames]) tm.assert_frame_equal(result, expected) @pytest.mark.pandas def test_dataset_no_memory_map(tempdir): # ARROW-2627: Check that we can use ParquetDataset without memory-mapping dirpath = tempdir / guid() dirpath.mkdir() df = _test_dataframe(10, seed=0) path = dirpath / '{}.parquet'.format(0) table = pa.Table.from_pandas(df) _write_table(table, path, version='2.0') # TODO(wesm): Not sure how to easily check that memory mapping is _not_ # used. 
Mocking is not especially easy for pa.memory_map dataset = pq.ParquetDataset(dirpath, memory_map=False) assert dataset.pieces[0].read().equals(table) @pytest.mark.pandas @pytest.mark.parametrize('preserve_index', [True, False, None]) def test_dataset_read_pandas_common_metadata(tempdir, preserve_index): # ARROW-1103 nfiles = 5 size = 5 dirpath = tempdir / guid() dirpath.mkdir() test_data = [] frames = [] paths = [] for i in range(nfiles): df = _test_dataframe(size, seed=i) df.index = pd.Index(np.arange(i * size, (i + 1) * size), name='index') path = dirpath / '{}.parquet'.format(i) table = pa.Table.from_pandas(df, preserve_index=preserve_index) # Obliterate metadata table = table.replace_schema_metadata(None) assert table.schema.metadata is None _write_table(table, path) test_data.append(table) frames.append(df) paths.append(path) # Write _metadata common file table_for_metadata = pa.Table.from_pandas( df, preserve_index=preserve_index ) pq.write_metadata(table_for_metadata.schema, dirpath / '_metadata') dataset = pq.ParquetDataset(dirpath) columns = ['uint8', 'strings'] result = dataset.read_pandas(columns=columns).to_pandas() expected = pd.concat([x[columns] for x in frames]) expected.index.name = ( df.index.name if preserve_index is not False else None) tm.assert_frame_equal(result, expected) def _make_example_multifile_dataset(base_path, nfiles=10, file_nrows=5): test_data = [] paths = [] for i in range(nfiles): df = _test_dataframe(file_nrows, seed=i) path = base_path / '{}.parquet'.format(i) test_data.append(_write_table(df, path)) paths.append(path) return paths @pytest.mark.pandas def test_ignore_private_directories(tempdir): dirpath = tempdir / guid() dirpath.mkdir() paths = _make_example_multifile_dataset(dirpath, nfiles=10, file_nrows=5) # private directory (dirpath / '_impala_staging').mkdir() dataset = pq.ParquetDataset(dirpath) assert set(map(str, paths)) == set(x.path for x in dataset.pieces) @pytest.mark.pandas def test_ignore_hidden_files_dot(tempdir): dirpath = tempdir / guid() dirpath.mkdir() paths = _make_example_multifile_dataset(dirpath, nfiles=10, file_nrows=5) with (dirpath / '.DS_Store').open('wb') as f: f.write(b'gibberish') with (dirpath / '.private').open('wb') as f: f.write(b'gibberish') dataset = pq.ParquetDataset(dirpath) assert set(map(str, paths)) == set(x.path for x in dataset.pieces) @pytest.mark.pandas def test_ignore_hidden_files_underscore(tempdir): dirpath = tempdir / guid() dirpath.mkdir() paths = _make_example_multifile_dataset(dirpath, nfiles=10, file_nrows=5) with (dirpath / '_committed_123').open('wb') as f: f.write(b'abcd') with (dirpath / '_started_321').open('wb') as f: f.write(b'abcd') dataset = pq.ParquetDataset(dirpath) assert set(map(str, paths)) == set(x.path for x in dataset.pieces) @pytest.mark.pandas def test_multiindex_duplicate_values(tempdir): num_rows = 3 numbers = list(range(num_rows)) index = pd.MultiIndex.from_arrays( [['foo', 'foo', 'bar'], numbers], names=['foobar', 'some_numbers'], ) df = pd.DataFrame({'numbers': numbers}, index=index) table = pa.Table.from_pandas(df) filename = tempdir / 'dup_multi_index_levels.parquet' _write_table(table, filename) result_table = _read_table(filename) assert table.equals(result_table) result_df = result_table.to_pandas() tm.assert_frame_equal(result_df, df) @pytest.mark.pandas def test_write_error_deletes_incomplete_file(tempdir): # ARROW-1285 df = pd.DataFrame({'a': list('abc'), 'b': list(range(1, 4)), 'c': np.arange(3, 6).astype('u1'), 'd': np.arange(4.0, 7.0, dtype='float64'), 'e': 
[True, False, True], 'f': pd.Categorical(list('abc')), 'g': pd.date_range('20130101', periods=3), 'h': pd.date_range('20130101', periods=3, tz='US/Eastern'), 'i': pd.date_range('20130101', periods=3, freq='ns')}) pdf = pa.Table.from_pandas(df) filename = tempdir / 'tmp_file' try: _write_table(pdf, filename) except pa.ArrowException: pass assert not filename.exists() @pytest.mark.pandas def test_noncoerced_nanoseconds_written_without_exception(tempdir): # ARROW-1957: the Parquet version 2.0 writer preserves Arrow # nanosecond timestamps by default n = 9 df = pd.DataFrame({'x': range(n)}, index=pd.DatetimeIndex(start='2017-01-01', freq='1n', periods=n)) tb = pa.Table.from_pandas(df) filename = tempdir / 'written.parquet' try: pq.write_table(tb, filename, version='2.0') except Exception: pass assert filename.exists() recovered_table = pq.read_table(filename) assert tb.equals(recovered_table) # Loss of data thru coercion (without explicit override) still an error filename = tempdir / 'not_written.parquet' with pytest.raises(ValueError): pq.write_table(tb, filename, coerce_timestamps='ms', version='2.0') def test_read_non_existent_file(tempdir): path = 'non-existent-file.parquet' try: pq.read_table(path) except Exception as e: assert path in e.args[0] def test_read_table_doesnt_warn(datadir): with pytest.warns(None) as record: pq.read_table(datadir / 'v0.7.1.parquet') assert len(record) == 0 def _test_write_to_dataset_with_partitions(base_path, filesystem=None, schema=None, index_name=None): # ARROW-1400 output_df = pd.DataFrame({'group1': list('aaabbbbccc'), 'group2': list('eefeffgeee'), 'num': list(range(10)), 'nan': [pd.np.nan] * 10, 'date': np.arange('2017-01-01', '2017-01-11', dtype='datetime64[D]')}) cols = output_df.columns.tolist() partition_by = ['group1', 'group2'] output_table = pa.Table.from_pandas(output_df, schema=schema, safe=False, preserve_index=False) pq.write_to_dataset(output_table, base_path, partition_by, filesystem=filesystem) metadata_path = os.path.join(base_path, '_common_metadata') if filesystem is not None: with filesystem.open(metadata_path, 'wb') as f: pq.write_metadata(output_table.schema, f) else: pq.write_metadata(output_table.schema, metadata_path) # ARROW-2891: Ensure the output_schema is preserved when writing a # partitioned dataset dataset = pq.ParquetDataset(base_path, filesystem=filesystem, validate_schema=True) # ARROW-2209: Ensure the dataset schema also includes the partition columns dataset_cols = set(dataset.schema.to_arrow_schema().names) assert dataset_cols == set(output_table.schema.names) input_table = dataset.read() input_df = input_table.to_pandas() # Read data back in and compare with original DataFrame # Partitioned columns added to the end of the DataFrame when read input_df_cols = input_df.columns.tolist() assert partition_by == input_df_cols[-1 * len(partition_by):] # Partitioned columns become 'categorical' dtypes input_df = input_df[cols] for col in partition_by: output_df[col] = output_df[col].astype('category') assert output_df.equals(input_df) def _test_write_to_dataset_no_partitions(base_path, filesystem=None): # ARROW-1400 output_df = pd.DataFrame({'group1': list('aaabbbbccc'), 'group2': list('eefeffgeee'), 'num': list(range(10)), 'date': np.arange('2017-01-01', '2017-01-11', dtype='datetime64[D]')}) cols = output_df.columns.tolist() output_table = pa.Table.from_pandas(output_df) if filesystem is None: filesystem = LocalFileSystem.get_instance() # Without partitions, append files to root_path n = 5 for i in range(n): 
pq.write_to_dataset(output_table, base_path, filesystem=filesystem) output_files = [file for file in filesystem.ls(base_path) if file.endswith(".parquet")] assert len(output_files) == n # Deduplicated incoming DataFrame should match # original outgoing Dataframe input_table = pq.ParquetDataset(base_path, filesystem=filesystem).read() input_df = input_table.to_pandas() input_df = input_df.drop_duplicates() input_df = input_df[cols] assert output_df.equals(input_df) @pytest.mark.pandas def test_write_to_dataset_with_partitions(tempdir): _test_write_to_dataset_with_partitions(str(tempdir)) @pytest.mark.pandas def test_write_to_dataset_with_partitions_and_schema(tempdir): schema = pa.schema([pa.field('group1', type=pa.string()), pa.field('group2', type=pa.string()), pa.field('num', type=pa.int64()), pa.field('nan', type=pa.int32()), pa.field('date', type=pa.timestamp(unit='us'))]) _test_write_to_dataset_with_partitions(str(tempdir), schema=schema) @pytest.mark.pandas def test_write_to_dataset_with_partitions_and_index_name(tempdir): _test_write_to_dataset_with_partitions(str(tempdir), index_name='index_name') @pytest.mark.pandas def test_write_to_dataset_no_partitions(tempdir): _test_write_to_dataset_no_partitions(str(tempdir)) @pytest.mark.pandas def test_write_to_dataset_with_partitions_and_custom_filenames(tempdir): output_df = pd.DataFrame({'group1': list('aaabbbbccc'), 'group2': list('eefeffgeee'), 'num': list(range(10)), 'nan': [pd.np.nan] * 10, 'date': np.arange('2017-01-01', '2017-01-11', dtype='datetime64[D]')}) partition_by = ['group1', 'group2'] output_table = pa.Table.from_pandas(output_df) path = str(tempdir) def partition_filename_callback(keys): return "{0}-{1}.parquet".format(*keys) pq.write_to_dataset(output_table, path, partition_by, partition_filename_callback) dataset = pq.ParquetDataset(path) # ARROW-3538: Ensure partition filenames match the given pattern # defined in the local function partition_filename_callback expected_basenames = [ 'a-e.parquet', 'a-f.parquet', 'b-e.parquet', 'b-f.parquet', 'b-g.parquet', 'c-e.parquet' ] output_basenames = [os.path.basename(p.path) for p in dataset.pieces] assert sorted(expected_basenames) == sorted(output_basenames) @pytest.mark.large_memory def test_large_table_int32_overflow(): size = np.iinfo('int32').max + 1 arr = np.ones(size, dtype='uint8') parr = pa.array(arr, type=pa.uint8()) table = pa.Table.from_arrays([parr], names=['one']) f = io.BytesIO() _write_table(table, f) def _simple_table_roundtrip(table): stream = pa.BufferOutputStream() _write_table(table, stream) buf = stream.getvalue() return _read_table(buf) @pytest.mark.pandas @pytest.mark.large_memory def test_binary_array_overflow_to_chunked(): # ARROW-3762 # 2^31 + 1 bytes values = [b'x'] + [ b'x' * (1 << 20) ] * 2 * (1 << 10) df = pd.DataFrame({'byte_col': values}) tbl = pa.Table.from_pandas(df, preserve_index=False) read_tbl = _simple_table_roundtrip(tbl) col0_data = read_tbl[0] assert isinstance(col0_data, pa.ChunkedArray) # Split up into 2GB chunks assert col0_data.num_chunks == 2 assert tbl.equals(read_tbl) @pytest.mark.pandas @pytest.mark.large_memory def test_list_of_binary_large_cell(): # ARROW-4688 data = [] # TODO(wesm): handle chunked children # 2^31 - 1 bytes in a single cell # data.append([b'x' * (1 << 20)] * 2047 + [b'x' * ((1 << 20) - 1)]) # A little under 2GB in cell each containing approximately 10MB each data.extend([[b'x' * 1000000] * 10] * 214) arr = pa.array(data) table = pa.Table.from_arrays([arr], ['chunky_cells']) read_table = 
_simple_table_roundtrip(table) assert table.equals(read_table) @pytest.mark.pandas def test_index_column_name_duplicate(tempdir): data = { 'close': { pd.Timestamp('2017-06-30 01:31:00'): 154.99958999999998, pd.Timestamp('2017-06-30 01:32:00'): 154.99958999999998, }, 'time': { pd.Timestamp('2017-06-30 01:31:00'): pd.Timestamp( '2017-06-30 01:31:00' ), pd.Timestamp('2017-06-30 01:32:00'): pd.Timestamp( '2017-06-30 01:32:00' ), } } path = str(tempdir / 'data.parquet') dfx = pd.DataFrame(data).set_index('time', drop=False) tdfx = pa.Table.from_pandas(dfx) _write_table(tdfx, path) arrow_table = _read_table(path) result_df = arrow_table.to_pandas() tm.assert_frame_equal(result_df, dfx) @pytest.mark.pandas def test_parquet_nested_convenience(tempdir): # ARROW-1684 df = pd.DataFrame({ 'a': [[1, 2, 3], None, [4, 5], []], 'b': [[1.], None, None, [6., 7.]], }) path = str(tempdir / 'nested_convenience.parquet') table = pa.Table.from_pandas(df, preserve_index=False) _write_table(table, path) read = pq.read_table(path, columns=['a']) tm.assert_frame_equal(read.to_pandas(), df[['a']]) read = pq.read_table(path, columns=['a', 'b']) tm.assert_frame_equal(read.to_pandas(), df) @pytest.mark.pandas def test_backwards_compatible_index_naming(datadir): expected_string = b"""\ carat cut color clarity depth table price x y z 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39""" expected = pd.read_csv(io.BytesIO(expected_string), sep=r'\s{2,}', index_col=None, header=0, engine='python') table = _read_table(datadir / 'v0.7.1.parquet') result = table.to_pandas() tm.assert_frame_equal(result, expected) @pytest.mark.pandas def test_backwards_compatible_index_multi_level_named(datadir): expected_string = b"""\ carat cut color clarity depth table price x y z 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39""" expected = pd.read_csv( io.BytesIO(expected_string), sep=r'\s{2,}', index_col=['cut', 'color', 'clarity'], header=0, engine='python' ).sort_index() table = _read_table(datadir / 'v0.7.1.all-named-index.parquet') result = table.to_pandas() tm.assert_frame_equal(result, expected) @pytest.mark.pandas def test_backwards_compatible_index_multi_level_some_named(datadir): expected_string = b"""\ carat cut color clarity depth table price x y z 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49 0.23 Very Good H 
VS1 59.4 61.0 338 4.00 4.05 2.39""" expected = pd.read_csv( io.BytesIO(expected_string), sep=r'\s{2,}', index_col=['cut', 'color', 'clarity'], header=0, engine='python' ).sort_index() expected.index = expected.index.set_names(['cut', None, 'clarity']) table = _read_table(datadir / 'v0.7.1.some-named-index.parquet') result = table.to_pandas() tm.assert_frame_equal(result, expected) @pytest.mark.pandas def test_backwards_compatible_column_metadata_handling(datadir): expected = pd.DataFrame( {'a': [1, 2, 3], 'b': [.1, .2, .3], 'c': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')}) expected.index = pd.MultiIndex.from_arrays( [['a', 'b', 'c'], pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')], names=['index', None]) path = datadir / 'v0.7.1.column-metadata-handling.parquet' table = _read_table(path) result = table.to_pandas() tm.assert_frame_equal(result, expected) table = _read_table(path, columns=['a']) result = table.to_pandas() tm.assert_frame_equal(result, expected[['a']].reset_index(drop=True)) def _make_dataset_for_pickling(tempdir, N=100): path = tempdir / 'data.parquet' fs = LocalFileSystem.get_instance() df = pd.DataFrame({ 'index': np.arange(N), 'values': np.random.randn(N) }, columns=['index', 'values']) table = pa.Table.from_pandas(df) num_groups = 3 with pq.ParquetWriter(path, table.schema) as writer: for i in range(num_groups): writer.write_table(table) reader = pq.ParquetFile(path) assert reader.metadata.num_row_groups == num_groups metadata_path = tempdir / '_metadata' with fs.open(metadata_path, 'wb') as f: pq.write_metadata(table.schema, f) dataset = pq.ParquetDataset(tempdir, filesystem=fs) assert dataset.metadata_path == str(metadata_path) return dataset @pytest.mark.pandas @pytest.mark.parametrize('pickler', [ pytest.param(pickle, id='builtin'), pytest.param(pytest.importorskip('cloudpickle'), id='cloudpickle') ]) def test_pickle_dataset(tempdir, datadir, pickler): def is_pickleable(obj): return obj == pickler.loads(pickler.dumps(obj)) dataset = _make_dataset_for_pickling(tempdir) assert is_pickleable(dataset) assert is_pickleable(dataset.metadata) assert is_pickleable(dataset.metadata.schema) assert len(dataset.metadata.schema) for column in dataset.metadata.schema: assert is_pickleable(column) for piece in dataset.pieces: assert is_pickleable(piece) metadata = piece.get_metadata() assert metadata.num_row_groups for i in range(metadata.num_row_groups): assert is_pickleable(metadata.row_group(i)) @pytest.mark.pandas def test_decimal_roundtrip(tempdir): num_values = 10 columns = {} for precision in range(1, 39): for scale in range(0, precision + 1): with util.random_seed(0): random_decimal_values = [ util.randdecimal(precision, scale) for _ in range(num_values) ] column_name = ('dec_precision_{:d}_scale_{:d}' .format(precision, scale)) columns[column_name] = random_decimal_values expected = pd.DataFrame(columns) filename = tempdir / 'decimals.parquet' string_filename = str(filename) table = pa.Table.from_pandas(expected) _write_table(table, string_filename) result_table = _read_table(string_filename) result = result_table.to_pandas() tm.assert_frame_equal(result, expected) @pytest.mark.pandas @pytest.mark.xfail( raises=pa.ArrowException, reason='Parquet does not support negative scale' ) def test_decimal_roundtrip_negative_scale(tempdir): expected = pd.DataFrame({'decimal_num': [decimal.Decimal('1.23E4')]}) filename = tempdir / 'decimals.parquet' string_filename = str(filename) t = pa.Table.from_pandas(expected) _write_table(t, string_filename) 
result_table = _read_table(string_filename) result = result_table.to_pandas() tm.assert_frame_equal(result, expected) @pytest.mark.pandas def test_parquet_writer_context_obj(tempdir): df = _test_dataframe(100) df['unique_id'] = 0 arrow_table = pa.Table.from_pandas(df, preserve_index=False) out = pa.BufferOutputStream() with pq.ParquetWriter(out, arrow_table.schema, version='2.0') as writer: frames = [] for i in range(10): df['unique_id'] = i arrow_table = pa.Table.from_pandas(df, preserve_index=False) writer.write_table(arrow_table) frames.append(df.copy()) buf = out.getvalue() result = _read_table(pa.BufferReader(buf)) expected = pd.concat(frames, ignore_index=True) tm.assert_frame_equal(result.to_pandas(), expected) @pytest.mark.pandas def test_parquet_writer_context_obj_with_exception(tempdir): df = _test_dataframe(100) df['unique_id'] = 0 arrow_table = pa.Table.from_pandas(df, preserve_index=False) out = pa.BufferOutputStream() error_text = 'Artificial Error' try: with pq.ParquetWriter(out, arrow_table.schema, version='2.0') as writer: frames = [] for i in range(10): df['unique_id'] = i arrow_table = pa.Table.from_pandas(df, preserve_index=False) writer.write_table(arrow_table) frames.append(df.copy()) if i == 5: raise ValueError(error_text) except Exception as e: assert str(e) == error_text buf = out.getvalue() result = _read_table(pa.BufferReader(buf)) expected = pd.concat(frames, ignore_index=True) tm.assert_frame_equal(result.to_pandas(), expected) @pytest.mark.pandas def test_zlib_compression_bug(): # ARROW-3514: "zlib deflate failed, output buffer too small" table = pa.Table.from_arrays([pa.array(['abc', 'def'])], ['some_col']) f = io.BytesIO() pq.write_table(table, f, compression='gzip') f.seek(0) roundtrip = pq.read_table(f) tm.assert_frame_equal(roundtrip.to_pandas(), table.to_pandas()) @pytest.mark.pandas def test_merging_parquet_tables_with_different_pandas_metadata(tempdir): # ARROW-3728: Merging Parquet Files - Pandas Meta in Schema Mismatch schema = pa.schema([ pa.field('int', pa.int16()), pa.field('float', pa.float32()), pa.field('string', pa.string()) ]) df1 = pd.DataFrame({ 'int': np.arange(3, dtype=np.uint8), 'float': np.arange(3, dtype=np.float32), 'string': ['ABBA', 'EDDA', 'ACDC'] }) df2 = pd.DataFrame({ 'int': [4, 5], 'float': [1.1, None], 'string': [None, None] }) table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False) table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False) assert not table1.schema.equals(table2.schema) assert table1.schema.equals(table2.schema, check_metadata=False) writer = pq.ParquetWriter(tempdir / 'merged.parquet', schema=schema) writer.write_table(table1) writer.write_table(table2) def test_empty_row_groups(tempdir): # ARROW-3020 table = pa.Table.from_arrays([pa.array([], type='int32')], ['f0']) path = tempdir / 'empty_row_groups.parquet' num_groups = 3 with pq.ParquetWriter(path, table.schema) as writer: for i in range(num_groups): writer.write_table(table) reader = pq.ParquetFile(path) assert reader.metadata.num_row_groups == num_groups for i in range(num_groups): assert reader.read_row_group(i).equals(table) @pytest.mark.pandas def test_parquet_writer_with_caller_provided_filesystem(): out = pa.BufferOutputStream() class CustomFS(FileSystem): def __init__(self): self.path = None self.mode = None def open(self, path, mode='rb'): self.path = path self.mode = mode return out fs = CustomFS() fname = 'expected_fname.parquet' df = _test_dataframe(100) table = pa.Table.from_pandas(df, preserve_index=False) with 
pq.ParquetWriter(fname, table.schema, filesystem=fs, version='2.0') \ as writer: writer.write_table(table) assert fs.path == fname assert fs.mode == 'wb' assert out.closed buf = out.getvalue() table_read = _read_table(pa.BufferReader(buf)) df_read = table_read.to_pandas() tm.assert_frame_equal(df_read, df) # Should raise ValueError when filesystem is passed with file-like object with pytest.raises(ValueError) as err_info: pq.ParquetWriter(pa.BufferOutputStream(), table.schema, filesystem=fs) expected_msg = ("filesystem passed but where is file-like, so" " there is nothing to open with filesystem.") assert str(err_info) == expected_msg def test_writing_empty_lists(): # ARROW-2591: [Python] Segmentation fault issue in pq.write_table arr1 = pa.array([[], []], pa.list_(pa.int32())) table = pa.Table.from_arrays([arr1], ['list(int32)']) _check_roundtrip(table) def test_write_nested_zero_length_array_chunk_failure(): # Bug report in ARROW-3792 cols = OrderedDict( int32=pa.int32(), list_string=pa.list_(pa.string()) ) data = [[], [OrderedDict(int32=1, list_string=('G',)), ]] # This produces a table with a column like # <Column name='list_string' type=ListType(list<item: string>)> # [ # [], # [ # [ # "G" # ] # ] # ] # # Each column is a ChunkedArray with 2 elements my_arrays = [pa.array(batch, type=pa.struct(cols)).flatten() for batch in data] my_batches = [pa.RecordBatch.from_arrays(batch, pa.schema(cols)) for batch in my_arrays] tbl = pa.Table.from_batches(my_batches, pa.schema(cols)) _check_roundtrip(tbl) @pytest.mark.pandas def test_partitioned_dataset(tempdir): # ARROW-3208: Segmentation fault when reading a Parquet partitioned dataset # to a Parquet file path = tempdir / "ARROW-3208" df = pd.DataFrame({ 'one': [-1, 10, 2.5, 100, 1000, 1, 29.2], 'two': [-1, 10, 2, 100, 1000, 1, 11], 'three': [0, 0, 0, 0, 0, 0, 0] }) table = pa.Table.from_pandas(df) pq.write_to_dataset(table, root_path=str(path), partition_cols=['one', 'two']) table = pq.ParquetDataset(path).read() pq.write_table(table, path / "output.parquet") def test_read_column_invalid_index(): table = pa.table([pa.array([4, 5]), pa.array(["foo", "bar"])], names=['ints', 'strs']) bio = pa.BufferOutputStream() pq.write_table(table, bio) f = pq.ParquetFile(bio.getvalue()) assert f.reader.read_column(0).to_pylist() == [4, 5] assert f.reader.read_column(1).to_pylist() == ["foo", "bar"] for index in (-1, 2): with pytest.raises((ValueError, IndexError)): f.reader.read_column(index) def test_direct_read_dictionary(): # ARROW-3325 repeats = 10 nunique = 5 data = [ [tm.rands(10) for i in range(nunique)] * repeats, ] table = pa.table(data, names=['f0']) bio = pa.BufferOutputStream() pq.write_table(table, bio) contents = bio.getvalue() result = pq.read_table(pa.BufferReader(contents), read_dictionary=['f0']) # Compute dictionary-encoded subfield expected = pa.table([table[0].dictionary_encode()], names=['f0']) assert result.equals(expected) def test_dataset_read_dictionary(tempdir): path = tempdir / "ARROW-3325-dataset" t1 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0']) t2 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0']) pq.write_to_dataset(t1, root_path=str(path)) pq.write_to_dataset(t2, root_path=str(path)) result = pq.ParquetDataset(path, read_dictionary=['f0']).read() # The order of the chunks is non-deterministic ex_chunks = [t1[0].chunk(0).dictionary_encode(), t2[0].chunk(0).dictionary_encode()] assert result[0].num_chunks == 2 c0, c1 = result[0].chunk(0), result[0].chunk(1) if c0.equals(ex_chunks[0]): assert 
c1.equals(ex_chunks[1]) else: assert c0.equals(ex_chunks[1]) assert c1.equals(ex_chunks[0]) def test_direct_read_dictionary_subfield(): repeats = 10 nunique = 5 data = [ [[tm.rands(10)] for i in range(nunique)] * repeats, ] table = pa.table(data, names=['f0']) bio = pa.BufferOutputStream() pq.write_table(table, bio) contents = bio.getvalue() result = pq.read_table(pa.BufferReader(contents), read_dictionary=['f0.list.item']) arr = pa.array(data[0]) values_as_dict = arr.values.dictionary_encode() inner_indices = values_as_dict.indices.cast('int32') new_values = pa.DictionaryArray.from_arrays(inner_indices, values_as_dict.dictionary) offsets = pa.array(range(51), type='int32') expected_arr = pa.ListArray.from_arrays(offsets, new_values) expected = pa.table([expected_arr], names=['f0']) assert result.equals(expected) assert result[0].num_chunks == 1 @pytest.mark.pandas def test_dataset_metadata(tempdir): path = tempdir / "ARROW-1983-dataset" # create and write a test dataset df = pd.DataFrame({ 'one': [1, 2, 3], 'two': [-1, -2, -3], 'three': [[1, 2], [2, 3], [3, 4]], }) table = pa.Table.from_pandas(df) metadata_list = [] pq.write_to_dataset(table, root_path=str(path), partition_cols=['one', 'two'], metadata_collector=metadata_list) # open the dataset and collect metadata from pieces: dataset = pq.ParquetDataset(path) metadata_list2 = [p.get_metadata() for p in dataset.pieces] # compare metadata list content: assert len(metadata_list) == len(metadata_list2) for md, md2 in zip(metadata_list, metadata_list2): d = md.to_dict() d2 = md2.to_dict() # serialized_size is initialized in the reader: assert d.pop('serialized_size') == 0 assert d2.pop('serialized_size') > 0 assert d == d2 def test_parquet_file_too_small(tempdir): path = str(tempdir / "test.parquet") with pytest.raises(pa.ArrowIOError, match='size is 0 bytes'): with open(path, 'wb') as f: pass pq.read_table(path) with pytest.raises(pa.ArrowIOError, match='size is 4 bytes'): with open(path, 'wb') as f: f.write(b'ffff') pq.read_table(path) @pytest.mark.pandas def test_categorical_index_survives_roundtrip(): # ARROW-3652, addressed by ARROW-3246 df = pd.DataFrame([['a', 'b'], ['c', 'd']], columns=['c1', 'c2']) df['c1'] = df['c1'].astype('category') df = df.set_index(['c1']) table = pa.Table.from_pandas(df) bos = pa.BufferOutputStream() pq.write_table(table, bos) ref_df = pq.read_pandas(bos.getvalue()).to_pandas() assert isinstance(ref_df.index, pd.CategoricalIndex) assert ref_df.index.equals(df.index) @pytest.mark.pandas def test_categorical_order_survives_roundtrip(): # ARROW-6302 df = pd.DataFrame({"a": pd.Categorical( ["a", "b", "c", "a"], categories=["b", "c", "d"], ordered=True)}) table = pa.Table.from_pandas(df) bos = pa.BufferOutputStream() pq.write_table(table, bos) contents = bos.getvalue() result = pq.read_pandas(contents).to_pandas() tm.assert_frame_equal(result, df) def test_dictionary_array_automatically_read(): # ARROW-3246 # Make a large dictionary, a little over 4MB of data dict_length = 4000 dict_values = pa.array([('x' * 1000 + '_{}'.format(i)) for i in range(dict_length)]) num_chunks = 10 chunk_size = 100 chunks = [] for i in range(num_chunks): indices = np.random.randint(0, dict_length, size=chunk_size).astype(np.int32) chunks.append(pa.DictionaryArray.from_arrays(pa.array(indices), dict_values)) table = pa.table([pa.chunked_array(chunks)], names=['f0']) bio = pa.BufferOutputStream() pq.write_table(table, bio) contents = bio.getvalue() result = pq.read_table(pa.BufferReader(contents)) assert result.equals(table) # The only 
key in the metadata was the Arrow schema key assert result.schema.metadata is None @pytest.mark.pandas def test_pandas_categorical_na_type_row_groups(): # ARROW-5085 df = pd.DataFrame({"col": [None] * 100, "int": [1.0] * 100}) df_category = df.astype({"col": "category", "int": "category"}) table = pa.Table.from_pandas(df) table_cat = pa.Table.from_pandas(df_category) buf = pa.BufferOutputStream() # it works pq.write_table(table_cat, buf, version="2.0", chunk_size=10) result = pq.read_table(buf.getvalue()) # Result is non-categorical assert result[0].equals(table[0]) assert result[1].equals(table[1]) @pytest.mark.pandas def test_pandas_categorical_roundtrip(): # ARROW-5480, this was enabled by ARROW-3246 # Have one of the categories unobserved and include a null (-1) codes = np.array([2, 0, 0, 2, 0, -1, 2], dtype='int32') categories = ['foo', 'bar', 'baz'] df = pd.DataFrame({'x': pd.Categorical.from_codes( codes, categories=categories)}) buf = pa.BufferOutputStream() pq.write_table(pa.table(df), buf) result = pq.read_table(buf.getvalue()).to_pandas() assert result.x.dtype == 'category' assert (result.x.cat.categories == categories).all() tm.assert_frame_equal(result, df) @pytest.mark.pandas def test_multi_dataset_metadata(tempdir): filenames = ["ARROW-1983-dataset.0", "ARROW-1983-dataset.1"] metapath = str(tempdir / "_metadata") # create a test dataset df = pd.DataFrame({ 'one': [1, 2, 3], 'two': [-1, -2, -3], 'three': [[1, 2], [2, 3], [3, 4]], }) table = pa.Table.from_pandas(df) # write dataset twice and collect/merge metadata _meta = None for filename in filenames: meta = [] pq.write_table(table, str(tempdir / filename), metadata_collector=meta) meta[0].set_file_path(filename) if _meta is None: _meta = meta[0] else: _meta.append_row_groups(meta[0]) # Write merged metadata-only file with open(metapath, "wb") as f: _meta.write_metadata_file(f) # Read back the metadata meta = pq.read_metadata(metapath) md = meta.to_dict() _md = _meta.to_dict() for key in _md: if key != 'serialized_size': assert _md[key] == md[key] assert _md['num_columns'] == 3 assert _md['num_rows'] == 6 assert _md['num_row_groups'] == 2 assert _md['serialized_size'] == 0 assert md['serialized_size'] > 0 @pytest.mark.pandas def test_filter_before_validate_schema(tempdir): # ARROW-4076 apply filter before schema validation # to avoid checking unneeded schemas # create partitioned dataset with mismatching schemas which would # otherwise raise if first validation all schemas dir1 = tempdir / 'A=0' dir1.mkdir() table1 = pa.Table.from_pandas(pd.DataFrame({'B': [1, 2, 3]})) pq.write_table(table1, dir1 / 'data.parquet') dir2 = tempdir / 'A=1' dir2.mkdir() table2 = pa.Table.from_pandas(pd.DataFrame({'B': ['a', 'b', 'c']})) pq.write_table(table2, dir2 / 'data.parquet') # read single file using filter table = pq.read_table(tempdir, filters=[[('A', '==', 0)]]) assert table.column('B').equals(pa.chunked_array([[1, 2, 3]]))
[]
[]
[ "PYARROW_TEST_S3_BUCKET", "PYARROW_TEST_S3_SECRET_KEY", "PYARROW_TEST_S3_ACCESS_KEY" ]
[]
["PYARROW_TEST_S3_BUCKET", "PYARROW_TEST_S3_SECRET_KEY", "PYARROW_TEST_S3_ACCESS_KEY"]
python
3
0
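The three PYARROW_TEST_S3_* variables recorded in the metadata above are constant-string environment lookups (constargcount 3, variableargcount 0), but the portion of test_parquet.py that actually performs those lookups falls in the truncated part of the file. The snippet below is therefore only an editor's minimal sketch of the usual pattern these columns index: reading fixed keys from os.environ and skipping the S3 round-trip tests when any of them is unset. The helper name s3_credentials_from_env and the skip message are illustrative assumptions, not code taken from the archived file.

import os

import pytest

# Illustrative sketch only -- not part of the archived test_parquet.py.
# It shows the kind of constant-key environment access that the
# constarg/constargjson columns above record for this record.
_S3_ENV_KEYS = (
    'PYARROW_TEST_S3_BUCKET',
    'PYARROW_TEST_S3_SECRET_KEY',
    'PYARROW_TEST_S3_ACCESS_KEY',
)


def s3_credentials_from_env():
    # Hypothetical helper: each lookup uses a constant string key, which is
    # what makes these accesses count toward constargcount (3 here) rather
    # than variableargcount.
    missing = [key for key in _S3_ENV_KEYS if key not in os.environ]
    if missing:
        pytest.skip('S3 test settings not configured: {}'.format(missing))
    return {key: os.environ[key] for key in _S3_ENV_KEYS}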
pkg/config/config.go
// Unless explicitly stated otherwise all files in this repository are licensed // under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. package config import ( "bytes" "encoding/json" "errors" "fmt" "net" "net/url" "os" "path/filepath" "regexp" "strings" "time" yaml "gopkg.in/yaml.v2" "github.com/DataDog/datadog-agent/pkg/autodiscovery/common/types" "github.com/DataDog/datadog-agent/pkg/collector/check/defaults" "github.com/DataDog/datadog-agent/pkg/util/hostname/validate" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/secrets" "github.com/DataDog/datadog-agent/pkg/version" ) const ( // DefaultSite is the default site the Agent sends data to. DefaultSite = "datadoghq.com" infraURLPrefix = "https://app." // DefaultNumWorkers default number of workers for our check runner DefaultNumWorkers = 4 // MaxNumWorkers maximum number of workers for our check runner MaxNumWorkers = 25 // DefaultAPIKeyValidationInterval is the default interval of api key validation checks DefaultAPIKeyValidationInterval = 60 // DefaultForwarderRecoveryInterval is the default recovery interval, // also used if the user-provided value is invalid. DefaultForwarderRecoveryInterval = 2 megaByte = 1024 * 1024 // DefaultBatchWait is the default HTTP batch wait in second for logs DefaultBatchWait = 5 // DefaultBatchMaxConcurrentSend is the default HTTP batch max concurrent send for logs DefaultBatchMaxConcurrentSend = 0 // DefaultBatchMaxSize is the default HTTP batch max size (maximum number of events in a single batch) for logs DefaultBatchMaxSize = 100 // DefaultBatchMaxContentSize is the default HTTP batch max content size (before compression) for logs // It is also the maximum possible size of a single event. Events exceeding this limit are dropped. DefaultBatchMaxContentSize = 1000000 // DefaultAuditorTTL is the default logs auditor TTL in hours DefaultAuditorTTL = 23 // ClusterIDCacheKey is the key name for the orchestrator cluster id in the agent in-mem cache ClusterIDCacheKey = "orchestratorClusterID" // DefaultRuntimePoliciesDir is the default policies directory used by the runtime security module DefaultRuntimePoliciesDir = "/etc/datadog-agent/runtime-security.d" // DefaultLogsSenderBackoffFactor is the default logs sender backoff randomness factor DefaultLogsSenderBackoffFactor = 2.0 // DefaultLogsSenderBackoffBase is the default logs sender base backoff time, seconds DefaultLogsSenderBackoffBase = 1.0 // DefaultLogsSenderBackoffMax is the default logs sender maximum backoff time, seconds DefaultLogsSenderBackoffMax = 120.0 // DefaultLogsSenderBackoffRecoveryInterval is the default logs sender backoff recovery interval DefaultLogsSenderBackoffRecoveryInterval = 2 ) // Datadog is the global configuration object var ( Datadog Config proxies *Proxy overrideFuncs = make([]func(Config), 0) ) // Variables to initialize at build time var ( DefaultPython string // ForceDefaultPython has its value set to true at compile time if we should ignore // the Python version set in the configuration and use `DefaultPython` instead. // We use this to force Python 3 in the Agent 7 as it's the only one available. 
ForceDefaultPython string ) // Variables to initialize at start time var ( // StartTime is the agent startup time StartTime = time.Now() ) // MetadataProviders helps unmarshalling `metadata_providers` config param type MetadataProviders struct { Name string `mapstructure:"name"` Interval time.Duration `mapstructure:"interval"` } // ConfigurationProviders helps unmarshalling `config_providers` config param type ConfigurationProviders struct { Name string `mapstructure:"name"` Polling bool `mapstructure:"polling"` PollInterval string `mapstructure:"poll_interval"` TemplateURL string `mapstructure:"template_url"` TemplateDir string `mapstructure:"template_dir"` Username string `mapstructure:"username"` Password string `mapstructure:"password"` CAFile string `mapstructure:"ca_file"` CAPath string `mapstructure:"ca_path"` CertFile string `mapstructure:"cert_file"` KeyFile string `mapstructure:"key_file"` Token string `mapstructure:"token"` GraceTimeSeconds int `mapstructure:"grace_time_seconds"` } // Listeners helps unmarshalling `listeners` config param type Listeners struct { Name string `mapstructure:"name"` } // Proxy represents the configuration for proxies in the agent type Proxy struct { HTTP string `mapstructure:"http"` HTTPS string `mapstructure:"https"` NoProxy []string `mapstructure:"no_proxy"` } // MappingProfile represent a group of mappings type MappingProfile struct { Name string `mapstructure:"name" json:"name"` Prefix string `mapstructure:"prefix" json:"prefix"` Mappings []MetricMapping `mapstructure:"mappings" json:"mappings"` } // MetricMapping represent one mapping rule type MetricMapping struct { Match string `mapstructure:"match" json:"match"` MatchType string `mapstructure:"match_type" json:"match_type"` Name string `mapstructure:"name" json:"name"` Tags map[string]string `mapstructure:"tags" json:"tags"` } // Warnings represent the warnings in the config type Warnings struct { TraceMallocEnabledWithPy2 bool } func init() { osinit() // Configure Datadog global configuration Datadog = NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) // Configuration defaults InitConfig(Datadog) } // InitConfig initializes the config defaults on a config func InitConfig(config Config) { // Agent // Don't set a default on 'site' to allow detecting with viper whether it's set in config config.BindEnv("site") //nolint:errcheck config.BindEnv("dd_url") //nolint:errcheck config.BindEnvAndSetDefault("app_key", "") config.BindEnvAndSetDefault("cloud_provider_metadata", []string{"aws", "gcp", "azure", "alibaba"}) config.SetDefault("proxy", nil) config.BindEnvAndSetDefault("skip_ssl_validation", false) config.BindEnvAndSetDefault("hostname", "") config.BindEnvAndSetDefault("tags", []string{}) config.BindEnvAndSetDefault("extra_tags", []string{}) config.BindEnv("env") //nolint:errcheck config.BindEnvAndSetDefault("tag_value_split_separator", map[string]string{}) config.BindEnvAndSetDefault("conf_path", ".") config.BindEnvAndSetDefault("confd_path", defaultConfdPath) config.BindEnvAndSetDefault("additional_checksd", defaultAdditionalChecksPath) config.BindEnvAndSetDefault("jmx_log_file", "") config.BindEnvAndSetDefault("log_payloads", false) config.BindEnvAndSetDefault("log_file", "") config.BindEnvAndSetDefault("log_file_max_size", "10Mb") config.BindEnvAndSetDefault("log_file_max_rolls", 1) config.BindEnvAndSetDefault("log_level", "info") config.BindEnvAndSetDefault("log_to_syslog", false) config.BindEnvAndSetDefault("log_to_console", true) config.BindEnvAndSetDefault("log_format_rfc3339", 
false) config.BindEnvAndSetDefault("log_all_goroutines_when_unhealthy", false) config.BindEnvAndSetDefault("logging_frequency", int64(500)) config.BindEnvAndSetDefault("disable_file_logging", false) config.BindEnvAndSetDefault("syslog_uri", "") config.BindEnvAndSetDefault("syslog_rfc", false) config.BindEnvAndSetDefault("syslog_pem", "") config.BindEnvAndSetDefault("syslog_key", "") config.BindEnvAndSetDefault("syslog_tls_verify", true) config.BindEnvAndSetDefault("cmd_host", "localhost") config.BindEnvAndSetDefault("cmd_port", 5001) config.BindEnvAndSetDefault("cluster_agent.cmd_port", 5005) config.BindEnvAndSetDefault("default_integration_http_timeout", 9) config.BindEnvAndSetDefault("enable_metadata_collection", true) config.BindEnvAndSetDefault("enable_gohai", true) config.BindEnvAndSetDefault("check_runners", int64(4)) config.BindEnvAndSetDefault("auth_token_file_path", "") _ = config.BindEnv("bind_host") config.BindEnvAndSetDefault("ipc_address", "localhost") config.BindEnvAndSetDefault("health_port", int64(0)) config.BindEnvAndSetDefault("disable_py3_validation", false) config.BindEnvAndSetDefault("python_version", DefaultPython) config.BindEnvAndSetDefault("allow_arbitrary_tags", false) config.BindEnvAndSetDefault("use_proxy_for_cloud_metadata", false) // The number of commits before expiring a context. The value is 2 to handle // the case where a check miss to send a metric. config.BindEnvAndSetDefault("check_sampler_bucket_commits_count_expiry", 2) config.BindEnvAndSetDefault("host_aliases", []string{}) // overridden in IoT Agent main config.BindEnvAndSetDefault("iot_host", false) // overridden in Heroku buildpack config.BindEnvAndSetDefault("heroku_dyno", false) // Debugging + C-land crash feature flags config.BindEnvAndSetDefault("c_stacktrace_collection", false) config.BindEnvAndSetDefault("c_core_dump", false) config.BindEnvAndSetDefault("go_core_dump", false) config.BindEnvAndSetDefault("memtrack_enabled", true) config.BindEnvAndSetDefault("tracemalloc_debug", false) config.BindEnvAndSetDefault("tracemalloc_include", "") config.BindEnvAndSetDefault("tracemalloc_exclude", "") config.BindEnvAndSetDefault("tracemalloc_whitelist", "") // deprecated config.BindEnvAndSetDefault("tracemalloc_blacklist", "") // deprecated config.BindEnvAndSetDefault("run_path", defaultRunPath) config.BindEnvAndSetDefault("no_proxy_nonexact_match", false) // Python 3 linter timeout, in seconds // NOTE: linter is notoriously slow, in the absence of a better solution we // can only increase this timeout value. Linting operation is async. config.BindEnvAndSetDefault("python3_linter_timeout", 120) // Whether to honour the value of PYTHONPATH, if set, on Windows. On other OSes we always do. config.BindEnvAndSetDefault("windows_use_pythonpath", false) // When the Python full interpreter path cannot be deduced via heuristics, the agent // is expected to prevent rtloader from initializing. When set to true, this override // allows us to proceed but with some capabilities unavailable (e.g. 
`multiprocessing` // library support will not work reliably in those environments) config.BindEnvAndSetDefault("allow_python_path_heuristics_failure", false) // if/when the default is changed to true, make the default platform // dependent; default should remain false on Windows to maintain backward // compatibility with Agent5 behavior/win config.BindEnvAndSetDefault("hostname_fqdn", false) // When enabled, hostname defined in the configuration (datadog.yaml) and starting with `ip-` or `domu` on EC2 is used as // canonical hostname, otherwise the instance-id is used as canonical hostname. config.BindEnvAndSetDefault("hostname_force_config_as_canonical", false) config.BindEnvAndSetDefault("cluster_name", "") config.BindEnvAndSetDefault("disable_cluster_name_tag_key", false) // secrets backend config.BindEnvAndSetDefault("secret_backend_command", "") config.BindEnvAndSetDefault("secret_backend_arguments", []string{}) config.BindEnvAndSetDefault("secret_backend_output_max_size", secrets.SecretBackendOutputMaxSize) config.BindEnvAndSetDefault("secret_backend_timeout", 30) config.BindEnvAndSetDefault("secret_backend_command_allow_group_exec_perm", false) config.BindEnvAndSetDefault("secret_backend_skip_checks", false) // Use to output logs in JSON format config.BindEnvAndSetDefault("log_format_json", false) // IPC API server timeout config.BindEnvAndSetDefault("server_timeout", 15) // Use to force client side TLS version to 1.2 config.BindEnvAndSetDefault("force_tls_12", false) // Defaults to safe YAML methods in base and custom checks. config.BindEnvAndSetDefault("disable_unsafe_yaml", true) // Yaml keys which values are stripped from flare config.BindEnvAndSetDefault("flare_stripped_keys", []string{}) // Agent GUI access port config.BindEnvAndSetDefault("GUI_port", defaultGuiPort) if IsContainerized() { // In serverless-containerized environments (e.g Fargate) // it's impossible to mount host volumes. // Make sure the host paths exist before setting-up the default values. // Fallback to the container paths if host paths aren't mounted. 
if pathExists("/host/proc") { config.SetDefault("procfs_path", "/host/proc") config.SetDefault("container_proc_root", "/host/proc") // Used by some librairies (like gopsutil) if v := os.Getenv("HOST_PROC"); v == "" { os.Setenv("HOST_PROC", "/host/proc") } } else { config.SetDefault("procfs_path", "/proc") config.SetDefault("container_proc_root", "/proc") } if pathExists("/host/sys/fs/cgroup/") { config.SetDefault("container_cgroup_root", "/host/sys/fs/cgroup/") } else { config.SetDefault("container_cgroup_root", "/sys/fs/cgroup/") } } else { config.SetDefault("container_proc_root", "/proc") // for amazon linux the cgroup directory on host is /cgroup/ // we pick memory.stat to make sure it exists and not empty if _, err := os.Stat("/cgroup/memory/memory.stat"); !os.IsNotExist(err) { config.SetDefault("container_cgroup_root", "/cgroup/") } else { config.SetDefault("container_cgroup_root", "/sys/fs/cgroup/") } } config.BindEnv("procfs_path") //nolint:errcheck config.BindEnv("container_proc_root") //nolint:errcheck config.BindEnv("container_cgroup_root") //nolint:errcheck config.BindEnvAndSetDefault("proc_root", "/proc") config.BindEnvAndSetDefault("histogram_aggregates", []string{"max", "median", "avg", "count"}) config.BindEnvAndSetDefault("histogram_percentiles", []string{"0.95"}) config.BindEnvAndSetDefault("aggregator_stop_timeout", 2) config.BindEnvAndSetDefault("aggregator_buffer_size", 100) config.BindEnvAndSetDefault("basic_telemetry_add_container_tags", false) // configure adding the agent container tags to the basic agent telemetry metrics (e.g. `datadog.agent.running`) // Serializer config.BindEnvAndSetDefault("enable_stream_payload_serialization", true) config.BindEnvAndSetDefault("enable_service_checks_stream_payload_serialization", true) config.BindEnvAndSetDefault("enable_events_stream_payload_serialization", true) config.BindEnvAndSetDefault("enable_sketch_stream_payload_serialization", true) config.BindEnvAndSetDefault("enable_json_stream_shared_compressor_buffers", true) // Warning: do not change the two following values. Your payloads will get dropped by Datadog's intake. 
config.BindEnvAndSetDefault("serializer_max_payload_size", 2*megaByte+megaByte/2) config.BindEnvAndSetDefault("serializer_max_uncompressed_payload_size", 4*megaByte) config.BindEnvAndSetDefault("use_v2_api.series", false) config.BindEnvAndSetDefault("use_v2_api.events", false) config.BindEnvAndSetDefault("use_v2_api.service_checks", false) // Serializer: allow user to blacklist any kind of payload to be sent config.BindEnvAndSetDefault("enable_payloads.events", true) config.BindEnvAndSetDefault("enable_payloads.series", true) config.BindEnvAndSetDefault("enable_payloads.service_checks", true) config.BindEnvAndSetDefault("enable_payloads.sketches", true) config.BindEnvAndSetDefault("enable_payloads.json_to_v1_intake", true) // Forwarder config.BindEnvAndSetDefault("additional_endpoints", map[string][]string{}) config.BindEnvAndSetDefault("forwarder_timeout", 20) _ = config.BindEnv("forwarder_retry_queue_max_size") // Deprecated in favor of `forwarder_retry_queue_payloads_max_size` _ = config.BindEnv("forwarder_retry_queue_payloads_max_size") // Default value is defined inside `NewOptions` in pkg/forwarder/forwarder.go config.BindEnvAndSetDefault("forwarder_connection_reset_interval", 0) // in seconds, 0 means disabled config.BindEnvAndSetDefault("forwarder_apikey_validation_interval", DefaultAPIKeyValidationInterval) // in minutes config.BindEnvAndSetDefault("forwarder_num_workers", 1) config.BindEnvAndSetDefault("forwarder_stop_timeout", 2) // Forwarder retry settings config.BindEnvAndSetDefault("forwarder_backoff_factor", 2) config.BindEnvAndSetDefault("forwarder_backoff_base", 2) config.BindEnvAndSetDefault("forwarder_backoff_max", 64) config.BindEnvAndSetDefault("forwarder_recovery_interval", DefaultForwarderRecoveryInterval) config.BindEnvAndSetDefault("forwarder_recovery_reset", false) // Forwarder storage on disk config.BindEnvAndSetDefault("forwarder_storage_path", "") config.BindEnvAndSetDefault("forwarder_outdated_file_in_days", 10) config.BindEnvAndSetDefault("forwarder_flush_to_disk_mem_ratio", 0.5) config.BindEnvAndSetDefault("forwarder_storage_max_size_in_bytes", 0) // 0 means disabled. This is a BETA feature. config.BindEnvAndSetDefault("forwarder_storage_max_disk_ratio", 0.95) // Do not store transactions on disk when the disk usage exceeds 95% of the disk capacity. // Forwarder channels buffer size config.BindEnvAndSetDefault("forwarder_high_prio_buffer_size", 1000) config.BindEnvAndSetDefault("forwarder_low_prio_buffer_size", 1000) config.BindEnvAndSetDefault("forwarder_requeue_buffer_size", 1000) // Dogstatsd config.BindEnvAndSetDefault("use_dogstatsd", true) config.BindEnvAndSetDefault("dogstatsd_port", 8125) // Notice: 0 means UDP port closed config.BindEnvAndSetDefault("dogstatsd_pipe_name", "") // experimental and not officially supported for now. // Experimental and not officially supported for now. // Options are: udp, uds, named_pipe config.BindEnvAndSetDefault("dogstatsd_eol_required", []string{}) // The following options allow to configure how the dogstatsd intake buffers and queues incoming datagrams. // When a datagram is received it is first added to a datagrams buffer. This buffer fills up until // we reach `dogstatsd_packet_buffer_size` datagrams or after `dogstatsd_packet_buffer_flush_timeout` ms. // After this happens we flush this buffer of datagrams to a queue for processing. The size of this queue // is `dogstatsd_queue_size`. 
config.BindEnvAndSetDefault("dogstatsd_buffer_size", 1024*8) config.BindEnvAndSetDefault("dogstatsd_packet_buffer_size", 32) config.BindEnvAndSetDefault("dogstatsd_packet_buffer_flush_timeout", 100*time.Millisecond) config.BindEnvAndSetDefault("dogstatsd_queue_size", 1024) config.BindEnvAndSetDefault("dogstatsd_non_local_traffic", false) config.BindEnvAndSetDefault("dogstatsd_socket", "") // Notice: empty means feature disabled config.BindEnvAndSetDefault("dogstatsd_stats_port", 5000) config.BindEnvAndSetDefault("dogstatsd_stats_enable", false) config.BindEnvAndSetDefault("dogstatsd_stats_buffer", 10) // Control for how long counter would be sampled to 0 if not received config.BindEnvAndSetDefault("dogstatsd_expiry_seconds", 300) // Control how long we keep dogstatsd contexts in memory. This should // not be set bellow 2 dogstatsd bucket size (ie 20s, since each bucket // is 10s), otherwise we won't be able to sample unseen counter as // contexts will be deleted (see 'dogstatsd_expiry_seconds'). config.BindEnvAndSetDefault("dogstatsd_context_expiry_seconds", 300) config.BindEnvAndSetDefault("dogstatsd_origin_detection", false) // Only supported for socket traffic config.BindEnvAndSetDefault("dogstatsd_so_rcvbuf", 0) config.BindEnvAndSetDefault("dogstatsd_metrics_stats_enable", false) config.BindEnvAndSetDefault("dogstatsd_tags", []string{}) config.BindEnvAndSetDefault("dogstatsd_mapper_cache_size", 1000) config.BindEnvAndSetDefault("dogstatsd_string_interner_size", 4096) // Enable check for Entity-ID presence when enriching Dogstatsd metrics with tags config.BindEnvAndSetDefault("dogstatsd_entity_id_precedence", false) // Sends Dogstatsd parse errors to the Debug level instead of the Error level config.BindEnvAndSetDefault("dogstatsd_disable_verbose_logs", false) // Location to store dogstatsd captures by default config.BindEnvAndSetDefault("dogstatsd_capture_path", "") // Depth of the channel the capture writer reads before persisting to disk. // Default is 0 - blocking channel config.BindEnvAndSetDefault("dogstatsd_capture_depth", 0) _ = config.BindEnv("dogstatsd_mapper_profiles") config.SetEnvKeyTransformer("dogstatsd_mapper_profiles", func(in string) interface{} { var mappings []MappingProfile if err := json.Unmarshal([]byte(in), &mappings); err != nil { log.Errorf(`"dogstatsd_mapper_profiles" can not be parsed: %v`, err) } return mappings }) config.BindEnvAndSetDefault("statsd_forward_host", "") config.BindEnvAndSetDefault("statsd_forward_port", 0) config.BindEnvAndSetDefault("statsd_metric_namespace", "") config.BindEnvAndSetDefault("statsd_metric_namespace_blacklist", StandardStatsdPrefixes) // Autoconfig config.BindEnvAndSetDefault("autoconf_template_dir", "/datadog/check_configs") config.BindEnvAndSetDefault("exclude_pause_container", true) config.BindEnvAndSetDefault("ac_include", []string{}) config.BindEnvAndSetDefault("ac_exclude", []string{}) // ac_load_timeout is used to delay the introduction of sources other than // the ones automatically loaded by the AC, into the logs agent. // It is mainly here to delay the introduction of the container_collect_all // in the logs agent, to avoid it to tail all the available containers. 
config.BindEnvAndSetDefault("ac_load_timeout", 30000) // in milliseconds config.BindEnvAndSetDefault("container_include", []string{}) config.BindEnvAndSetDefault("container_exclude", []string{}) config.BindEnvAndSetDefault("container_include_metrics", []string{}) config.BindEnvAndSetDefault("container_exclude_metrics", []string{}) config.BindEnvAndSetDefault("container_include_logs", []string{}) config.BindEnvAndSetDefault("container_exclude_logs", []string{}) config.BindEnvAndSetDefault("ad_config_poll_interval", int64(10)) // in seconds config.BindEnvAndSetDefault("extra_listeners", []string{}) config.BindEnvAndSetDefault("extra_config_providers", []string{}) config.BindEnvAndSetDefault("ignore_autoconf", []string{}) config.BindEnvAndSetDefault("autoconfig_from_environment", true) config.BindEnvAndSetDefault("autoconfig_exclude_features", []string{}) // Docker config.BindEnvAndSetDefault("docker_query_timeout", int64(5)) config.BindEnvAndSetDefault("docker_labels_as_tags", map[string]string{}) config.BindEnvAndSetDefault("docker_env_as_tags", map[string]string{}) config.BindEnvAndSetDefault("kubernetes_pod_labels_as_tags", map[string]string{}) config.BindEnvAndSetDefault("kubernetes_pod_annotations_as_tags", map[string]string{}) config.BindEnvAndSetDefault("kubernetes_node_labels_as_tags", map[string]string{}) config.BindEnvAndSetDefault("kubernetes_namespace_labels_as_tags", map[string]string{}) config.BindEnvAndSetDefault("container_cgroup_prefix", "") // CRI config.BindEnvAndSetDefault("cri_socket_path", "") // empty is disabled config.BindEnvAndSetDefault("cri_connection_timeout", int64(1)) // in seconds config.BindEnvAndSetDefault("cri_query_timeout", int64(5)) // in seconds // Containerd // We only support containerd in Kubernetes. By default containerd cri uses `k8s.io` https://github.com/containerd/cri/blob/release/1.2/pkg/constants/constants.go#L22-L23 config.BindEnvAndSetDefault("containerd_namespace", "k8s.io") // Kubernetes config.BindEnvAndSetDefault("kubernetes_kubelet_host", "") config.BindEnvAndSetDefault("kubernetes_kubelet_nodename", "") config.BindEnvAndSetDefault("eks_fargate", false) config.BindEnvAndSetDefault("kubernetes_http_kubelet_port", 10255) config.BindEnvAndSetDefault("kubernetes_https_kubelet_port", 10250) config.BindEnvAndSetDefault("kubelet_tls_verify", true) config.BindEnvAndSetDefault("collect_kubernetes_events", false) config.BindEnvAndSetDefault("kubelet_client_ca", "") config.BindEnvAndSetDefault("kubelet_auth_token_path", "") config.BindEnvAndSetDefault("kubelet_client_crt", "") config.BindEnvAndSetDefault("kubelet_client_key", "") config.BindEnvAndSetDefault("kubernetes_pod_expiration_duration", 15*60) // in seconds, default 15 minutes config.BindEnvAndSetDefault("kubelet_wait_on_missing_container", 0) config.BindEnvAndSetDefault("kubelet_cache_pods_duration", 5) // Polling frequency in seconds of the agent to the kubelet "/pods" endpoint config.BindEnvAndSetDefault("kubelet_listener_polling_interval", 5) // Polling frequency in seconds of the pod watcher to detect new pods/containers (affected by kubelet_cache_pods_duration setting) config.BindEnvAndSetDefault("kubernetes_collect_metadata_tags", true) config.BindEnvAndSetDefault("kubernetes_metadata_tag_update_freq", 60) // Polling frequency of the Agent to the DCA in seconds (gets the local cache if the DCA is disabled) config.BindEnvAndSetDefault("kubernetes_apiserver_client_timeout", 10) config.BindEnvAndSetDefault("kubernetes_map_services_on_ip", false) // temporary opt-out of the new mapping 
logic config.BindEnvAndSetDefault("kubernetes_apiserver_use_protobuf", false) config.BindEnvAndSetDefault("prometheus_scrape.enabled", false) // Enables the prometheus config provider config.BindEnvAndSetDefault("prometheus_scrape.service_endpoints", false) // Enables Service Endpoints checks in the prometheus config provider _ = config.BindEnv("prometheus_scrape.checks") // Defines any extra prometheus/openmetrics check configurations to be handled by the prometheus config provider config.SetEnvKeyTransformer("prometheus_scrape.checks", func(in string) interface{} { var promChecks []*types.PrometheusCheck if err := json.Unmarshal([]byte(in), &promChecks); err != nil { log.Warnf(`"prometheus_scrape.checks" can not be parsed: %v`, err) } return promChecks }) // SNMP config.SetKnown("snmp_listener.discovery_interval") config.SetKnown("snmp_listener.allowed_failures") config.SetKnown("snmp_listener.collect_device_metadata") config.SetKnown("snmp_listener.workers") config.SetKnown("snmp_listener.configs") config.BindEnvAndSetDefault("snmp_traps_enabled", false) config.BindEnvAndSetDefault("snmp_traps_config.port", 162) config.BindEnvAndSetDefault("snmp_traps_config.community_strings", []string{}) config.BindEnvAndSetDefault("snmp_traps_config.bind_host", "localhost") config.BindEnvAndSetDefault("snmp_traps_config.stop_timeout", 5) // in seconds // Kube ApiServer config.BindEnvAndSetDefault("kubernetes_kubeconfig_path", "") config.BindEnvAndSetDefault("leader_lease_duration", "60") config.BindEnvAndSetDefault("leader_election", false) config.BindEnvAndSetDefault("kube_resources_namespace", "") config.BindEnvAndSetDefault("kube_cache_sync_timeout_seconds", 2) // Datadog cluster agent config.BindEnvAndSetDefault("cluster_agent.enabled", false) config.BindEnvAndSetDefault("cluster_agent.auth_token", "") config.BindEnvAndSetDefault("cluster_agent.url", "") config.BindEnvAndSetDefault("cluster_agent.kubernetes_service_name", "datadog-cluster-agent") config.BindEnvAndSetDefault("cluster_agent.tagging_fallback", false) config.BindEnvAndSetDefault("cluster_agent.server.read_timeout_seconds", 2) config.BindEnvAndSetDefault("cluster_agent.server.write_timeout_seconds", 2) config.BindEnvAndSetDefault("cluster_agent.server.idle_timeout_seconds", 60) config.BindEnvAndSetDefault("metrics_port", "5000") // Metadata endpoints // Defines the maximum size of hostame gathered from EC2, GCE, Azure and Alibabacloud metadata endpoints. // Used internally to protect against configurations where metadata endpoints return incorrect values with 200 status codes. 
config.BindEnvAndSetDefault("metadata_endpoints_max_hostname_size", 255) // EC2 config.BindEnvAndSetDefault("ec2_use_windows_prefix_detection", false) config.BindEnvAndSetDefault("ec2_metadata_timeout", 300) // value in milliseconds config.BindEnvAndSetDefault("ec2_metadata_token_lifetime", 21600) // value in seconds config.BindEnvAndSetDefault("ec2_prefer_imdsv2", false) config.BindEnvAndSetDefault("collect_ec2_tags", false) // ECS config.BindEnvAndSetDefault("ecs_agent_url", "") // Will be autodetected config.BindEnvAndSetDefault("ecs_agent_container_name", "ecs-agent") config.BindEnvAndSetDefault("ecs_collect_resource_tags_ec2", false) config.BindEnvAndSetDefault("ecs_resource_tags_replace_colon", false) config.BindEnvAndSetDefault("ecs_metadata_timeout", 500) // value in milliseconds // GCE config.BindEnvAndSetDefault("collect_gce_tags", true) config.BindEnvAndSetDefault("exclude_gce_tags", []string{ "kube-env", "kubelet-config", "containerd-configure-sh", "startup-script", "shutdown-script", "configure-sh", "sshKeys", "ssh-keys", "user-data", "cli-cert", "ipsec-cert", "ssl-cert", "google-container-manifest", "bosh_settings", "windows-startup-script-ps1", "common-psm1", "k8s-node-setup-psm1", "serial-port-logging-enable", "enable-oslogin", "disable-address-manager", "disable-legacy-endpoints", "windows-keys", "kubeconfig", }) config.BindEnvAndSetDefault("gce_send_project_id_tag", false) config.BindEnvAndSetDefault("gce_metadata_timeout", 1000) // value in milliseconds // Cloud Foundry config.BindEnvAndSetDefault("cloud_foundry", false) config.BindEnvAndSetDefault("bosh_id", "") config.BindEnvAndSetDefault("cf_os_hostname_aliasing", false) // Cloud Foundry BBS config.BindEnvAndSetDefault("cloud_foundry_bbs.url", "https://bbs.service.cf.internal:8889") config.BindEnvAndSetDefault("cloud_foundry_bbs.poll_interval", 15) config.BindEnvAndSetDefault("cloud_foundry_bbs.ca_file", "") config.BindEnvAndSetDefault("cloud_foundry_bbs.cert_file", "") config.BindEnvAndSetDefault("cloud_foundry_bbs.key_file", "") config.BindEnvAndSetDefault("cloud_foundry_bbs.env_include", []string{}) config.BindEnvAndSetDefault("cloud_foundry_bbs.env_exclude", []string{}) // Cloud Foundry CC config.BindEnvAndSetDefault("cloud_foundry_cc.url", "https://cloud-controller-ng.service.cf.internal:9024") config.BindEnvAndSetDefault("cloud_foundry_cc.client_id", "") config.BindEnvAndSetDefault("cloud_foundry_cc.client_secret", "") config.BindEnvAndSetDefault("cloud_foundry_cc.poll_interval", 60) config.BindEnvAndSetDefault("cloud_foundry_cc.skip_ssl_validation", false) // Cloud Foundry Garden config.BindEnvAndSetDefault("cloud_foundry_garden.listen_network", "unix") config.BindEnvAndSetDefault("cloud_foundry_garden.listen_address", "/var/vcap/data/garden/garden.sock") // Azure config.BindEnvAndSetDefault("azure_hostname_style", "os") // JMXFetch config.BindEnvAndSetDefault("jmx_custom_jars", []string{}) config.BindEnvAndSetDefault("jmx_use_cgroup_memory_limit", false) config.BindEnvAndSetDefault("jmx_use_container_support", false) config.BindEnvAndSetDefault("jmx_max_restarts", int64(3)) config.BindEnvAndSetDefault("jmx_restart_interval", int64(5)) config.BindEnvAndSetDefault("jmx_thread_pool_size", 3) config.BindEnvAndSetDefault("jmx_reconnection_thread_pool_size", 3) config.BindEnvAndSetDefault("jmx_collection_timeout", 60) config.BindEnvAndSetDefault("jmx_check_period", int(defaults.DefaultCheckInterval/time.Millisecond)) config.BindEnvAndSetDefault("jmx_reconnection_timeout", 60) // Go_expvar server port 
config.BindEnvAndSetDefault("expvar_port", "5000") config.BindEnvAndSetDefault("expvar_host", "127.0.0.1") // internal profiling config.BindEnvAndSetDefault("internal_profiling.enabled", false) config.BindEnv("internal_profiling.profile_dd_url", "") //nolint:errcheck config.BindEnvAndSetDefault("internal_profiling.period", 5*time.Minute) config.BindEnvAndSetDefault("internal_profiling.cpu_duration", 1*time.Minute) config.BindEnvAndSetDefault("internal_profiling.block_profile_rate", 0) config.BindEnvAndSetDefault("internal_profiling.mutex_profile_fraction", 0) config.BindEnvAndSetDefault("internal_profiling.enable_goroutine_stacktraces", false) // Process agent config.SetDefault("process_config.enabled", "false") // process_config.enabled is only used on Windows by the core agent to start the process agent service. // it can be set from file, but not from env. Override it with value from DD_PROCESS_AGENT_ENABLED. ddProcessAgentEnabled, found := os.LookupEnv("DD_PROCESS_AGENT_ENABLED") if found { AddOverride("process_config.enabled", ddProcessAgentEnabled) } config.BindEnv("process_config.process_dd_url", "") //nolint:errcheck // Logs Agent // External Use: modify those parameters to configure the logs-agent. // enable the logs-agent: config.BindEnvAndSetDefault("logs_enabled", false) config.BindEnvAndSetDefault("log_enabled", false) // deprecated, use logs_enabled instead // collect all logs from all containers: config.BindEnvAndSetDefault("logs_config.container_collect_all", false) // add a socks5 proxy: config.BindEnvAndSetDefault("logs_config.socks5_proxy_address", "") // specific logs-agent api-key config.BindEnv("logs_config.api_key") //nolint:errcheck // Duration during which the host tags will be submitted with log events. config.BindEnvAndSetDefault("logs_config.expected_tags_duration", time.Duration(0)) // duration-formatted string (parsed by `time.ParseDuration`) // send the logs to the port 443 of the logs-backend via TCP: config.BindEnvAndSetDefault("logs_config.use_port_443", false) // increase the read buffer size of the UDP sockets: config.BindEnvAndSetDefault("logs_config.frame_size", 9000) // increase the number of files that can be tailed in parallel: config.BindEnvAndSetDefault("logs_config.open_files_limit", 100) // add global processing rules that are applied on all logs config.BindEnv("logs_config.processing_rules") //nolint:errcheck // enforce the agent to use files to collect container logs on kubernetes environment config.BindEnvAndSetDefault("logs_config.k8s_container_use_file", false) // Enable the agent to use files to collect container logs on standalone docker environment, containers // with an existing registry offset will continue to be tailed from the docker socket unless // logs_config.docker_container_force_use_file is set to true. config.BindEnvAndSetDefault("logs_config.docker_container_use_file", false) // Force tailing from file for all docker container, even the ones with an existing registry entry config.BindEnvAndSetDefault("logs_config.docker_container_force_use_file", false) // While parsing Kubernetes pod logs, use /var/log/containers to validate that // the pod container ID is matching. 
config.BindEnvAndSetDefault("logs_config.validate_pod_container_id", false) // additional config to ensure initial logs are tagged with kubelet tags // wait (seconds) for tagger before start fetching tags of new AD services config.BindEnvAndSetDefault("logs_config.tagger_warmup_duration", 0) // Disabled by default (0 seconds) // Configurable docker client timeout while communicating with the docker daemon. // It could happen that the docker daemon takes a lot of time gathering timestamps // before starting to send any data when it has stored several large log files. // This field lets you increase the read timeout to prevent the client from // timing out too early in such a situation. Value in seconds. config.BindEnvAndSetDefault("logs_config.docker_client_read_timeout", 30) // Internal Use Only: avoid modifying those configuration parameters, this could lead to unexpected results. config.BindEnvAndSetDefault("logs_config.run_path", defaultRunPath) config.BindEnvAndSetDefault("logs_config.use_http", false) config.BindEnvAndSetDefault("logs_config.use_tcp", false) bindEnvAndSetLogsConfigKeys(config, "logs_config.") bindEnvAndSetLogsConfigKeys(config, "database_monitoring.samples.") bindEnvAndSetLogsConfigKeys(config, "database_monitoring.metrics.") bindEnvAndSetLogsConfigKeys(config, "network_devices.metadata.") config.BindEnvAndSetDefault("logs_config.dd_port", 10516) config.BindEnvAndSetDefault("logs_config.dev_mode_use_proto", true) config.BindEnvAndSetDefault("logs_config.dd_url_443", "agent-443-intake.logs.datadoghq.com") config.BindEnvAndSetDefault("logs_config.stop_grace_period", 30) config.BindEnvAndSetDefault("logs_config.close_timeout", 60) config.BindEnvAndSetDefault("logs_config.auditor_ttl", DefaultAuditorTTL) // in hours // Timeout in milliseonds used when performing agreggation operations, // including multi-line log processing rules and chunked line reaggregation. // It may be useful to increase it when logs writing is slowed down, that // could happen while serializing large objects on log lines. config.BindEnvAndSetDefault("logs_config.aggregation_timeout", 1000) // The cardinality of tags to send for checks and dogstatsd respectively. // Choices are: low, orchestrator, high. // WARNING: sending orchestrator, or high tags for dogstatsd metrics may create more metrics // (one per container instead of one per host). // Changing this setting may impact your custom metrics billing. 
config.BindEnvAndSetDefault("checks_tag_cardinality", "low") config.BindEnvAndSetDefault("dogstatsd_tag_cardinality", "low") config.BindEnvAndSetDefault("histogram_copy_to_distribution", false) config.BindEnvAndSetDefault("histogram_copy_to_distribution_prefix", "") config.BindEnv("api_key") //nolint:errcheck config.BindEnvAndSetDefault("hpa_watcher_polling_freq", 10) config.BindEnvAndSetDefault("hpa_watcher_gc_period", 60*5) // 5 minutes config.BindEnvAndSetDefault("hpa_configmap_name", "datadog-custom-metrics") config.BindEnvAndSetDefault("external_metrics_provider.enabled", false) config.BindEnvAndSetDefault("external_metrics_provider.port", 443) config.BindEnvAndSetDefault("external_metrics_provider.endpoint", "") // Override the Datadog API endpoint to query external metrics from config.BindEnvAndSetDefault("external_metrics_provider.api_key", "") // Override the Datadog API Key for external metrics endpoint config.BindEnvAndSetDefault("external_metrics_provider.app_key", "") // Override the Datadog APP Key for external metrics endpoint config.BindEnvAndSetDefault("external_metrics_provider.refresh_period", 30) // value in seconds. Frequency of calls to Datadog to refresh metric values config.BindEnvAndSetDefault("external_metrics_provider.batch_window", 10) // value in seconds. Batch the events from the Autoscalers informer to push updates to the ConfigMap (GlobalStore) config.BindEnvAndSetDefault("external_metrics_provider.max_age", 120) // value in seconds. 4 cycles from the Autoscaler controller (up to Kubernetes 1.11) is enough to consider a metric stale config.BindEnvAndSetDefault("external_metrics.aggregator", "avg") // aggregator used for the external metrics. Choose from [avg,sum,max,min] config.BindEnvAndSetDefault("external_metrics_provider.bucket_size", 60*5) // Window to query to get the metric from Datadog. config.BindEnvAndSetDefault("external_metrics_provider.rollup", 30) // Bucket size to circumvent time aggregation side effects. config.BindEnvAndSetDefault("external_metrics_provider.wpa_controller", false) // Activates the controller for Watermark Pod Autoscalers. config.BindEnvAndSetDefault("external_metrics_provider.use_datadogmetric_crd", false) // Use DatadogMetric CRD with custom Datadog Queries instead of ConfigMap config.BindEnvAndSetDefault("kubernetes_event_collection_timeout", 100) // timeout between two successful event collections in milliseconds. config.BindEnvAndSetDefault("kubernetes_informers_resync_period", 60*5) // value in seconds. 
Default to 5 minutes config.BindEnvAndSetDefault("external_metrics_provider.config", map[string]string{}) // list of options that can be used to configure the external metrics server config.BindEnvAndSetDefault("external_metrics_provider.local_copy_refresh_rate", 30) // value in seconds // Cluster check Autodiscovery config.BindEnvAndSetDefault("cluster_checks.enabled", false) config.BindEnvAndSetDefault("cluster_checks.node_expiration_timeout", 30) // value in seconds config.BindEnvAndSetDefault("cluster_checks.warmup_duration", 30) // value in seconds config.BindEnvAndSetDefault("cluster_checks.cluster_tag_name", "cluster_name") config.BindEnvAndSetDefault("cluster_checks.extra_tags", []string{}) config.BindEnvAndSetDefault("cluster_checks.advanced_dispatching_enabled", false) config.BindEnvAndSetDefault("cluster_checks.clc_runners_port", 5005) // Cluster check runner config.BindEnvAndSetDefault("clc_runner_enabled", false) config.BindEnvAndSetDefault("clc_runner_id", "") config.BindEnvAndSetDefault("clc_runner_host", "") // must be set using the Kubernetes downward API config.BindEnvAndSetDefault("clc_runner_port", 5005) config.BindEnvAndSetDefault("clc_runner_server_write_timeout", 15) config.BindEnvAndSetDefault("clc_runner_server_readheader_timeout", 10) // Admission controller config.BindEnvAndSetDefault("admission_controller.enabled", false) config.BindEnvAndSetDefault("admission_controller.mutate_unlabelled", false) config.BindEnvAndSetDefault("admission_controller.port", 8000) config.BindEnvAndSetDefault("admission_controller.timeout_seconds", 10) // in seconds (see kubernetes/kubernetes#71508) config.BindEnvAndSetDefault("admission_controller.service_name", "datadog-admission-controller") config.BindEnvAndSetDefault("admission_controller.certificate.validity_bound", 365*24) // validity bound of the certificate created by the controller (in hours, default 1 year) config.BindEnvAndSetDefault("admission_controller.certificate.expiration_threshold", 30*24) // how long before its expiration a certificate should be refreshed (in hours, default 1 month) config.BindEnvAndSetDefault("admission_controller.certificate.secret_name", "webhook-certificate") // name of the Secret object containing the webhook certificate config.BindEnvAndSetDefault("admission_controller.webhook_name", "datadog-webhook") config.BindEnvAndSetDefault("admission_controller.inject_config.enabled", true) config.BindEnvAndSetDefault("admission_controller.inject_config.endpoint", "/injectconfig") config.BindEnvAndSetDefault("admission_controller.inject_tags.enabled", true) config.BindEnvAndSetDefault("admission_controller.inject_tags.endpoint", "/injecttags") config.BindEnvAndSetDefault("admission_controller.pod_owners_cache_validity", 10) // in minutes config.BindEnvAndSetDefault("admission_controller.namespace_selector_fallback", false) // Telemetry // Enable telemetry metrics on the internals of the Agent. // This create a lot of billable custom metrics. 
config.BindEnvAndSetDefault("telemetry.enabled", false) config.SetKnown("telemetry.checks") // We're using []string as a default instead of []float64 because viper can only parse list of string from the environment // // The histogram buckets use to track the time in nanoseconds DogStatsD listeners are not reading/waiting new data config.BindEnvAndSetDefault("telemetry.dogstatsd.listeners_latency_buckets", []string{}) // The histogram buckets use to track the time in nanoseconds it takes for the DogStatsD server to push data to the aggregator config.BindEnvAndSetDefault("telemetry.dogstatsd.aggregator_channel_latency_buckets", []string{}) // The histogram buckets use to track the time in nanoseconds it takes for a DogStatsD listeners to push data to the server config.BindEnvAndSetDefault("telemetry.dogstatsd.listeners_channel_latency_buckets", []string{}) // Declare other keys that don't have a default/env var. // Mostly, keys we use IsSet() on, because IsSet always returns true if a key has a default. config.SetKnown("metadata_providers") config.SetKnown("config_providers") config.SetKnown("cluster_name") config.SetKnown("listeners") config.SetKnown("proxy.http") config.SetKnown("proxy.https") config.SetKnown("proxy.no_proxy") // Orchestrator Explorer DCA and process-agent config.BindEnvAndSetDefault("orchestrator_explorer.enabled", false) // enabling/disabling the environment variables & command scrubbing from the container specs // this option will potentially impact the CPU usage of the agent config.BindEnvAndSetDefault("orchestrator_explorer.container_scrubbing.enabled", true) config.BindEnvAndSetDefault("orchestrator_explorer.custom_sensitive_words", []string{}) config.BindEnv("orchestrator_explorer.max_per_message") //nolint:errcheck config.BindEnv("orchestrator_explorer.orchestrator_dd_url") //nolint:errcheck config.BindEnv("orchestrator_explorer.orchestrator_additional_endpoints") //nolint:errcheck // Orchestrator Explorer - process agent // DEPRECATED in favor of `orchestrator_explorer.orchestrator_dd_url` setting. If both are set `orchestrator_explorer.orchestrator_dd_url` will take precedence. config.BindEnv("process_config.orchestrator_dd_url") //nolint:errcheck // DEPRECATED in favor of `orchestrator_explorer.orchestrator_additional_endpoints` setting. If both are set `orchestrator_explorer.orchestrator_additional_endpoints` will take precedence. 
config.SetKnown("process_config.orchestrator_additional_endpoints.*") config.SetKnown("orchestrator_explorer.orchestrator_additional_endpoints.*") config.BindEnvAndSetDefault("orchestrator_explorer.extra_tags", []string{}) // Process agent config.SetKnown("process_config.dd_agent_env") config.SetKnown("process_config.enabled") config.SetKnown("process_config.intervals.process_realtime") config.SetKnown("process_config.queue_size") config.SetKnown("process_config.max_per_message") config.SetKnown("process_config.max_ctr_procs_per_message") config.SetKnown("process_config.intervals.process") config.SetKnown("process_config.blacklist_patterns") config.SetKnown("process_config.intervals.container") config.SetKnown("process_config.intervals.container_realtime") config.SetKnown("process_config.dd_agent_bin") config.SetKnown("process_config.custom_sensitive_words") config.SetKnown("process_config.scrub_args") config.SetKnown("process_config.strip_proc_arguments") config.SetKnown("process_config.windows.args_refresh_interval") config.SetKnown("process_config.windows.add_new_args") config.SetKnown("process_config.additional_endpoints.*") config.SetKnown("process_config.container_source") config.SetKnown("process_config.intervals.connections") config.SetKnown("process_config.expvar_port") config.SetKnown("process_config.log_file") config.SetKnown("process_config.internal_profiling.enabled") config.SetKnown("process_config.remote_tagger") // Network config.BindEnv("network.id") //nolint:errcheck // inventories config.BindEnvAndSetDefault("inventories_enabled", true) config.BindEnvAndSetDefault("inventories_max_interval", 600) // 10min config.BindEnvAndSetDefault("inventories_min_interval", 300) // 5min // Datadog security agent (common) config.BindEnvAndSetDefault("security_agent.cmd_port", 5010) config.BindEnvAndSetDefault("security_agent.expvar_port", 5011) config.BindEnvAndSetDefault("security_agent.log_file", defaultSecurityAgentLogFile) config.BindEnvAndSetDefault("security_agent.remote_tagger", false) // Datadog security agent (compliance) config.BindEnvAndSetDefault("compliance_config.enabled", false) config.BindEnvAndSetDefault("compliance_config.check_interval", 20*time.Minute) config.BindEnvAndSetDefault("compliance_config.check_max_events_per_run", 100) config.BindEnvAndSetDefault("compliance_config.dir", "/etc/datadog-agent/compliance.d") config.BindEnvAndSetDefault("compliance_config.run_path", defaultRunPath) bindEnvAndSetLogsConfigKeys(config, "compliance_config.endpoints.") // Datadog security agent (runtime) config.BindEnvAndSetDefault("runtime_security_config.enabled", false) config.SetKnown("runtime_security_config.fim_enabled") config.BindEnvAndSetDefault("runtime_security_config.erpc_dentry_resolution_enabled", true) config.BindEnvAndSetDefault("runtime_security_config.map_dentry_resolution_enabled", true) config.BindEnvAndSetDefault("runtime_security_config.policies.dir", DefaultRuntimePoliciesDir) config.BindEnvAndSetDefault("runtime_security_config.socket", "/opt/datadog-agent/run/runtime-security.sock") config.BindEnvAndSetDefault("runtime_security_config.enable_approvers", true) config.BindEnvAndSetDefault("runtime_security_config.enable_kernel_filters", true) config.BindEnvAndSetDefault("runtime_security_config.flush_discarder_window", 3) config.BindEnvAndSetDefault("runtime_security_config.syscall_monitor.enabled", false) config.BindEnvAndSetDefault("runtime_security_config.events_stats.polling_interval", 20) 
config.BindEnvAndSetDefault("runtime_security_config.events_stats.tags_cardinality", "high") config.BindEnvAndSetDefault("runtime_security_config.run_path", defaultRunPath) config.BindEnvAndSetDefault("runtime_security_config.event_server.burst", 40) config.BindEnvAndSetDefault("runtime_security_config.event_server.retention", 6) config.BindEnvAndSetDefault("runtime_security_config.event_server.rate", 10) config.BindEnvAndSetDefault("runtime_security_config.load_controller.events_count_threshold", 20000) config.BindEnvAndSetDefault("runtime_security_config.load_controller.discarder_timeout", 10) config.BindEnvAndSetDefault("runtime_security_config.load_controller.control_period", 2) config.BindEnvAndSetDefault("runtime_security_config.pid_cache_size", 10000) config.BindEnvAndSetDefault("runtime_security_config.cookie_cache_size", 100) config.BindEnvAndSetDefault("runtime_security_config.agent_monitoring_events", true) config.BindEnvAndSetDefault("runtime_security_config.custom_sensitive_words", []string{}) config.BindEnvAndSetDefault("runtime_security_config.remote_tagger", true) config.BindEnvAndSetDefault("runtime_security_config.log_patterns", []string{}) bindEnvAndSetLogsConfigKeys(config, "runtime_security_config.endpoints.") // Serverless Agent config.BindEnvAndSetDefault("serverless.logs_enabled", true) config.BindEnvAndSetDefault("enhanced_metrics", true) // command line options config.SetKnown("cmd.check.fullsketches") setAssetFs(config) setupAPM(config) } var ddURLRegexp = regexp.MustCompile(`^app(\.(us|eu)\d)?\.datad(oghq|0g)\.(com|eu)$`) // GetProxies returns the proxy settings from the configuration func GetProxies() *Proxy { return proxies } // loadProxyFromEnv overrides the proxy settings with environment variables func loadProxyFromEnv(config Config) { // Viper doesn't handle mixing nested variables from files and set // manually. If we manually set one of the sub value for "proxy" all // other values from the conf file will be shadowed when using // 'config.Get("proxy")'. For that reason we first get the value from // the conf files, overwrite them with the env variables and reset // everything. 
lookupEnvCaseInsensitive := func(key string) (string, bool) { value, found := os.LookupEnv(key) if !found { value, found = os.LookupEnv(strings.ToLower(key)) } if found { log.Infof("Found '%v' env var, using it for the Agent proxy settings", key) } return value, found } lookupEnv := func(key string) (string, bool) { value, found := os.LookupEnv(key) if found { log.Infof("Found '%v' env var, using it for the Agent proxy settings", key) } return value, found } var isSet bool p := &Proxy{} if isSet = config.IsSet("proxy"); isSet { if err := config.UnmarshalKey("proxy", p); err != nil { isSet = false log.Errorf("Could not load proxy setting from the configuration (ignoring): %s", err) } } if HTTP, found := lookupEnv("DD_PROXY_HTTP"); found { isSet = true p.HTTP = HTTP } else if HTTP, found := lookupEnvCaseInsensitive("HTTP_PROXY"); found { isSet = true p.HTTP = HTTP } if HTTPS, found := lookupEnv("DD_PROXY_HTTPS"); found { isSet = true p.HTTPS = HTTPS } else if HTTPS, found := lookupEnvCaseInsensitive("HTTPS_PROXY"); found { isSet = true p.HTTPS = HTTPS } if noProxy, found := lookupEnv("DD_PROXY_NO_PROXY"); found { isSet = true p.NoProxy = strings.Split(noProxy, " ") // space-separated list, consistent with viper } else if noProxy, found := lookupEnvCaseInsensitive("NO_PROXY"); found { isSet = true p.NoProxy = strings.Split(noProxy, ",") // comma-separated list, consistent with other tools that use the NO_PROXY env var } // We have to set each value individually so both config.Get("proxy") // and config.Get("proxy.http") work if isSet { config.Set("proxy.http", p.HTTP) config.Set("proxy.https", p.HTTPS) if len(p.NoProxy) > 0 { config.Set("proxy.no_proxy", p.NoProxy) } else { // If this is set to an empty []string, viper will have a type conflict when merging // this config during secrets resolution. It unmarshals empty yaml lists to type // []interface{}, which will then conflict with type []string and fail to merge. 
config.Set("proxy.no_proxy", []interface{}{}) } proxies = p } if !config.GetBool("use_proxy_for_cloud_metadata") { p.NoProxy = append(p.NoProxy, "169.254.169.254") // Azure, EC2, GCE p.NoProxy = append(p.NoProxy, "100.100.100.200") // Alibaba } } // Load reads configs files and initializes the config module func Load() (*Warnings, error) { return load(Datadog, "datadog.yaml", true) } // LoadWithoutSecret reads configs files, initializes the config module without decrypting any secrets func LoadWithoutSecret() (*Warnings, error) { return load(Datadog, "datadog.yaml", false) } func findUnknownKeys(config Config) []string { var unknownKeys []string knownKeys := config.GetKnownKeys() loadedKeys := config.AllKeys() for _, key := range loadedKeys { if _, found := knownKeys[key]; !found { // Check if any subkey terminated with a '.*' wildcard is marked as known // e.g.: apm_config.* would match all sub-keys of apm_config splitPath := strings.Split(key, ".") for j := range splitPath { subKey := strings.Join(splitPath[:j+1], ".") + ".*" if _, found = knownKeys[subKey]; found { break } } if !found { unknownKeys = append(unknownKeys, key) } } } return unknownKeys } func load(config Config, origin string, loadSecret bool) (*Warnings, error) { warnings := Warnings{} if err := config.ReadInConfig(); err != nil { if errors.Is(err, os.ErrPermission) { log.Warnf("Error loading config: %v (check config file permissions for dd-agent user)", err) } else { log.Warnf("Error loading config: %v", err) } return &warnings, err } for _, key := range findUnknownKeys(config) { log.Warnf("Unknown key in config file: %v", key) } if loadSecret { if err := ResolveSecrets(config, origin); err != nil { return &warnings, err } } // If this variable is set to true, we'll use DefaultPython for the Python version, // ignoring the python_version configuration value. if ForceDefaultPython == "true" { pv := config.GetString("python_version") if pv != DefaultPython { log.Warnf("Python version has been forced to %s", DefaultPython) } AddOverride("python_version", DefaultPython) } loadProxyFromEnv(config) SanitizeAPIKeyConfig(config, "api_key") // Environment feature detection needs to run before applying override funcs // as it may provide such overrides detectFeatures() applyOverrideFuncs(config) // setTracemallocEnabled *must* be called before setNumWorkers warnings.TraceMallocEnabledWithPy2 = setTracemallocEnabled(config) setNumWorkers(config) return &warnings, nil } // ResolveSecrets merges all the secret values from origin into config. Secret values // are identified by a value of the form "ENC[key]" where key is the secret key. // See: https://github.com/DataDog/datadog-agent/blob/main/docs/agent/secrets.md func ResolveSecrets(config Config, origin string) error { // We have to init the secrets package before we can use it to decrypt // anything. secrets.Init( config.GetString("secret_backend_command"), config.GetStringSlice("secret_backend_arguments"), config.GetInt("secret_backend_timeout"), config.GetInt("secret_backend_output_max_size"), config.GetBool("secret_backend_command_allow_group_exec_perm"), ) if config.GetString("secret_backend_command") != "" { // Viper doesn't expose the final location of the file it // loads. Since we are searching for 'datadog.yaml' in multiple // locations we let viper determine the one to use before // updating it. 
yamlConf, err := yaml.Marshal(config.AllSettings()) if err != nil { return fmt.Errorf("unable to marshal configuration to YAML to decrypt secrets: %v", err) } finalYamlConf, err := secrets.Decrypt(yamlConf, origin) if err != nil { return fmt.Errorf("unable to decrypt secret from datadog.yaml: %v", err) } r := bytes.NewReader(finalYamlConf) if err = config.MergeConfigOverride(r); err != nil { return fmt.Errorf("could not update main configuration after decrypting secrets: %v", err) } } return nil } // SanitizeAPIKeyConfig strips newlines and other control characters from a given key. func SanitizeAPIKeyConfig(config Config, key string) { config.Set(key, SanitizeAPIKey(config.GetString(key))) } // SanitizeAPIKey strips newlines and other control characters from a given string. func SanitizeAPIKey(key string) string { return strings.TrimSpace(key) } // GetMainInfraEndpoint returns the main DD Infra URL defined in the config, based on the value of `site` and `dd_url` func GetMainInfraEndpoint() string { return getMainInfraEndpointWithConfig(Datadog) } // GetMainEndpoint returns the main DD URL defined in the config, based on `site` and the prefix, or ddURLKey func GetMainEndpoint(prefix string, ddURLKey string) string { return GetMainEndpointWithConfig(Datadog, prefix, ddURLKey) } // GetMultipleEndpoints returns the api keys per domain specified in the main agent config func GetMultipleEndpoints() (map[string][]string, error) { return getMultipleEndpointsWithConfig(Datadog) } func bindEnvAndSetLogsConfigKeys(config Config, prefix string) { config.BindEnv(prefix + "logs_dd_url") //nolint:errcheck // Send the logs to a proxy. Must respect format '<HOST>:<PORT>' and '<PORT>' to be an integer config.BindEnv(prefix + "dd_url") //nolint:errcheck config.BindEnv(prefix + "additional_endpoints") //nolint:errcheck config.BindEnvAndSetDefault(prefix+"use_compression", true) config.BindEnvAndSetDefault(prefix+"compression_level", 6) // Default level for the gzip/deflate algorithm config.BindEnvAndSetDefault(prefix+"batch_wait", DefaultBatchWait) config.BindEnvAndSetDefault(prefix+"connection_reset_interval", 0) // in seconds, 0 means disabled config.BindEnvAndSetDefault(prefix+"logs_no_ssl", false) config.BindEnvAndSetDefault(prefix+"batch_max_concurrent_send", DefaultBatchMaxConcurrentSend) config.BindEnvAndSetDefault(prefix+"batch_max_content_size", DefaultBatchMaxContentSize) config.BindEnvAndSetDefault(prefix+"batch_max_size", DefaultBatchMaxSize) config.BindEnvAndSetDefault(prefix+"sender_backoff_factor", DefaultLogsSenderBackoffFactor) config.BindEnvAndSetDefault(prefix+"sender_backoff_base", DefaultLogsSenderBackoffBase) config.BindEnvAndSetDefault(prefix+"sender_backoff_max", DefaultLogsSenderBackoffMax) config.BindEnvAndSetDefault(prefix+"sender_recovery_interval", DefaultForwarderRecoveryInterval) config.BindEnvAndSetDefault(prefix+"sender_recovery_reset", false) } // getDomainPrefix provides the right prefix for agent X.Y.Z func getDomainPrefix(app string) string { v, _ := version.Agent() return fmt.Sprintf("%d-%d-%d-%s.agent", v.Major, v.Minor, v.Patch, app) } // AddAgentVersionToDomain prefixes the domain with the agent version: X-Y-Z.domain func AddAgentVersionToDomain(DDURL string, app string) (string, error) { u, err := url.Parse(DDURL) if err != nil { return "", err } // we don't update unknown URLs (ie: proxy or custom DD domain) if !ddURLRegexp.MatchString(u.Host) { return DDURL, nil } subdomain := strings.Split(u.Host, ".")[0] newSubdomain := getDomainPrefix(app) u.Host = 
strings.Replace(u.Host, subdomain, newSubdomain, 1) return u.String(), nil } func getMainInfraEndpointWithConfig(config Config) string { return GetMainEndpointWithConfig(config, infraURLPrefix, "dd_url") } // GetMainEndpointWithConfig implements the logic to extract the DD URL from a config, based on `site` and ddURLKey func GetMainEndpointWithConfig(config Config, prefix string, ddURLKey string) (resolvedDDURL string) { if config.IsSet(ddURLKey) && config.GetString(ddURLKey) != "" { // value under ddURLKey takes precedence over 'site' resolvedDDURL = getResolvedDDUrl(config, ddURLKey) } else if config.GetString("site") != "" { resolvedDDURL = prefix + strings.TrimSpace(config.GetString("site")) } else { resolvedDDURL = prefix + DefaultSite } return } // GetMainEndpointWithConfigBackwardCompatible implements the logic to extract the DD URL from a config, based on `site`,ddURLKey and a backward compatible key func GetMainEndpointWithConfigBackwardCompatible(config Config, prefix string, ddURLKey string, backwardKey string) (resolvedDDURL string) { if config.IsSet(ddURLKey) && config.GetString(ddURLKey) != "" { // value under ddURLKey takes precedence over backwardKey and 'site' resolvedDDURL = getResolvedDDUrl(config, ddURLKey) } else if config.IsSet(backwardKey) && config.GetString(backwardKey) != "" { // value under backwardKey takes precedence over 'site' resolvedDDURL = getResolvedDDUrl(config, backwardKey) } else if config.GetString("site") != "" { resolvedDDURL = prefix + strings.TrimSpace(config.GetString("site")) } else { resolvedDDURL = prefix + DefaultSite } return } func getResolvedDDUrl(config Config, urlKey string) string { resolvedDDURL := config.GetString(urlKey) if config.IsSet("site") { log.Infof("'site' and '%s' are both set in config: setting main endpoint to '%s': \"%s\"", urlKey, urlKey, config.GetString(urlKey)) } return resolvedDDURL } // getMultipleEndpointsWithConfig implements the logic to extract the api keys per domain from an agent config func getMultipleEndpointsWithConfig(config Config) (map[string][]string, error) { // Validating domain ddURL := getMainInfraEndpointWithConfig(config) _, err := url.Parse(ddURL) if err != nil { return nil, fmt.Errorf("could not parse main endpoint: %s", err) } keysPerDomain := map[string][]string{ ddURL: { config.GetString("api_key"), }, } additionalEndpoints := config.GetStringMapStringSlice("additional_endpoints") // merge additional endpoints into keysPerDomain for domain, apiKeys := range additionalEndpoints { // Validating domain _, err := url.Parse(domain) if err != nil { return nil, fmt.Errorf("could not parse url from 'additional_endpoints' %s: %s", domain, err) } if _, ok := keysPerDomain[domain]; ok { for _, apiKey := range apiKeys { keysPerDomain[domain] = append(keysPerDomain[domain], apiKey) } } else { keysPerDomain[domain] = apiKeys } } // dedupe api keys and remove domains with no api keys (or empty ones) for domain, apiKeys := range keysPerDomain { dedupedAPIKeys := make([]string, 0, len(apiKeys)) seen := make(map[string]bool) for _, apiKey := range apiKeys { trimmedAPIKey := strings.TrimSpace(apiKey) if _, ok := seen[trimmedAPIKey]; !ok && trimmedAPIKey != "" { seen[trimmedAPIKey] = true dedupedAPIKeys = append(dedupedAPIKeys, trimmedAPIKey) } } if len(dedupedAPIKeys) > 0 { keysPerDomain[domain] = dedupedAPIKeys } else { log.Infof("No API key provided for domain \"%s\", removing domain from endpoints", domain) delete(keysPerDomain, domain) } } return keysPerDomain, nil } // IsCloudProviderEnabled checks the 
cloud provider family provided in // pkg/util/<cloud_provider>.go against the value for cloud_provider: on the // global config object Datadog func IsCloudProviderEnabled(cloudProviderName string) bool { cloudProviderFromConfig := Datadog.GetStringSlice("cloud_provider_metadata") for _, cloudName := range cloudProviderFromConfig { if strings.ToLower(cloudName) == strings.ToLower(cloudProviderName) { log.Debugf("cloud_provider_metadata is set to %s in agent configuration, trying endpoints for %s Cloud Provider", cloudProviderFromConfig, cloudProviderName) return true } } log.Debugf("cloud_provider_metadata is set to %s in agent configuration, skipping %s Cloud Provider", cloudProviderFromConfig, cloudProviderName) return false } // FileUsedDir returns the absolute path to the folder containing the config // file used to populate the registry func FileUsedDir() string { return filepath.Dir(Datadog.ConfigFileUsed()) } // GetIPCAddress returns the IPC address or an error if the address is not local func GetIPCAddress() (string, error) { address := Datadog.GetString("ipc_address") if address == "localhost" { return address, nil } ip := net.ParseIP(address) if ip == nil { return "", fmt.Errorf("ipc_address was set to an invalid IP address: %s", address) } for _, cidr := range []string{ "127.0.0.0/8", // IPv4 loopback "::1/128", // IPv6 loopback } { _, block, err := net.ParseCIDR(cidr) if err != nil { return "", err } if block.Contains(ip) { return address, nil } } return "", fmt.Errorf("ipc_address was set to a non-loopback IP address: %s", address) } // pathExists returns true if the given path exists func pathExists(path string) bool { _, err := os.Stat(path) return !os.IsNotExist(err) } // setTracemallocEnabled is a helper to get the effective tracemalloc // configuration. func setTracemallocEnabled(config Config) bool { pyVersion := config.GetString("python_version") wTracemalloc := config.GetBool("tracemalloc_debug") traceMallocEnabledWithPy2 := false if pyVersion == "2" && wTracemalloc { log.Warnf("Tracemalloc was enabled but unavailable with python version %q, disabling.", pyVersion) wTracemalloc = false traceMallocEnabledWithPy2 = true } // update config with the actual effective tracemalloc config.Set("tracemalloc_debug", wTracemalloc) return traceMallocEnabledWithPy2 } // setNumWorkers is a helper to set the effective number of workers for // a given config. 
func setNumWorkers(config Config) { wTracemalloc := config.GetBool("tracemalloc_debug") numWorkers := config.GetInt("check_runners") if wTracemalloc { log.Infof("Tracemalloc enabled, only one check runner enabled to run checks serially") numWorkers = 1 } // update config with the actual effective number of workers config.Set("check_runners", numWorkers) } // GetDogstatsdMappingProfiles returns mapping profiles used in DogStatsD mapper func GetDogstatsdMappingProfiles() ([]MappingProfile, error) { return getDogstatsdMappingProfilesConfig(Datadog) } func getDogstatsdMappingProfilesConfig(config Config) ([]MappingProfile, error) { var mappings []MappingProfile if config.IsSet("dogstatsd_mapper_profiles") { err := config.UnmarshalKey("dogstatsd_mapper_profiles", &mappings) if err != nil { return []MappingProfile{}, log.Errorf("Could not parse dogstatsd_mapper_profiles: %v", err) } } return mappings, nil } // IsCLCRunner returns whether the Agent is in cluster check runner mode func IsCLCRunner() bool { if !Datadog.GetBool("clc_runner_enabled") { return false } var cp []ConfigurationProviders if err := Datadog.UnmarshalKey("config_providers", &cp); err == nil { for _, name := range Datadog.GetStringSlice("extra_config_providers") { cp = append(cp, ConfigurationProviders{Name: name}) } if len(cp) == 1 && cp[0].Name == "clusterchecks" { // A cluster check runner is an Agent configured to run clusterchecks only return true } } return false } // GetBindHost returns `bind_host` variable or default value // Not using `config.BindEnvAndSetDefault` as some processes need to know // if value was default one or not (e.g. trace-agent) func GetBindHost() string { if Datadog.IsSet("bind_host") { return Datadog.GetString("bind_host") } return "localhost" } // GetValidHostAliases validates host aliases set in `host_aliases` variable and returns // only valid ones. func GetValidHostAliases() []string { return getValidHostAliasesWithConfig(Datadog) } func getValidHostAliasesWithConfig(config Config) []string { aliases := []string{} for _, alias := range config.GetStringSlice("host_aliases") { if err := validate.ValidHostname(alias); err == nil { aliases = append(aliases, alias) } else { log.Warnf("skipping invalid host alias '%s': %s", alias, err) } } return aliases } // GetConfiguredTags returns complete list of user configured tags func GetConfiguredTags(includeDogstatsd bool) []string { tags := Datadog.GetStringSlice("tags") extraTags := Datadog.GetStringSlice("extra_tags") var dsdTags []string if includeDogstatsd { dsdTags = Datadog.GetStringSlice("dogstatsd_tags") } combined := make([]string, 0, len(tags)+len(extraTags)+len(dsdTags)) combined = append(combined, tags...) combined = append(combined, extraTags...) combined = append(combined, dsdTags...) return combined }
[ "\"HOST_PROC\"" ]
[]
[ "HOST_PROC" ]
[]
["HOST_PROC"]
go
1
0
backend/routes/assignment_test.go
package routes

import (
	"bytes"
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"os"
	"testing"
	"time"

	"git.teich.3nt3.de/3nt3/homework/logging"
	"git.teich.3nt3.de/3nt3/homework/structs"
)

func TestCreateAssignment(t *testing.T) {
	a := structs.Assignment{
		Title:      "test assignment",
		DueDate:    (structs.UnixTime)(time.Date(2021, 11, 16, 0, 0, 0, 0, time.UTC)),
		Course:     123,
		FromMoodle: false,
	}

	body, _ := json.Marshal(a)

	// check the error before using the request; a failed NewRequest returns a nil *http.Request
	req, err := http.NewRequest("POST", "http://localhost:8000/assignment", bytes.NewBuffer(body))
	if err != nil {
		t.Fatalf("error creating request: %v", err)
	}
	req.AddCookie(&http.Cookie{Name: "hw_cookie_v2", Value: os.Getenv("HW_SESSION_COOKIE")})

	arr := httptest.NewRecorder()
	CreateAssignment(arr, req)

	aResult := arr.Result()
	if aResult.StatusCode != http.StatusOK {
		t.Errorf("request failed with status code %d", aResult.StatusCode)
	}

	aResp := apiResponse{}
	err = json.NewDecoder(aResult.Body).Decode(&aResp)
	if err != nil {
		t.Errorf("error decoding body: %v", err)
	}

	logging.InfoLogger.Printf("assignment")
}

func TestDeleteAssignment(t *testing.T) {
	// create assignment
	a := structs.Assignment{
		Title:      "test assignment",
		DueDate:    (structs.UnixTime)(time.Date(2021, 11, 16, 0, 0, 0, 0, time.UTC)),
		Course:     123,
		FromMoodle: false,
	}

	body, _ := json.Marshal(a)

	req, err := http.NewRequest("POST", "http://localhost:8000/assignment", bytes.NewBuffer(body))
	if err != nil {
		t.Fatalf("error creating request: %v", err)
	}
	req.AddCookie(&http.Cookie{Name: "hw_cookie_v2", Value: os.Getenv("HW_SESSION_COOKIE")})

	arr := httptest.NewRecorder()
	CreateAssignment(arr, req)

	aResult := arr.Result()

	aResp := apiResponse{}
	err = json.NewDecoder(aResult.Body).Decode(&aResp)
	if err != nil {
		t.Errorf("error decoding body: %v", err)
	}

	if aResult.StatusCode != http.StatusOK {
		t.Errorf("request failed with status code %d %v", aResult.StatusCode, aResp.Errors)
	}

	assignmentJson, err := json.Marshal(aResp.Content)
	if err != nil {
		t.Fatalf("error marshalling response content: %v", err)
	}

	var respA structs.Assignment
	err = json.Unmarshal(assignmentJson, &respA)
	if err != nil {
		t.Errorf("error: %v\n", err)
	}

	// delete the assignment that was just created
	req, err = http.NewRequest("DELETE", "http://localhost:8000/assignment?id="+respA.UID.String(), bytes.NewBuffer(body))
	if err != nil {
		t.Fatalf("error creating request: %v", err)
	}
	req.AddCookie(&http.Cookie{Name: "hw_cookie_v2", Value: os.Getenv("HW_SESSION_COOKIE")})

	logging.InfoLogger.Printf("assignment: %+v\n", respA)

	rr := httptest.NewRecorder()
	DeleteAssignment(rr, req)

	result := rr.Result()
	if result.StatusCode != http.StatusOK {
		t.Errorf("request failed with status code %d", result.StatusCode)
	}

	resp := apiResponse{}
	err = json.NewDecoder(result.Body).Decode(&resp)
	if err != nil {
		t.Errorf("error decoding body: %v", err)
	}

	if result.StatusCode != http.StatusOK {
		t.Errorf("request failed with status code %d %v", result.StatusCode, resp.Errors)
	}
}
[ "\"HW_SESSION_COOKIE\"", "\"HW_SESSION_COOKIE\"", "\"HW_SESSION_COOKIE\"" ]
[]
[ "HW_SESSION_COOKIE" ]
[]
["HW_SESSION_COOKIE"]
go
1
0
examples/grpc/app/app.go
package main

import (
	"context"
	"flag"
	"fmt"
	"log"
	"net"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/johnsiilver/serveonssh"
	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
	"google.golang.org/grpc"

	pb "github.com/johnsiilver/serveonssh/examples/grpc/proto"
)

var (
	endpoint = flag.String("endpoint", "", "The host:port we are connecting to")
	socket   = flag.String("socket", "", "The Unix socket on the REMOTE side to connect to")
	keyFile  = flag.String("key", "", "The SSH key to use. If not provided, attempts to use the SSH agent.")
	pass     = flag.String("pass", "", "File containing a password to use for SSH. If not provided tries --key and then the SSH agent.")
	user     = flag.String("user", os.Getenv("USER"), "The user to SSH as, set to your logged in user")
)

func main() {
	flag.Parse()

	log.SetFlags(log.LstdFlags | log.Lshortfile)

	auths, err := getAuthFromFlags()
	if err != nil {
		log.Fatalf("auth failure: %s", err)
	}

	f, err := serveonssh.New(
		*endpoint,
		*socket,
		&ssh.ClientConfig{
			User:            *user,
			Auth:            auths,
			HostKeyCallback: ssh.InsecureIgnoreHostKey(), // Don't do this in real life
		},
	)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Dial gRPC over the SSH-forwarded connection to the remote Unix socket.
	opts := []grpc.DialOption{
		grpc.WithInsecure(),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return f.Dialer()()
		}),
	}

	conn, err := grpc.Dial(*socket, opts...)
	if err != nil {
		panic(err)
	}

	client := pb.NewServiceClient(conn)

	wg := sync.WaitGroup{}
	for i := 0; i < 100; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			resp, err := client.Say(context.Background(), &pb.Req{})
			if err != nil {
				panic(err)
			}
			if resp.Say != "World" {
				panic(fmt.Errorf("didn't say World in the response"))
			}
			log.Printf("attempt(%d) was successful", i)
		}()
	}
	wg.Wait()
}

func getAuthFromFlags() ([]ssh.AuthMethod, error) {
	auths := []ssh.AuthMethod{}
	if *keyFile != "" {
		a, err := publicKey(*keyFile)
		if err != nil {
			return nil, err
		}
		auths = append(auths, a)
	}

	if *pass != "" {
		b, err := os.ReadFile(*pass)
		if err != nil {
			return nil, fmt.Errorf("password file(%s) had error: %s", *pass, err)
		}
		auths = append(auths, ssh.Password(strings.TrimSpace(string(b))))
	}

	if a, err := agentAuth(); err == nil {
		auths = append(auths, a)
	}
	return auths, nil
}

func agentAuth() (ssh.AuthMethod, error) {
	conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
	if err != nil {
		return nil, err
	}

	client := agent.NewClient(conn)
	return ssh.PublicKeysCallback(client.Signers), nil
}

func publicKey(privateKeyFile string) (ssh.AuthMethod, error) {
	k, err := os.ReadFile(privateKeyFile)
	if err != nil {
		return nil, err
	}

	signer, err := ssh.ParsePrivateKey(k)
	if err != nil {
		return nil, err
	}
	return ssh.PublicKeys(signer), nil
}
[ "\"USER\"", "\"SSH_AUTH_SOCK\"" ]
[]
[ "USER", "SSH_AUTH_SOCK" ]
[]
["USER", "SSH_AUTH_SOCK"]
go
2
0
pkg/executor/executortype/container/containermgr.go
/* Copyright 2020 The Fission Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package container import ( "context" "fmt" "os" "reflect" "strconv" "strings" "sync" "time" multierror "github.com/hashicorp/go-multierror" "github.com/pkg/errors" "go.uber.org/zap" appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" k8sErrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" k8sTypes "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" k8sCache "k8s.io/client-go/tools/cache" fv1 "github.com/fission/fission/pkg/apis/core/v1" "github.com/fission/fission/pkg/crd" "github.com/fission/fission/pkg/executor/executortype" "github.com/fission/fission/pkg/executor/fscache" "github.com/fission/fission/pkg/executor/reaper" "github.com/fission/fission/pkg/throttler" "github.com/fission/fission/pkg/utils" "github.com/fission/fission/pkg/utils/maps" ) var _ executortype.ExecutorType = &Container{} type ( // Container represents an executor type Container struct { logger *zap.Logger kubernetesClient *kubernetes.Clientset fissionClient *crd.FissionClient instanceID string // fetcherConfig *fetcherConfig.Config runtimeImagePullPolicy apiv1.PullPolicy namespace string useIstio bool fsCache *fscache.FunctionServiceCache // cache funcSvc's by function, address and pod name throttler *throttler.Throttler funcInformer *k8sCache.SharedIndexInformer serviceInformer k8sCache.SharedIndexInformer deploymentInformer k8sCache.SharedIndexInformer defaultIdlePodReapTime time.Duration } ) // MakeContainer initializes and returns an instance of CaaF func MakeContainer( logger *zap.Logger, fissionClient *crd.FissionClient, kubernetesClient *kubernetes.Clientset, namespace string, instanceID string, funcInformer *k8sCache.SharedIndexInformer) (executortype.ExecutorType, error) { enableIstio := false if len(os.Getenv("ENABLE_ISTIO")) > 0 { istio, err := strconv.ParseBool(os.Getenv("ENABLE_ISTIO")) if err != nil { logger.Error("failed to parse 'ENABLE_ISTIO', set to false", zap.Error(err)) } enableIstio = istio } caaf := &Container{ logger: logger.Named("CaaF"), fissionClient: fissionClient, kubernetesClient: kubernetesClient, instanceID: instanceID, namespace: namespace, fsCache: fscache.MakeFunctionServiceCache(logger), throttler: throttler.MakeThrottler(1 * time.Minute), funcInformer: funcInformer, runtimeImagePullPolicy: utils.GetImagePullPolicy(os.Getenv("RUNTIME_IMAGE_PULL_POLICY")), useIstio: enableIstio, // Time is set slightly higher than NewDeploy as cold starts are longer for CaaF defaultIdlePodReapTime: 1 * time.Minute, } (*caaf.funcInformer).AddEventHandler(caaf.FuncInformerHandler()) informerFactory, err := utils.GetInformerFactoryByExecutor(caaf.kubernetesClient, fv1.ExecutorTypeContainer) if err != nil { return nil, err } caaf.serviceInformer = informerFactory.Core().V1().Services().Informer() caaf.deploymentInformer = informerFactory.Apps().V1().Deployments().Informer() return caaf, nil } // Run start the function along with an object reaper. 
func (caaf *Container) Run(ctx context.Context) { go caaf.idleObjectReaper() } // GetTypeName returns the executor type name. func (caaf *Container) GetTypeName() fv1.ExecutorType { return fv1.ExecutorTypeContainer } // GetTotalAvailable has not been implemented for CaaF. func (caaf *Container) GetTotalAvailable(fn *fv1.Function) int { // Not Implemented for CaaF. return 0 } // UnTapService has not been implemented for CaaF. func (caaf *Container) UnTapService(key string, svcHost string) { // Not Implemented for CaaF. } // GetFuncSvc returns a function service; error otherwise. func (caaf *Container) GetFuncSvc(ctx context.Context, fn *fv1.Function) (*fscache.FuncSvc, error) { return caaf.createFunction(fn) } // GetFuncSvcFromCache returns a function service from cache; error otherwise. func (caaf *Container) GetFuncSvcFromCache(fn *fv1.Function) (*fscache.FuncSvc, error) { return caaf.fsCache.GetByFunction(&fn.ObjectMeta) } // DeleteFuncSvcFromCache deletes a function service from cache. func (caaf *Container) DeleteFuncSvcFromCache(fsvc *fscache.FuncSvc) { caaf.fsCache.DeleteEntry(fsvc) } // GetFuncSvcFromPoolCache has not been implemented for Container Functions func (caaf *Container) GetFuncSvcFromPoolCache(fn *fv1.Function, requestsPerPod int) (*fscache.FuncSvc, int, error) { // Not Implemented for NewDeployment. Will be used when support of concurrent specialization of same function is added. return nil, 0, nil } // TapService makes a TouchByAddress request to the cache. func (caaf *Container) TapService(svcHost string) error { err := caaf.fsCache.TouchByAddress(svcHost) if err != nil { return err } return nil } func (caaf *Container) getServiceInfo(obj apiv1.ObjectReference) (*apiv1.Service, error) { item, exists, err := utils.GetCachedItem(obj, caaf.serviceInformer) if err != nil || !exists { caaf.logger.Debug( "Falling back to getting service info from k8s API -- this may cause performance issues for your function.", zap.Bool("exists", exists), zap.Error(err), ) service, err := caaf.kubernetesClient.CoreV1().Services(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) return service, err } service := item.(*apiv1.Service) return service, nil } func (caaf *Container) getDeploymentInfo(obj apiv1.ObjectReference) (*appsv1.Deployment, error) { item, exists, err := utils.GetCachedItem(obj, caaf.deploymentInformer) if err != nil || !exists { caaf.logger.Debug( "Falling back to getting deployment info from k8s API -- this may cause performance issues for your function.", zap.Bool("exists", exists), zap.Error(err), ) deployment, err := caaf.kubernetesClient.AppsV1().Deployments(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) return deployment, err } deployment := item.(*appsv1.Deployment) return deployment, nil } // IsValid does a get on the service address to ensure it's a valid service, then // scale deployment to 1 replica if there are no available replicas for function. // Return true if no error occurs, return false otherwise. 
func (caaf *Container) IsValid(fsvc *fscache.FuncSvc) bool { if len(strings.Split(fsvc.Address, ".")) == 0 { caaf.logger.Error("address not found in function service") return false } if len(fsvc.KubernetesObjects) == 0 { caaf.logger.Error("no kubernetes object related to function", zap.String("function", fsvc.Function.Name)) return false } for _, obj := range fsvc.KubernetesObjects { if strings.ToLower(obj.Kind) == "service" { _, err := caaf.getServiceInfo(obj) if err != nil { if !k8sErrs.IsNotFound(err) { caaf.logger.Error("error validating function service", zap.String("function", fsvc.Function.Name), zap.Error(err)) } return false } } else if strings.ToLower(obj.Kind) == "deployment" { currentDeploy, err := caaf.getDeploymentInfo(obj) if err != nil { if !k8sErrs.IsNotFound(err) { caaf.logger.Error("error validating function deployment", zap.String("function", fsvc.Function.Name), zap.Error(err)) } return false } if currentDeploy.Status.AvailableReplicas < 1 { return false } } } return true } // RefreshFuncPods deletes pods related to the function so that new pods are replenished func (caaf *Container) RefreshFuncPods(logger *zap.Logger, f fv1.Function) error { funcLabels := caaf.getDeployLabels(f.ObjectMeta) dep, err := caaf.kubernetesClient.AppsV1().Deployments(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{ LabelSelector: labels.Set(funcLabels).AsSelector().String(), }) if err != nil { return err } // Ideally there should be only one deployment but for now we rely on label/selector to ensure that condition for _, deployment := range dep.Items { rvCount, err := referencedResourcesRVSum(caaf.kubernetesClient, deployment.Namespace, f.Spec.Secrets, f.Spec.ConfigMaps) if err != nil { return err } patch := fmt.Sprintf(`{"spec" : {"template": {"spec":{"containers":[{"name": "%s", "env":[{"name": "%s", "value": "%v"}]}]}}}}`, f.ObjectMeta.Name, fv1.ResourceVersionCount, rvCount) _, err = caaf.kubernetesClient.AppsV1().Deployments(deployment.ObjectMeta.Namespace).Patch(context.TODO(), deployment.ObjectMeta.Name, k8sTypes.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) if err != nil { return err } } return nil } // AdoptExistingResources attempts to adopt resources for functions in all namespaces. func (caaf *Container) AdoptExistingResources() { fnList, err := caaf.fissionClient.CoreV1().Functions(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) if err != nil { caaf.logger.Error("error getting function list", zap.Error(err)) return } wg := &sync.WaitGroup{} for i := range fnList.Items { fn := &fnList.Items[i] if fn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType == fv1.ExecutorTypeContainer { wg.Add(1) go func() { defer wg.Done() _, err = caaf.fnCreate(fn) if err != nil { caaf.logger.Warn("failed to adopt resources for function", zap.Error(err)) return } caaf.logger.Info("adopt resources for function", zap.String("function", fn.ObjectMeta.Name)) }() } } wg.Wait() } // CleanupOldExecutorObjects cleans orphaned resources. 
func (caaf *Container) CleanupOldExecutorObjects() { caaf.logger.Info("CaaF starts to clean orphaned resources", zap.String("instanceID", caaf.instanceID)) errs := &multierror.Error{} listOpts := metav1.ListOptions{ LabelSelector: labels.Set(map[string]string{fv1.EXECUTOR_TYPE: string(fv1.ExecutorTypeContainer)}).AsSelector().String(), } err := reaper.CleanupHpa(caaf.logger, caaf.kubernetesClient, caaf.instanceID, listOpts) if err != nil { errs = multierror.Append(errs, err) } err = reaper.CleanupDeployments(caaf.logger, caaf.kubernetesClient, caaf.instanceID, listOpts) if err != nil { errs = multierror.Append(errs, err) } err = reaper.CleanupServices(caaf.logger, caaf.kubernetesClient, caaf.instanceID, listOpts) if err != nil { errs = multierror.Append(errs, err) } if errs.ErrorOrNil() != nil { // TODO retry reaper; logged and ignored for now caaf.logger.Error("Failed to cleanup old executor objects", zap.Error(err)) } } func (caaf *Container) createFunction(fn *fv1.Function) (*fscache.FuncSvc, error) { if fn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType != fv1.ExecutorTypeContainer { return nil, nil } fsvcObj, err := caaf.throttler.RunOnce(string(fn.ObjectMeta.UID), func(ableToCreate bool) (interface{}, error) { if ableToCreate { return caaf.fnCreate(fn) } return caaf.fsCache.GetByFunctionUID(fn.ObjectMeta.UID) }) if err != nil { e := "error creating k8s resources for function" caaf.logger.Error(e, zap.Error(err), zap.String("function_name", fn.ObjectMeta.Name), zap.String("function_namespace", fn.ObjectMeta.Namespace)) return nil, errors.Wrapf(err, "%s %s_%s", e, fn.ObjectMeta.Name, fn.ObjectMeta.Namespace) } fsvc, ok := fsvcObj.(*fscache.FuncSvc) if !ok { caaf.logger.Panic("receive unknown object while creating function - expected pointer of function service object") } return fsvc, err } func (caaf *Container) deleteFunction(fn *fv1.Function) error { if fn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType != fv1.ExecutorTypeContainer { return nil } err := caaf.fnDelete(fn) if err != nil { err = errors.Wrapf(err, "error deleting kubernetes objects of function %v", fn.ObjectMeta) } return err } func (caaf *Container) fnCreate(fn *fv1.Function) (*fscache.FuncSvc, error) { cleanupFunc := func(ns string, name string) { err := caaf.cleanupContainer(ns, name) if err != nil { caaf.logger.Error("received error while cleaning function resources", zap.String("namespace", ns), zap.String("name", name)) } } objName := caaf.getObjName(fn) deployLabels := caaf.getDeployLabels(fn.ObjectMeta) deployAnnotations := caaf.getDeployAnnotations(fn.ObjectMeta) // to support backward compatibility, if the function was created in default ns, we fall back to creating the // deployment of the function in fission-function ns ns := caaf.namespace if fn.ObjectMeta.Namespace != metav1.NamespaceDefault { ns = fn.ObjectMeta.Namespace } // Envoy(istio-proxy) returns 404 directly before istio pilot // propagates latest Envoy-specific configuration. // Since Container waits for pods of deployment to be ready, // change the order of kubeObject creation (create service first, // then deployment) to take advantage of waiting time. 
svc, err := caaf.createOrGetSvc(fn, deployLabels, deployAnnotations, objName, ns) if err != nil { caaf.logger.Error("error creating service", zap.Error(err), zap.String("service", objName)) go cleanupFunc(ns, objName) return nil, errors.Wrapf(err, "error creating service %v", objName) } svcAddress := fmt.Sprintf("%v.%v", svc.Name, svc.Namespace) depl, err := caaf.createOrGetDeployment(fn, objName, deployLabels, deployAnnotations, ns) if err != nil { caaf.logger.Error("error creating deployment", zap.Error(err), zap.String("deployment", objName)) go cleanupFunc(ns, objName) return nil, errors.Wrapf(err, "error creating deployment %v", objName) } hpa, err := caaf.createOrGetHpa(objName, &fn.Spec.InvokeStrategy.ExecutionStrategy, depl, deployLabels, deployAnnotations) if err != nil { caaf.logger.Error("error creating HPA", zap.Error(err), zap.String("hpa", objName)) go cleanupFunc(ns, objName) return nil, errors.Wrapf(err, "error creating the HPA %v", objName) } kubeObjRefs := []apiv1.ObjectReference{ { //obj.TypeMeta.Kind does not work hence this, needs investigation and a fix Kind: "deployment", Name: depl.ObjectMeta.Name, APIVersion: depl.TypeMeta.APIVersion, Namespace: depl.ObjectMeta.Namespace, ResourceVersion: depl.ObjectMeta.ResourceVersion, UID: depl.ObjectMeta.UID, }, { Kind: "service", Name: svc.ObjectMeta.Name, APIVersion: svc.TypeMeta.APIVersion, Namespace: svc.ObjectMeta.Namespace, ResourceVersion: svc.ObjectMeta.ResourceVersion, UID: svc.ObjectMeta.UID, }, { Kind: "horizontalpodautoscaler", Name: hpa.ObjectMeta.Name, APIVersion: hpa.TypeMeta.APIVersion, Namespace: hpa.ObjectMeta.Namespace, ResourceVersion: hpa.ObjectMeta.ResourceVersion, UID: hpa.ObjectMeta.UID, }, } fsvc := &fscache.FuncSvc{ Name: objName, Function: &fn.ObjectMeta, Address: svcAddress, KubernetesObjects: kubeObjRefs, Executor: fv1.ExecutorTypeContainer, } _, err = caaf.fsCache.Add(*fsvc) if err != nil { caaf.logger.Error("error adding function to cache", zap.Error(err), zap.Any("function", fsvc.Function)) return fsvc, err } caaf.fsCache.IncreaseColdStarts(fn.ObjectMeta.Name, string(fn.ObjectMeta.UID)) return fsvc, nil } func (caaf *Container) updateFunction(oldFn *fv1.Function, newFn *fv1.Function) error { if oldFn.ObjectMeta.ResourceVersion == newFn.ObjectMeta.ResourceVersion { return nil } // Ignoring updates to functions which are not of Container type if newFn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType != fv1.ExecutorTypeContainer && oldFn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType != fv1.ExecutorTypeContainer { return nil } // Executor type is no longer Container if newFn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType != fv1.ExecutorTypeContainer && oldFn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType == fv1.ExecutorTypeContainer { caaf.logger.Info("function does not use new deployment executor anymore, deleting resources", zap.Any("function", newFn)) // IMP - pass the oldFn, as the new/modified function is not in cache return caaf.deleteFunction(oldFn) } // Executor type changed to Container from something else if oldFn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType != fv1.ExecutorTypeContainer && newFn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType == fv1.ExecutorTypeContainer { caaf.logger.Info("function type changed to Container, creating resources", zap.Any("old_function", oldFn.ObjectMeta), zap.Any("new_function", newFn.ObjectMeta)) _, err := caaf.createFunction(newFn) if err != nil { caaf.updateStatus(oldFn, err, "error changing the function's type to Container") } 
return err } if oldFn.Spec.InvokeStrategy != newFn.Spec.InvokeStrategy { // to support backward compatibility, if the function was created in default ns, we fall back to creating the // deployment of the function in fission-function ns, so cleaning up resources there ns := caaf.namespace if newFn.ObjectMeta.Namespace != metav1.NamespaceDefault { ns = newFn.ObjectMeta.Namespace } fsvc, err := caaf.fsCache.GetByFunctionUID(newFn.ObjectMeta.UID) if err != nil { err = errors.Wrapf(err, "error updating function due to unable to find function service cache: %v", oldFn) return err } hpa, err := caaf.getHpa(ns, fsvc.Name) if err != nil { caaf.updateStatus(oldFn, err, "error getting HPA while updating function") return err } hpaChanged := false if newFn.Spec.InvokeStrategy.ExecutionStrategy.MinScale != oldFn.Spec.InvokeStrategy.ExecutionStrategy.MinScale { replicas := int32(newFn.Spec.InvokeStrategy.ExecutionStrategy.MinScale) hpa.Spec.MinReplicas = &replicas hpaChanged = true } if newFn.Spec.InvokeStrategy.ExecutionStrategy.MaxScale != oldFn.Spec.InvokeStrategy.ExecutionStrategy.MaxScale { hpa.Spec.MaxReplicas = int32(newFn.Spec.InvokeStrategy.ExecutionStrategy.MaxScale) hpaChanged = true } if newFn.Spec.InvokeStrategy.ExecutionStrategy.TargetCPUPercent != oldFn.Spec.InvokeStrategy.ExecutionStrategy.TargetCPUPercent { targetCpupercent := int32(newFn.Spec.InvokeStrategy.ExecutionStrategy.TargetCPUPercent) hpa.Spec.TargetCPUUtilizationPercentage = &targetCpupercent hpaChanged = true } if hpaChanged { err := caaf.updateHpa(hpa) if err != nil { caaf.updateStatus(oldFn, err, "error updating HPA while updating function") return err } } } deployChanged := false // If length of slice has changed then no need to check individual elements if len(oldFn.Spec.Secrets) != len(newFn.Spec.Secrets) { deployChanged = true } else { for i, newSecret := range newFn.Spec.Secrets { if newSecret != oldFn.Spec.Secrets[i] { deployChanged = true break } } } if len(oldFn.Spec.ConfigMaps) != len(newFn.Spec.ConfigMaps) { deployChanged = true } else { for i, newConfig := range newFn.Spec.ConfigMaps { if newConfig != oldFn.Spec.ConfigMaps[i] { deployChanged = true break } } } if !reflect.DeepEqual(oldFn.Spec.PodSpec, newFn.Spec.PodSpec) { deployChanged = true } if deployChanged { return caaf.updateFuncDeployment(newFn) } return nil } func (caaf *Container) updateFuncDeployment(fn *fv1.Function) error { fsvc, err := caaf.fsCache.GetByFunctionUID(fn.ObjectMeta.UID) if err != nil { err = errors.Wrapf(err, "error updating function due to unable to find function service cache: %v", fn) return err } fnObjName := fsvc.Name deployLabels := caaf.getDeployLabels(fn.ObjectMeta) caaf.logger.Info("updating deployment due to function update", zap.String("deployment", fnObjName), zap.Any("function", fn.ObjectMeta.Name)) // to support backward compatibility, if the function was created in default ns, we fall back to creating the // deployment of the function in fission-function ns ns := caaf.namespace if fn.ObjectMeta.Namespace != metav1.NamespaceDefault { ns = fn.ObjectMeta.Namespace } existingDepl, err := caaf.kubernetesClient.AppsV1().Deployments(ns).Get(context.TODO(), fnObjName, metav1.GetOptions{}) if err != nil { return err } // the resource version inside function packageRef is changed, // so the content of fetchRequest in deployment cmd is different. // Therefore, the deployment update will trigger a rolling update. 
newDeployment, err := caaf.getDeploymentSpec(fn, existingDepl.Spec.Replicas, // use current replicas instead of minscale in the ExecutionStrategy. fnObjName, ns, deployLabels, caaf.getDeployAnnotations(fn.ObjectMeta)) if err != nil { caaf.updateStatus(fn, err, "failed to get new deployment spec while updating function") return err } err = caaf.updateDeployment(newDeployment, ns) if err != nil { caaf.updateStatus(fn, err, "failed to update deployment while updating function") return err } return nil } func (caaf *Container) fnDelete(fn *fv1.Function) error { multierr := &multierror.Error{} // GetByFunction uses resource version as part of cache key, however, // the resource version in function metadata will be changed when a function // is deleted and cause Container backend fails to delete the entry. // Use GetByFunctionUID instead of GetByFunction here to find correct // fsvc entry. fsvc, err := caaf.fsCache.GetByFunctionUID(fn.ObjectMeta.UID) if err != nil { err = errors.Wrap(err, fmt.Sprintf("fsvc not found in cache: %v", fn.ObjectMeta)) return err } objName := fsvc.Name _, err = caaf.fsCache.DeleteOld(fsvc, time.Second*0) if err != nil { multierr = multierror.Append(multierr, errors.Wrapf(err, "error deleting the function from cache")) } // to support backward compatibility, if the function was created in default ns, we fall back to creating the // deployment of the function in fission-function ns, so cleaning up resources there ns := caaf.namespace if fn.ObjectMeta.Namespace != metav1.NamespaceDefault { ns = fn.ObjectMeta.Namespace } err = caaf.cleanupContainer(ns, objName) multierr = multierror.Append(multierr, err) return multierr.ErrorOrNil() } // getObjName returns a unique name for kubernetes objects of function func (caaf *Container) getObjName(fn *fv1.Function) string { // use meta uuid of function, this ensure we always get the same name for the same function. uid := fn.ObjectMeta.UID[len(fn.ObjectMeta.UID)-17:] var functionMetadata string if len(fn.ObjectMeta.Name)+len(fn.ObjectMeta.Namespace) < 35 { functionMetadata = fn.ObjectMeta.Name + "-" + fn.ObjectMeta.Namespace } else { functionMetadata = fn.ObjectMeta.Name[:17] + "-" + fn.ObjectMeta.Namespace[:17] } // contructed name should be 63 characters long, as it is a valid k8s name // functionMetadata should be 35 characters long, as we take 17 characters from functionUid // with newdeploy 10 character prefix return strings.ToLower(fmt.Sprintf("container-%s-%s", functionMetadata, uid)) } func (caaf *Container) getDeployLabels(fnMeta metav1.ObjectMeta) map[string]string { deployLabels := maps.CopyStringMap(fnMeta.Labels) deployLabels[fv1.EXECUTOR_TYPE] = string(fv1.ExecutorTypeContainer) deployLabels[fv1.FUNCTION_NAME] = fnMeta.Name deployLabels[fv1.FUNCTION_NAMESPACE] = fnMeta.Namespace deployLabels[fv1.FUNCTION_UID] = string(fnMeta.UID) return deployLabels } func (caaf *Container) getDeployAnnotations(fnMeta metav1.ObjectMeta) map[string]string { deployAnnotations := maps.CopyStringMap(fnMeta.Annotations) deployAnnotations[fv1.EXECUTOR_INSTANCEID_LABEL] = caaf.instanceID deployAnnotations[fv1.FUNCTION_RESOURCE_VERSION] = fnMeta.ResourceVersion return deployAnnotations } // updateStatus is a function which updates status of update. 
// Current implementation only logs messages, in future it will update function status func (caaf *Container) updateStatus(fn *fv1.Function, err error, message string) { caaf.logger.Error("function status update", zap.Error(err), zap.Any("function", fn), zap.String("message", message)) } // idleObjectReaper reaps objects after certain idle time func (caaf *Container) idleObjectReaper() { pollSleep := 5 * time.Second for { time.Sleep(pollSleep) funcSvcs, err := caaf.fsCache.ListOld(pollSleep) if err != nil { caaf.logger.Error("error reaping idle pods", zap.Error(err)) continue } for i := range funcSvcs { fsvc := funcSvcs[i] if fsvc.Executor != fv1.ExecutorTypeContainer { continue } fn, err := caaf.fissionClient.CoreV1().Functions(fsvc.Function.Namespace).Get(context.TODO(), fsvc.Function.Name, metav1.GetOptions{}) if err != nil { // CaaF manager handles the function delete event and clean cache/kubeobjs itself, // so we ignore the not found error for functions with CaaF executor type here. if k8sErrs.IsNotFound(err) && fsvc.Executor == fv1.ExecutorTypeContainer { continue } caaf.logger.Error("error getting function", zap.Error(err), zap.String("function", fsvc.Function.Name)) continue } idlePodReapTime := caaf.defaultIdlePodReapTime if fn.Spec.IdleTimeout != nil { idlePodReapTime = time.Duration(*fn.Spec.IdleTimeout) * time.Second } if time.Since(fsvc.Atime) < idlePodReapTime { continue } go func() { deployObj := getDeploymentObj(fsvc.KubernetesObjects) if deployObj == nil { caaf.logger.Error("error finding function deployment", zap.Error(err), zap.String("function", fsvc.Function.Name)) return } currentDeploy, err := caaf.kubernetesClient.AppsV1(). Deployments(deployObj.Namespace).Get(context.TODO(), deployObj.Name, metav1.GetOptions{}) if err != nil { caaf.logger.Error("error getting function deployment", zap.Error(err), zap.String("function", fsvc.Function.Name)) return } minScale := int32(fn.Spec.InvokeStrategy.ExecutionStrategy.MinScale) // do nothing if the current replicas is already lower than minScale if *currentDeploy.Spec.Replicas <= minScale { return } err = caaf.scaleDeployment(deployObj.Namespace, deployObj.Name, minScale) if err != nil { caaf.logger.Error("error scaling down function deployment", zap.Error(err), zap.String("function", fsvc.Function.Name)) } }() } } } func getDeploymentObj(kubeobjs []apiv1.ObjectReference) *apiv1.ObjectReference { for _, kubeobj := range kubeobjs { switch strings.ToLower(kubeobj.Kind) { case "deployment": return &kubeobj } } return nil }
[ "\"ENABLE_ISTIO\"", "\"ENABLE_ISTIO\"", "\"RUNTIME_IMAGE_PULL_POLICY\"" ]
[]
[ "ENABLE_ISTIO", "RUNTIME_IMAGE_PULL_POLICY" ]
[]
["ENABLE_ISTIO", "RUNTIME_IMAGE_PULL_POLICY"]
go
2
0
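The environment list above records two constant-key lookups in this file, ENABLE_ISTIO and RUNTIME_IMAGE_PULL_POLICY. A minimal sketch of that read-with-default pattern, not the project's actual code (the getEnvDefault helper and the default values are purely illustrative):

package main

import (
    "fmt"
    "os"
)

// getEnvDefault returns the value of key, or fallback when the variable
// is unset or empty.
func getEnvDefault(key, fallback string) string {
    if v := os.Getenv(key); v != "" {
        return v
    }
    return fallback
}

func main() {
    // Constant keys matching the row's constarg list; the defaults are illustrative only.
    enableIstio := getEnvDefault("ENABLE_ISTIO", "false")
    pullPolicy := getEnvDefault("RUNTIME_IMAGE_PULL_POLICY", "IfNotPresent")
    fmt.Println("ENABLE_ISTIO:", enableIstio, "RUNTIME_IMAGE_PULL_POLICY:", pullPolicy)
}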
pkg/nas/nas.go
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nas

import (
    "github.com/container-storage-interface/spec/lib/go/csi"
    "github.com/kubernetes-csi/drivers/pkg/csi-common"
    "github.com/kubernetes-sigs/alibaba-cloud-csi-driver/pkg/utils"
    log "github.com/sirupsen/logrus"
    "os"
)

const (
    driverName = "nasplugin.csi.alibabacloud.com"
    // InstanceID is instance id
    InstanceID = "instance-id"
)

var (
    version = "1.0.0"
)

// NAS the NAS object
type NAS struct {
    driver           *csicommon.CSIDriver
    endpoint         string
    idServer         *csicommon.DefaultIdentityServer
    nodeServer       *nodeServer
    controllerServer csi.ControllerServer

    cap   []*csi.VolumeCapability_AccessMode
    cscap []*csi.ControllerServiceCapability
}

// NewDriver creates the identity/node/controller servers and the NAS driver
func NewDriver(nodeID, endpoint string) *NAS {
    log.Infof("Driver: %v version: %v", driverName, version)

    d := &NAS{}
    d.endpoint = endpoint

    if nodeID == "" {
        nodeID, _ = utils.GetMetaData(InstanceID)
        log.Infof("Use node id : %s", nodeID)
    }

    csiDriver := csicommon.NewCSIDriver(driverName, version, nodeID)
    csiDriver.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER})
    csiDriver.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
        csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
        csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME,
    })
    d.driver = csiDriver

    accessKeyID, accessSecret, accessToken := GetDefaultAK()
    c := newNasClient(accessKeyID, accessSecret, accessToken)
    region := os.Getenv("REGION_ID")
    if region == "" {
        region = GetMetaData(RegionTag)
    }
    d.controllerServer = NewControllerServer(d.driver, c, region)

    return d
}

// newNodeServer creates the csi node server
func newNodeServer(d *NAS) *nodeServer {
    return &nodeServer{
        DefaultNodeServer: csicommon.NewDefaultNodeServer(d.driver),
    }
}

// Run starts a new NodeServer
func (d *NAS) Run() {
    s := csicommon.NewNonBlockingGRPCServer()
    s.Start(d.endpoint,
        csicommon.NewDefaultIdentityServer(d.driver),
        d.controllerServer,
        newNodeServer(d))
    s.Wait()
}
[ "\"REGION_ID\"" ]
[]
[ "REGION_ID" ]
[]
["REGION_ID"]
go
1
0
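nas.go reads REGION_ID and only falls back to the instance metadata service when the variable is empty. A minimal sketch of that fallback; fetchRegionFromMetadata is a hypothetical stand-in for the driver's GetMetaData call and the returned value is a placeholder:

package main

import (
    "fmt"
    "os"
)

// fetchRegionFromMetadata is a hypothetical stand-in for the driver's
// metadata-service lookup; the real driver queries the ECS metadata endpoint.
func fetchRegionFromMetadata() string {
    return "cn-hangzhou" // placeholder value for illustration
}

// resolveRegion prefers the REGION_ID environment variable and falls back
// to the metadata service when it is unset or empty.
func resolveRegion() string {
    if region := os.Getenv("REGION_ID"); region != "" {
        return region
    }
    return fetchRegionFromMetadata()
}

func main() {
    fmt.Println("region:", resolveRegion())
}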
compiler/tests/04_and4_dec_test.py
#!/usr/bin/env python3
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import unittest
from testutils import *
import sys,os
sys.path.append(os.getenv("OPENRAM_HOME"))
import globals
from globals import OPTS
from sram_factory import factory
import debug


@unittest.skip("SKIPPING 04_and4_dec_test")
class and4_dec_test(openram_test):

    def runTest(self):
        config_file = "{}/tests/configs/config".format(os.getenv("OPENRAM_HOME"))
        globals.init_openram(config_file)

        global verify
        import verify

        OPTS.num_rw_ports = 1
        OPTS.num_r_ports = 1
        OPTS.num_w_ports = 0

        globals.setup_bitcell()

        debug.info(2, "Testing and4_dec gate")
        a = factory.create(module_type="and4_dec")
        self.local_check(a)
        globals.end_openram()


# instantiate a copy of the class to actually run the test
if __name__ == "__main__":
    (OPTS, args) = globals.parse_args()
    del sys.argv[1:]
    header(__file__, OPTS.tech_name)
    unittest.main(testRunner=debugTestRunner())
[]
[]
[ "OPENRAM_HOME" ]
[]
["OPENRAM_HOME"]
python
1
0
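The test above derives both its import path and its config file location from OPENRAM_HOME. Sketched in Go for consistency with the other examples in this document (the project itself is Python); the explicit error on an unset variable is added only for illustration, since the Python test passes the value straight through:

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// openramConfigPath builds the test config path from OPENRAM_HOME.
func openramConfigPath() (string, error) {
    home := os.Getenv("OPENRAM_HOME")
    if home == "" {
        return "", fmt.Errorf("OPENRAM_HOME is not set")
    }
    return filepath.Join(home, "tests", "configs", "config"), nil
}

func main() {
    path, err := openramConfigPath()
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fmt.Println("config:", path)
}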
test/src/assemblysequenceapi/test/AssemblySequenceAPIServerTest.java
package assemblysequenceapi.test; import java.io.File; import java.net.URL; import java.util.Arrays; import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import junit.framework.Assert; import org.apache.commons.io.FileUtils; import org.ini4j.Ini; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import assemblysequenceapi.AssemblySequenceAPIServer; import assemblysequenceapi.FastaReader; import assemblysequenceapi.GetDNASequencesOutput; import assemblysequenceapi.GetDNASequencesParams; import assemblyutil.AssemblyUtilClient; import assemblyutil.FastaAssemblyFile; import assemblyutil.SaveAssemblyParams; import us.kbase.auth.AuthConfig; import us.kbase.auth.AuthToken; import us.kbase.auth.ConfigurableAuthService; import us.kbase.common.service.JsonServerSyslog; import us.kbase.common.service.RpcContext; import us.kbase.common.service.Tuple4; import us.kbase.common.service.UObject; import us.kbase.kbasegenomes.Contig; import us.kbase.kbasegenomes.ContigSet; import us.kbase.kbasegenomes.Feature; import us.kbase.kbasegenomes.Genome; import us.kbase.workspace.CreateWorkspaceParams; import us.kbase.workspace.ObjectSaveData; import us.kbase.workspace.ProvenanceAction; import us.kbase.workspace.SaveObjectsParams; import us.kbase.workspace.WorkspaceClient; import us.kbase.workspace.WorkspaceIdentity; public class AssemblySequenceAPIServerTest { private static AuthToken token = null; private static Map<String, String> config = null; private static WorkspaceClient wsClient = null; private static String wsName = null; private static AssemblySequenceAPIServer impl = null; @BeforeClass public static void init() throws Exception { String configFilePath = System.getenv("KB_DEPLOYMENT_CONFIG"); File deploy = new File(configFilePath); Ini ini = new Ini(deploy); config = ini.get("AssemblySequenceAPI"); // Token validation String authUrl = config.get("auth-service-url"); String authUrlInsecure = config.get("auth-service-url-allow-insecure"); ConfigurableAuthService authService = new ConfigurableAuthService( new AuthConfig().withKBaseAuthServerURL(new URL(authUrl)) .withAllowInsecureURLs("true".equals(authUrlInsecure))); token = authService.validateToken(System.getenv("KB_AUTH_TOKEN")); wsClient = new WorkspaceClient(new URL(config.get("workspace-url")), token); wsClient.setIsInsecureHttpConnectionAllowed(true); // These lines are necessary because we don't want to start linux syslog bridge service JsonServerSyslog.setStaticUseSyslog(false); JsonServerSyslog.setStaticMlogFile(new File(config.get("scratch"), "test.log").getAbsolutePath()); impl = new AssemblySequenceAPIServer(); } private static String getWsName() throws Exception { if (wsName == null) { long suffix = System.currentTimeMillis(); wsName = "test_AssemblySequenceAPI_" + suffix; wsClient.createWorkspace(new CreateWorkspaceParams().withWorkspace(wsName)); } return wsName; } private static RpcContext getContext() { return new RpcContext().withProvenance(Arrays.asList(new ProvenanceAction() .withService("AssemblySequenceAPI").withMethod("please_never_use_it_in_production") .withMethodParams(new ArrayList<UObject>()))); } @AfterClass public static void cleanup() { if (wsName != null) { try { wsClient.deleteWorkspace(new WorkspaceIdentity().withWorkspace(wsName)); System.out.println("Test workspace was deleted"); } catch (Exception ex) { ex.printStackTrace(); } } } @Test public void testGetDnaSequencesForAssembly() throws Exception { File tempFastaFile = new 
File("/kb/module/work/tmp/GCF_000002945.fa"); FileUtils.copyFile(new File("/kb/module/test/data/GCF_000002945/assembly.fa"), tempFastaFile); URL callbackUrl = new URL(System.getenv("SDK_CALLBACK_URL")); AssemblyUtilClient auCl = new AssemblyUtilClient(callbackUrl, token); auCl.setIsInsecureHttpConnectionAllowed(true); String assemblyName = "Assembly.1"; auCl.saveAssemblyFromFasta(new SaveAssemblyParams().withAssemblyName(assemblyName).withWorkspaceName(getWsName()) .withFile(new FastaAssemblyFile().withPath(tempFastaFile.getAbsolutePath()))); String assemblyRef = getWsName() + "/" + assemblyName; Map<String, List<Tuple4 <String, Long, String, Long>>> requestedFeatures = new LinkedHashMap<String, List<Tuple4 <String, Long, String, Long>>>(); Genome genome = new UObject(new File("/kb/module/test/data/GCF_000002945/genome.json")).asClassInstance(Genome.class); for (Feature ft : genome.getFeatures()) { if (ft.getDnaSequence() == null) { continue; } requestedFeatures.put(ft.getId(), ft.getLocation()); } System.out.println("Features: " + requestedFeatures.size()); GetDNASequencesOutput ret = impl.getDnaSequences(new GetDNASequencesParams().withAssemblyRef(assemblyRef) .withRequestedFeatures(requestedFeatures), token, getContext()); Map<String, String> dnaSequences = ret.getDnaSequences(); for (Feature ft : genome.getFeatures()) { if (!requestedFeatures.containsKey(ft.getId())) { continue; } String origDnaSeq = ft.getDnaSequence(); String newDnaSeq = dnaSequences.get(ft.getId()); Assert.assertEquals("DNA is different for " + ft.getId(), origDnaSeq, newDnaSeq); } } @Test public void testGetDnaSequencesForContigSet() throws Exception { File fastaFile = new File("/kb/module/test/data/GCF_000002945/assembly.fa"); String csName = "ContigSet.1"; List<Contig> contigs = new ArrayList<Contig>(); FastaReader fr = new FastaReader(fastaFile); while (true) { String[] entry = fr.read(); if (entry == null) break; contigs.add(new Contig().withId(entry[0]).withSequence(entry[1])); } ContigSet csObj = new ContigSet().withContigs(contigs).withId(csName).withMd5("") .withSource("KBase").withSourceId("KBase"); wsClient.saveObjects(new SaveObjectsParams().withWorkspace(getWsName()).withObjects(Arrays.asList( new ObjectSaveData().withType("KBaseGenomes.ContigSet").withName(csName) .withData(new UObject(csObj))))); String csRef = getWsName() + "/" + csName; Map<String, List<Tuple4 <String, Long, String, Long>>> requestedFeatures = new LinkedHashMap<String, List<Tuple4 <String, Long, String, Long>>>(); Genome genome = new UObject(new File("/kb/module/test/data/GCF_000002945/genome.json")).asClassInstance(Genome.class); for (Feature ft : genome.getFeatures()) { if (ft.getDnaSequence() == null) { continue; } requestedFeatures.put(ft.getId(), ft.getLocation()); } System.out.println("Features: " + requestedFeatures.size()); GetDNASequencesOutput ret = impl.getDnaSequences(new GetDNASequencesParams().withContigsetRef(csRef) .withRequestedFeatures(requestedFeatures), token, getContext()); Map<String, String> dnaSequences = ret.getDnaSequences(); for (Feature ft : genome.getFeatures()) { if (!requestedFeatures.containsKey(ft.getId())) { continue; } String origDnaSeq = ft.getDnaSequence().toUpperCase(); String newDnaSeq = dnaSequences.get(ft.getId()); Assert.assertEquals("DNA is different for " + ft.getId(), origDnaSeq, newDnaSeq); } } }
[ "\"KB_DEPLOYMENT_CONFIG\"", "\"KB_AUTH_TOKEN\"", "\"SDK_CALLBACK_URL\"" ]
[]
[ "SDK_CALLBACK_URL", "KB_AUTH_TOKEN", "KB_DEPLOYMENT_CONFIG" ]
[]
["SDK_CALLBACK_URL", "KB_AUTH_TOKEN", "KB_DEPLOYMENT_CONFIG"]
java
3
0
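The Java test resolves KB_DEPLOYMENT_CONFIG, KB_AUTH_TOKEN, and SDK_CALLBACK_URL during setup, and a missing value only surfaces later. A small Go sketch of collecting required settings up front and reporting every missing name at once; requireEnv is an illustrative helper, not KBase tooling:

package main

import (
    "fmt"
    "os"
    "strings"
)

// requireEnv returns the values of the given variables and an error naming
// every variable that is unset, so a test run fails with one clear message.
func requireEnv(keys ...string) (map[string]string, error) {
    values := make(map[string]string, len(keys))
    var missing []string
    for _, k := range keys {
        v, ok := os.LookupEnv(k)
        if !ok || v == "" {
            missing = append(missing, k)
            continue
        }
        values[k] = v
    }
    if len(missing) > 0 {
        return nil, fmt.Errorf("missing required environment variables: %s", strings.Join(missing, ", "))
    }
    return values, nil
}

func main() {
    cfg, err := requireEnv("KB_DEPLOYMENT_CONFIG", "KB_AUTH_TOKEN", "SDK_CALLBACK_URL")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fmt.Println("deployment config:", cfg["KB_DEPLOYMENT_CONFIG"])
}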
system_tests/conftest.py
import os

import pytest

from sap.aibus.dar.client.data_manager_client import DataManagerClient
from sap.aibus.dar.client.inference_client import InferenceClient
from sap.aibus.dar.client.model_manager_client import ModelManagerClient
from sap.aibus.dar.client.util.credentials import OnlineCredentialsSource
from sap.aibus.dar.client.workflow.model import ModelCreator


@pytest.fixture()
def dar_url():
    return os.environ["DAR_URL"]


@pytest.fixture()
def dar_client_id():
    return os.environ["DAR_CLIENT_ID"]


@pytest.fixture()
def dar_client_secret():
    return os.environ["DAR_CLIENT_SECRET"]


@pytest.fixture()
def dar_uaa_url():
    return os.environ["DAR_AUTH_URL"]


# For the following fixtures, the parameters to the functions
# will be provided by existing fixtures of the same name!


@pytest.fixture()
def credentials_source(dar_client_id, dar_client_secret, dar_uaa_url):
    return OnlineCredentialsSource(dar_uaa_url, dar_client_id, dar_client_secret)


@pytest.fixture()
def data_manager_client(dar_url, credentials_source):
    client = DataManagerClient(dar_url, credentials_source)
    return client


@pytest.fixture()
def model_manager_client(dar_url, credentials_source):
    client = ModelManagerClient(dar_url, credentials_source)
    return client


@pytest.fixture()
def inference_client(dar_url, credentials_source):
    client = InferenceClient(dar_url, credentials_source)
    return client


@pytest.fixture()
def model_creator(dar_url, credentials_source):
    create_model = ModelCreator(dar_url, credentials_source)
    return create_model
[]
[]
[ "DAR_CLIENT_SECRET", "DAR_CLIENT_ID", "DAR_AUTH_URL", "DAR_URL" ]
[]
["DAR_CLIENT_SECRET", "DAR_CLIENT_ID", "DAR_AUTH_URL", "DAR_URL"]
python
4
0
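The fixtures above read four required variables straight from os.environ, so a missing one surfaces as a KeyError at fixture time. A comparable guard written as a Go test helper, only as a sketch; darSettings and loadDARSettings are hypothetical names, and skipping rather than failing is a choice made here for illustration:

package systemtest

import (
    "os"
    "testing"
)

// darSettings mirrors the pytest fixtures: each field corresponds to one
// required environment variable for the system tests.
type darSettings struct {
    URL          string
    ClientID     string
    ClientSecret string
    AuthURL      string
}

// loadDARSettings skips the calling test when any credential is absent,
// instead of raising an error the way os.environ[...] does in the fixtures.
func loadDARSettings(t *testing.T) darSettings {
    t.Helper()
    get := func(key string) string {
        v := os.Getenv(key)
        if v == "" {
            t.Skipf("%s is not set; skipping system test", key)
        }
        return v
    }
    return darSettings{
        URL:          get("DAR_URL"),
        ClientID:     get("DAR_CLIENT_ID"),
        ClientSecret: get("DAR_CLIENT_SECRET"),
        AuthURL:      get("DAR_AUTH_URL"),
    }
}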
grpc-server/grpc_server.go
package grpcserver import ( "context" "crypto/tls" "io/ioutil" "net" "os" "path/filepath" "strings" "sync" "time" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/packethost/pkg/log" "github.com/pkg/errors" "github.com/tinkerbell/tink/db" "github.com/tinkerbell/tink/protos/hardware" "github.com/tinkerbell/tink/protos/template" "github.com/tinkerbell/tink/protos/workflow" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/reflection" ) // Server is the gRPC server for tinkerbell. type server struct { cert []byte modT time.Time db db.Database quit <-chan struct{} dbLock sync.RWMutex dbReady bool watchLock sync.RWMutex watch map[string]chan string logger log.Logger } type ConfigGRPCServer struct { Facility string TLSCert string GRPCAuthority string DB db.Database } // SetupGRPC setup and return a gRPC server. func SetupGRPC(ctx context.Context, logger log.Logger, config *ConfigGRPCServer, errCh chan<- error) ([]byte, time.Time) { params := []grpc.ServerOption{ grpc_middleware.WithUnaryServerChain(grpc_prometheus.UnaryServerInterceptor, otelgrpc.UnaryServerInterceptor()), grpc_middleware.WithStreamServerChain(grpc_prometheus.StreamServerInterceptor, otelgrpc.StreamServerInterceptor()), } server := &server{ db: config.DB, dbReady: true, logger: logger, } cert := config.TLSCert switch cert { case "insecure": // server.cert *must* be nil, which it is because that is the default value // server.modT doesn't matter case "": tlsCert, certPEM, modT := getCerts(config.Facility, logger) params = append(params, grpc.Creds(credentials.NewServerTLSFromCert(&tlsCert))) server.cert = certPEM server.modT = modT default: server.cert = []byte(cert) server.modT = time.Now() } // register servers s := grpc.NewServer(params...) template.RegisterTemplateServiceServer(s, server) workflow.RegisterWorkflowServiceServer(s, server) hardware.RegisterHardwareServiceServer(s, server) reflection.Register(s) grpc_prometheus.Register(s) go func() { lis, err := net.Listen("tcp", config.GRPCAuthority) if err != nil { err = errors.Wrap(err, "failed to listen") logger.Error(err) panic(err) } errCh <- s.Serve(lis) }() go func() { <-ctx.Done() s.GracefulStop() }() return server.cert, server.modT } func getCerts(facility string, logger log.Logger) (tls.Certificate, []byte, time.Time) { var ( certPEM []byte modT time.Time ) certsDir := os.Getenv("TINKERBELL_CERTS_DIR") if certsDir == "" { certsDir = "/certs/" + facility } if !strings.HasSuffix(certsDir, "/") { certsDir += "/" } certFile, err := os.Open(filepath.Clean(certsDir + "bundle.pem")) if err != nil { err = errors.Wrap(err, "failed to open TLS cert") logger.Error(err) panic(err) } if stat, err := certFile.Stat(); err != nil { err = errors.Wrap(err, "failed to stat TLS cert") logger.Error(err) panic(err) } else { modT = stat.ModTime() } certPEM, err = ioutil.ReadAll(certFile) if err != nil { err = errors.Wrap(err, "failed to read TLS cert") logger.Error(err) panic(err) } keyPEM, err := ioutil.ReadFile(filepath.Clean(certsDir + "server-key.pem")) if err != nil { err = errors.Wrap(err, "failed to read TLS key") logger.Error(err) panic(err) } cert, err := tls.X509KeyPair(certPEM, keyPEM) if err != nil { err = errors.Wrap(err, "failed to ingest TLS files") logger.Error(err) panic(err) } return cert, certPEM, modT }
[ "\"TINKERBELL_CERTS_DIR\"" ]
[]
[ "TINKERBELL_CERTS_DIR" ]
[]
["TINKERBELL_CERTS_DIR"]
go
1
0
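getCerts resolves the certificate directory from TINKERBELL_CERTS_DIR, defaults to /certs/<facility>, and normalizes the trailing slash. The same resolution distilled into a standalone sketch; the facility name in main is just an example:

package main

import (
    "fmt"
    "os"
    "strings"
)

// certsDir mirrors the directory resolution in getCerts: prefer
// TINKERBELL_CERTS_DIR, fall back to /certs/<facility>, and always
// return a path with a trailing slash.
func certsDir(facility string) string {
    dir := os.Getenv("TINKERBELL_CERTS_DIR")
    if dir == "" {
        dir = "/certs/" + facility
    }
    if !strings.HasSuffix(dir, "/") {
        dir += "/"
    }
    return dir
}

func main() {
    fmt.Println(certsDir("onprem")) // "onprem" is just an example facility name
}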
tests/framework/integration/cluster.go
// Copyright 2016 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package integration import ( "context" "crypto/tls" "errors" "fmt" "io" "log" "math/rand" "net" "net/http" "net/http/httptest" "os" "reflect" "sort" "strings" "sync" "sync/atomic" "time" pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/client/pkg/v3/testutil" "go.etcd.io/etcd/client/pkg/v3/tlsutil" "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/pkg/v3/grpc_testing" "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/server/v3/config" "go.etcd.io/etcd/server/v3/embed" "go.etcd.io/etcd/server/v3/etcdserver" "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" "go.etcd.io/etcd/server/v3/etcdserver/api/v2http" "go.etcd.io/etcd/server/v3/etcdserver/api/v3client" "go.etcd.io/etcd/server/v3/etcdserver/api/v3election" epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock" lockpb "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc" "go.etcd.io/etcd/server/v3/verify" "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/soheilhy/cmux" "go.uber.org/zap" "golang.org/x/crypto/bcrypt" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" ) const ( // RequestWaitTimeout is the time duration to wait for a request to go through or detect leader loss. RequestWaitTimeout = 5 * time.Second TickDuration = 10 * time.Millisecond RequestTimeout = 20 * time.Second ClusterName = "etcd" BasePort = 21000 URLScheme = "unix" URLSchemeTLS = "unixs" BaseGRPCPort = 30000 ) var ( ElectionTicks = 10 // LocalListenCount integration test uses unique ports, counting up, to listen for each // member, ensuring restarted members can listen on the same port again. 
LocalListenCount = int32(0) TestTLSInfo = transport.TLSInfo{ KeyFile: MustAbsPath("../fixtures/server.key.insecure"), CertFile: MustAbsPath("../fixtures/server.crt"), TrustedCAFile: MustAbsPath("../fixtures/ca.crt"), ClientCertAuth: true, } TestTLSInfoWithSpecificUsage = transport.TLSInfo{ KeyFile: MustAbsPath("../fixtures/server-serverusage.key.insecure"), CertFile: MustAbsPath("../fixtures/server-serverusage.crt"), ClientKeyFile: MustAbsPath("../fixtures/client-clientusage.key.insecure"), ClientCertFile: MustAbsPath("../fixtures/client-clientusage.crt"), TrustedCAFile: MustAbsPath("../fixtures/ca.crt"), ClientCertAuth: true, } TestTLSInfoIP = transport.TLSInfo{ KeyFile: MustAbsPath("../fixtures/server-ip.key.insecure"), CertFile: MustAbsPath("../fixtures/server-ip.crt"), TrustedCAFile: MustAbsPath("../fixtures/ca.crt"), ClientCertAuth: true, } TestTLSInfoExpired = transport.TLSInfo{ KeyFile: MustAbsPath("./fixtures-expired/server.key.insecure"), CertFile: MustAbsPath("./fixtures-expired/server.crt"), TrustedCAFile: MustAbsPath("./fixtures-expired/ca.crt"), ClientCertAuth: true, } TestTLSInfoExpiredIP = transport.TLSInfo{ KeyFile: MustAbsPath("./fixtures-expired/server-ip.key.insecure"), CertFile: MustAbsPath("./fixtures-expired/server-ip.crt"), TrustedCAFile: MustAbsPath("./fixtures-expired/ca.crt"), ClientCertAuth: true, } DefaultTokenJWT = fmt.Sprintf("jwt,pub-key=%s,priv-key=%s,sign-method=RS256,ttl=1s", MustAbsPath("../fixtures/server.crt"), MustAbsPath("../fixtures/server.key.insecure")) // UniqueNumber is used to generate unique port numbers // Should only be accessed via atomic package methods. UniqueNumber int32 ) type ClusterConfig struct { Size int PeerTLS *transport.TLSInfo ClientTLS *transport.TLSInfo DiscoveryURL string AuthToken string QuotaBackendBytes int64 MaxTxnOps uint MaxRequestBytes uint SnapshotCount uint64 SnapshotCatchUpEntries uint64 GRPCKeepAliveMinTime time.Duration GRPCKeepAliveInterval time.Duration GRPCKeepAliveTimeout time.Duration ClientMaxCallSendMsgSize int ClientMaxCallRecvMsgSize int // UseIP is true to use only IP for gRPC requests. UseIP bool // UseBridge adds bridge between client and grpc server. Should be used in tests that // want to manipulate connection or require connection not breaking despite server stop/restart. UseBridge bool // UseTCP configures server listen on tcp socket. If disabled unix socket is used. 
UseTCP bool EnableLeaseCheckpoint bool LeaseCheckpointInterval time.Duration LeaseCheckpointPersist bool WatchProgressNotifyInterval time.Duration ExperimentalMaxLearners int StrictReconfigCheck bool CorruptCheckTime time.Duration } type Cluster struct { Cfg *ClusterConfig Members []*Member LastMemberNum int mu sync.Mutex clusterClient *clientv3.Client } func SchemeFromTLSInfo(tls *transport.TLSInfo) string { if tls == nil { return URLScheme } return URLSchemeTLS } func (c *Cluster) fillClusterForMembers() error { if c.Cfg.DiscoveryURL != "" { // Cluster will be discovered return nil } addrs := make([]string, 0) for _, m := range c.Members { scheme := SchemeFromTLSInfo(m.PeerTLSInfo) for _, l := range m.PeerListeners { addrs = append(addrs, fmt.Sprintf("%s=%s://%s", m.Name, scheme, l.Addr().String())) } } clusterStr := strings.Join(addrs, ",") var err error for _, m := range c.Members { m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr) if err != nil { return err } } return nil } func (c *Cluster) Launch(t testutil.TB) { t.Logf("Launching new cluster...") errc := make(chan error) for _, m := range c.Members { // Members are launched in separate goroutines because if they boot // using discovery url, they have to wait for others to register to continue. go func(m *Member) { errc <- m.Launch() }(m) } for range c.Members { if err := <-errc; err != nil { c.Terminate(t) t.Fatalf("error setting up member: %v", err) } } // wait Cluster to be stable to receive future client requests c.WaitMembersMatch(t, c.ProtoMembers()) c.waitVersion() for _, m := range c.Members { t.Logf(" - %v -> %v (%v)", m.Name, m.ID(), m.GRPCURL()) } } // ProtoMembers returns a list of all active members as client.Members func (c *Cluster) ProtoMembers() []*pb.Member { ms := []*pb.Member{} for _, m := range c.Members { pScheme := SchemeFromTLSInfo(m.PeerTLSInfo) cScheme := SchemeFromTLSInfo(m.ClientTLSInfo) cm := &pb.Member{Name: m.Name} for _, ln := range m.PeerListeners { cm.PeerURLs = append(cm.PeerURLs, pScheme+"://"+ln.Addr().String()) } for _, ln := range m.ClientListeners { cm.ClientURLs = append(cm.ClientURLs, cScheme+"://"+ln.Addr().String()) } ms = append(ms, cm) } return ms } func (c *Cluster) mustNewMember(t testutil.TB) *Member { memberNumber := c.LastMemberNum c.LastMemberNum++ m := MustNewMember(t, MemberConfig{ Name: fmt.Sprintf("m%v", memberNumber), MemberNumber: memberNumber, AuthToken: c.Cfg.AuthToken, PeerTLS: c.Cfg.PeerTLS, ClientTLS: c.Cfg.ClientTLS, QuotaBackendBytes: c.Cfg.QuotaBackendBytes, MaxTxnOps: c.Cfg.MaxTxnOps, MaxRequestBytes: c.Cfg.MaxRequestBytes, SnapshotCount: c.Cfg.SnapshotCount, SnapshotCatchUpEntries: c.Cfg.SnapshotCatchUpEntries, GrpcKeepAliveMinTime: c.Cfg.GRPCKeepAliveMinTime, GrpcKeepAliveInterval: c.Cfg.GRPCKeepAliveInterval, GrpcKeepAliveTimeout: c.Cfg.GRPCKeepAliveTimeout, ClientMaxCallSendMsgSize: c.Cfg.ClientMaxCallSendMsgSize, ClientMaxCallRecvMsgSize: c.Cfg.ClientMaxCallRecvMsgSize, UseIP: c.Cfg.UseIP, UseBridge: c.Cfg.UseBridge, UseTCP: c.Cfg.UseTCP, EnableLeaseCheckpoint: c.Cfg.EnableLeaseCheckpoint, LeaseCheckpointInterval: c.Cfg.LeaseCheckpointInterval, LeaseCheckpointPersist: c.Cfg.LeaseCheckpointPersist, WatchProgressNotifyInterval: c.Cfg.WatchProgressNotifyInterval, ExperimentalMaxLearners: c.Cfg.ExperimentalMaxLearners, StrictReconfigCheck: c.Cfg.StrictReconfigCheck, CorruptCheckTime: c.Cfg.CorruptCheckTime, }) m.DiscoveryURL = c.Cfg.DiscoveryURL return m } // addMember return PeerURLs of the added member. 
func (c *Cluster) addMember(t testutil.TB) types.URLs { m := c.mustNewMember(t) scheme := SchemeFromTLSInfo(c.Cfg.PeerTLS) // send add request to the Cluster var err error for i := 0; i < len(c.Members); i++ { peerURL := scheme + "://" + m.PeerListeners[0].Addr().String() if err = c.AddMemberByURL(t, c.Members[i].Client, peerURL); err == nil { break } } if err != nil { t.Fatalf("add member failed on all members error: %v", err) } m.InitialPeerURLsMap = types.URLsMap{} for _, mm := range c.Members { m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs } m.InitialPeerURLsMap[m.Name] = m.PeerURLs m.NewCluster = false if err := m.Launch(); err != nil { t.Fatal(err) } c.Members = append(c.Members, m) // wait Cluster to be stable to receive future client requests c.WaitMembersMatch(t, c.ProtoMembers()) return m.PeerURLs } func (c *Cluster) AddMemberByURL(t testutil.TB, cc *clientv3.Client, peerURL string) error { ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) _, err := cc.MemberAdd(ctx, []string{peerURL}) cancel() if err != nil { return err } // wait for the add node entry applied in the Cluster members := append(c.ProtoMembers(), &pb.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}}) c.WaitMembersMatch(t, members) return nil } // AddMember return PeerURLs of the added member. func (c *Cluster) AddMember(t testutil.TB) types.URLs { return c.addMember(t) } func (c *Cluster) RemoveMember(t testutil.TB, cc *clientv3.Client, id uint64) error { // send remove request to the Cluster ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) _, err := cc.MemberRemove(ctx, id) cancel() if err != nil { return err } newMembers := make([]*Member, 0) for _, m := range c.Members { if uint64(m.Server.ID()) != id { newMembers = append(newMembers, m) } else { m.Client.Close() select { case <-m.Server.StopNotify(): m.Terminate(t) // 1s stop delay + election timeout + 1s disk and network delay + connection write timeout // TODO: remove connection write timeout by selecting on http response closeNotifier // blocking on https://github.com/golang/go/issues/9524 case <-time.After(time.Second + time.Duration(ElectionTicks)*TickDuration + time.Second + rafthttp.ConnWriteTimeout): t.Fatalf("failed to remove member %s in time", m.Server.ID()) } } } c.Members = newMembers c.WaitMembersMatch(t, c.ProtoMembers()) return nil } func (c *Cluster) WaitMembersMatch(t testutil.TB, membs []*pb.Member) { ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) defer cancel() for _, m := range c.Members { cc := ToGRPC(m.Client) select { case <-m.Server.StopNotify(): continue default: } for { resp, err := cc.Cluster.MemberList(ctx, &pb.MemberListRequest{Linearizable: false}) if errors.Is(err, context.DeadlineExceeded) { t.Fatal(err) } if err != nil { continue } if isMembersEqual(resp.Members, membs) { break } time.Sleep(TickDuration) } } } // WaitLeader returns index of the member in c.Members that is leader // or fails the test (if not established in 30min). 
func (c *Cluster) WaitLeader(t testutil.TB) int { return c.WaitMembersForLeader(t, c.Members) } // WaitMembersForLeader waits until given members agree on the same leader, // and returns its 'index' in the 'membs' list func (c *Cluster) WaitMembersForLeader(t testutil.TB, membs []*Member) int { t.Logf("WaitMembersForLeader") ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() l := 0 for l = c.waitMembersForLeader(ctx, t, membs); l < 0; { if ctx.Err() != nil { t.Fatal("WaitLeader FAILED: %v", ctx.Err()) } } t.Logf("WaitMembersForLeader succeeded. Cluster leader index: %v", l) // TODO: Consider second pass check as sometimes leadership is lost // soon after election: // // We perform multiple attempts, as some-times just after successful WaitLLeader // there is a race and leadership is quickly lost: // - MsgAppResp message with higher term from 2acc3d3b521981 [term: 3] {"member": "m0"} // - 9903a56eaf96afac became follower at term 3 {"member": "m0"} // - 9903a56eaf96afac lost leader 9903a56eaf96afac at term 3 {"member": "m0"} return l } // WaitMembersForLeader waits until given members agree on the same leader, // and returns its 'index' in the 'membs' list func (c *Cluster) waitMembersForLeader(ctx context.Context, t testutil.TB, membs []*Member) int { possibleLead := make(map[uint64]bool) var lead uint64 for _, m := range membs { possibleLead[uint64(m.Server.ID())] = true } cc, err := c.ClusterClient() if err != nil { t.Fatal(err) } // ensure leader is up via linearizable get for { ctx, cancel := context.WithTimeout(ctx, 10*TickDuration+time.Second) _, err := cc.Get(ctx, "0") cancel() if err == nil || strings.Contains(err.Error(), "Key not found") { break } } for lead == 0 || !possibleLead[lead] { lead = 0 for _, m := range membs { select { case <-m.Server.StopNotify(): continue default: } if lead != 0 && lead != m.Server.Lead() { lead = 0 time.Sleep(10 * TickDuration) break } lead = m.Server.Lead() } } for i, m := range membs { if uint64(m.Server.ID()) == lead { t.Logf("waitMembersForLeader found leader. Member: %v lead: %x", i, lead) return i } } t.Logf("waitMembersForLeader failed (-1)") return -1 } func (c *Cluster) WaitNoLeader() { c.WaitMembersNoLeader(c.Members) } // WaitMembersNoLeader waits until given members lose leader. func (c *Cluster) WaitMembersNoLeader(membs []*Member) { noLeader := false for !noLeader { noLeader = true for _, m := range membs { select { case <-m.Server.StopNotify(): continue default: } if m.Server.Lead() != 0 { noLeader = false time.Sleep(10 * TickDuration) break } } } } func (c *Cluster) waitVersion() { for _, m := range c.Members { for { if m.Server.ClusterVersion() != nil { break } time.Sleep(TickDuration) } } } // isMembersEqual checks whether two members equal except ID field. // The given wmembs should always set ID field to empty string. 
func isMembersEqual(membs []*pb.Member, wmembs []*pb.Member) bool { sort.Sort(SortableMemberSliceByPeerURLs(membs)) sort.Sort(SortableMemberSliceByPeerURLs(wmembs)) return cmp.Equal(membs, wmembs, cmpopts.IgnoreFields(pb.Member{}, "ID", "PeerURLs", "ClientURLs")) } func newLocalListener(t testutil.TB) net.Listener { c := atomic.AddInt32(&LocalListenCount, 1) // Go 1.8+ allows only numbers in port addr := fmt.Sprintf("127.0.0.1:%05d%05d", c+BasePort, os.Getpid()) return NewListenerWithAddr(t, addr) } func NewListenerWithAddr(t testutil.TB, addr string) net.Listener { t.Logf("Creating listener with addr: %v", addr) l, err := transport.NewUnixListener(addr) if err != nil { t.Fatal(err) } return l } type Member struct { config.ServerConfig UniqNumber int MemberNumber int PeerListeners, ClientListeners []net.Listener GrpcListener net.Listener // PeerTLSInfo enables peer TLS when set PeerTLSInfo *transport.TLSInfo // ClientTLSInfo enables client TLS when set ClientTLSInfo *transport.TLSInfo DialOptions []grpc.DialOption RaftHandler *testutil.PauseableHandler Server *etcdserver.EtcdServer ServerClosers []func() GrpcServerOpts []grpc.ServerOption GrpcServer *grpc.Server GrpcURL string GrpcBridge *bridge // ServerClient is a clientv3 that directly calls the etcdserver. ServerClient *clientv3.Client // Client is a clientv3 that communicates via socket, either UNIX or TCP. Client *clientv3.Client KeepDataDirTerminate bool ClientMaxCallSendMsgSize int ClientMaxCallRecvMsgSize int UseIP bool UseBridge bool UseTCP bool IsLearner bool Closed bool GrpcServerRecorder *grpc_testing.GrpcRecorder } func (m *Member) GRPCURL() string { return m.GrpcURL } type MemberConfig struct { Name string UniqNumber int64 MemberNumber int PeerTLS *transport.TLSInfo ClientTLS *transport.TLSInfo AuthToken string QuotaBackendBytes int64 MaxTxnOps uint MaxRequestBytes uint SnapshotCount uint64 SnapshotCatchUpEntries uint64 GrpcKeepAliveMinTime time.Duration GrpcKeepAliveInterval time.Duration GrpcKeepAliveTimeout time.Duration ClientMaxCallSendMsgSize int ClientMaxCallRecvMsgSize int UseIP bool UseBridge bool UseTCP bool EnableLeaseCheckpoint bool LeaseCheckpointInterval time.Duration LeaseCheckpointPersist bool WatchProgressNotifyInterval time.Duration ExperimentalMaxLearners int StrictReconfigCheck bool CorruptCheckTime time.Duration } // MustNewMember return an inited member with the given name. If peerTLS is // set, it will use https scheme to communicate between peers. 
func MustNewMember(t testutil.TB, mcfg MemberConfig) *Member { var err error m := &Member{ MemberNumber: mcfg.MemberNumber, UniqNumber: int(atomic.AddInt32(&LocalListenCount, 1)), } peerScheme := SchemeFromTLSInfo(mcfg.PeerTLS) clientScheme := SchemeFromTLSInfo(mcfg.ClientTLS) pln := newLocalListener(t) m.PeerListeners = []net.Listener{pln} m.PeerURLs, err = types.NewURLs([]string{peerScheme + "://" + pln.Addr().String()}) if err != nil { t.Fatal(err) } m.PeerTLSInfo = mcfg.PeerTLS cln := newLocalListener(t) m.ClientListeners = []net.Listener{cln} m.ClientURLs, err = types.NewURLs([]string{clientScheme + "://" + cln.Addr().String()}) if err != nil { t.Fatal(err) } m.ClientTLSInfo = mcfg.ClientTLS m.Name = mcfg.Name m.DataDir, err = os.MkdirTemp(t.TempDir(), "etcd") if err != nil { t.Fatal(err) } clusterStr := fmt.Sprintf("%s=%s://%s", mcfg.Name, peerScheme, pln.Addr().String()) m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr) if err != nil { t.Fatal(err) } m.InitialClusterToken = ClusterName m.NewCluster = true m.BootstrapTimeout = 10 * time.Millisecond if m.PeerTLSInfo != nil { m.ServerConfig.PeerTLSInfo = *m.PeerTLSInfo } m.ElectionTicks = ElectionTicks m.InitialElectionTickAdvance = true m.TickMs = uint(TickDuration / time.Millisecond) m.QuotaBackendBytes = mcfg.QuotaBackendBytes m.MaxTxnOps = mcfg.MaxTxnOps if m.MaxTxnOps == 0 { m.MaxTxnOps = embed.DefaultMaxTxnOps } m.MaxRequestBytes = mcfg.MaxRequestBytes if m.MaxRequestBytes == 0 { m.MaxRequestBytes = embed.DefaultMaxRequestBytes } m.SnapshotCount = etcdserver.DefaultSnapshotCount if mcfg.SnapshotCount != 0 { m.SnapshotCount = mcfg.SnapshotCount } m.SnapshotCatchUpEntries = etcdserver.DefaultSnapshotCatchUpEntries if mcfg.SnapshotCatchUpEntries != 0 { m.SnapshotCatchUpEntries = mcfg.SnapshotCatchUpEntries } // for the purpose of integration testing, simple token is enough m.AuthToken = "simple" if mcfg.AuthToken != "" { m.AuthToken = mcfg.AuthToken } m.BcryptCost = uint(bcrypt.MinCost) // use min bcrypt cost to speedy up integration testing m.GrpcServerOpts = []grpc.ServerOption{} if mcfg.GrpcKeepAliveMinTime > time.Duration(0) { m.GrpcServerOpts = append(m.GrpcServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ MinTime: mcfg.GrpcKeepAliveMinTime, PermitWithoutStream: false, })) } if mcfg.GrpcKeepAliveInterval > time.Duration(0) && mcfg.GrpcKeepAliveTimeout > time.Duration(0) { m.GrpcServerOpts = append(m.GrpcServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{ Time: mcfg.GrpcKeepAliveInterval, Timeout: mcfg.GrpcKeepAliveTimeout, })) } m.ClientMaxCallSendMsgSize = mcfg.ClientMaxCallSendMsgSize m.ClientMaxCallRecvMsgSize = mcfg.ClientMaxCallRecvMsgSize m.UseIP = mcfg.UseIP m.UseBridge = mcfg.UseBridge m.UseTCP = mcfg.UseTCP m.EnableLeaseCheckpoint = mcfg.EnableLeaseCheckpoint m.LeaseCheckpointInterval = mcfg.LeaseCheckpointInterval m.LeaseCheckpointPersist = mcfg.LeaseCheckpointPersist m.WatchProgressNotifyInterval = mcfg.WatchProgressNotifyInterval m.InitialCorruptCheck = true if mcfg.CorruptCheckTime > time.Duration(0) { m.CorruptCheckTime = mcfg.CorruptCheckTime } m.WarningApplyDuration = embed.DefaultWarningApplyDuration m.WarningUnaryRequestDuration = embed.DefaultWarningUnaryRequestDuration m.ExperimentalMaxLearners = membership.DefaultMaxLearners if mcfg.ExperimentalMaxLearners != 0 { m.ExperimentalMaxLearners = mcfg.ExperimentalMaxLearners } m.V2Deprecation = config.V2_DEPR_DEFAULT m.GrpcServerRecorder = &grpc_testing.GrpcRecorder{} m.Logger = memberLogger(t, mcfg.Name) 
m.StrictReconfigCheck = mcfg.StrictReconfigCheck if err := m.listenGRPC(); err != nil { t.Fatalf("listenGRPC FAILED: %v", err) } t.Cleanup(func() { // if we didn't cleanup the logger, the consecutive test // might reuse this (t). raft.ResetDefaultLogger() }) return m } func memberLogger(t testutil.TB, name string) *zap.Logger { level := zapcore.InfoLevel if os.Getenv("CLUSTER_DEBUG") != "" { level = zapcore.DebugLevel } options := zaptest.WrapOptions(zap.Fields(zap.String("member", name))) return zaptest.NewLogger(t, zaptest.Level(level), options).Named(name) } // listenGRPC starts a grpc server over a unix domain socket on the member func (m *Member) listenGRPC() error { // prefix with localhost so cert has right domain network, host, port := m.grpcAddr() grpcAddr := host + ":" + port wd, err := os.Getwd() if err != nil { return err } m.Logger.Info("LISTEN GRPC", zap.String("grpcAddr", grpcAddr), zap.String("m.Name", m.Name), zap.String("workdir", wd)) grpcListener, err := net.Listen(network, grpcAddr) if err != nil { return fmt.Errorf("listen failed on grpc socket %s (%v)", grpcAddr, err) } m.GrpcURL = fmt.Sprintf("%s://%s", m.clientScheme(), grpcAddr) if m.UseBridge { _, err = m.addBridge() if err != nil { grpcListener.Close() return err } } m.GrpcListener = grpcListener return nil } func (m *Member) clientScheme() string { switch { case m.UseTCP && m.ClientTLSInfo != nil: return "https" case m.UseTCP && m.ClientTLSInfo == nil: return "http" case !m.UseTCP && m.ClientTLSInfo != nil: return "unixs" case !m.UseTCP && m.ClientTLSInfo == nil: return "unix" } m.Logger.Panic("Failed to determine client schema") return "" } func (m *Member) addBridge() (*bridge, error) { network, host, port := m.grpcAddr() grpcAddr := host + ":" + port bridgeAddr := grpcAddr + "0" m.Logger.Info("LISTEN BRIDGE", zap.String("grpc-address", bridgeAddr), zap.String("member", m.Name)) bridgeListener, err := transport.NewUnixListener(bridgeAddr) if err != nil { return nil, fmt.Errorf("listen failed on bridge socket %s (%v)", bridgeAddr, err) } m.GrpcBridge, err = newBridge(dialer{network: network, addr: grpcAddr}, bridgeListener) if err != nil { bridgeListener.Close() return nil, err } m.GrpcURL = m.clientScheme() + "://" + bridgeAddr return m.GrpcBridge, nil } func (m *Member) Bridge() *bridge { if !m.UseBridge { m.Logger.Panic("Bridge not available. 
Please configure using bridge before creating Cluster.") } return m.GrpcBridge } func (m *Member) grpcAddr() (network, host, port string) { // prefix with localhost so cert has right domain host = "localhost" if m.UseIP { // for IP-only TLS certs host = "127.0.0.1" } network = "unix" if m.UseTCP { network = "tcp" } port = m.Name if m.UseTCP { port = fmt.Sprintf("%d", GrpcPortNumber(m.UniqNumber, m.MemberNumber)) } return network, host, port } func GrpcPortNumber(uniqNumber, memberNumber int) int { return BaseGRPCPort + uniqNumber*10 + memberNumber } type dialer struct { network string addr string } func (d dialer) Dial() (net.Conn, error) { return net.Dial(d.network, d.addr) } func (m *Member) ElectionTimeout() time.Duration { return time.Duration(m.Server.Cfg.ElectionTicks*int(m.Server.Cfg.TickMs)) * time.Millisecond } func (m *Member) ID() types.ID { return m.Server.ID() } // NewClientV3 creates a new grpc client connection to the member func NewClientV3(m *Member) (*clientv3.Client, error) { if m.GrpcURL == "" { return nil, fmt.Errorf("member not configured for grpc") } cfg := clientv3.Config{ Endpoints: []string{m.GrpcURL}, DialTimeout: 5 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, MaxCallSendMsgSize: m.ClientMaxCallSendMsgSize, MaxCallRecvMsgSize: m.ClientMaxCallRecvMsgSize, Logger: m.Logger.Named("client"), } if m.ClientTLSInfo != nil { tls, err := m.ClientTLSInfo.ClientConfig() if err != nil { return nil, err } cfg.TLS = tls } if m.DialOptions != nil { cfg.DialOptions = append(cfg.DialOptions, m.DialOptions...) } return newClientV3(cfg) } // Clone returns a member with the same server configuration. The returned // member will not set PeerListeners and ClientListeners. func (m *Member) Clone(t testutil.TB) *Member { mm := &Member{} mm.ServerConfig = m.ServerConfig var err error clientURLStrs := m.ClientURLs.StringSlice() mm.ClientURLs, err = types.NewURLs(clientURLStrs) if err != nil { // this should never fail panic(err) } peerURLStrs := m.PeerURLs.StringSlice() mm.PeerURLs, err = types.NewURLs(peerURLStrs) if err != nil { // this should never fail panic(err) } clusterStr := m.InitialPeerURLsMap.String() mm.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr) if err != nil { // this should never fail panic(err) } mm.InitialClusterToken = m.InitialClusterToken mm.ElectionTicks = m.ElectionTicks mm.PeerTLSInfo = m.PeerTLSInfo mm.ClientTLSInfo = m.ClientTLSInfo mm.Logger = memberLogger(t, mm.Name+"c") return mm } // Launch starts a member based on ServerConfig, PeerListeners // and ClientListeners. func (m *Member) Launch() error { m.Logger.Info( "launching a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), zap.String("grpc-url", m.GrpcURL), ) var err error if m.Server, err = etcdserver.NewServer(m.ServerConfig); err != nil { return fmt.Errorf("failed to initialize the etcd server: %v", err) } m.Server.SyncTicker = time.NewTicker(500 * time.Millisecond) m.Server.Start() var peerTLScfg *tls.Config if m.PeerTLSInfo != nil && !m.PeerTLSInfo.Empty() { if peerTLScfg, err = m.PeerTLSInfo.ServerConfig(); err != nil { return err } } if m.GrpcListener != nil { var ( tlscfg *tls.Config ) if m.ClientTLSInfo != nil && !m.ClientTLSInfo.Empty() { tlscfg, err = m.ClientTLSInfo.ServerConfig() if err != nil { return err } } m.GrpcServer = v3rpc.Server(m.Server, tlscfg, m.GrpcServerRecorder.UnaryInterceptor(), m.GrpcServerOpts...) 
m.ServerClient = v3client.New(m.Server) lockpb.RegisterLockServer(m.GrpcServer, v3lock.NewLockServer(m.ServerClient)) epb.RegisterElectionServer(m.GrpcServer, v3election.NewElectionServer(m.ServerClient)) go m.GrpcServer.Serve(m.GrpcListener) } m.RaftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.Logger, m.Server)} h := (http.Handler)(m.RaftHandler) if m.GrpcListener != nil { h = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { m.RaftHandler.ServeHTTP(w, r) }) } for _, ln := range m.PeerListeners { cm := cmux.New(ln) // don't hang on matcher after closing listener cm.SetReadTimeout(time.Second) // serve http1/http2 rafthttp/grpc ll := cm.Match(cmux.Any()) if peerTLScfg != nil { if ll, err = transport.NewTLSListener(ll, m.PeerTLSInfo); err != nil { return err } } hs := &httptest.Server{ Listener: ll, Config: &http.Server{ Handler: h, TLSConfig: peerTLScfg, ErrorLog: log.New(io.Discard, "net/http", 0), }, TLS: peerTLScfg, } hs.Start() donec := make(chan struct{}) go func() { defer close(donec) cm.Serve() }() closer := func() { ll.Close() hs.CloseClientConnections() hs.Close() <-donec } m.ServerClosers = append(m.ServerClosers, closer) } for _, ln := range m.ClientListeners { hs := &httptest.Server{ Listener: ln, Config: &http.Server{ Handler: v2http.NewClientHandler( m.Logger, m.Server, m.ServerConfig.ReqTimeout(), ), ErrorLog: log.New(io.Discard, "net/http", 0), }, } if m.ClientTLSInfo == nil { hs.Start() } else { info := m.ClientTLSInfo hs.TLS, err = info.ServerConfig() if err != nil { return err } // baseConfig is called on initial TLS handshake start. // // Previously, // 1. Server has non-empty (*tls.Config).Certificates on client hello // 2. Server calls (*tls.Config).GetCertificate iff: // - Server'Server (*tls.Config).Certificates is not empty, or // - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName // // When (*tls.Config).Certificates is always populated on initial handshake, // client is expected to provide a valid matching SNI to pass the TLS // verification, thus trigger server (*tls.Config).GetCertificate to reload // TLS assets. However, a cert whose SAN field does not include domain names // but only IP addresses, has empty (*tls.ClientHelloInfo).ServerName, thus // it was never able to trigger TLS reload on initial handshake; first // ceritifcate object was being used, never being updated. // // Now, (*tls.Config).Certificates is created empty on initial TLS client // handshake, in order to trigger (*tls.Config).GetCertificate and populate // rest of the certificates on every new TLS connection, even when client // SNI is empty (e.g. cert only includes IPs). // // This introduces another problem with "httptest.Server": // when server initial certificates are empty, certificates // are overwritten by Go'Server internal test certs, which have // different SAN fields (e.g. example.com). To work around, // re-overwrite (*tls.Config).Certificates before starting // test server. 
tlsCert, err := tlsutil.NewCert(info.CertFile, info.KeyFile, nil) if err != nil { return err } hs.TLS.Certificates = []tls.Certificate{*tlsCert} hs.StartTLS() } closer := func() { ln.Close() hs.CloseClientConnections() hs.Close() } m.ServerClosers = append(m.ServerClosers, closer) } if m.GrpcURL != "" && m.Client == nil { m.Client, err = NewClientV3(m) if err != nil { return err } } m.Logger.Info( "launched a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), zap.String("grpc-url", m.GrpcURL), ) return nil } func (m *Member) RecordedRequests() []grpc_testing.RequestInfo { return m.GrpcServerRecorder.RecordedRequests() } func (m *Member) WaitOK(t testutil.TB) { m.WaitStarted(t) for m.Server.Leader() == 0 { time.Sleep(TickDuration) } } func (m *Member) WaitStarted(t testutil.TB) { for { ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) _, err := m.Client.Get(ctx, "/", clientv3.WithSerializable()) if err != nil { time.Sleep(TickDuration) continue } cancel() break } } func WaitClientV3(t testutil.TB, kv clientv3.KV) { timeout := time.Now().Add(RequestTimeout) var err error for time.Now().Before(timeout) { ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) _, err = kv.Get(ctx, "/") cancel() if err == nil { return } time.Sleep(TickDuration) } if err != nil { t.Fatalf("timed out waiting for client: %v", err) } } func (m *Member) URL() string { return m.ClientURLs[0].String() } func (m *Member) Pause() { m.RaftHandler.Pause() m.Server.PauseSending() } func (m *Member) Resume() { m.RaftHandler.Resume() m.Server.ResumeSending() } // Close stops the member'Server etcdserver and closes its connections func (m *Member) Close() { if m.GrpcBridge != nil { m.GrpcBridge.Close() m.GrpcBridge = nil } if m.ServerClient != nil { m.ServerClient.Close() m.ServerClient = nil } if m.GrpcServer != nil { ch := make(chan struct{}) go func() { defer close(ch) // close listeners to stop accepting new connections, // will block on any existing transports m.GrpcServer.GracefulStop() }() // wait until all pending RPCs are finished select { case <-ch: case <-time.After(2 * time.Second): // took too long, manually close open transports // e.g. watch streams m.GrpcServer.Stop() <-ch } m.GrpcServer = nil } if m.Server != nil { m.Server.HardStop() } for _, f := range m.ServerClosers { f() } if !m.Closed { // Avoid verification of the same file multiple times // (that might not exist any longer) verify.MustVerifyIfEnabled(verify.Config{ Logger: m.Logger, DataDir: m.DataDir, ExactIndex: false, }) } m.Closed = true } // Stop stops the member, but the data dir of the member is preserved. func (m *Member) Stop(_ testutil.TB) { m.Logger.Info( "stopping a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), zap.String("grpc-url", m.GrpcURL), ) m.Close() m.ServerClosers = nil m.Logger.Info( "stopped a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), zap.String("grpc-url", m.GrpcURL), ) } // CheckLeaderTransition waits for leader transition, returning the new leader ID. 
func CheckLeaderTransition(m *Member, oldLead uint64) uint64 { interval := time.Duration(m.Server.Cfg.TickMs) * time.Millisecond for m.Server.Lead() == 0 || (m.Server.Lead() == oldLead) { time.Sleep(interval) } return m.Server.Lead() } // StopNotify unblocks when a member stop completes func (m *Member) StopNotify() <-chan struct{} { return m.Server.StopNotify() } // Restart starts the member using the preserved data dir. func (m *Member) Restart(t testutil.TB) error { m.Logger.Info( "restarting a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), zap.String("grpc-url", m.GrpcURL), ) newPeerListeners := make([]net.Listener, 0) for _, ln := range m.PeerListeners { newPeerListeners = append(newPeerListeners, NewListenerWithAddr(t, ln.Addr().String())) } m.PeerListeners = newPeerListeners newClientListeners := make([]net.Listener, 0) for _, ln := range m.ClientListeners { newClientListeners = append(newClientListeners, NewListenerWithAddr(t, ln.Addr().String())) } m.ClientListeners = newClientListeners if m.GrpcListener != nil { if err := m.listenGRPC(); err != nil { t.Fatal(err) } } err := m.Launch() m.Logger.Info( "restarted a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), zap.String("grpc-url", m.GrpcURL), zap.Error(err), ) return err } // Terminate stops the member and removes the data dir. func (m *Member) Terminate(t testutil.TB) { m.Logger.Info( "terminating a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), zap.String("grpc-url", m.GrpcURL), ) m.Close() if !m.KeepDataDirTerminate { if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil { t.Fatal(err) } } m.Logger.Info( "terminated a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), zap.String("grpc-url", m.GrpcURL), ) } // Metric gets the metric value for a member func (m *Member) Metric(metricName string, expectLabels ...string) (string, error) { cfgtls := transport.TLSInfo{} tr, err := transport.NewTimeoutTransport(cfgtls, time.Second, time.Second, time.Second) if err != nil { return "", err } cli := &http.Client{Transport: tr} resp, err := cli.Get(m.ClientURLs[0].String() + "/metrics") if err != nil { return "", err } defer resp.Body.Close() b, rerr := io.ReadAll(resp.Body) if rerr != nil { return "", rerr } lines := strings.Split(string(b), "\n") for _, l := range lines { if !strings.HasPrefix(l, metricName) { continue } ok := true for _, lv := range expectLabels { if !strings.Contains(l, lv) { ok = false break } } if !ok { continue } return strings.Split(l, " ")[1], nil } return "", nil } // InjectPartition drops connections from m to others, vice versa. func (m *Member) InjectPartition(t testutil.TB, others ...*Member) { for _, other := range others { m.Server.CutPeer(other.Server.ID()) other.Server.CutPeer(m.Server.ID()) t.Logf("network partition injected between: %v <-> %v", m.Server.ID(), other.Server.ID()) } } // RecoverPartition recovers connections from m to others, vice versa. 
func (m *Member) RecoverPartition(t testutil.TB, others ...*Member) { for _, other := range others { m.Server.MendPeer(other.Server.ID()) other.Server.MendPeer(m.Server.ID()) t.Logf("network partition between: %v <-> %v", m.Server.ID(), other.Server.ID()) } } func (m *Member) ReadyNotify() <-chan struct{} { return m.Server.ReadyNotify() } type SortableMemberSliceByPeerURLs []*pb.Member func (p SortableMemberSliceByPeerURLs) Len() int { return len(p) } func (p SortableMemberSliceByPeerURLs) Less(i, j int) bool { return p[i].PeerURLs[0] < p[j].PeerURLs[0] } func (p SortableMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // NewCluster returns a launched Cluster with a grpc client connection // for each Cluster member. func NewCluster(t testutil.TB, cfg *ClusterConfig) *Cluster { t.Helper() assertInTestContext(t) testutil.SkipTestIfShortMode(t, "Cannot start etcd Cluster in --short tests") c := &Cluster{Cfg: cfg} ms := make([]*Member, cfg.Size) for i := 0; i < cfg.Size; i++ { ms[i] = c.mustNewMember(t) } c.Members = ms if err := c.fillClusterForMembers(); err != nil { t.Fatalf("fillClusterForMembers failed: %v", err) } c.Launch(t) return c } func (c *Cluster) TakeClient(idx int) { c.mu.Lock() c.Members[idx].Client = nil c.mu.Unlock() } func (c *Cluster) Terminate(t testutil.TB) { if t != nil { t.Logf("========= Cluster termination started =====================") } c.mu.Lock() if c.clusterClient != nil { if err := c.clusterClient.Close(); err != nil { t.Error(err) } } c.mu.Unlock() for _, m := range c.Members { if m.Client != nil { m.Client.Close() } } var wg sync.WaitGroup wg.Add(len(c.Members)) for _, m := range c.Members { go func(mm *Member) { defer wg.Done() mm.Terminate(t) }(m) } wg.Wait() if t != nil { t.Logf("========= Cluster termination succeeded ===================") } } func (c *Cluster) RandClient() *clientv3.Client { return c.Members[rand.Intn(len(c.Members))].Client } func (c *Cluster) Client(i int) *clientv3.Client { return c.Members[i].Client } func (c *Cluster) Endpoints() []string { var endpoints []string for _, m := range c.Members { endpoints = append(endpoints, m.GrpcURL) } return endpoints } func (c *Cluster) ClusterClient() (client *clientv3.Client, err error) { if c.clusterClient == nil { endpoints := []string{} for _, m := range c.Members { endpoints = append(endpoints, m.GrpcURL) } cfg := clientv3.Config{ Endpoints: endpoints, DialTimeout: 5 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, MaxCallSendMsgSize: c.Cfg.ClientMaxCallSendMsgSize, MaxCallRecvMsgSize: c.Cfg.ClientMaxCallRecvMsgSize, } if c.Cfg.ClientTLS != nil { tls, err := c.Cfg.ClientTLS.ClientConfig() if err != nil { return nil, err } cfg.TLS = tls } c.clusterClient, err = newClientV3(cfg) if err != nil { return nil, err } } return c.clusterClient, nil } // NewClientV3 creates a new grpc client connection to the member func (c *Cluster) NewClientV3(memberIndex int) (*clientv3.Client, error) { return NewClientV3(c.Members[memberIndex]) } func makeClients(t testutil.TB, clus *Cluster, clients *[]*clientv3.Client, chooseMemberIndex func() int) func() *clientv3.Client { var mu sync.Mutex *clients = nil return func() *clientv3.Client { cli, err := clus.NewClientV3(chooseMemberIndex()) if err != nil { t.Fatalf("cannot create client: %v", err) } mu.Lock() *clients = append(*clients, cli) mu.Unlock() return cli } } // MakeSingleNodeClients creates factory of clients that all connect to member 0. // All the created clients are put on the 'clients' list. The factory is thread-safe. 
func MakeSingleNodeClients(t testutil.TB, clus *Cluster, clients *[]*clientv3.Client) func() *clientv3.Client { return makeClients(t, clus, clients, func() int { return 0 }) } // MakeMultiNodeClients creates factory of clients that all connect to random members. // All the created clients are put on the 'clients' list. The factory is thread-safe. func MakeMultiNodeClients(t testutil.TB, clus *Cluster, clients *[]*clientv3.Client) func() *clientv3.Client { return makeClients(t, clus, clients, func() int { return rand.Intn(len(clus.Members)) }) } // CloseClients closes all the clients from the 'clients' list. func CloseClients(t testutil.TB, clients []*clientv3.Client) { for _, cli := range clients { if err := cli.Close(); err != nil { t.Fatal(err) } } } type GrpcAPI struct { // Cluster is the Cluster API for the client'Server connection. Cluster pb.ClusterClient // KV is the keyvalue API for the client'Server connection. KV pb.KVClient // Lease is the lease API for the client'Server connection. Lease pb.LeaseClient // Watch is the watch API for the client'Server connection. Watch pb.WatchClient // Maintenance is the maintenance API for the client'Server connection. Maintenance pb.MaintenanceClient // Auth is the authentication API for the client'Server connection. Auth pb.AuthClient // Lock is the lock API for the client'Server connection. Lock lockpb.LockClient // Election is the election API for the client'Server connection. Election epb.ElectionClient } // GetLearnerMembers returns the list of learner members in Cluster using MemberList API. func (c *Cluster) GetLearnerMembers() ([]*pb.Member, error) { cli := c.Client(0) resp, err := cli.MemberList(context.Background()) if err != nil { return nil, fmt.Errorf("failed to list member %v", err) } var learners []*pb.Member for _, m := range resp.Members { if m.IsLearner { learners = append(learners, m) } } return learners, nil } // AddAndLaunchLearnerMember creates a leaner member, adds it to Cluster // via v3 MemberAdd API, and then launches the new member. func (c *Cluster) AddAndLaunchLearnerMember(t testutil.TB) { m := c.mustNewMember(t) m.IsLearner = true scheme := SchemeFromTLSInfo(c.Cfg.PeerTLS) peerURLs := []string{scheme + "://" + m.PeerListeners[0].Addr().String()} cli := c.Client(0) _, err := cli.MemberAddAsLearner(context.Background(), peerURLs) if err != nil { t.Fatalf("failed to add learner member %v", err) } m.InitialPeerURLsMap = types.URLsMap{} for _, mm := range c.Members { m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs } m.InitialPeerURLsMap[m.Name] = m.PeerURLs m.NewCluster = false if err := m.Launch(); err != nil { t.Fatal(err) } c.Members = append(c.Members, m) c.waitMembersMatch(t) } // getMembers returns a list of members in Cluster, in format of etcdserverpb.Member func (c *Cluster) getMembers() []*pb.Member { var mems []*pb.Member for _, m := range c.Members { mem := &pb.Member{ Name: m.Name, PeerURLs: m.PeerURLs.StringSlice(), ClientURLs: m.ClientURLs.StringSlice(), IsLearner: m.IsLearner, } mems = append(mems, mem) } return mems } // waitMembersMatch waits until v3rpc MemberList returns the 'same' members info as the // local 'c.Members', which is the local recording of members in the testing Cluster. With // the exception that the local recording c.Members does not have info on Member.ID, which // is generated when the member is been added to Cluster. // // Note: // A successful match means the Member.clientURLs are matched. This means member has already // finished publishing its server attributes to Cluster. 
Publishing attributes is a Cluster-wide // write request (in v2 server). Therefore, at this point, any raft log entries prior to this // would have already been applied. // // If a new member was added to an existing Cluster, at this point, it has finished publishing // its own server attributes to the Cluster. And therefore by the same argument, it has already // applied the raft log entries (especially those of type raftpb.ConfChangeType). At this point, // the new member has the correct view of the Cluster configuration. // // Special note on learner member: // Learner member is only added to a Cluster via v3rpc MemberAdd API (as of v3.4). When starting // the learner member, its initial view of the Cluster created by peerURLs map does not have info // on whether or not the new member itself is learner. But at this point, a successful match does // indicate that the new learner member has applied the raftpb.ConfChangeAddLearnerNode entry // which was used to add the learner itself to the Cluster, and therefore it has the correct info // on learner. func (c *Cluster) waitMembersMatch(t testutil.TB) { wMembers := c.getMembers() sort.Sort(SortableProtoMemberSliceByPeerURLs(wMembers)) cli := c.Client(0) for { resp, err := cli.MemberList(context.Background()) if err != nil { t.Fatalf("failed to list member %v", err) } if len(resp.Members) != len(wMembers) { continue } sort.Sort(SortableProtoMemberSliceByPeerURLs(resp.Members)) for _, m := range resp.Members { m.ID = 0 } if reflect.DeepEqual(resp.Members, wMembers) { return } time.Sleep(TickDuration) } } type SortableProtoMemberSliceByPeerURLs []*pb.Member func (p SortableProtoMemberSliceByPeerURLs) Len() int { return len(p) } func (p SortableProtoMemberSliceByPeerURLs) Less(i, j int) bool { return p[i].PeerURLs[0] < p[j].PeerURLs[0] } func (p SortableProtoMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // MustNewMember creates a new member instance based on the response of V3 Member Add API. func (c *Cluster) MustNewMember(t testutil.TB, resp *clientv3.MemberAddResponse) *Member { m := c.mustNewMember(t) m.IsLearner = resp.Member.IsLearner m.NewCluster = false m.InitialPeerURLsMap = types.URLsMap{} for _, mm := range c.Members { m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs } m.InitialPeerURLsMap[m.Name] = types.MustNewURLs(resp.Member.PeerURLs) c.Members = append(c.Members, m) return m }
[ "\"CLUSTER_DEBUG\"" ]
[]
[ "CLUSTER_DEBUG" ]
[]
["CLUSTER_DEBUG"]
go
1
0
api/pkg/client/connection_utils.go
package client

import (
	"encoding/json"
	"net/http"
	"os"

	"github.com/gin-gonic/gin"
)

// HomeDir returns the running user's home directory.
func HomeDir() string {
	if linuxHome := os.Getenv("HOME"); linuxHome != "" {
		return linuxHome
	}
	return os.Getenv("USERPROFILE") // windows-fallback
}

// MarshalAndSetJson is a helper that unmarshals JSON and returns the result to the browser.
func MarshalAndSetJson(c *gin.Context, data []byte) {
	var raw map[string]interface{}
	if err := json.Unmarshal(data, &raw); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{
			"result": "Something went wrong unmarshalling the data from kubernetes!",
		})
	} else {
		c.JSON(http.StatusOK, gin.H{
			"result": raw,
		})
	}
}

// MarshalAndSetJsonSansGin is a helper that unmarshals JSON and returns the raw map and any error, without writing to a gin context.
func MarshalAndSetJsonSansGin(data []byte) (map[string]interface{}, error) {
	var raw map[string]interface{}
	err := json.Unmarshal(data, &raw)
	return raw, err
}
[ "\"HOME\"", "\"USERPROFILE\"" ]
[]
[ "HOME", "USERPROFILE" ]
[]
["HOME", "USERPROFILE"]
go
2
0
mesonbuild/compilers/mixins/visualstudio.py
# Copyright 2019 The meson development team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Abstractions to simplify compilers that implement an MSVC compatible interface. """ import abc import os import typing as T from ... import arglist from ... import mesonlib from ... import mlog if T.TYPE_CHECKING: from ...environment import Environment from .clike import CLikeCompiler as Compiler else: # This is a bit clever, for mypy we pretend that these mixins descend from # Compiler, so we get all of the methods and attributes defined for us, but # for runtime we make them descend from object (which all classes normally # do). This gives up DRYer type checking, with no runtime impact Compiler = object vs32_instruction_set_args = { 'mmx': ['/arch:SSE'], # There does not seem to be a flag just for MMX 'sse': ['/arch:SSE'], 'sse2': ['/arch:SSE2'], 'sse3': ['/arch:AVX'], # VS leaped from SSE2 directly to AVX. 'sse41': ['/arch:AVX'], 'sse42': ['/arch:AVX'], 'avx': ['/arch:AVX'], 'avx2': ['/arch:AVX2'], 'neon': None, } # T.Dicst[str, T.Optional[T.List[str]]] # The 64 bit compiler defaults to /arch:avx. vs64_instruction_set_args = { 'mmx': ['/arch:AVX'], 'sse': ['/arch:AVX'], 'sse2': ['/arch:AVX'], 'sse3': ['/arch:AVX'], 'ssse3': ['/arch:AVX'], 'sse41': ['/arch:AVX'], 'sse42': ['/arch:AVX'], 'avx': ['/arch:AVX'], 'avx2': ['/arch:AVX2'], 'neon': None, } # T.Dicst[str, T.Optional[T.List[str]]] msvc_optimization_args = { '0': ['/Od'], 'g': [], # No specific flag to optimize debugging, /Zi or /ZI will create debug information '1': ['/O1'], '2': ['/O2'], '3': ['/O2', '/Gw'], 's': ['/O1', '/Gw'], } # type: T.Dict[str, T.List[str]] msvc_debug_args = { False: [], True: ['/Zi'] } # type: T.Dict[bool, T.List[str]] class VisualStudioLikeCompiler(Compiler, metaclass=abc.ABCMeta): """A common interface for all compilers implementing an MSVC-style interface. A number of compilers attempt to mimic MSVC, with varying levels of success, such as Clang-CL and ICL (the Intel C/C++ Compiler for Windows). This class implements as much common logic as possible. """ std_warn_args = ['/W3'] std_opt_args = ['/O2'] ignore_libs = arglist.UNIXY_COMPILER_INTERNAL_LIBS + ['execinfo'] internal_libs = [] # type: T.List[str] crt_args = { 'none': [], 'md': ['/MD'], 'mdd': ['/MDd'], 'mt': ['/MT'], 'mtd': ['/MTd'], } # type: T.Dict[str, T.List[str]] # /showIncludes is needed for build dependency tracking in Ninja # See: https://ninja-build.org/manual.html#_deps # Assume UTF-8 sources by default, but self.unix_args_to_native() removes it # if `/source-charset` is set too. 
always_args = ['/nologo', '/showIncludes', '/utf-8'] warn_args = { '0': [], '1': ['/W2'], '2': ['/W3'], '3': ['/W4'], } # type: T.Dict[str, T.List[str]] INVOKES_LINKER = False def __init__(self, target: str): self.base_options = {mesonlib.OptionKey(o) for o in ['b_pch', 'b_ndebug', 'b_vscrt']} # FIXME add lto, pgo and the like self.target = target self.is_64 = ('x64' in target) or ('x86_64' in target) # do some canonicalization of target machine if 'x86_64' in target: self.machine = 'x64' elif '86' in target: self.machine = 'x86' elif 'aarch64' in target: self.machine = 'arm64' elif 'arm' in target: self.machine = 'arm' else: self.machine = target if mesonlib.version_compare(self.version, '>=19.28.29910'): # VS 16.9.0 includes cl 19.28.29910 self.base_options.add(mesonlib.OptionKey('b_sanitize')) assert self.linker is not None self.linker.machine = self.machine # Override CCompiler.get_always_args def get_always_args(self) -> T.List[str]: return self.always_args def get_pch_suffix(self) -> str: return 'pch' def get_pch_name(self, header: str) -> str: chopped = os.path.basename(header).split('.')[:-1] chopped.append(self.get_pch_suffix()) pchname = '.'.join(chopped) return pchname def get_pch_base_name(self, header: str) -> str: # This needs to be implemented by inherting classes raise NotImplementedError def get_pch_use_args(self, pch_dir: str, header: str) -> T.List[str]: base = self.get_pch_base_name(header) pchname = self.get_pch_name(header) return ['/FI' + base, '/Yu' + base, '/Fp' + os.path.join(pch_dir, pchname)] def get_preprocess_only_args(self) -> T.List[str]: return ['/EP'] def get_compile_only_args(self) -> T.List[str]: return ['/c'] def get_no_optimization_args(self) -> T.List[str]: return ['/Od','/Oi-'] def sanitizer_compile_args(self, value: str) -> T.List[str]: if value == 'none': return [] if value != 'address': raise mesonlib.MesonException('VS only supports address sanitizer at the moment.') return ['/fsanitize=address'] def get_output_args(self, target: str) -> T.List[str]: if target.endswith('.exe'): return ['/Fe' + target] return ['/Fo' + target] def get_buildtype_args(self, buildtype: str) -> T.List[str]: return [] def get_debug_args(self, is_debug: bool) -> T.List[str]: return msvc_debug_args[is_debug] def get_optimization_args(self, optimization_level: str) -> T.List[str]: args = msvc_optimization_args[optimization_level] if mesonlib.version_compare(self.version, '<18.0'): args = [arg for arg in args if arg != '/Gw'] return args def linker_to_compiler_args(self, args: T.List[str]) -> T.List[str]: return ['/link'] + args def get_pic_args(self) -> T.List[str]: return [] # PIC is handled by the loader on Windows def gen_vs_module_defs_args(self, defsfile: str) -> T.List[str]: if not isinstance(defsfile, str): raise RuntimeError('Module definitions file should be str') # With MSVC, DLLs only export symbols that are explicitly exported, # so if a module defs file is specified, we use that to export symbols return ['/DEF:' + defsfile] def gen_pch_args(self, header: str, source: str, pchname: str) -> T.Tuple[str, T.List[str]]: objname = os.path.splitext(pchname)[0] + '.obj' return objname, ['/Yc' + header, '/Fp' + pchname, '/Fo' + objname] def openmp_flags(self) -> T.List[str]: return ['/openmp'] def openmp_link_flags(self) -> T.List[str]: return [] # FIXME, no idea what these should be. 
def thread_flags(self, env: 'Environment') -> T.List[str]: return [] @classmethod def unix_args_to_native(cls, args: T.List[str]) -> T.List[str]: result: T.List[str] = [] for i in args: # -mms-bitfields is specific to MinGW-GCC # -pthread is only valid for GCC if i in ('-mms-bitfields', '-pthread'): continue if i.startswith('-LIBPATH:'): i = '/LIBPATH:' + i[9:] elif i.startswith('-L'): i = '/LIBPATH:' + i[2:] # Translate GNU-style -lfoo library name to the import library elif i.startswith('-l'): name = i[2:] if name in cls.ignore_libs: # With MSVC, these are provided by the C runtime which is # linked in by default continue else: i = name + '.lib' elif i.startswith('-isystem'): # just use /I for -isystem system include path s if i.startswith('-isystem='): i = '/I' + i[9:] else: i = '/I' + i[8:] elif i.startswith('-idirafter'): # same as -isystem, but appends the path instead if i.startswith('-idirafter='): i = '/I' + i[11:] else: i = '/I' + i[10:] # -pthread in link flags is only used on Linux elif i == '-pthread': continue # cl.exe does not allow specifying both, so remove /utf-8 that we # added automatically in the case the user overrides it manually. elif i.startswith('/source-charset:') or i.startswith('/execution-charset:'): try: result.remove('/utf-8') except ValueError: pass result.append(i) return result @classmethod def native_args_to_unix(cls, args: T.List[str]) -> T.List[str]: result = [] for arg in args: if arg.startswith(('/LIBPATH:', '-LIBPATH:')): result.append('-L' + arg[9:]) elif arg.endswith(('.a', '.lib')) and not os.path.isabs(arg): result.append('-l' + arg) else: result.append(arg) return result def get_werror_args(self) -> T.List[str]: return ['/WX'] def get_include_args(self, path: str, is_system: bool) -> T.List[str]: if path == '': path = '.' # msvc does not have a concept of system header dirs. return ['-I' + path] def compute_parameters_with_absolute_paths(self, parameter_list: T.List[str], build_dir: str) -> T.List[str]: for idx, i in enumerate(parameter_list): if i[:2] == '-I' or i[:2] == '/I': parameter_list[idx] = i[:2] + os.path.normpath(os.path.join(build_dir, i[2:])) elif i[:9] == '/LIBPATH:': parameter_list[idx] = i[:9] + os.path.normpath(os.path.join(build_dir, i[9:])) return parameter_list # Visual Studio is special. It ignores some arguments it does not # understand and you can't tell it to error out on those. 
# http://stackoverflow.com/questions/15259720/how-can-i-make-the-microsoft-c-compiler-treat-unknown-flags-as-errors-rather-t def has_arguments(self, args: T.List[str], env: 'Environment', code: str, mode: str) -> T.Tuple[bool, bool]: warning_text = '4044' if mode == 'link' else '9002' with self._build_wrapper(code, env, extra_args=args, mode=mode) as p: if p.returncode != 0: return False, p.cached return not(warning_text in p.stderr or warning_text in p.stdout), p.cached def get_compile_debugfile_args(self, rel_obj: str, pch: bool = False) -> T.List[str]: pdbarr = rel_obj.split('.')[:-1] pdbarr += ['pdb'] args = ['/Fd' + '.'.join(pdbarr)] return args def get_instruction_set_args(self, instruction_set: str) -> T.Optional[T.List[str]]: if self.is_64: return vs64_instruction_set_args.get(instruction_set, None) return vs32_instruction_set_args.get(instruction_set, None) def _calculate_toolset_version(self, version: int) -> T.Optional[str]: if version < 1310: return '7.0' elif version < 1400: return '7.1' # (Visual Studio 2003) elif version < 1500: return '8.0' # (Visual Studio 2005) elif version < 1600: return '9.0' # (Visual Studio 2008) elif version < 1700: return '10.0' # (Visual Studio 2010) elif version < 1800: return '11.0' # (Visual Studio 2012) elif version < 1900: return '12.0' # (Visual Studio 2013) elif version < 1910: return '14.0' # (Visual Studio 2015) elif version < 1920: return '14.1' # (Visual Studio 2017) elif version < 1930: return '14.2' # (Visual Studio 2019) mlog.warning(f'Could not find toolset for version {self.version!r}') return None def get_toolset_version(self) -> T.Optional[str]: # See boost/config/compiler/visualc.cpp for up to date mapping try: version = int(''.join(self.version.split('.')[0:2])) except ValueError: return None return self._calculate_toolset_version(version) def get_default_include_dirs(self) -> T.List[str]: if 'INCLUDE' not in os.environ: return [] return os.environ['INCLUDE'].split(os.pathsep) def get_crt_compile_args(self, crt_val: str, buildtype: str) -> T.List[str]: if crt_val in self.crt_args: return self.crt_args[crt_val] assert crt_val in ['from_buildtype', 'static_from_buildtype'] dbg = 'mdd' rel = 'md' if crt_val == 'static_from_buildtype': dbg = 'mtd' rel = 'mt' # Match what build type flags used to do. if buildtype == 'plain': return [] elif buildtype == 'debug': return self.crt_args[dbg] elif buildtype == 'debugoptimized': return self.crt_args[rel] elif buildtype == 'release': return self.crt_args[rel] elif buildtype == 'minsize': return self.crt_args[rel] else: assert buildtype == 'custom' raise mesonlib.EnvironmentException('Requested C runtime based on buildtype, but buildtype is "custom".') def has_func_attribute(self, name: str, env: 'Environment') -> T.Tuple[bool, bool]: # MSVC doesn't have __attribute__ like Clang and GCC do, so just return # false without compiling anything return name in ['dllimport', 'dllexport'], False def get_argument_syntax(self) -> str: return 'msvc' class MSVCCompiler(VisualStudioLikeCompiler): """Spcific to the Microsoft Compilers.""" def __init__(self, target: str): super().__init__(target) self.id = 'msvc' def get_compile_debugfile_args(self, rel_obj: str, pch: bool = False) -> T.List[str]: args = super().get_compile_debugfile_args(rel_obj, pch) # When generating a PDB file with PCH, all compile commands write # to the same PDB file. Hence, we need to serialize the PDB # writes using /FS since we do parallel builds. This slows down the # build obviously, which is why we only do this when PCH is on. 
# This was added in Visual Studio 2013 (MSVC 18.0). Before that it was # always on: https://msdn.microsoft.com/en-us/library/dn502518.aspx if pch and mesonlib.version_compare(self.version, '>=18.0'): args = ['/FS'] + args return args def get_instruction_set_args(self, instruction_set: str) -> T.Optional[T.List[str]]: if self.version.split('.')[0] == '16' and instruction_set == 'avx': # VS documentation says that this exists and should work, but # it does not. The headers do not contain AVX intrinsics # and they can not be called. return None return super().get_instruction_set_args(instruction_set) def get_pch_base_name(self, header: str) -> str: return os.path.basename(header) class ClangClCompiler(VisualStudioLikeCompiler): """Spcific to Clang-CL.""" def __init__(self, target: str): super().__init__(target) self.id = 'clang-cl' # Assembly self.can_compile_suffixes.add('s') def has_arguments(self, args: T.List[str], env: 'Environment', code: str, mode: str) -> T.Tuple[bool, bool]: if mode != 'link': args = args + ['-Werror=unknown-argument'] return super().has_arguments(args, env, code, mode) def get_toolset_version(self) -> T.Optional[str]: # XXX: what is the right thing to do here? return '14.1' def get_pch_base_name(self, header: str) -> str: return header
[]
[]
[ "INCLUDE" ]
[]
["INCLUDE"]
python
1
0
kinesis/kinesis_test.go
package kinesis

import (
	"context"
	"errors"
	"os"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
	"github.com/italolelis/outboxer"
)

func TestKinesis_EventStream(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	endpoint := os.Getenv("KINESIS_ENDPOINT")
	if endpoint == "" {
		endpoint = "http://localhost:4568"
	}

	sess, err := session.NewSession(&aws.Config{
		CredentialsChainVerboseErrors: aws.Bool(true),
		Credentials:                   credentials.NewStaticCredentials("foo", "var", ""),
		Endpoint:                      aws.String(endpoint),
		Region:                        aws.String("us-east-1"),
	})
	if err != nil {
		t.Fatalf("failed to setup an aws session: %s", err)
	}

	streamName := aws.String("test")
	kinesisClient := kinesis.New(sess)

	if _, err := kinesisClient.CreateStreamWithContext(ctx, &kinesis.CreateStreamInput{
		ShardCount: aws.Int64(1),
		StreamName: streamName,
	}); err != nil {
		var kErr awserr.Error
		if errors.As(err, &kErr) {
			if kErr.Code() == "ResourceInUseException" {
				t.Log(kErr.Message())
			}
		} else {
			t.Fatalf("failed to create stream: %s", err)
		}
	}

	if err := kinesisClient.WaitUntilStreamExistsWithContext(ctx, &kinesis.DescribeStreamInput{StreamName: streamName}); err != nil {
		t.Fatalf("failed to wait for stream creation: %s", err)
	}

	es := New(kinesisClient)

	if err := es.Send(ctx, &outboxer.OutboxMessage{
		Payload: []byte("test payload"),
		Options: map[string]interface{}{
			StreamNameOption: *streamName,
		},
	}); err != nil {
		t.Fatalf("an error was not expected: %s", err)
	}
}
[ "\"KINESIS_ENDPOINT\"" ]
[]
[ "KINESIS_ENDPOINT" ]
[]
["KINESIS_ENDPOINT"]
go
1
0
pytorch_lightning/strategies/ddp.py
# Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import shutil import signal import subprocess import sys import tempfile import time from pathlib import Path from time import sleep from typing import Any, Callable, Dict, List, Optional, Union import __main__ import numpy as np import torch import torch.distributed from torch.distributed import GradBucket from torch.nn import Module from torch.nn.parallel.distributed import DistributedDataParallel import pytorch_lightning as pl from pytorch_lightning.core.optimizer import LightningOptimizer from pytorch_lightning.overrides import LightningDistributedModule from pytorch_lightning.overrides.distributed import prepare_for_backward from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO from pytorch_lightning.plugins.precision import PrecisionPlugin from pytorch_lightning.strategies.parallel import ParallelStrategy from pytorch_lightning.strategies.strategy import TBroadcast from pytorch_lightning.trainer.states import TrainerFn from pytorch_lightning.utilities import ( _FAIRSCALE_AVAILABLE, _HYDRA_AVAILABLE, _IS_WINDOWS, _TORCH_GREATER_EQUAL_1_8, _TORCH_GREATER_EQUAL_1_9, _TORCH_GREATER_EQUAL_1_10, ) from pytorch_lightning.utilities.distributed import _revert_sync_batchnorm, distributed_available from pytorch_lightning.utilities.distributed import group as _group from pytorch_lightning.utilities.distributed import init_dist_connection, ReduceOp, sync_ddp_if_available from pytorch_lightning.utilities.enums import _StrategyType from pytorch_lightning.utilities.exceptions import DeadlockDetectedException from pytorch_lightning.utilities.rank_zero import rank_zero_only, rank_zero_warn from pytorch_lightning.utilities.seed import reset_seed from pytorch_lightning.utilities.types import STEP_OUTPUT if _FAIRSCALE_AVAILABLE: from fairscale.optim import OSS if _HYDRA_AVAILABLE: from hydra.core.hydra_config import HydraConfig from hydra.utils import get_original_cwd, to_absolute_path if _TORCH_GREATER_EQUAL_1_8: from pytorch_lightning.utilities.distributed import register_ddp_comm_hook log = logging.getLogger(__name__) class DDPStrategy(ParallelStrategy): """Plugin for multi-process single-device training on one or multiple nodes. The main process in each node spawns N-1 child processes via :func:`subprocess.Popen`, where N is the number of devices (e.g. GPU) per node. It is very similar to how :mod:`torch.distributed.launch` launches processes. 
""" distributed_backend = _StrategyType.DDP def __init__( self, accelerator: Optional["pl.accelerators.accelerator.Accelerator"] = None, parallel_devices: Optional[List[torch.device]] = None, cluster_environment: Optional[ClusterEnvironment] = None, checkpoint_io: Optional[CheckpointIO] = None, precision_plugin: Optional[PrecisionPlugin] = None, ddp_comm_state: Optional[object] = None, ddp_comm_hook: Optional[Callable[[Any, GradBucket], torch.futures.Future[torch.Tensor]]] = None, ddp_comm_wrapper: Optional[ Callable[ [Callable[[Any, GradBucket], torch.futures.Future[torch.Tensor]], Any], torch.futures.Future[torch.Tensor], ] ] = None, model_averaging_period: Optional[int] = None, **kwargs: Union[Any, Dict[str, Any]], ) -> None: super().__init__( accelerator=accelerator, parallel_devices=parallel_devices, cluster_environment=cluster_environment, checkpoint_io=checkpoint_io, precision_plugin=precision_plugin, ) log.detail(f"{self.__class__.__name__}: initializing DDP plugin") self.interactive_ddp_procs: List[subprocess.Popen] = [] self._num_nodes = 1 self.sync_batchnorm = False self._ddp_kwargs = kwargs self._ddp_comm_state = ddp_comm_state self._ddp_comm_hook = ddp_comm_hook self._ddp_comm_wrapper = ddp_comm_wrapper self._model_averaging_period = model_averaging_period self._pids: Optional[List[int]] = None self._sync_dir: Optional[str] = None self._rank_0_has_called_call_children_scripts: bool = False self.set_world_ranks() @property def is_distributed(self) -> bool: return True @property def root_device(self) -> torch.device: return self.parallel_devices[self.local_rank] @property def num_nodes(self) -> int: return self._num_nodes @num_nodes.setter def num_nodes(self, num_nodes: int) -> None: # note that world ranks is related to num_nodes, when resetting it, need to reset world ranks self._num_nodes = num_nodes self.set_world_ranks() @property def num_processes(self) -> int: return len(self.parallel_devices) if self.parallel_devices is not None else 0 @property def distributed_sampler_kwargs(self) -> Dict[str, Any]: distributed_sampler_kwargs = dict(num_replicas=(self.num_nodes * self.num_processes), rank=self.global_rank) return distributed_sampler_kwargs @property def _is_single_process_single_device(self) -> bool: return True def setup_environment(self) -> None: # start the other scripts assert self.cluster_environment is not None if not self.cluster_environment.creates_processes_externally: self._call_children_scripts() self.setup_distributed() super().setup_environment() def setup(self, trainer: "pl.Trainer") -> None: super().setup(trainer) # share ddp pids to all processes self._rank_0_has_called_call_children_scripts = ( self.broadcast(self._rank_0_has_called_call_children_scripts) is True ) if self._should_run_deadlock_detection(): self._share_information_to_prevent_deadlock() # move the model to the correct device self.model_to_device() assert self.model is not None if self.sync_batchnorm: self.model = self.configure_sync_batchnorm(self.model) # skip wrapping the model if we are not fitting as no gradients need to be exchanged assert self.lightining_module is not None trainer_fn = self.lightning_module.trainer.state.fn if trainer_fn == TrainerFn.FITTING: self.configure_ddp() def _setup_model(self, model: Module) -> DistributedDataParallel: """Wraps the model into a :class:`~torch.nn.parallel.distributed.DistributedDataParallel` module.""" device_ids = self.determine_ddp_device_ids() log.detail(f"setting up DDP model with device ids: {device_ids}, kwargs: {self._ddp_kwargs}") 
return DistributedDataParallel(module=model, device_ids=device_ids, **self._ddp_kwargs) def _call_children_scripts(self) -> None: # bookkeeping of spawned processes self._check_can_spawn_children() assert self.cluster_environment is not None # DDP Environment variables os.environ["MASTER_ADDR"] = self.cluster_environment.main_address os.environ["MASTER_PORT"] = str(self.cluster_environment.main_port) # allow the user to pass the node rank os.environ["NODE_RANK"] = str(self.cluster_environment.node_rank()) os.environ["LOCAL_RANK"] = str(self.cluster_environment.local_rank()) # Check if the current calling command looked like `python a/b/c.py` or `python -m a.b.c` # See https://docs.python.org/3/reference/import.html#main-spec if __main__.__spec__ is None: # pragma: no-cover # Script called as `python a/b/c.py` # when user is using hydra find the absolute path path_lib = os.path.abspath if not _HYDRA_AVAILABLE else to_absolute_path # pull out the commands used to run the script and resolve the abs file path command = sys.argv try: full_path = path_lib(command[0]) except Exception: full_path = os.path.abspath(command[0]) command[0] = full_path # use the same python interpreter and actually running command = [sys.executable] + command else: # Script called as `python -m a.b.c` command = [sys.executable, "-m", __main__.__spec__.name] + sys.argv[1:] os.environ["WORLD_SIZE"] = f"{self.num_processes * self.num_nodes}" self.interactive_ddp_procs = [] for local_rank in range(1, self.num_processes): env_copy = os.environ.copy() env_copy["LOCAL_RANK"] = f"{local_rank}" # remove env var if global seed not set if os.environ.get("PL_GLOBAL_SEED") is None and "PL_GLOBAL_SEED" in env_copy: del env_copy["PL_GLOBAL_SEED"] # start process # if hydra is available and initialized, make sure to set the cwd correctly cwd: Optional[str] = None if _HYDRA_AVAILABLE: if HydraConfig.initialized(): cwd = get_original_cwd() os_cwd = f'"{os.getcwd()}"' command += [f"hydra.run.dir={os_cwd}", f"hydra.job.name=train_ddp_process_{local_rank}"] proc = subprocess.Popen(command, env=env_copy, cwd=cwd) self.interactive_ddp_procs.append(proc) # starting all processes at once can cause issues # with dataloaders delay between 1-10 seconds delay = np.random.uniform(1, 5, 1)[0] sleep(delay) self._rank_0_has_called_call_children_scripts = True def setup_distributed(self) -> None: log.detail(f"{self.__class__.__name__}: setting up distributed...") reset_seed() # determine which process we are and world size self.set_world_ranks() # set warning rank rank_zero_only.rank = self.global_rank # set up server using proc 0's ip address # try to init for 20 times at max in case ports are taken # where to store ip_table assert self.cluster_environment is not None init_dist_connection(self.cluster_environment, self.torch_distributed_backend) def _check_can_spawn_children(self) -> None: if self.local_rank != 0: raise RuntimeError( "Lightning attempted to launch new distributed processes with `local_rank > 0`. This should not happen." " Possible reasons: 1) LOCAL_RANK environment variable was incorrectly modified by the user," " 2) `ClusterEnvironment.creates_processes_externally` incorrectly implemented." 
) def set_world_ranks(self) -> None: if self.cluster_environment is None: return self.cluster_environment.set_global_rank(self.node_rank * self.num_processes + self.local_rank) self.cluster_environment.set_world_size(self.num_nodes * self.num_processes) rank_zero_only.rank = self.cluster_environment.global_rank() def pre_configure_ddp(self) -> None: # if unset, default `find_unused_parameters` `True` # Many models require setting this parameter to True, as there are corner cases # when not all parameter backward hooks are fired by the autograd engine even if require_grad is set to True. # This flag does come with a performance hit, so it is suggested to disable in cases where it is possible. self._ddp_kwargs["find_unused_parameters"] = self._ddp_kwargs.get("find_unused_parameters", True) assert self.lightning_module is not None if not self.lightning_module.automatic_optimization and not self._ddp_kwargs.get( "find_unused_parameters", False ): # TODO: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization rank_zero_warn( "From PyTorch 1.7.0, Lightning `manual_optimization` needs to set `find_unused_parameters=True` to" " properly work with DDP. Using `find_unused_parameters=True`." ) self._ddp_kwargs["find_unused_parameters"] = True def _register_ddp_hooks(self) -> None: log.detail(f"{self.__class__.__name__}: registering ddp hooks") # In 1.8, DDP communication hooks only work with NCCL backend and SPSD (single process single device) mode # Since 1.9, DDP communication hooks can work on all backends. if _TORCH_GREATER_EQUAL_1_9 or ( _TORCH_GREATER_EQUAL_1_8 and self.root_device.type == "cuda" and self._is_single_process_single_device ): register_ddp_comm_hook( model=self.model, ddp_comm_state=self._ddp_comm_state, ddp_comm_hook=self._ddp_comm_hook, ddp_comm_wrapper=self._ddp_comm_wrapper, ) if _TORCH_GREATER_EQUAL_1_10 and self.lightning_module.trainer.state.fn == TrainerFn.FITTING: import torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD if isinstance(self._ddp_comm_state, post_localSGD.PostLocalSGDState): self._reinit_optimizers_with_post_localSGD(self._ddp_comm_state.start_localSGD_iter) def _reinit_optimizers_with_post_localSGD(self, warmup_steps: int) -> None: log.detail(f"{self.__class__.__name__}: reinitializing optimizers with post localSGD") optimizers = self.optimizers if self._model_averaging_period is None: raise ValueError( "Post-localSGD algorithm is used, but model averaging period is not provided to DDP strategy." ) if _TORCH_GREATER_EQUAL_1_10: if not _IS_WINDOWS: from torch.distributed.optim import DistributedOptimizer import torch.distributed.algorithms.model_averaging.averagers as averagers from torch.distributed.optim import PostLocalSGDOptimizer, ZeroRedundancyOptimizer averager = averagers.PeriodicModelAverager(period=self._model_averaging_period, warmup_steps=warmup_steps) for x, optimizer in enumerate(optimizers): if isinstance(optimizer, LightningOptimizer): optimizer = optimizer._optimizer is_distributed_optimizer = isinstance(optimizer, DistributedOptimizer) if not _IS_WINDOWS else False if ( is_distributed_optimizer or isinstance(optimizer, ZeroRedundancyOptimizer) or (_FAIRSCALE_AVAILABLE and isinstance(optimizer, OSS)) ): raise ValueError( f"Cannot wrap a distributed optimizer of type {optimizer.__name__} by PostLocalSGDOptimizer." 
) if isinstance(optimizer, PostLocalSGDOptimizer): continue optim_class = type(optimizer) post_localSGD_optimizer = PostLocalSGDOptimizer( params=optimizer.param_groups, optimizer_class=optim_class, averager=averager, **optimizer.defaults, ) optimizers[x] = post_localSGD_optimizer del optimizer self.optimizers = optimizers def configure_ddp(self) -> None: log.detail(f"{self.__class__.__name__}: configuring DistributedDataParallel") self.pre_configure_ddp() assert self.model is not None self.model = self._setup_model(LightningDistributedModule(self.model)) self._register_ddp_hooks() def determine_ddp_device_ids(self) -> Optional[List[int]]: if self.root_device.type == "cpu": return None return [self.root_device.index] def barrier(self, *args, **kwargs) -> None: if not distributed_available(): return if _TORCH_GREATER_EQUAL_1_8 and torch.distributed.get_backend() == "nccl": torch.distributed.barrier(device_ids=self.determine_ddp_device_ids()) else: torch.distributed.barrier() def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast: obj = [obj] if self.global_rank != src: obj = [None] torch.distributed.broadcast_object_list(obj, src, group=_group.WORLD) return obj[0] def pre_backward(self, closure_loss: torch.Tensor) -> None: """Run before precision plugin executes backward.""" assert self.model is not None assert self.lightning_module is not None if not self.lightning_module.automatic_optimization: prepare_for_backward(self.model, closure_loss) def model_to_device(self) -> None: log.detail(f"{self.__class__.__name__}: moving model to device [{self.root_device}]...") if self.model: self.model.to(self.root_device) def reduce( self, tensor: torch.Tensor, group: Optional[Any] = None, reduce_op: Union[ReduceOp, str, None] = "mean" ) -> torch.Tensor: """Reduces a tensor from several distributed processes to one aggregated tensor. Args: tensor: the tensor to sync and reduce group: the process group to gather results from. Defaults to all processes (world) reduce_op: the reduction operation. Defaults to 'mean'/'avg'. Can also be a string 'sum' to calculate the sum during reduction. 
Return: reduced value, except when the input was not a tensor the output remains is unchanged """ if isinstance(tensor, torch.Tensor): tensor = sync_ddp_if_available(tensor, group, reduce_op=reduce_op) return tensor def training_step(self, *args, **kwargs) -> STEP_OUTPUT: with self.precision_plugin.train_step_context(): assert self.model is not None return self.model(*args, **kwargs) def validation_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: with self.precision_plugin.val_step_context(): if isinstance(self.model, DistributedDataParallel): # used when calling `trainer.fit` return self.model(*args, **kwargs) else: # used when calling `trainer.validate` assert self.lightning_module is not None return self.lightning_module.validation_step(*args, **kwargs) def test_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: with self.precision_plugin.test_step_context(): assert self.lightning_module is not None return self.lightning_module.test_step(*args, **kwargs) def predict_step(self, *args, **kwargs) -> STEP_OUTPUT: with self.precision_plugin.predict_step_context(): assert self.lightning_module is not None return self.lightning_module.predict_step(*args, **kwargs) def post_training_step(self) -> None: assert self.model is not None assert self.lightning_module is not None if not self.lightning_module.automatic_optimization: self.model.require_backward_grad_sync = True @classmethod def register_strategies(cls, strategy_registry: Dict) -> None: strategy_registry.register( "ddp_find_unused_parameters_false", cls, description="DDP Strategy with `find_unused_parameters` as False", find_unused_parameters=False, ) def _should_run_deadlock_detection(self) -> bool: """Determines whether the plugin will perform process reconciliation in case of errors. If the environment variable `PL_RECONCILE_PROCESS` is set, run detection regardless of the cluster environment. By default this is disabled. Otherwise, if the cluster environment creates the processes, allow the scheduler / parent process to perform the process termination, external to Lightning. """ return os.getenv("PL_RECONCILE_PROCESS", "0") == "1" or self._rank_0_has_called_call_children_scripts def _share_information_to_prevent_deadlock(self) -> None: self._share_pids() # there should be a unique sync_dir per nodes. if self.local_rank == 0: # create a temporary directory used to synchronize processes on deadlock. self._sync_dir = tempfile.mkdtemp() sync_dirs = [] global_node_rank_zero = 0 for _ in range(self.num_nodes): sync_dirs.append(self.broadcast(self._sync_dir, global_node_rank_zero)) global_node_rank_zero += self.world_size // self.num_nodes self._sync_dir = sync_dirs[self.node_rank] def _share_pids(self) -> None: """Make all DDP processes aware of all processes pids.""" self.barrier() pids = self.all_gather(torch.tensor(os.getpid(), device=self.root_device)) pids = pids.cpu().numpy().tolist() self._pids = pids if isinstance(pids, list) else [pids] def reconciliate_processes(self, trace: str) -> None: if self.world_size < 2: return if not self._should_run_deadlock_detection(): return sync_dir = self._sync_dir if not sync_dir: rank_zero_warn("Error handling mechanism for deadlock detection is uninitialized. Skipping check.") return # The cluster may be configured to periodically purge the `/tmp` # directory, in which case `sync_dir` may not exist anymore at this # point. Idempotently create it to ensure its existence. Path(sync_dir).mkdir(parents=True, exist_ok=True) # save a file locally. 
torch.save(True, os.path.join(sync_dir, f"{self.global_rank}.pl")) # sleep for a short time time.sleep(3) # return if all processes wrote a file in the `sync_dir`. # todo (tchaton) Add support for non-shared file-system which will fail. if len(os.listdir(sync_dir)) == (self.world_size // self.num_nodes): return if not self._pids: return for pid in self._pids: if pid != os.getpid(): os.kill(pid, signal.SIGKILL) shutil.rmtree(sync_dir) raise DeadlockDetectedException(f"DeadLock detected from rank: {self.global_rank} \n {trace}") def teardown(self) -> None: log.detail(f"{self.__class__.__name__}: tearing down DDP plugin") super().teardown() if isinstance(self.model, DistributedDataParallel): self.model = self.lightning_module assert self.model is not None if self.sync_batchnorm: self.model = _revert_sync_batchnorm(self.model) if self.root_device.type == "cuda": # GPU teardown log.detail(f"{self.__class__.__name__}: moving model to CPU") assert self.lightning_module is not None self.lightning_module.cpu() # clean up memory torch.cuda.empty_cache()
[]
[]
[ "PL_RECONCILE_PROCESS", "MASTER_ADDR", "MASTER_PORT", "PL_GLOBAL_SEED", "NODE_RANK", "LOCAL_RANK", "WORLD_SIZE" ]
[]
["PL_RECONCILE_PROCESS", "MASTER_ADDR", "MASTER_PORT", "PL_GLOBAL_SEED", "NODE_RANK", "LOCAL_RANK", "WORLD_SIZE"]
python
7
0
slack_cleaner/oauth.py
import os
import webbrowser

from slack_sdk import WebClient
from slack_sdk.oauth import AuthorizeUrlGenerator
from slack_sdk.oauth.state_store import FileOAuthStateStore
from flask import Flask, request, make_response
from threading import Timer

from slack_cleaner.cleaner import start

client_secret = os.environ["SLACK_CLIENT_SECRET"]
client_id = os.environ["SLACK_CLIENT_ID"]
redirect_uri = "http://localhost:23001/slack/oauth"

authorize_url_generator = AuthorizeUrlGenerator(
    authorization_url="https://slack.com/oauth/authorize",
    client_id=client_id,
    scopes=["identify", "users:read", "channels:read", "groups:read", "mpim:read", "im:read",
            "channels:history", "groups:history", "mpim:history", "im:history", "chat:write:bot"],
    user_scopes=["users:read", "channels:read", "channels:history", "chat:write", "groups:history",
                 "groups:read", "im:history", "im:read", "mpim:history", "mpim:read"],
    redirect_uri=redirect_uri
)

app = Flask(__name__)
server = None
token = None
args = None
state_store = FileOAuthStateStore(expiration_seconds=300, base_dir="./slack-cleaner-data")


@app.route("/slack/install", methods=["GET"])
def oauth_start():
    state = state_store.issue()
    url = authorize_url_generator.generate(state)
    return f'<a href="{url}">' \
           f'<img alt="Add to Slack" height="40" width="139" src="https://platform.slack-edge.com/img/add_to_slack.png" srcset="https://platform.slack-edge.com/img/add_to_slack.png 1x, https://platform.slack-edge.com/img/add_to_slack@2x.png 2x" /></a>'


@app.route("/slack/oauth", methods=["GET"])
def oauth_callback():
    # Retrieve the auth code and state from the request params
    if "code" in request.args:
        # Verify the state parameter
        if state_store.consume(request.args["state"]):
            client = WebClient()  # no prepared token needed for this
            # Complete the installation by calling oauth.v2.access API method
            oauth_response = client.oauth_access(
                client_id=client_id,
                client_secret=client_secret,
                redirect_uri=redirect_uri,
                code=request.args["code"]
            )
            token = oauth_response.get("access_token")
            Timer(0, lambda: start(token, args)).start()
            return "Thanks for installing this app!"
        else:
            return make_response(f"Try the installation again (the state value is already expired)", 400)

    error = request.args["error"] if "error" in request.args else ""
    return make_response(f"Something is wrong with the installation (error: {error})", 400)


def obtain_token_and_run(cmd_args):
    global args
    args = cmd_args
    Timer(0, lambda: app.run(port=23001)).start()
    webbrowser.open('http://localhost:23001/slack/install')
[]
[]
[ "SLACK_CLIENT_SECRET", "SLACK_CLIENT_ID" ]
[]
["SLACK_CLIENT_SECRET", "SLACK_CLIENT_ID"]
python
2
0
lambda/main.go
package main // Based on https://github.com/aws-samples/lambda-go-samples

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"runtime"
	"strconv"

	"bytes"
	"crypto/tls"
	"encoding/json"

	"github.com/aws/aws-lambda-go/events"
	"github.com/aws/aws-lambda-go/lambda"

	"net/http"
	"strings"
)

// Lambda function handler
func Handler(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
	// stdout and stderr are sent to AWS CloudWatch Logs
	log.Printf("Incoming: %s %s body %d bytes from SourceIP %s\n", req.HTTPMethod, req.Path, len(req.Body), req.RequestContext.Identity.SourceIP)

	switch req.HTTPMethod {
	case "POST":
		return rest_webhooks_post(req)
	default:
		return events.APIGatewayProxyResponse{
			Body:       "Unsupported method",
			StatusCode: 404,
		}, nil
	}
}

// For marshalling Splunk events
type splunkEvent struct {
	Time   int                    `json:"time"`
	Host   string                 `json:"host"`
	Source string                 `json:"source"`
	Event  map[string]interface{} `json:"event"`
}

type splunkResult struct {
	Code int    `json:"code"`
	Text string `json:"text"`
}

// Handler for an incoming webhooks POST. Make outgoing request
func rest_webhooks_post(session events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
	// Received Webhook data is an array [].msys.xxx_event_key.event
	var sparkPostWebhookEvents []map[string]map[string]map[string]interface{}
	if err := json.Unmarshal([]byte(session.Body), &sparkPostWebhookEvents); err != nil {
		log.Fatal(err)
	}

	var splunkOutputLines string
	// Walk the contents, building a Splunk-compatible output
	for _, ev := range sparkPostWebhookEvents {
		// dereference msys.xxx_event_key, because "type" attribute is all we need to identify the event
		for _, event := range ev["msys"] {
			var se splunkEvent
			ts, err := strconv.Atoi(event["timestamp"].(string))
			if err != nil {
				log.Fatalf("Timestamp conversion error %s", err)
			}
			se.Time = ts
			se.Host = session.RequestContext.Identity.SourceIP
			se.Source = "SparkPost"
			se.Event = event
			jsonb, err := json.Marshal(se)
			if err != nil {
				log.Fatalf("JSON marshaling failed : %s", err)
			}
			splunkOutputLines += string(jsonb) + "\n"
		}
	}
	var buf = bytes.NewBufferString(splunkOutputLines)
	var splunkUrl = strings.Trim(os.Getenv("SPLUNK_URL"), " ") // Trim leading and trailing spaces, if present

	// Splunk provides x509: certificate signed by unknown authority :-( , so we need to skip those checks (like curl -k)
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	client := &http.Client{Transport: tr}

	// Selectively copy across request method and headers
	req, _ := http.NewRequest(session.HTTPMethod, splunkUrl, buf)
	for hname, hval := range session.Headers {
		switch hname {
		case "Accept-Encoding", "Accept", "Authorization", "Content-Type":
			req.Header.Add(hname, hval)
		case "User-Agent":
			req.Header.Add(hname, "SparkPost-Splunk adapter")
		}
	}
	res, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	resBuffer := new(bytes.Buffer)
	resBuffer.ReadFrom(res.Body)
	resStr := resBuffer.String()

	// Special case: SparkPost sent an empty "ping" on webhook creation.
	// Splunk returns Response 400 {"text":"No data","code":5}
	var splunkRes splunkResult
	if err := json.Unmarshal([]byte(resStr), &splunkRes); err != nil {
		log.Fatal(err)
	}
	if splunkOutputLines == "" && res.StatusCode >= 400 && splunkRes.Code == 5 && splunkRes.Text == "No data" {
		resStr = fmt.Sprintf("[response %d %s mapped -> 200 OK by this adapter]", res.StatusCode, resStr)
		res.StatusCode = 200
	}
	log.Printf("Outgoing: %s %s: Response %d %s", req.Method, splunkUrl, res.StatusCode, resStr)
	return events.APIGatewayProxyResponse{
		Body:       resStr,
		StatusCode: res.StatusCode,
	}, nil
}

func main() {
	if runtime.GOOS == "darwin" {
		// Simple code to simulate a request locally on OSX. Takes a local JSON file as cmd-line arg
		if len(os.Args) < 2 {
			fmt.Println("Missing JSON filename in command line args")
		} else {
			requestFileName := os.Args[1]
			b, err := ioutil.ReadFile(requestFileName) // just pass the file name
			if err != nil {
				fmt.Println(err)
			}
			var req events.APIGatewayProxyRequest
			err = json.Unmarshal(b, &req)
			_, _ = Handler(req)
		}
	} else {
		// runtime code
		lambda.Start(Handler)
	}
}
[ "\"SPLUNK_URL\"" ]
[]
[ "SPLUNK_URL" ]
[]
["SPLUNK_URL"]
go
1
0
Chapter11/contextual_bandit_agent.py
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import os
from tensorflow.python.framework import ops
import warnings

warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
ops.reset_default_graph()


class contextualBandit():
    def __init__(self):
        '''
        This constructor lists out all of our bandits. We assume arms 4, 2, 3 and 1
        are the most optimal for the corresponding states, respectively.
        '''
        self.state = 0
        self.bandits = np.array([[0.2, 0, -0.0, -5],
                                 [0.1, -5, 1, 0.25],
                                 [0.3, 0.4, -5, 0.5],
                                 [-5, 5, 5, 5]])
        self.num_bandits = self.bandits.shape[0]
        self.num_actions = self.bandits.shape[1]

    def getBandit(self):
        '''
        This function returns a random state for each episode.
        '''
        self.state = np.random.randint(0, len(self.bandits))
        return self.state

    def pullArm(self, action):
        '''
        This function creates the reward for the bandits on the basis of randomly
        generated numbers. It then returns either a positive or negative reward for the action.
        '''
        bandit = self.bandits[self.state, action]
        result = np.random.randn(1)
        if result > bandit:
            return 1
        else:
            return -1


class ContextualAgent():
    def __init__(self, lr, s_size, a_size):
        '''
        This function establishes the feed-forward part of the network.
        The agent takes a state and produces an action - i.e. a contextual agent.
        '''
        self.state_in = tf.placeholder(shape=[1], dtype=tf.int32)
        state_in_OH = slim.one_hot_encoding(self.state_in, s_size)
        output = slim.fully_connected(state_in_OH, a_size, biases_initializer=None,
                                      activation_fn=tf.nn.sigmoid, weights_initializer=tf.ones_initializer())
        self.output = tf.reshape(output, [-1])
        self.chosen_action = tf.argmax(self.output, 0)
        self.reward_holder = tf.placeholder(shape=[1], dtype=tf.float32)
        self.action_holder = tf.placeholder(shape=[1], dtype=tf.int32)
        self.responsible_weight = tf.slice(self.output, self.action_holder, [1])
        self.loss = -(tf.log(self.responsible_weight) * self.reward_holder)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr)
        self.update = optimizer.minimize(self.loss)


tf.reset_default_graph()  # Clear the Tensorflow graph.

learning_rate = 0.001
contextualBandit = contextualBandit()  # Load the bandits.
contextualAgent = ContextualAgent(lr=learning_rate, s_size=contextualBandit.num_bandits,
                                  a_size=contextualBandit.num_actions)  # Load the agent.
weights = tf.trainable_variables()[0]  # The weights we will evaluate to look into the network.

max_iteration = 10000  # Set the max iteration for training the agent.
total_reward = np.zeros([contextualBandit.num_bandits, contextualBandit.num_actions])  # Set scoreboard for bandits to 0.
chance_of_random_action = 0.1  # Set the chance of taking a random action.

init_op = tf.global_variables_initializer()

right_flag = 0
wrong_flag = 0

# Launch the tensorflow graph
with tf.Session() as sess:
    sess.run(init_op)
    i = 0
    while i < max_iteration:
        s = contextualBandit.getBandit()  # Get a state from the environment.
        # Choose either a random action or one from our network.
        if np.random.rand(1) < chance_of_random_action:
            action = np.random.randint(contextualBandit.num_actions)
        else:
            action = sess.run(contextualAgent.chosen_action, feed_dict={contextualAgent.state_in: [s]})
        reward = contextualBandit.pullArm(action)  # Get our reward for taking an action given a bandit.
        # Update the network.
        feed_dict = {contextualAgent.reward_holder: [reward], contextualAgent.action_holder: [action],
                     contextualAgent.state_in: [s]}
        _, ww = sess.run([contextualAgent.update, weights], feed_dict=feed_dict)
        # Update our running tally of scores.
        total_reward[s, action] += reward
        if i % 500 == 0:
            print("Mean reward for each of the " + str(contextualBandit.num_bandits) + " bandits: " + str(np.mean(total_reward, axis=1)))
        i += 1

for a in range(contextualBandit.num_bandits):
    print("The agent thinks action " + str(np.argmax(ww[a]) + 1) + " for bandit " + str(a + 1) + " would be the most efficient one.")
    if np.argmax(ww[a]) == np.argmin(contextualBandit.bandits[a]):
        right_flag += 1
        print(" and it was right at the end!")
    else:
        print(" and it was wrong at the end!")
        wrong_flag += 1

prediction_accuracy = (right_flag / (right_flag + wrong_flag))
print("Prediction accuracy (%):", prediction_accuracy * 100)
[]
[]
[ "TF_CPP_MIN_LOG_LEVEL" ]
[]
["TF_CPP_MIN_LOG_LEVEL"]
python
1
0
codalab/common.py
""" This module exports some simple names used throughout the CodaLab bundle system: - The various CodaLab error classes, with documentation for each. - The State class, an enumeration of all legal bundle states. - precondition, a utility method that check's a function's input preconditions. """ import logging import os import re import http.client import urllib.request import urllib.error from dataclasses import dataclass from retry import retry from enum import Enum from azure.storage.blob import generate_blob_sas, BlobSasPermissions import datetime from codalab.lib.beam.filesystems import ( AZURE_BLOB_ACCOUNT_NAME, AZURE_BLOB_ACCOUNT_KEY, AZURE_BLOB_CONTAINER_NAME, AZURE_BLOB_HTTP_ENDPOINT, ) # Increment this on master when ready to cut a release. # http://semver.org/ CODALAB_VERSION = '1.1.4' BINARY_PLACEHOLDER = '<binary>' URLOPEN_TIMEOUT_SECONDS = int(os.environ.get('CODALAB_URLOPEN_TIMEOUT_SECONDS', 5 * 60)) # Silence verbose HTTP output from Azure Blob logger = logging.getLogger('azure.core.pipeline.policies.http_logging_policy') logger.setLevel(logging.WARNING) class IntegrityError(ValueError): """ Raised by the model when there is a database integrity issue. Indicates a serious error that either means that there was a bug in the model code that left the database in a bad state, or that there was an out-of-band database edit with the same result. """ class PreconditionViolation(ValueError): """ Raised when a value generated by one module fails to satisfy a precondition required by another module. This class of error is serious and should indicate a problem in code, but it it is not an AssertionError because it is not local to a single module. """ class UsageError(ValueError): """ Raised when user input causes an exception. This error is the only one for which the command-line client suppresses output. """ class NotFoundError(UsageError): """ Raised when a requested resource has not been found. Similar to HTTP status 404. """ class AuthorizationError(UsageError): """ Raised when access to a resource is refused because authentication is required and has not been provided. Similar to HTTP status 401. """ class PermissionError(UsageError): """ Raised when access to a resource is refused because the user does not have necessary permissions. Similar to HTTP status 403. """ class LoginPermissionError(ValueError): """ Raised when the login credentials are incorrect. """ class DiskQuotaExceededError(ValueError): """ Raised when the disk quota left on the server is less than the bundle size. """ class SingularityError(ValueError): """ General purpose singularity error """ # Listed in order of most specific to least specific. http_codes_and_exceptions = [ (http.client.FORBIDDEN, PermissionError), (http.client.UNAUTHORIZED, AuthorizationError), (http.client.NOT_FOUND, NotFoundError), (http.client.BAD_REQUEST, UsageError), ] def exception_to_http_error(e): """ Returns the appropriate HTTP error code and message for the given exception. """ for known_code, exception_type in http_codes_and_exceptions: if isinstance(e, exception_type): return known_code, str(e) return http.client.INTERNAL_SERVER_ERROR, str(e) def http_error_to_exception(code, message): """ Returns the appropriate exception for the given HTTP error code and message. 
""" for known_code, exception_type in http_codes_and_exceptions: if code == known_code: return exception_type(message) if code >= 400 and code < 500: return UsageError(message) return Exception(message) def precondition(condition, message): if not condition: raise PreconditionViolation(message) def ensure_str(response): """ Ensure the data type of input response to be string :param response: a response in bytes or string :return: the input response in string """ if isinstance(response, str): return response try: return response.decode() except UnicodeDecodeError: return BINARY_PLACEHOLDER @retry(urllib.error.URLError, tries=2, delay=1, backoff=2) def urlopen_with_retry(request: urllib.request.Request, timeout: int = URLOPEN_TIMEOUT_SECONDS): """ Makes a request using urlopen with a timeout of URLOPEN_TIMEOUT_SECONDS seconds and retries on failures. Retries a maximum of 2 times, with an initial delay of 1 second and exponential backoff factor of 2 for subsequent failures (1s and 2s). :param request: Can be a url string or a Request object :param timeout: Timeout for urlopen in seconds :return: the response object """ return urllib.request.urlopen(request, timeout=timeout) class StorageType(Enum): """Possible storage types for bundles. When updating this enum, sync it with with the enum in the storage_type column in codalab.model.tables and add the appropriate migrations to reflect the column change. """ DISK_STORAGE = "disk" AZURE_BLOB_STORAGE = "azure_blob" class StorageURLScheme(Enum): """Possible storage URL schemes. URLs for the corresponding storage type will begin with the scheme specified. """ DISK_STORAGE = "" AZURE_BLOB_STORAGE = "azfs://" @dataclass(frozen=True) class LinkedBundlePath: """A LinkedBundlePath refers to a path that points to the location of a linked bundle within a specific storage location. It can either point directly to the bundle, or to a file that is located within that bundle. It is constructed by parsing a given bundle link URL by calling parse_bundle_url(). Attributes: storage_type (StorageType): Which storage type is used to store this bundle. bundle_path (str): Path to the bundle contents in that particular storage. is_archive (bool): Whether this bundle is stored as an indexed archive file (contents.gz / contents.tar.gz + an index.sqlite file. Only done currently by Azure Blob Storage. is_archive_dir (bool): Whether this bundle is stored as a contents.tar.gz file (which represents a directory) or a contents.gz file (which represents a single file). Only applicable if is_archive is True. index_path (str): Path to index.sqlite file that is used to index this bundle's contents. Only applicable if is_archive is True. uses_beam (bool): Whether this bundle's storage type requires using Apache Beam to interact with it. archive_subpath (str): If is_archive is True, returns the subpath within the archive file for the file that this BundlePath points to. bundle_uuid (str): UUID of the bundle that this path refers to. """ storage_type: StorageType bundle_path: str is_archive: bool is_archive_dir: bool index_path: str uses_beam: bool archive_subpath: str bundle_uuid: str def _get_sas_url(self, path, **kwargs): """Generates a SAS URL that can be used to read the given blob for one hour.""" if self.storage_type != StorageType.AZURE_BLOB_STORAGE.value: raise ValueError( f"SAS URLs can only be retrieved for bundles on Blob Storage. Storage type is: {self.storage_type}." 
) blob_name = path.replace( f"azfs://{AZURE_BLOB_ACCOUNT_NAME}/{AZURE_BLOB_CONTAINER_NAME}/", "" ) # for example, "0x9955c356ed2f42e3970bdf647f3358c8/contents.gz" sas_token = generate_blob_sas( **kwargs, account_name=AZURE_BLOB_ACCOUNT_NAME, container_name=AZURE_BLOB_CONTAINER_NAME, account_key=AZURE_BLOB_ACCOUNT_KEY, permission=BlobSasPermissions(read=True), expiry=datetime.datetime.now() + datetime.timedelta(hours=1), blob_name=blob_name, ) return f"{AZURE_BLOB_HTTP_ENDPOINT}/{AZURE_BLOB_CONTAINER_NAME}/{blob_name}?{sas_token}" def bundle_path_sas_url(self, **kwargs): return self._get_sas_url(self.bundle_path, **kwargs) def index_path_sas_url(self, **kwargs): return self._get_sas_url(self.index_path, **kwargs) def parse_linked_bundle_url(url): """Parses a linked bundle URL. This bundle URL usually refers to: - an archive file on Blob Storage: "azfs://storageclwsdev0/bundles/uuid/contents.tar.gz" (contents.gz for files, contents.tar.gz for directories) - a single file that is stored within a subpath of an archive file on Blob Storage: "azfs://storageclwsdev0/bundles/uuid/contents.tar.gz/file1" Returns a LinkedBundlePath instance to encode this information. """ if url.startswith(StorageURLScheme.AZURE_BLOB_STORAGE.value): uses_beam = True storage_type = StorageType.AZURE_BLOB_STORAGE.value url = url[len(StorageURLScheme.AZURE_BLOB_STORAGE.value) :] storage_account, container, bundle_uuid, contents_file, *remainder = url.split("/", 4) bundle_path = f"{StorageURLScheme.AZURE_BLOB_STORAGE.value}{storage_account}/{container}/{bundle_uuid}/{contents_file}" is_archive = contents_file.endswith(".gz") or contents_file.endswith(".tar.gz") is_archive_dir = contents_file.endswith(".tar.gz") index_path = None if is_archive: # Archive index is stored as an "index.sqlite" file in the same folder as the archive file. index_path = re.sub(r'/contents(.tar)?.gz$', '/index.sqlite', bundle_path) archive_subpath = remainder[0] if is_archive and len(remainder) else None else: storage_type = StorageType.DISK_STORAGE.value bundle_path = url is_archive = False is_archive_dir = False index_path = None uses_beam = False archive_subpath = None bundle_uuid = None return LinkedBundlePath( storage_type=storage_type, bundle_path=bundle_path, is_archive=is_archive, is_archive_dir=is_archive_dir, index_path=index_path, uses_beam=uses_beam, archive_subpath=archive_subpath, bundle_uuid=bundle_uuid, )
[]
[]
[ "CODALAB_URLOPEN_TIMEOUT_SECONDS" ]
[]
["CODALAB_URLOPEN_TIMEOUT_SECONDS"]
python
1
0
Data/Juliet-Java/Juliet-Java-v103/000/250/870/CWE191_Integer_Underflow__int_Environment_postdec_02.java
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE191_Integer_Underflow__int_Environment_postdec_02.java Label Definition File: CWE191_Integer_Underflow__int.label.xml Template File: sources-sinks-02.tmpl.java */ /* * @description * CWE: 191 Integer Underflow * BadSource: Environment Read data from an environment variable * GoodSource: A hardcoded non-zero, non-min, non-max, even number * Sinks: decrement * GoodSink: Ensure there will not be an underflow before decrementing data * BadSink : Decrement data, which can cause an Underflow * Flow Variant: 02 Control flow: if(true) and if(false) * * */ package testcases.CWE191_Integer_Underflow.s04; import testcasesupport.*; import javax.servlet.http.*; import java.util.logging.Level; public class CWE191_Integer_Underflow__int_Environment_postdec_02 extends AbstractTestCase { public void bad() throws Throwable { int data; if (true) { data = Integer.MIN_VALUE; /* Initialize data */ /* get environment variable ADD */ /* POTENTIAL FLAW: Read data from an environment variable */ { String stringNumber = System.getenv("ADD"); if (stringNumber != null) // avoid NPD incidental warnings { try { data = Integer.parseInt(stringNumber.trim()); } catch(NumberFormatException exceptNumberFormat) { IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat); } } } } else { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run * but ensure data is inititialized before the Sink to avoid compiler errors */ data = 0; } if (true) { /* POTENTIAL FLAW: if data == Integer.MIN_VALUE, this will overflow */ data--; int result = (int)(data); IO.writeLine("result: " + result); } } /* goodG2B1() - use goodsource and badsink by changing first true to false */ private void goodG2B1() throws Throwable { int data; if (false) { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run * but ensure data is inititialized before the Sink to avoid compiler errors */ data = 0; } else { /* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */ data = 2; } if (true) { /* POTENTIAL FLAW: if data == Integer.MIN_VALUE, this will overflow */ data--; int result = (int)(data); IO.writeLine("result: " + result); } } /* goodG2B2() - use goodsource and badsink by reversing statements in first if */ private void goodG2B2() throws Throwable { int data; if (true) { /* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */ data = 2; } else { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run * but ensure data is inititialized before the Sink to avoid compiler errors */ data = 0; } if (true) { /* POTENTIAL FLAW: if data == Integer.MIN_VALUE, this will overflow */ data--; int result = (int)(data); IO.writeLine("result: " + result); } } /* goodB2G1() - use badsource and goodsink by changing second true to false */ private void goodB2G1() throws Throwable { int data; if (true) { data = Integer.MIN_VALUE; /* Initialize data */ /* get environment variable ADD */ /* POTENTIAL FLAW: Read data from an environment variable */ { String stringNumber = System.getenv("ADD"); if (stringNumber != null) // avoid NPD incidental warnings { try { data = Integer.parseInt(stringNumber.trim()); } catch(NumberFormatException exceptNumberFormat) { IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat); } } } } else { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run * but ensure data 
is inititialized before the Sink to avoid compiler errors */ data = 0; } if (false) { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ IO.writeLine("Benign, fixed string"); } else { /* FIX: Add a check to prevent an underflow from occurring */ if (data > Integer.MIN_VALUE) { data--; int result = (int)(data); IO.writeLine("result: " + result); } else { IO.writeLine("data value is too small to decrement."); } } } /* goodB2G2() - use badsource and goodsink by reversing statements in second if */ private void goodB2G2() throws Throwable { int data; if (true) { data = Integer.MIN_VALUE; /* Initialize data */ /* get environment variable ADD */ /* POTENTIAL FLAW: Read data from an environment variable */ { String stringNumber = System.getenv("ADD"); if (stringNumber != null) // avoid NPD incidental warnings { try { data = Integer.parseInt(stringNumber.trim()); } catch(NumberFormatException exceptNumberFormat) { IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat); } } } } else { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run * but ensure data is inititialized before the Sink to avoid compiler errors */ data = 0; } if (true) { /* FIX: Add a check to prevent an underflow from occurring */ if (data > Integer.MIN_VALUE) { data--; int result = (int)(data); IO.writeLine("result: " + result); } else { IO.writeLine("data value is too small to decrement."); } } } public void good() throws Throwable { goodG2B1(); goodG2B2(); goodB2G1(); goodB2G2(); } /* Below is the main(). It is only used when building this testcase on * its own for testing or for building a binary to use in testing binary * analysis tools. It is not used when compiling all the testcases as one * application, which is how source code analysis tools are tested. */ public static void main(String[] args) throws ClassNotFoundException, InstantiationException, IllegalAccessException { mainFromParent(args); } }
[ "\"ADD\"", "\"ADD\"", "\"ADD\"" ]
[]
[ "ADD" ]
[]
["ADD"]
java
1
0
palm/cli.py
import sys
import click
import importlib
import os
import pkg_resources
from pathlib import Path
from typing import Optional, Any, Callable, List

from .environment import Environment
from .palm_config import PalmConfig
from .plugin_manager import PluginManager
from .utils import is_cmd_file, cmd_name_from_file, run_on_host

CONTEXT_SETTINGS = dict(auto_envvar_prefix="PALM")
plugin_manager_instance = PluginManager()
palm_config = PalmConfig()


class PalmCLI(click.MultiCommand):
    def __init__(
        self,
        name: Optional[str] = None,
        invoke_without_command: bool = None,
        no_args_is_help: Optional[bool] = None,
        subcommand_metavar: Optional[str] = None,
        chain: bool = None,
        result_callback: Optional[Callable[..., Any]] = None,
        **attrs: Any,
    ) -> None:
        try:
            palm_config.validate_branch()
        except SystemExit as e:
            sys.exit(1)

        self.palm = palm_config
        self.plugin_manager = plugin_manager_instance
        self.plugin_manager.load_plugins(self.palm.plugins)

        super().__init__(
            name=name,
            invoke_without_command=invoke_without_command,
            no_args_is_help=no_args_is_help,
            subcommand_metavar=subcommand_metavar,
            chain=chain,
            result_callback=result_callback,
            **attrs,
        )

    def _commands_from_dir(self, dir) -> List[str]:
        commands = []
        for filename in os.listdir(dir):
            if is_cmd_file(filename):
                commands.append(cmd_name_from_file(filename))
        return commands

    def list_commands(self, ctx) -> List[str]:
        cmds = self.plugin_manager.plugin_command_list
        dedupe = set(cmds)
        cmds = list(dedupe)
        cmds.sort()
        project_excluded_commands = self.palm.config.get('excluded_commands', [])
        cmds = filter(lambda x: x not in project_excluded_commands, cmds)
        return cmds

    def get_command(self, ctx, cmd_name: str) -> click.Command:
        try:
            if self.plugin_manager.is_plugin_command(cmd_name):
                spec = self.plugin_manager.command_spec(cmd_name)
            else:
                raise FileNotFoundError
            mod = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(mod)
        except ImportError as error:
            click.secho(f'Import error: {error}', fg="red")
            return
        except FileNotFoundError:
            click.secho('Command not found, check spelling!', fg="red")
            return

        return mod.cli


def get_version():
    try:
        version = pkg_resources.require("palm")[0].version
    except pkg_resources.DistributionNotFound:
        version = 'unknown'
    return version


def required_dependencies_ready():
    docker_checks = (
        ("docker --version", "Docker is not installed, please install it first",),
        ("docker-compose --version", "Docker Compose is not installed, please install it first",),
        ("docker ps", "Docker is not running, please start it first",),
    )

    for cmd, msg in docker_checks:
        if run_on_host(cmd, capture_output=True)[0] > 0:
            click.secho(msg, fg="red")
            return False

    return True


@click.group(cls=PalmCLI, context_settings=CONTEXT_SETTINGS)
@click.version_option(get_version())
@click.pass_context
def cli(ctx):
    """Palmetto data product command line interface."""
    is_test = os.getenv("PALM_TEST")
    if not (is_test or required_dependencies_ready()):
        ctx.exit(1)
    ctx.obj = Environment(plugin_manager_instance, palm_config)


if __name__ == "__main__":
    cli()
[]
[]
[ "PALM_TEST" ]
[]
["PALM_TEST"]
python
1
0
pkg/targetdir/targetdir.go
package targetdir

import (
	"fmt"
	"os"
	"runtime"
)

// HomeFolder returns the user's home folder: $HOME on Linux and macOS,
// and USERPROFILE on Windows.
func HomeFolder() string {
	if runtime.GOOS == "windows" {
		return os.Getenv("USERPROFILE")
	}
	return os.Getenv("HOME")
}

// TargetHome returns the location of the target
// folder, usually $HOME/.target
func TargetHome() string {
	return fmt.Sprintf("%s/.target", HomeFolder())
}

// TargetHomeCreate checks for the target directory
// and profiles.hcl file and creates them if they don't exist
func TargetHomeCreate() {
	if _, err := os.Stat(TargetHome()); os.IsNotExist(err) {
		os.Mkdir(TargetHome(), 0755)
	}

	f := fmt.Sprintf("%s/.target/profiles.hcl", HomeFolder())
	if _, err := os.Stat(f); os.IsNotExist(err) {
		os.Create(f)
	}
}
[ "\"USERPROFILE\"", "\"HOME\"" ]
[]
[ "HOME", "USERPROFILE" ]
[]
["HOME", "USERPROFILE"]
go
2
0
storage/remote/queue_manager_test.go
// Copyright 2013 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package remote import ( "context" "fmt" "io/ioutil" "math" "net/url" "os" "runtime/pprof" "sort" "strconv" "strings" "sync" "testing" "time" "github.com/go-kit/log" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/client_golang/prometheus" client_testutil "github.com/prometheus/client_golang/prometheus/testutil" common_config "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/atomic" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" ) const defaultFlushDeadline = 1 * time.Minute func newHighestTimestampMetric() *maxTimestamp { return &maxTimestamp{ Gauge: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "highest_timestamp_in_seconds", Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.", }), } } func TestSampleDelivery(t *testing.T) { testcases := []struct { name string samples bool exemplars bool }{ {samples: true, exemplars: false, name: "samples only"}, {samples: true, exemplars: true, name: "both samples and exemplars"}, {samples: false, exemplars: true, name: "exemplars only"}, } // Let's create an even number of send batches so we don't run into the // batch timeout case. n := 3 dir := t.TempDir() s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil) defer s.Close() queueConfig := config.DefaultQueueConfig queueConfig.BatchSendDeadline = model.Duration(100 * time.Millisecond) queueConfig.MaxShards = 1 writeConfig := config.DefaultRemoteWriteConfig // We need to set URL's so that metric creation doesn't panic. writeConfig.URL = &common_config.URL{ URL: &url.URL{ Host: "http://test-storage.com", }, } writeConfig.QueueConfig = queueConfig writeConfig.SendExemplars = true conf := &config.Config{ GlobalConfig: config.DefaultGlobalConfig, RemoteWriteConfigs: []*config.RemoteWriteConfig{ &writeConfig, }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { var ( series []record.RefSeries samples []record.RefSample exemplars []record.RefExemplar ) // Generates same series in both cases. if tc.samples { samples, series = createTimeseries(n, n) } if tc.exemplars { exemplars, series = createExemplars(n, n) } // Apply new config. queueConfig.Capacity = len(samples) queueConfig.MaxSamplesPerSend = len(samples) / 2 require.NoError(t, s.ApplyConfig(conf)) hash, err := toHash(writeConfig) require.NoError(t, err) qm := s.rws.queues[hash] c := NewTestWriteClient() qm.SetClient(c) qm.StoreSeries(series, 0) // Send first half of data. 
c.expectSamples(samples[:len(samples)/2], series) c.expectExemplars(exemplars[:len(exemplars)/2], series) qm.Append(samples[:len(samples)/2]) qm.AppendExemplars(exemplars[:len(exemplars)/2]) c.waitForExpectedData(t) // Send second half of data. c.expectSamples(samples[len(samples)/2:], series) c.expectExemplars(exemplars[len(exemplars)/2:], series) qm.Append(samples[len(samples)/2:]) qm.AppendExemplars(exemplars[len(exemplars)/2:]) c.waitForExpectedData(t) }) } } func TestMetadataDelivery(t *testing.T) { c := NewTestWriteClient() dir := t.TempDir() cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig metrics := newQueueManagerMetrics(nil, "", "") m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.Start() defer m.Stop() metadata := []scrape.MetricMetadata{} numMetadata := 1532 for i := 0; i < numMetadata; i++ { metadata = append(metadata, scrape.MetricMetadata{ Metric: "prometheus_remote_storage_sent_metadata_bytes_total_" + strconv.Itoa(i), Type: textparse.MetricTypeCounter, Help: "a nice help text", Unit: "", }) } m.AppendMetadata(context.Background(), metadata) require.Equal(t, numMetadata, len(c.receivedMetadata)) // One more write than the rounded qoutient should be performed in order to get samples that didn't // fit into MaxSamplesPerSend. require.Equal(t, numMetadata/mcfg.MaxSamplesPerSend+1, c.writesReceived) // Make sure the last samples were sent. require.Equal(t, c.receivedMetadata[metadata[len(metadata)-1].Metric][0].MetricFamilyName, metadata[len(metadata)-1].Metric) } func TestSampleDeliveryTimeout(t *testing.T) { // Let's send one less sample than batch size, and wait the timeout duration n := 9 samples, series := createTimeseries(n, n) c := NewTestWriteClient() cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig cfg.MaxShards = 1 cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond) dir := t.TempDir() metrics := newQueueManagerMetrics(nil, "", "") m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.StoreSeries(series, 0) m.Start() defer m.Stop() // Send the samples twice, waiting for the samples in the meantime. c.expectSamples(samples, series) m.Append(samples) c.waitForExpectedData(t) c.expectSamples(samples, series) m.Append(samples) c.waitForExpectedData(t) } func TestSampleDeliveryOrder(t *testing.T) { ts := 10 n := config.DefaultQueueConfig.MaxSamplesPerSend * ts samples := make([]record.RefSample, 0, n) series := make([]record.RefSeries, 0, n) for i := 0; i < n; i++ { name := fmt.Sprintf("test_metric_%d", i%ts) samples = append(samples, record.RefSample{ Ref: chunks.HeadSeriesRef(i), T: int64(i), V: float64(i), }) series = append(series, record.RefSeries{ Ref: chunks.HeadSeriesRef(i), Labels: labels.Labels{labels.Label{Name: "__name__", Value: name}}, }) } c := NewTestWriteClient() c.expectSamples(samples, series) dir := t.TempDir() cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig metrics := newQueueManagerMetrics(nil, "", "") m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.StoreSeries(series, 0) m.Start() defer m.Stop() // These should be received by the client. 
m.Append(samples) c.waitForExpectedData(t) } func TestShutdown(t *testing.T) { deadline := 1 * time.Second c := NewTestBlockedWriteClient() dir := t.TempDir() cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig metrics := newQueueManagerMetrics(nil, "", "") m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false) n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend samples, series := createTimeseries(n, n) m.StoreSeries(series, 0) m.Start() // Append blocks to guarantee delivery, so we do it in the background. go func() { m.Append(samples) }() time.Sleep(100 * time.Millisecond) // Test to ensure that Stop doesn't block. start := time.Now() m.Stop() // The samples will never be delivered, so duration should // be at least equal to deadline, otherwise the flush deadline // was not respected. duration := time.Since(start) if duration > deadline+(deadline/10) { t.Errorf("Took too long to shutdown: %s > %s", duration, deadline) } if duration < deadline { t.Errorf("Shutdown occurred before flush deadline: %s < %s", duration, deadline) } } func TestSeriesReset(t *testing.T) { c := NewTestBlockedWriteClient() deadline := 5 * time.Second numSegments := 4 numSeries := 25 dir := t.TempDir() cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig metrics := newQueueManagerMetrics(nil, "", "") m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false) for i := 0; i < numSegments; i++ { series := []record.RefSeries{} for j := 0; j < numSeries; j++ { series = append(series, record.RefSeries{Ref: chunks.HeadSeriesRef((i * 100) + j), Labels: labels.Labels{{Name: "a", Value: "a"}}}) } m.StoreSeries(series, i) } require.Equal(t, numSegments*numSeries, len(m.seriesLabels)) m.SeriesReset(2) require.Equal(t, numSegments*numSeries/2, len(m.seriesLabels)) } func TestReshard(t *testing.T) { size := 10 // Make bigger to find more races. 
nSeries := 6 nSamples := config.DefaultQueueConfig.Capacity * size samples, series := createTimeseries(nSamples, nSeries) c := NewTestWriteClient() c.expectSamples(samples, series) cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig cfg.MaxShards = 1 dir := t.TempDir() metrics := newQueueManagerMetrics(nil, "", "") m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.StoreSeries(series, 0) m.Start() defer m.Stop() go func() { for i := 0; i < len(samples); i += config.DefaultQueueConfig.Capacity { sent := m.Append(samples[i : i+config.DefaultQueueConfig.Capacity]) require.True(t, sent, "samples not sent") time.Sleep(100 * time.Millisecond) } }() for i := 1; i < len(samples)/config.DefaultQueueConfig.Capacity; i++ { m.shards.stop() m.shards.start(i) time.Sleep(100 * time.Millisecond) } c.waitForExpectedData(t) } func TestReshardRaceWithStop(t *testing.T) { c := NewTestWriteClient() var m *QueueManager h := sync.Mutex{} h.Lock() cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig exitCh := make(chan struct{}) go func() { for { metrics := newQueueManagerMetrics(nil, "", "") m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.Start() h.Unlock() h.Lock() m.Stop() select { case exitCh <- struct{}{}: return default: } } }() for i := 1; i < 100; i++ { h.Lock() m.reshardChan <- i h.Unlock() } <-exitCh } func TestReshardPartialBatch(t *testing.T) { samples, series := createTimeseries(1, 10) c := NewTestBlockedWriteClient() cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig cfg.MaxShards = 1 batchSendDeadline := time.Millisecond flushDeadline := 10 * time.Millisecond cfg.BatchSendDeadline = model.Duration(batchSendDeadline) metrics := newQueueManagerMetrics(nil, "", "") m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.StoreSeries(series, 0) m.Start() for i := 0; i < 100; i++ { done := make(chan struct{}) go func() { m.Append(samples) time.Sleep(batchSendDeadline) m.shards.stop() m.shards.start(1) done <- struct{}{} }() select { case <-done: case <-time.After(2 * time.Second): t.Error("Deadlock between sending and stopping detected") pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) t.FailNow() } } // We can only call stop if there was not a deadlock. m.Stop() } // TestQueueFilledDeadlock makes sure the code does not deadlock in the case // where a large scrape (> capacity + max samples per send) is appended at the // same time as a batch times out according to the batch send deadline. 
func TestQueueFilledDeadlock(t *testing.T) { samples, series := createTimeseries(50, 1) c := NewNopWriteClient() cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig cfg.MaxShards = 1 cfg.MaxSamplesPerSend = 10 cfg.Capacity = 20 flushDeadline := time.Second batchSendDeadline := time.Millisecond cfg.BatchSendDeadline = model.Duration(batchSendDeadline) metrics := newQueueManagerMetrics(nil, "", "") m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.StoreSeries(series, 0) m.Start() defer m.Stop() for i := 0; i < 100; i++ { done := make(chan struct{}) go func() { time.Sleep(batchSendDeadline) m.Append(samples) done <- struct{}{} }() select { case <-done: case <-time.After(2 * time.Second): t.Error("Deadlock between sending and appending detected") pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) t.FailNow() } } } func TestReleaseNoninternedString(t *testing.T) { cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig metrics := newQueueManagerMetrics(nil, "", "") c := NewTestWriteClient() m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.Start() defer m.Stop() for i := 1; i < 1000; i++ { m.StoreSeries([]record.RefSeries{ { Ref: chunks.HeadSeriesRef(i), Labels: labels.Labels{ labels.Label{ Name: "asdf", Value: fmt.Sprintf("%d", i), }, }, }, }, 0) m.SeriesReset(1) } metric := client_testutil.ToFloat64(noReferenceReleases) require.Equal(t, 0.0, metric, "expected there to be no calls to release for strings that were not already interned: %d", int(metric)) } func TestShouldReshard(t *testing.T) { type testcase struct { startingShards int samplesIn, samplesOut, lastSendTimestamp int64 expectedToReshard bool } cases := []testcase{ { // Resharding shouldn't take place if the last successful send was > batch send deadline*2 seconds ago. 
startingShards: 10, samplesIn: 1000, samplesOut: 10, lastSendTimestamp: time.Now().Unix() - int64(3*time.Duration(config.DefaultQueueConfig.BatchSendDeadline)/time.Second), expectedToReshard: false, }, { startingShards: 5, samplesIn: 1000, samplesOut: 10, lastSendTimestamp: time.Now().Unix(), expectedToReshard: true, }, } cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig for _, c := range cases { metrics := newQueueManagerMetrics(nil, "", "") client := NewTestWriteClient() m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, client, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.numShards = c.startingShards m.dataIn.incr(c.samplesIn) m.dataOut.incr(c.samplesOut) m.lastSendTimestamp.Store(c.lastSendTimestamp) m.Start() desiredShards := m.calculateDesiredShards() shouldReshard := m.shouldReshard(desiredShards) m.Stop() require.Equal(t, c.expectedToReshard, shouldReshard) } } func createTimeseries(numSamples, numSeries int, extraLabels ...labels.Label) ([]record.RefSample, []record.RefSeries) { samples := make([]record.RefSample, 0, numSamples) series := make([]record.RefSeries, 0, numSeries) for i := 0; i < numSeries; i++ { name := fmt.Sprintf("test_metric_%d", i) for j := 0; j < numSamples; j++ { samples = append(samples, record.RefSample{ Ref: chunks.HeadSeriesRef(i), T: int64(j), V: float64(i), }) } series = append(series, record.RefSeries{ Ref: chunks.HeadSeriesRef(i), Labels: append(labels.Labels{{Name: "__name__", Value: name}}, extraLabels...), }) } return samples, series } func createExemplars(numExemplars, numSeries int) ([]record.RefExemplar, []record.RefSeries) { exemplars := make([]record.RefExemplar, 0, numExemplars) series := make([]record.RefSeries, 0, numSeries) for i := 0; i < numSeries; i++ { name := fmt.Sprintf("test_metric_%d", i) for j := 0; j < numExemplars; j++ { e := record.RefExemplar{ Ref: chunks.HeadSeriesRef(i), T: int64(j), V: float64(i), Labels: labels.FromStrings("traceID", fmt.Sprintf("trace-%d", i)), } exemplars = append(exemplars, e) } series = append(series, record.RefSeries{ Ref: chunks.HeadSeriesRef(i), Labels: labels.Labels{{Name: "__name__", Value: name}}, }) } return exemplars, series } func getSeriesNameFromRef(r record.RefSeries) string { for _, l := range r.Labels { if l.Name == "__name__" { return l.Value } } return "" } type TestWriteClient struct { receivedSamples map[string][]prompb.Sample expectedSamples map[string][]prompb.Sample receivedExemplars map[string][]prompb.Exemplar expectedExemplars map[string][]prompb.Exemplar receivedMetadata map[string][]prompb.MetricMetadata writesReceived int withWaitGroup bool wg sync.WaitGroup mtx sync.Mutex buf []byte } func NewTestWriteClient() *TestWriteClient { return &TestWriteClient{ withWaitGroup: true, receivedSamples: map[string][]prompb.Sample{}, expectedSamples: map[string][]prompb.Sample{}, receivedMetadata: map[string][]prompb.MetricMetadata{}, } } func (c *TestWriteClient) expectSamples(ss []record.RefSample, series []record.RefSeries) { if !c.withWaitGroup { return } c.mtx.Lock() defer c.mtx.Unlock() c.expectedSamples = map[string][]prompb.Sample{} c.receivedSamples = map[string][]prompb.Sample{} for _, s := range ss { seriesName := getSeriesNameFromRef(series[s.Ref]) c.expectedSamples[seriesName] = append(c.expectedSamples[seriesName], prompb.Sample{ Timestamp: s.T, Value: s.V, }) } c.wg.Add(len(ss)) } func (c *TestWriteClient) expectExemplars(ss []record.RefExemplar, series 
[]record.RefSeries) { if !c.withWaitGroup { return } c.mtx.Lock() defer c.mtx.Unlock() c.expectedExemplars = map[string][]prompb.Exemplar{} c.receivedExemplars = map[string][]prompb.Exemplar{} for _, s := range ss { seriesName := getSeriesNameFromRef(series[s.Ref]) e := prompb.Exemplar{ Labels: labelsToLabelsProto(s.Labels, nil), Timestamp: s.T, Value: s.V, } c.expectedExemplars[seriesName] = append(c.expectedExemplars[seriesName], e) } c.wg.Add(len(ss)) } func (c *TestWriteClient) waitForExpectedData(tb testing.TB) { if !c.withWaitGroup { return } c.wg.Wait() c.mtx.Lock() defer c.mtx.Unlock() for ts, expectedSamples := range c.expectedSamples { require.Equal(tb, expectedSamples, c.receivedSamples[ts], ts) } for ts, expectedExemplar := range c.expectedExemplars { require.Equal(tb, expectedExemplar, c.receivedExemplars[ts], ts) } } func (c *TestWriteClient) Store(_ context.Context, req []byte) error { c.mtx.Lock() defer c.mtx.Unlock() // nil buffers are ok for snappy, ignore cast error. if c.buf != nil { c.buf = c.buf[:cap(c.buf)] } reqBuf, err := snappy.Decode(c.buf, req) c.buf = reqBuf if err != nil { return err } var reqProto prompb.WriteRequest if err := proto.Unmarshal(reqBuf, &reqProto); err != nil { return err } count := 0 for _, ts := range reqProto.Timeseries { var seriesName string labels := labelProtosToLabels(ts.Labels) for _, label := range labels { if label.Name == "__name__" { seriesName = label.Value } } for _, sample := range ts.Samples { count++ c.receivedSamples[seriesName] = append(c.receivedSamples[seriesName], sample) } for _, ex := range ts.Exemplars { count++ c.receivedExemplars[seriesName] = append(c.receivedExemplars[seriesName], ex) } } if c.withWaitGroup { c.wg.Add(-count) } for _, m := range reqProto.Metadata { c.receivedMetadata[m.MetricFamilyName] = append(c.receivedMetadata[m.MetricFamilyName], m) } c.writesReceived++ return nil } func (c *TestWriteClient) Name() string { return "testwriteclient" } func (c *TestWriteClient) Endpoint() string { return "http://test-remote.com/1234" } // TestBlockingWriteClient is a queue_manager WriteClient which will block // on any calls to Store(), until the request's Context is cancelled, at which // point the `numCalls` property will contain a count of how many times Store() // was called. type TestBlockingWriteClient struct { numCalls atomic.Uint64 } func NewTestBlockedWriteClient() *TestBlockingWriteClient { return &TestBlockingWriteClient{} } func (c *TestBlockingWriteClient) Store(ctx context.Context, _ []byte) error { c.numCalls.Inc() <-ctx.Done() return nil } func (c *TestBlockingWriteClient) NumCalls() uint64 { return c.numCalls.Load() } func (c *TestBlockingWriteClient) Name() string { return "testblockingwriteclient" } func (c *TestBlockingWriteClient) Endpoint() string { return "http://test-remote-blocking.com/1234" } // For benchmarking the send and not the receive side. type NopWriteClient struct{} func NewNopWriteClient() *NopWriteClient { return &NopWriteClient{} } func (c *NopWriteClient) Store(_ context.Context, req []byte) error { return nil } func (c *NopWriteClient) Name() string { return "nopwriteclient" } func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" } func BenchmarkSampleSend(b *testing.B) { // Send one sample per series, which is the typical remote_write case const numSamples = 1 const numSeries = 10000 // Extra labels to make a more realistic workload - taken from Kubernetes' embedded cAdvisor metrics. 
extraLabels := labels.Labels{ {Name: "kubernetes_io_arch", Value: "amd64"}, {Name: "kubernetes_io_instance_type", Value: "c3.somesize"}, {Name: "kubernetes_io_os", Value: "linux"}, {Name: "container_name", Value: "some-name"}, {Name: "failure_domain_kubernetes_io_region", Value: "somewhere-1"}, {Name: "failure_domain_kubernetes_io_zone", Value: "somewhere-1b"}, {Name: "id", Value: "/kubepods/burstable/pod6e91c467-e4c5-11e7-ace3-0a97ed59c75e/a3c8498918bd6866349fed5a6f8c643b77c91836427fb6327913276ebc6bde28"}, {Name: "image", Value: "registry/organisation/name@sha256:dca3d877a80008b45d71d7edc4fd2e44c0c8c8e7102ba5cbabec63a374d1d506"}, {Name: "instance", Value: "ip-111-11-1-11.ec2.internal"}, {Name: "job", Value: "kubernetes-cadvisor"}, {Name: "kubernetes_io_hostname", Value: "ip-111-11-1-11"}, {Name: "monitor", Value: "prod"}, {Name: "name", Value: "k8s_some-name_some-other-name-5j8s8_kube-system_6e91c467-e4c5-11e7-ace3-0a97ed59c75e_0"}, {Name: "namespace", Value: "kube-system"}, {Name: "pod_name", Value: "some-other-name-5j8s8"}, } samples, series := createTimeseries(numSamples, numSeries, extraLabels...) c := NewNopWriteClient() cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond) cfg.MinShards = 20 cfg.MaxShards = 20 dir := b.TempDir() metrics := newQueueManagerMetrics(nil, "", "") m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.StoreSeries(series, 0) // These should be received by the client. m.Start() defer m.Stop() b.ResetTimer() for i := 0; i < b.N; i++ { m.Append(samples) m.UpdateSeriesSegment(series, i+1) // simulate what wal.Watcher.garbageCollectSeries does m.SeriesReset(i + 1) } // Do not include shutdown b.StopTimer() } func BenchmarkStartup(b *testing.B) { dir := os.Getenv("WALDIR") if dir == "" { return } // Find the second largest segment; we will replay up to this. // (Second largest as WALWatcher will start tailing the largest). dirents, err := ioutil.ReadDir(dir) require.NoError(b, err) var segments []int for _, dirent := range dirents { if i, err := strconv.Atoi(dirent.Name()); err != nil { segments = append(segments, i) } } sort.Ints(segments) logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) logger = log.With(logger, "caller", log.DefaultCaller) cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig for n := 0; n < b.N; n++ { metrics := newQueueManagerMetrics(nil, "", "") c := NewTestBlockedWriteClient() m := NewQueueManager(metrics, nil, nil, logger, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false) m.watcher.SetStartTime(timestamp.Time(math.MaxInt64)) m.watcher.MaxSegment = segments[len(segments)-2] err := m.watcher.Run() require.NoError(b, err) } } func TestProcessExternalLabels(t *testing.T) { for _, tc := range []struct { labels labels.Labels externalLabels labels.Labels expected labels.Labels }{ // Test adding labels at the end. { labels: labels.Labels{{Name: "a", Value: "b"}}, externalLabels: labels.Labels{{Name: "c", Value: "d"}}, expected: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, }, // Test adding labels at the beginning. 
{ labels: labels.Labels{{Name: "c", Value: "d"}}, externalLabels: labels.Labels{{Name: "a", Value: "b"}}, expected: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, }, // Test we don't override existing labels. { labels: labels.Labels{{Name: "a", Value: "b"}}, externalLabels: labels.Labels{{Name: "a", Value: "c"}}, expected: labels.Labels{{Name: "a", Value: "b"}}, }, // Test empty externalLabels. { labels: labels.Labels{{Name: "a", Value: "b"}}, externalLabels: labels.Labels{}, expected: labels.Labels{{Name: "a", Value: "b"}}, }, // Test empty labels. { labels: labels.Labels{}, externalLabels: labels.Labels{{Name: "a", Value: "b"}}, expected: labels.Labels{{Name: "a", Value: "b"}}, }, // Test labels is longer than externalLabels. { labels: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, externalLabels: labels.Labels{{Name: "e", Value: "f"}}, expected: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}, {Name: "e", Value: "f"}}, }, // Test externalLabels is longer than labels. { labels: labels.Labels{{Name: "c", Value: "d"}}, externalLabels: labels.Labels{{Name: "a", Value: "b"}, {Name: "e", Value: "f"}}, expected: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}, {Name: "e", Value: "f"}}, }, } { require.Equal(t, tc.expected, processExternalLabels(tc.labels, tc.externalLabels)) } } func TestCalculateDesiredShards(t *testing.T) { c := NewTestWriteClient() cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig dir := t.TempDir() metrics := newQueueManagerMetrics(nil, "", "") samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration) m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) // Need to start the queue manager so the proper metrics are initialized. // However we can stop it right away since we don't need to do any actual // processing. m.Start() m.Stop() inputRate := int64(50000) var pendingSamples int64 // Two minute startup, no samples are sent. startedAt := time.Now().Add(-2 * time.Minute) // helper function for adding samples. addSamples := func(s int64, ts time.Duration) { pendingSamples += s samplesIn.incr(s) samplesIn.tick() m.highestRecvTimestamp.Set(float64(startedAt.Add(ts).Unix())) } // helper function for sending samples. sendSamples := func(s int64, ts time.Duration) { pendingSamples -= s m.dataOut.incr(s) m.dataOutDuration.incr(int64(m.numShards) * int64(shardUpdateDuration)) // highest sent is how far back pending samples would be at our input rate. highestSent := startedAt.Add(ts - time.Duration(pendingSamples/inputRate)*time.Second) m.metrics.highestSentTimestamp.Set(float64(highestSent.Unix())) m.lastSendTimestamp.Store(time.Now().Unix()) } ts := time.Duration(0) for ; ts < 120*time.Second; ts += shardUpdateDuration { addSamples(inputRate*int64(shardUpdateDuration/time.Second), ts) m.numShards = m.calculateDesiredShards() require.Equal(t, 1, m.numShards) } // Assume 100ms per request, or 10 requests per second per shard. // Shard calculation should never drop below barely keeping up. minShards := int(inputRate) / cfg.MaxSamplesPerSend / 10 // This test should never go above 200 shards, that would be more resources than needed. 
maxShards := 200 for ; ts < 15*time.Minute; ts += shardUpdateDuration { sin := inputRate * int64(shardUpdateDuration/time.Second) addSamples(sin, ts) sout := int64(m.numShards*cfg.MaxSamplesPerSend) * int64(shardUpdateDuration/(100*time.Millisecond)) // You can't send samples that don't exist so cap at the number of pending samples. if sout > pendingSamples { sout = pendingSamples } sendSamples(sout, ts) t.Log("desiredShards", m.numShards, "pendingSamples", pendingSamples) m.numShards = m.calculateDesiredShards() require.GreaterOrEqual(t, m.numShards, minShards, "Shards are too low. desiredShards=%d, minShards=%d, t_seconds=%d", m.numShards, minShards, ts/time.Second) require.LessOrEqual(t, m.numShards, maxShards, "Shards are too high. desiredShards=%d, maxShards=%d, t_seconds=%d", m.numShards, maxShards, ts/time.Second) } require.Equal(t, int64(0), pendingSamples, "Remote write never caught up, there are still %d pending samples.", pendingSamples) } func TestCalculateDesiredShardsDetail(t *testing.T) { c := NewTestWriteClient() cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig dir := t.TempDir() metrics := newQueueManagerMetrics(nil, "", "") samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration) m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) for _, tc := range []struct { name string prevShards int dataIn int64 // Quantities normalised to seconds. dataOut int64 dataDropped int64 dataOutDuration float64 backlog float64 expectedShards int }{ { name: "nothing in or out 1", prevShards: 1, expectedShards: 1, // Shards stays the same. }, { name: "nothing in or out 10", prevShards: 10, expectedShards: 10, // Shards stays the same. }, { name: "steady throughput", prevShards: 1, dataIn: 10, dataOut: 10, dataOutDuration: 1, expectedShards: 1, }, { name: "scale down", prevShards: 10, dataIn: 10, dataOut: 10, dataOutDuration: 5, expectedShards: 5, }, { name: "scale down constrained", prevShards: 7, dataIn: 10, dataOut: 10, dataOutDuration: 5, expectedShards: 7, }, { name: "scale up", prevShards: 1, dataIn: 10, dataOut: 10, dataOutDuration: 10, expectedShards: 10, }, { name: "scale up constrained", prevShards: 8, dataIn: 10, dataOut: 10, dataOutDuration: 10, expectedShards: 8, }, { name: "backlogged 20s", prevShards: 2, dataIn: 10, dataOut: 10, dataOutDuration: 2, backlog: 20, expectedShards: 4, }, { name: "backlogged 90s", prevShards: 4, dataIn: 10, dataOut: 10, dataOutDuration: 4, backlog: 90, expectedShards: 22, }, { name: "backlog reduced", prevShards: 22, dataIn: 10, dataOut: 20, dataOutDuration: 4, backlog: 10, expectedShards: 3, }, { name: "backlog eliminated", prevShards: 3, dataIn: 10, dataOut: 10, dataOutDuration: 2, backlog: 0, expectedShards: 2, // Shard back down. }, { name: "slight slowdown", prevShards: 1, dataIn: 10, dataOut: 10, dataOutDuration: 1.2, expectedShards: 2, // 1.2 is rounded up to 2. }, { name: "bigger slowdown", prevShards: 1, dataIn: 10, dataOut: 10, dataOutDuration: 1.4, expectedShards: 2, }, { name: "speed up", prevShards: 2, dataIn: 10, dataOut: 10, dataOutDuration: 1.2, backlog: 0, expectedShards: 2, // No reaction - 1.2 is rounded up to 2. }, { name: "speed up more", prevShards: 2, dataIn: 10, dataOut: 10, dataOutDuration: 0.9, backlog: 0, expectedShards: 1, }, { name: "marginal decision A", prevShards: 3, dataIn: 10, dataOut: 10, dataOutDuration: 2.01, backlog: 0, expectedShards: 3, // 2.01 rounds up to 3. 
}, { name: "marginal decision B", prevShards: 3, dataIn: 10, dataOut: 10, dataOutDuration: 1.99, backlog: 0, expectedShards: 2, // 1.99 rounds up to 2. }, } { t.Run(tc.name, func(t *testing.T) { m.numShards = tc.prevShards forceEMWA(samplesIn, tc.dataIn*int64(shardUpdateDuration/time.Second)) samplesIn.tick() forceEMWA(m.dataOut, tc.dataOut*int64(shardUpdateDuration/time.Second)) forceEMWA(m.dataDropped, tc.dataDropped*int64(shardUpdateDuration/time.Second)) forceEMWA(m.dataOutDuration, int64(tc.dataOutDuration*float64(shardUpdateDuration))) m.highestRecvTimestamp.value = tc.backlog // Not Set() because it can only increase value. require.Equal(t, tc.expectedShards, m.calculateDesiredShards()) }) } } func forceEMWA(r *ewmaRate, rate int64) { r.init = false r.newEvents.Store(rate) } func TestQueueManagerMetrics(t *testing.T) { reg := prometheus.NewPedanticRegistry() metrics := newQueueManagerMetrics(reg, "name", "http://localhost:1234") // Make sure metrics pass linting. problems, err := client_testutil.GatherAndLint(reg) require.NoError(t, err) require.Equal(t, 0, len(problems), "Metric linting problems detected: %v", problems) // Make sure all metrics were unregistered. A failure here means you need // unregister a metric in `queueManagerMetrics.unregister()`. metrics.unregister() err = client_testutil.GatherAndCompare(reg, strings.NewReader("")) require.NoError(t, err) } func TestQueue_FlushAndShutdownDoesNotDeadlock(t *testing.T) { capacity := 100 batchSize := 10 queue := newQueue(batchSize, capacity) for i := 0; i < capacity+batchSize; i++ { queue.Append(sampleOrExemplar{}) } done := make(chan struct{}) go queue.FlushAndShutdown(done) go func() { // Give enough time for FlushAndShutdown to acquire the lock. queue.Batch() // should not block forever even if the lock is acquired. time.Sleep(10 * time.Millisecond) queue.Batch() close(done) }() select { case <-done: case <-time.After(2 * time.Second): t.Error("Deadlock in FlushAndShutdown detected") pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) t.FailNow() } }
[ "\"WALDIR\"" ]
[]
[ "WALDIR" ]
[]
["WALDIR"]
go
1
0
clients/google-api-services-assuredworkloads/v1/1.31.0/com/google/api/services/assuredworkloads/v1/Assuredworkloads.java
/* * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ /* * This code was generated by https://github.com/googleapis/google-api-java-client-services/ * Modify at your own risk. */ package com.google.api.services.assuredworkloads.v1; /** * Service definition for Assuredworkloads (v1). * * <p> * * </p> * * <p> * For more information about this service, see the * <a href="https://cloud.google.com" target="_blank">API Documentation</a> * </p> * * <p> * This service uses {@link AssuredworkloadsRequestInitializer} to initialize global parameters via its * {@link Builder}. * </p> * * @since 1.3 * @author Google, Inc. */ @SuppressWarnings("javadoc") public class Assuredworkloads extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient { // Note: Leave this static initializer at the top of the file. static { com.google.api.client.util.Preconditions.checkState( com.google.api.client.googleapis.GoogleUtils.MAJOR_VERSION == 1 && (com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION >= 32 || (com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION == 31 && com.google.api.client.googleapis.GoogleUtils.BUGFIX_VERSION >= 1)), "You are currently running with version %s of google-api-client. " + "You need at least version 1.31.1 of google-api-client to run version " + "1.32.1 of the Assured Workloads API library.", com.google.api.client.googleapis.GoogleUtils.VERSION); } /** * The default encoded root URL of the service. This is determined when the library is generated * and normally should not be changed. * * @since 1.7 */ public static final String DEFAULT_ROOT_URL = "https://assuredworkloads.googleapis.com/"; /** * The default encoded mTLS root URL of the service. This is determined when the library is generated * and normally should not be changed. * * @since 1.31 */ public static final String DEFAULT_MTLS_ROOT_URL = "https://assuredworkloads.mtls.googleapis.com/"; /** * The default encoded service path of the service. This is determined when the library is * generated and normally should not be changed. * * @since 1.7 */ public static final String DEFAULT_SERVICE_PATH = ""; /** * The default encoded batch path of the service. This is determined when the library is * generated and normally should not be changed. * * @since 1.23 */ public static final String DEFAULT_BATCH_PATH = "batch"; /** * The default encoded base URL of the service. This is determined when the library is generated * and normally should not be changed. */ public static final String DEFAULT_BASE_URL = DEFAULT_ROOT_URL + DEFAULT_SERVICE_PATH; /** * Constructor. * * <p> * Use {@link Builder} if you need to specify any of the optional parameters. 
* </p> * * @param transport HTTP transport, which should normally be: * <ul> * <li>Google App Engine: * {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li> * <li>Android: {@code newCompatibleTransport} from * {@code com.google.api.client.extensions.android.http.AndroidHttp}</li> * <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()} * </li> * </ul> * @param jsonFactory JSON factory, which may be: * <ul> * <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li> * <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li> * <li>Android Honeycomb or higher: * {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li> * </ul> * @param httpRequestInitializer HTTP request initializer or {@code null} for none * @since 1.7 */ public Assuredworkloads(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory, com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) { this(new Builder(transport, jsonFactory, httpRequestInitializer)); } /** * @param builder builder */ Assuredworkloads(Builder builder) { super(builder); } @Override protected void initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest<?> httpClientRequest) throws java.io.IOException { super.initialize(httpClientRequest); } /** * An accessor for creating requests from the Organizations collection. * * <p>The typical use is:</p> * <pre> * {@code Assuredworkloads assuredworkloads = new Assuredworkloads(...);} * {@code Assuredworkloads.Organizations.List request = assuredworkloads.organizations().list(parameters ...)} * </pre> * * @return the resource collection */ public Organizations organizations() { return new Organizations(); } /** * The "organizations" collection of methods. */ public class Organizations { /** * An accessor for creating requests from the Locations collection. * * <p>The typical use is:</p> * <pre> * {@code Assuredworkloads assuredworkloads = new Assuredworkloads(...);} * {@code Assuredworkloads.Locations.List request = assuredworkloads.locations().list(parameters ...)} * </pre> * * @return the resource collection */ public Locations locations() { return new Locations(); } /** * The "locations" collection of methods. */ public class Locations { /** * An accessor for creating requests from the Operations collection. * * <p>The typical use is:</p> * <pre> * {@code Assuredworkloads assuredworkloads = new Assuredworkloads(...);} * {@code Assuredworkloads.Operations.List request = assuredworkloads.operations().list(parameters ...)} * </pre> * * @return the resource collection */ public Operations operations() { return new Operations(); } /** * The "operations" collection of methods. */ public class Operations { /** * Gets the latest state of a long-running operation. Clients can use this method to poll the * operation result at intervals as recommended by the API service. * * Create a request for the method "operations.get". * * This request holds the parameters needed by the assuredworkloads server. After setting any * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. * * @param name The name of the operation resource. 
* @return the request */ public Get get(java.lang.String name) throws java.io.IOException { Get result = new Get(name); initialize(result); return result; } public class Get extends AssuredworkloadsRequest<com.google.api.services.assuredworkloads.v1.model.GoogleLongrunningOperation> { private static final String REST_PATH = "v1/{+name}"; private final java.util.regex.Pattern NAME_PATTERN = java.util.regex.Pattern.compile("^organizations/[^/]+/locations/[^/]+/operations/[^/]+$"); /** * Gets the latest state of a long-running operation. Clients can use this method to poll the * operation result at intervals as recommended by the API service. * * Create a request for the method "operations.get". * * This request holds the parameters needed by the the assuredworkloads server. After setting any * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p> * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} * must be called to initialize this instance immediately after invoking the constructor. </p> * * @param name The name of the operation resource. * @since 1.13 */ protected Get(java.lang.String name) { super(Assuredworkloads.this, "GET", REST_PATH, null, com.google.api.services.assuredworkloads.v1.model.GoogleLongrunningOperation.class); this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified."); if (!getSuppressPatternChecks()) { com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(), "Parameter name must conform to the pattern " + "^organizations/[^/]+/locations/[^/]+/operations/[^/]+$"); } } @Override public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException { return super.executeUsingHead(); } @Override public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException { return super.buildHttpRequestUsingHead(); } @Override public Get set$Xgafv(java.lang.String $Xgafv) { return (Get) super.set$Xgafv($Xgafv); } @Override public Get setAccessToken(java.lang.String accessToken) { return (Get) super.setAccessToken(accessToken); } @Override public Get setAlt(java.lang.String alt) { return (Get) super.setAlt(alt); } @Override public Get setCallback(java.lang.String callback) { return (Get) super.setCallback(callback); } @Override public Get setFields(java.lang.String fields) { return (Get) super.setFields(fields); } @Override public Get setKey(java.lang.String key) { return (Get) super.setKey(key); } @Override public Get setOauthToken(java.lang.String oauthToken) { return (Get) super.setOauthToken(oauthToken); } @Override public Get setPrettyPrint(java.lang.Boolean prettyPrint) { return (Get) super.setPrettyPrint(prettyPrint); } @Override public Get setQuotaUser(java.lang.String quotaUser) { return (Get) super.setQuotaUser(quotaUser); } @Override public Get setUploadType(java.lang.String uploadType) { return (Get) super.setUploadType(uploadType); } @Override public Get setUploadProtocol(java.lang.String uploadProtocol) { return (Get) super.setUploadProtocol(uploadProtocol); } /** The name of the operation resource. */ @com.google.api.client.util.Key private java.lang.String name; /** The name of the operation resource. */ public java.lang.String getName() { return name; } /** The name of the operation resource. 
*/ public Get setName(java.lang.String name) { if (!getSuppressPatternChecks()) { com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(), "Parameter name must conform to the pattern " + "^organizations/[^/]+/locations/[^/]+/operations/[^/]+$"); } this.name = name; return this; } @Override public Get set(String parameterName, Object value) { return (Get) super.set(parameterName, value); } } /** * Lists operations that match the specified filter in the request. If the server doesn't support * this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override * the binding to use different resource name schemes, such as `users/operations`. To override the * binding, API services can add a binding such as `"/v1/{name=users}/operations"` to their service * configuration. For backwards compatibility, the default name includes the operations collection * id, however overriding users must ensure the name binding is the parent resource, without the * operations collection id. * * Create a request for the method "operations.list". * * This request holds the parameters needed by the assuredworkloads server. After setting any * optional parameters, call the {@link List#execute()} method to invoke the remote operation. * * @param name The name of the operation's parent resource. * @return the request */ public List list(java.lang.String name) throws java.io.IOException { List result = new List(name); initialize(result); return result; } public class List extends AssuredworkloadsRequest<com.google.api.services.assuredworkloads.v1.model.GoogleLongrunningListOperationsResponse> { private static final String REST_PATH = "v1/{+name}/operations"; private final java.util.regex.Pattern NAME_PATTERN = java.util.regex.Pattern.compile("^organizations/[^/]+/locations/[^/]+$"); /** * Lists operations that match the specified filter in the request. If the server doesn't support * this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to * override the binding to use different resource name schemes, such as `users/operations`. To * override the binding, API services can add a binding such as `"/v1/{name=users}/operations"` to * their service configuration. For backwards compatibility, the default name includes the * operations collection id, however overriding users must ensure the name binding is the parent * resource, without the operations collection id. * * Create a request for the method "operations.list". * * This request holds the parameters needed by the the assuredworkloads server. After setting any * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p> * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} * must be called to initialize this instance immediately after invoking the constructor. </p> * * @param name The name of the operation's parent resource. 
* @since 1.13 */ protected List(java.lang.String name) { super(Assuredworkloads.this, "GET", REST_PATH, null, com.google.api.services.assuredworkloads.v1.model.GoogleLongrunningListOperationsResponse.class); this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified."); if (!getSuppressPatternChecks()) { com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(), "Parameter name must conform to the pattern " + "^organizations/[^/]+/locations/[^/]+$"); } } @Override public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException { return super.executeUsingHead(); } @Override public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException { return super.buildHttpRequestUsingHead(); } @Override public List set$Xgafv(java.lang.String $Xgafv) { return (List) super.set$Xgafv($Xgafv); } @Override public List setAccessToken(java.lang.String accessToken) { return (List) super.setAccessToken(accessToken); } @Override public List setAlt(java.lang.String alt) { return (List) super.setAlt(alt); } @Override public List setCallback(java.lang.String callback) { return (List) super.setCallback(callback); } @Override public List setFields(java.lang.String fields) { return (List) super.setFields(fields); } @Override public List setKey(java.lang.String key) { return (List) super.setKey(key); } @Override public List setOauthToken(java.lang.String oauthToken) { return (List) super.setOauthToken(oauthToken); } @Override public List setPrettyPrint(java.lang.Boolean prettyPrint) { return (List) super.setPrettyPrint(prettyPrint); } @Override public List setQuotaUser(java.lang.String quotaUser) { return (List) super.setQuotaUser(quotaUser); } @Override public List setUploadType(java.lang.String uploadType) { return (List) super.setUploadType(uploadType); } @Override public List setUploadProtocol(java.lang.String uploadProtocol) { return (List) super.setUploadProtocol(uploadProtocol); } /** The name of the operation's parent resource. */ @com.google.api.client.util.Key private java.lang.String name; /** The name of the operation's parent resource. */ public java.lang.String getName() { return name; } /** The name of the operation's parent resource. */ public List setName(java.lang.String name) { if (!getSuppressPatternChecks()) { com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(), "Parameter name must conform to the pattern " + "^organizations/[^/]+/locations/[^/]+$"); } this.name = name; return this; } /** The standard list filter. */ @com.google.api.client.util.Key private java.lang.String filter; /** The standard list filter. */ public java.lang.String getFilter() { return filter; } /** The standard list filter. */ public List setFilter(java.lang.String filter) { this.filter = filter; return this; } /** The standard list page size. */ @com.google.api.client.util.Key private java.lang.Integer pageSize; /** The standard list page size. */ public java.lang.Integer getPageSize() { return pageSize; } /** The standard list page size. */ public List setPageSize(java.lang.Integer pageSize) { this.pageSize = pageSize; return this; } /** The standard list page token. */ @com.google.api.client.util.Key private java.lang.String pageToken; /** The standard list page token. */ public java.lang.String getPageToken() { return pageToken; } /** The standard list page token. 
*/ public List setPageToken(java.lang.String pageToken) { this.pageToken = pageToken; return this; } @Override public List set(String parameterName, Object value) { return (List) super.set(parameterName, value); } } } /** * An accessor for creating requests from the Workloads collection. * * <p>The typical use is:</p> * <pre> * {@code Assuredworkloads assuredworkloads = new Assuredworkloads(...);} * {@code Assuredworkloads.Workloads.List request = assuredworkloads.workloads().list(parameters ...)} * </pre> * * @return the resource collection */ public Workloads workloads() { return new Workloads(); } /** * The "workloads" collection of methods. */ public class Workloads { /** * Creates Assured Workload. * * Create a request for the method "workloads.create". * * This request holds the parameters needed by the assuredworkloads server. After setting any * optional parameters, call the {@link Create#execute()} method to invoke the remote operation. * * @param parent Required. The resource name of the new Workload's parent. Must be of the form * `organizations/{org_id}/locations/{location_id}`. * @param content the {@link com.google.api.services.assuredworkloads.v1.model.GoogleCloudAssuredworkloadsV1Workload} * @return the request */ public Create create(java.lang.String parent, com.google.api.services.assuredworkloads.v1.model.GoogleCloudAssuredworkloadsV1Workload content) throws java.io.IOException { Create result = new Create(parent, content); initialize(result); return result; } public class Create extends AssuredworkloadsRequest<com.google.api.services.assuredworkloads.v1.model.GoogleLongrunningOperation> { private static final String REST_PATH = "v1/{+parent}/workloads"; private final java.util.regex.Pattern PARENT_PATTERN = java.util.regex.Pattern.compile("^organizations/[^/]+/locations/[^/]+$"); /** * Creates Assured Workload. * * Create a request for the method "workloads.create". * * This request holds the parameters needed by the the assuredworkloads server. After setting any * optional parameters, call the {@link Create#execute()} method to invoke the remote operation. * <p> {@link * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must * be called to initialize this instance immediately after invoking the constructor. </p> * * @param parent Required. The resource name of the new Workload's parent. Must be of the form * `organizations/{org_id}/locations/{location_id}`. 
* @param content the {@link com.google.api.services.assuredworkloads.v1.model.GoogleCloudAssuredworkloadsV1Workload} * @since 1.13 */ protected Create(java.lang.String parent, com.google.api.services.assuredworkloads.v1.model.GoogleCloudAssuredworkloadsV1Workload content) { super(Assuredworkloads.this, "POST", REST_PATH, content, com.google.api.services.assuredworkloads.v1.model.GoogleLongrunningOperation.class); this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified."); if (!getSuppressPatternChecks()) { com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(), "Parameter parent must conform to the pattern " + "^organizations/[^/]+/locations/[^/]+$"); } } @Override public Create set$Xgafv(java.lang.String $Xgafv) { return (Create) super.set$Xgafv($Xgafv); } @Override public Create setAccessToken(java.lang.String accessToken) { return (Create) super.setAccessToken(accessToken); } @Override public Create setAlt(java.lang.String alt) { return (Create) super.setAlt(alt); } @Override public Create setCallback(java.lang.String callback) { return (Create) super.setCallback(callback); } @Override public Create setFields(java.lang.String fields) { return (Create) super.setFields(fields); } @Override public Create setKey(java.lang.String key) { return (Create) super.setKey(key); } @Override public Create setOauthToken(java.lang.String oauthToken) { return (Create) super.setOauthToken(oauthToken); } @Override public Create setPrettyPrint(java.lang.Boolean prettyPrint) { return (Create) super.setPrettyPrint(prettyPrint); } @Override public Create setQuotaUser(java.lang.String quotaUser) { return (Create) super.setQuotaUser(quotaUser); } @Override public Create setUploadType(java.lang.String uploadType) { return (Create) super.setUploadType(uploadType); } @Override public Create setUploadProtocol(java.lang.String uploadProtocol) { return (Create) super.setUploadProtocol(uploadProtocol); } /** * Required. The resource name of the new Workload's parent. Must be of the form * `organizations/{org_id}/locations/{location_id}`. */ @com.google.api.client.util.Key private java.lang.String parent; /** Required. The resource name of the new Workload's parent. Must be of the form `organizations/{org_id}/locations/{location_id}`. */ public java.lang.String getParent() { return parent; } /** * Required. The resource name of the new Workload's parent. Must be of the form * `organizations/{org_id}/locations/{location_id}`. */ public Create setParent(java.lang.String parent) { if (!getSuppressPatternChecks()) { com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(), "Parameter parent must conform to the pattern " + "^organizations/[^/]+/locations/[^/]+$"); } this.parent = parent; return this; } /** * Optional. A identifier associated with the workload and underlying projects which * allows for the break down of billing costs for a workload. The value provided for the * identifier will add a label to the workload and contained projects with the identifier * as the value. */ @com.google.api.client.util.Key private java.lang.String externalId; /** Optional. A identifier associated with the workload and underlying projects which allows for the break down of billing costs for a workload. The value provided for the identifier will add a label to the workload and contained projects with the identifier as the value. 
*/ public java.lang.String getExternalId() { return externalId; } /** * Optional. A identifier associated with the workload and underlying projects which * allows for the break down of billing costs for a workload. The value provided for the * identifier will add a label to the workload and contained projects with the identifier * as the value. */ public Create setExternalId(java.lang.String externalId) { this.externalId = externalId; return this; } @Override public Create set(String parameterName, Object value) { return (Create) super.set(parameterName, value); } } /** * Deletes the workload. Make sure that workload's direct children are already in a deleted state, * otherwise the request will fail with a FAILED_PRECONDITION error. * * Create a request for the method "workloads.delete". * * This request holds the parameters needed by the assuredworkloads server. After setting any * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation. * * @param name Required. The `name` field is used to identify the workload. Format: * organizations/{org_id}/locations/{location_id}/workloads/{workload_id} * @return the request */ public Delete delete(java.lang.String name) throws java.io.IOException { Delete result = new Delete(name); initialize(result); return result; } public class Delete extends AssuredworkloadsRequest<com.google.api.services.assuredworkloads.v1.model.GoogleProtobufEmpty> { private static final String REST_PATH = "v1/{+name}"; private final java.util.regex.Pattern NAME_PATTERN = java.util.regex.Pattern.compile("^organizations/[^/]+/locations/[^/]+/workloads/[^/]+$"); /** * Deletes the workload. Make sure that workload's direct children are already in a deleted state, * otherwise the request will fail with a FAILED_PRECONDITION error. * * Create a request for the method "workloads.delete". * * This request holds the parameters needed by the the assuredworkloads server. After setting any * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation. * <p> {@link * Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must * be called to initialize this instance immediately after invoking the constructor. </p> * * @param name Required. The `name` field is used to identify the workload. 
Format: * organizations/{org_id}/locations/{location_id}/workloads/{workload_id} * @since 1.13 */ protected Delete(java.lang.String name) { super(Assuredworkloads.this, "DELETE", REST_PATH, null, com.google.api.services.assuredworkloads.v1.model.GoogleProtobufEmpty.class); this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified."); if (!getSuppressPatternChecks()) { com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(), "Parameter name must conform to the pattern " + "^organizations/[^/]+/locations/[^/]+/workloads/[^/]+$"); } } @Override public Delete set$Xgafv(java.lang.String $Xgafv) { return (Delete) super.set$Xgafv($Xgafv); } @Override public Delete setAccessToken(java.lang.String accessToken) { return (Delete) super.setAccessToken(accessToken); } @Override public Delete setAlt(java.lang.String alt) { return (Delete) super.setAlt(alt); } @Override public Delete setCallback(java.lang.String callback) { return (Delete) super.setCallback(callback); } @Override public Delete setFields(java.lang.String fields) { return (Delete) super.setFields(fields); } @Override public Delete setKey(java.lang.String key) { return (Delete) super.setKey(key); } @Override public Delete setOauthToken(java.lang.String oauthToken) { return (Delete) super.setOauthToken(oauthToken); } @Override public Delete setPrettyPrint(java.lang.Boolean prettyPrint) { return (Delete) super.setPrettyPrint(prettyPrint); } @Override public Delete setQuotaUser(java.lang.String quotaUser) { return (Delete) super.setQuotaUser(quotaUser); } @Override public Delete setUploadType(java.lang.String uploadType) { return (Delete) super.setUploadType(uploadType); } @Override public Delete setUploadProtocol(java.lang.String uploadProtocol) { return (Delete) super.setUploadProtocol(uploadProtocol); } /** * Required. The `name` field is used to identify the workload. Format: * organizations/{org_id}/locations/{location_id}/workloads/{workload_id} */ @com.google.api.client.util.Key private java.lang.String name; /** Required. The `name` field is used to identify the workload. Format: organizations/{org_id}/locations/{location_id}/workloads/{workload_id} */ public java.lang.String getName() { return name; } /** * Required. The `name` field is used to identify the workload. Format: * organizations/{org_id}/locations/{location_id}/workloads/{workload_id} */ public Delete setName(java.lang.String name) { if (!getSuppressPatternChecks()) { com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(), "Parameter name must conform to the pattern " + "^organizations/[^/]+/locations/[^/]+/workloads/[^/]+$"); } this.name = name; return this; } /** * Optional. The etag of the workload. If this is provided, it must match the server's * etag. */ @com.google.api.client.util.Key private java.lang.String etag; /** Optional. The etag of the workload. If this is provided, it must match the server's etag. */ public java.lang.String getEtag() { return etag; } /** * Optional. The etag of the workload. If this is provided, it must match the server's * etag. */ public Delete setEtag(java.lang.String etag) { this.etag = etag; return this; } @Override public Delete set(String parameterName, Object value) { return (Delete) super.set(parameterName, value); } } /** * Gets Assured Workload associated with a CRM Node * * Create a request for the method "workloads.get". * * This request holds the parameters needed by the assuredworkloads server. 
After setting any * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. * * @param name Required. The resource name of the Workload to fetch. This is the workloads's relative path in the * API, formatted as * "organizations/{organization_id}/locations/{location_id}/workloads/{workload_id}". For * example, "organizations/123/locations/us-east1/workloads/assured-workload-1". * @return the request */ public Get get(java.lang.String name) throws java.io.IOException { Get result = new Get(name); initialize(result); return result; } public class Get extends AssuredworkloadsRequest<com.google.api.services.assuredworkloads.v1.model.GoogleCloudAssuredworkloadsV1Workload> { private static final String REST_PATH = "v1/{+name}"; private final java.util.regex.Pattern NAME_PATTERN = java.util.regex.Pattern.compile("^organizations/[^/]+/locations/[^/]+/workloads/[^/]+$"); /** * Gets Assured Workload associated with a CRM Node * * Create a request for the method "workloads.get". * * This request holds the parameters needed by the the assuredworkloads server. After setting any * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p> * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} * must be called to initialize this instance immediately after invoking the constructor. </p> * * @param name Required. The resource name of the Workload to fetch. This is the workloads's relative path in the * API, formatted as * "organizations/{organization_id}/locations/{location_id}/workloads/{workload_id}". For * example, "organizations/123/locations/us-east1/workloads/assured-workload-1". * @since 1.13 */ protected Get(java.lang.String name) { super(Assuredworkloads.this, "GET", REST_PATH, null, com.google.api.services.assuredworkloads.v1.model.GoogleCloudAssuredworkloadsV1Workload.class); this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified."); if (!getSuppressPatternChecks()) { com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(), "Parameter name must conform to the pattern " + "^organizations/[^/]+/locations/[^/]+/workloads/[^/]+$"); } } @Override public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException { return super.executeUsingHead(); } @Override public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException { return super.buildHttpRequestUsingHead(); } @Override public Get set$Xgafv(java.lang.String $Xgafv) { return (Get) super.set$Xgafv($Xgafv); } @Override public Get setAccessToken(java.lang.String accessToken) { return (Get) super.setAccessToken(accessToken); } @Override public Get setAlt(java.lang.String alt) { return (Get) super.setAlt(alt); } @Override public Get setCallback(java.lang.String callback) { return (Get) super.setCallback(callback); } @Override public Get setFields(java.lang.String fields) { return (Get) super.setFields(fields); } @Override public Get setKey(java.lang.String key) { return (Get) super.setKey(key); } @Override public Get setOauthToken(java.lang.String oauthToken) { return (Get) super.setOauthToken(oauthToken); } @Override public Get setPrettyPrint(java.lang.Boolean prettyPrint) { return (Get) super.setPrettyPrint(prettyPrint); } @Override public Get setQuotaUser(java.lang.String quotaUser) { return (Get) super.setQuotaUser(quotaUser); } @Override public Get setUploadType(java.lang.String 
uploadType) { return (Get) super.setUploadType(uploadType); } @Override public Get setUploadProtocol(java.lang.String uploadProtocol) { return (Get) super.setUploadProtocol(uploadProtocol); } /** * Required. The resource name of the Workload to fetch. This is the workloads's relative * path in the API, formatted as * "organizations/{organization_id}/locations/{location_id}/workloads/{workload_id}". For * example, "organizations/123/locations/us-east1/workloads/assured-workload-1". */ @com.google.api.client.util.Key private java.lang.String name; /** Required. The resource name of the Workload to fetch. This is the workloads's relative path in the API, formatted as "organizations/{organization_id}/locations/{location_id}/workloads/{workload_id}". For example, "organizations/123/locations/us-east1/workloads/assured-workload-1". */ public java.lang.String getName() { return name; } /** * Required. The resource name of the Workload to fetch. This is the workloads's relative * path in the API, formatted as * "organizations/{organization_id}/locations/{location_id}/workloads/{workload_id}". For * example, "organizations/123/locations/us-east1/workloads/assured-workload-1". */ public Get setName(java.lang.String name) { if (!getSuppressPatternChecks()) { com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(), "Parameter name must conform to the pattern " + "^organizations/[^/]+/locations/[^/]+/workloads/[^/]+$"); } this.name = name; return this; } @Override public Get set(String parameterName, Object value) { return (Get) super.set(parameterName, value); } } /** * Lists Assured Workloads under a CRM Node. * * Create a request for the method "workloads.list". * * This request holds the parameters needed by the assuredworkloads server. After setting any * optional parameters, call the {@link List#execute()} method to invoke the remote operation. * * @param parent Required. Parent Resource to list workloads from. Must be of the form * `organizations/{org_id}/locations/{location}`. * @return the request */ public List list(java.lang.String parent) throws java.io.IOException { List result = new List(parent); initialize(result); return result; } public class List extends AssuredworkloadsRequest<com.google.api.services.assuredworkloads.v1.model.GoogleCloudAssuredworkloadsV1ListWorkloadsResponse> { private static final String REST_PATH = "v1/{+parent}/workloads"; private final java.util.regex.Pattern PARENT_PATTERN = java.util.regex.Pattern.compile("^organizations/[^/]+/locations/[^/]+$"); /** * Lists Assured Workloads under a CRM Node. * * Create a request for the method "workloads.list". * * This request holds the parameters needed by the the assuredworkloads server. After setting any * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p> * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} * must be called to initialize this instance immediately after invoking the constructor. </p> * * @param parent Required. Parent Resource to list workloads from. Must be of the form * `organizations/{org_id}/locations/{location}`. 
* @since 1.13 */ protected List(java.lang.String parent) { super(Assuredworkloads.this, "GET", REST_PATH, null, com.google.api.services.assuredworkloads.v1.model.GoogleCloudAssuredworkloadsV1ListWorkloadsResponse.class); this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified."); if (!getSuppressPatternChecks()) { com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(), "Parameter parent must conform to the pattern " + "^organizations/[^/]+/locations/[^/]+$"); } } @Override public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException { return super.executeUsingHead(); } @Override public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException { return super.buildHttpRequestUsingHead(); } @Override public List set$Xgafv(java.lang.String $Xgafv) { return (List) super.set$Xgafv($Xgafv); } @Override public List setAccessToken(java.lang.String accessToken) { return (List) super.setAccessToken(accessToken); } @Override public List setAlt(java.lang.String alt) { return (List) super.setAlt(alt); } @Override public List setCallback(java.lang.String callback) { return (List) super.setCallback(callback); } @Override public List setFields(java.lang.String fields) { return (List) super.setFields(fields); } @Override public List setKey(java.lang.String key) { return (List) super.setKey(key); } @Override public List setOauthToken(java.lang.String oauthToken) { return (List) super.setOauthToken(oauthToken); } @Override public List setPrettyPrint(java.lang.Boolean prettyPrint) { return (List) super.setPrettyPrint(prettyPrint); } @Override public List setQuotaUser(java.lang.String quotaUser) { return (List) super.setQuotaUser(quotaUser); } @Override public List setUploadType(java.lang.String uploadType) { return (List) super.setUploadType(uploadType); } @Override public List setUploadProtocol(java.lang.String uploadProtocol) { return (List) super.setUploadProtocol(uploadProtocol); } /** * Required. Parent Resource to list workloads from. Must be of the form * `organizations/{org_id}/locations/{location}`. */ @com.google.api.client.util.Key private java.lang.String parent; /** Required. Parent Resource to list workloads from. Must be of the form `organizations/{org_id}/locations/{location}`. */ public java.lang.String getParent() { return parent; } /** * Required. Parent Resource to list workloads from. Must be of the form * `organizations/{org_id}/locations/{location}`. */ public List setParent(java.lang.String parent) { if (!getSuppressPatternChecks()) { com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(), "Parameter parent must conform to the pattern " + "^organizations/[^/]+/locations/[^/]+$"); } this.parent = parent; return this; } /** * A custom filter for filtering by properties of a workload. At this time, only filtering * by labels is supported. */ @com.google.api.client.util.Key private java.lang.String filter; /** A custom filter for filtering by properties of a workload. At this time, only filtering by labels is supported. */ public java.lang.String getFilter() { return filter; } /** * A custom filter for filtering by properties of a workload. At this time, only filtering * by labels is supported. */ public List setFilter(java.lang.String filter) { this.filter = filter; return this; } /** Page size. 
*/ @com.google.api.client.util.Key private java.lang.Integer pageSize; /** Page size. */ public java.lang.Integer getPageSize() { return pageSize; } /** Page size. */ public List setPageSize(java.lang.Integer pageSize) { this.pageSize = pageSize; return this; } /** * Page token returned from previous request. Page token contains context from previous * request. Page token needs to be passed in the second and following requests. */ @com.google.api.client.util.Key private java.lang.String pageToken; /** Page token returned from previous request. Page token contains context from previous request. Page token needs to be passed in the second and following requests. */ public java.lang.String getPageToken() { return pageToken; } /** * Page token returned from previous request. Page token contains context from previous * request. Page token needs to be passed in the second and following requests. */ public List setPageToken(java.lang.String pageToken) { this.pageToken = pageToken; return this; } @Override public List set(String parameterName, Object value) { return (List) super.set(parameterName, value); } } /** * Updates an existing workload. Currently allows updating of workload display_name and labels. For * force updates don't set etag field in the Workload. Only one update operation per workload can be * in progress. * * Create a request for the method "workloads.patch". * * This request holds the parameters needed by the assuredworkloads server. After setting any * optional parameters, call the {@link Patch#execute()} method to invoke the remote operation. * * @param name Optional. The resource name of the workload. Format: * organizations/{organization}/locations/{location}/workloads/{workload} Read-only. * @param content the {@link com.google.api.services.assuredworkloads.v1.model.GoogleCloudAssuredworkloadsV1Workload} * @return the request */ public Patch patch(java.lang.String name, com.google.api.services.assuredworkloads.v1.model.GoogleCloudAssuredworkloadsV1Workload content) throws java.io.IOException { Patch result = new Patch(name, content); initialize(result); return result; } public class Patch extends AssuredworkloadsRequest<com.google.api.services.assuredworkloads.v1.model.GoogleCloudAssuredworkloadsV1Workload> { private static final String REST_PATH = "v1/{+name}"; private final java.util.regex.Pattern NAME_PATTERN = java.util.regex.Pattern.compile("^organizations/[^/]+/locations/[^/]+/workloads/[^/]+$"); /** * Updates an existing workload. Currently allows updating of workload display_name and labels. * For force updates don't set etag field in the Workload. Only one update operation per workload * can be in progress. * * Create a request for the method "workloads.patch". * * This request holds the parameters needed by the the assuredworkloads server. After setting any * optional parameters, call the {@link Patch#execute()} method to invoke the remote operation. * <p> {@link * Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must * be called to initialize this instance immediately after invoking the constructor. </p> * * @param name Optional. The resource name of the workload. Format: * organizations/{organization}/locations/{location}/workloads/{workload} Read-only. 
* @param content the {@link com.google.api.services.assuredworkloads.v1.model.GoogleCloudAssuredworkloadsV1Workload} * @since 1.13 */ protected Patch(java.lang.String name, com.google.api.services.assuredworkloads.v1.model.GoogleCloudAssuredworkloadsV1Workload content) { super(Assuredworkloads.this, "PATCH", REST_PATH, content, com.google.api.services.assuredworkloads.v1.model.GoogleCloudAssuredworkloadsV1Workload.class); this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified."); if (!getSuppressPatternChecks()) { com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(), "Parameter name must conform to the pattern " + "^organizations/[^/]+/locations/[^/]+/workloads/[^/]+$"); } } @Override public Patch set$Xgafv(java.lang.String $Xgafv) { return (Patch) super.set$Xgafv($Xgafv); } @Override public Patch setAccessToken(java.lang.String accessToken) { return (Patch) super.setAccessToken(accessToken); } @Override public Patch setAlt(java.lang.String alt) { return (Patch) super.setAlt(alt); } @Override public Patch setCallback(java.lang.String callback) { return (Patch) super.setCallback(callback); } @Override public Patch setFields(java.lang.String fields) { return (Patch) super.setFields(fields); } @Override public Patch setKey(java.lang.String key) { return (Patch) super.setKey(key); } @Override public Patch setOauthToken(java.lang.String oauthToken) { return (Patch) super.setOauthToken(oauthToken); } @Override public Patch setPrettyPrint(java.lang.Boolean prettyPrint) { return (Patch) super.setPrettyPrint(prettyPrint); } @Override public Patch setQuotaUser(java.lang.String quotaUser) { return (Patch) super.setQuotaUser(quotaUser); } @Override public Patch setUploadType(java.lang.String uploadType) { return (Patch) super.setUploadType(uploadType); } @Override public Patch setUploadProtocol(java.lang.String uploadProtocol) { return (Patch) super.setUploadProtocol(uploadProtocol); } /** * Optional. The resource name of the workload. Format: * organizations/{organization}/locations/{location}/workloads/{workload} Read-only. */ @com.google.api.client.util.Key private java.lang.String name; /** Optional. The resource name of the workload. Format: organizations/{organization}/locations/{location}/workloads/{workload} Read-only. */ public java.lang.String getName() { return name; } /** * Optional. The resource name of the workload. Format: * organizations/{organization}/locations/{location}/workloads/{workload} Read-only. */ public Patch setName(java.lang.String name) { if (!getSuppressPatternChecks()) { com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(), "Parameter name must conform to the pattern " + "^organizations/[^/]+/locations/[^/]+/workloads/[^/]+$"); } this.name = name; return this; } /** Required. The list of fields to be updated. */ @com.google.api.client.util.Key private String updateMask; /** Required. The list of fields to be updated. */ public String getUpdateMask() { return updateMask; } /** Required. The list of fields to be updated. */ public Patch setUpdateMask(String updateMask) { this.updateMask = updateMask; return this; } @Override public Patch set(String parameterName, Object value) { return (Patch) super.set(parameterName, value); } } } } } /** * Builder for {@link Assuredworkloads}. * * <p> * Implementation is not thread-safe. 
* </p> * * @since 1.3.0 */ public static final class Builder extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient.Builder { private static String chooseEndpoint(com.google.api.client.http.HttpTransport transport) { // If the GOOGLE_API_USE_MTLS_ENDPOINT environment variable value is "always", use mTLS endpoint. // If the env variable is "auto", use mTLS endpoint if and only if the transport is mTLS. // Use the regular endpoint for all other cases. String useMtlsEndpoint = System.getenv("GOOGLE_API_USE_MTLS_ENDPOINT"); useMtlsEndpoint = useMtlsEndpoint == null ? "auto" : useMtlsEndpoint; if ("always".equals(useMtlsEndpoint) || ("auto".equals(useMtlsEndpoint) && transport != null && transport.isMtls())) { return DEFAULT_MTLS_ROOT_URL; } return DEFAULT_ROOT_URL; } /** * Returns an instance of a new builder. * * @param transport HTTP transport, which should normally be: * <ul> * <li>Google App Engine: * {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li> * <li>Android: {@code newCompatibleTransport} from * {@code com.google.api.client.extensions.android.http.AndroidHttp}</li> * <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()} * </li> * </ul> * @param jsonFactory JSON factory, which may be: * <ul> * <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li> * <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li> * <li>Android Honeycomb or higher: * {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li> * </ul> * @param httpRequestInitializer HTTP request initializer or {@code null} for none * @since 1.7 */ public Builder(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory, com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) { super( transport, jsonFactory, Builder.chooseEndpoint(transport), DEFAULT_SERVICE_PATH, httpRequestInitializer, false); setBatchPath(DEFAULT_BATCH_PATH); } /** Builds a new instance of {@link Assuredworkloads}. */ @Override public Assuredworkloads build() { return new Assuredworkloads(this); } @Override public Builder setRootUrl(String rootUrl) { return (Builder) super.setRootUrl(rootUrl); } @Override public Builder setServicePath(String servicePath) { return (Builder) super.setServicePath(servicePath); } @Override public Builder setBatchPath(String batchPath) { return (Builder) super.setBatchPath(batchPath); } @Override public Builder setHttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) { return (Builder) super.setHttpRequestInitializer(httpRequestInitializer); } @Override public Builder setApplicationName(String applicationName) { return (Builder) super.setApplicationName(applicationName); } @Override public Builder setSuppressPatternChecks(boolean suppressPatternChecks) { return (Builder) super.setSuppressPatternChecks(suppressPatternChecks); } @Override public Builder setSuppressRequiredParameterChecks(boolean suppressRequiredParameterChecks) { return (Builder) super.setSuppressRequiredParameterChecks(suppressRequiredParameterChecks); } @Override public Builder setSuppressAllChecks(boolean suppressAllChecks) { return (Builder) super.setSuppressAllChecks(suppressAllChecks); } /** * Set the {@link AssuredworkloadsRequestInitializer}. 
* * @since 1.12 */ public Builder setAssuredworkloadsRequestInitializer( AssuredworkloadsRequestInitializer assuredworkloadsRequestInitializer) { return (Builder) super.setGoogleClientRequestInitializer(assuredworkloadsRequestInitializer); } @Override public Builder setGoogleClientRequestInitializer( com.google.api.client.googleapis.services.GoogleClientRequestInitializer googleClientRequestInitializer) { return (Builder) super.setGoogleClientRequestInitializer(googleClientRequestInitializer); } } }
[ "\"GOOGLE_API_USE_MTLS_ENDPOINT\"" ]
[]
[ "GOOGLE_API_USE_MTLS_ENDPOINT" ]
[]
["GOOGLE_API_USE_MTLS_ENDPOINT"]
java
1
0
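The row above captures a single constant environment read, GOOGLE_API_USE_MTLS_ENDPOINT, made inside Assuredworkloads.Builder.chooseEndpoint. As a hedged illustration of what that read controls (not part of the dataset row itself), the endpoint-selection rule can be mirrored in a few lines of Python; the two URLs and the "always"/"auto" mode strings come from the Java source above, while the function name and the transport_is_mtls flag are illustrative stand-ins for the Java HttpTransport check.

import os

DEFAULT_ROOT_URL = "https://assuredworkloads.googleapis.com/"
DEFAULT_MTLS_ROOT_URL = "https://assuredworkloads.mtls.googleapis.com/"

def choose_endpoint(transport_is_mtls):
    # Mirrors Builder.chooseEndpoint: "always" forces the mTLS endpoint,
    # "auto" (the default when the variable is unset) uses it only when the
    # transport itself is mTLS, and every other value falls back to the
    # regular root URL.
    mode = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
    if mode == "always" or (mode == "auto" and transport_is_mtls):
        return DEFAULT_MTLS_ROOT_URL
    return DEFAULT_ROOT_URL

print(choose_endpoint(transport_is_mtls=False))  # https://assuredworkloads.googleapis.com/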
train.py
""" Trains, evaluates and saves the KittiSeg model. ------------------------------------------------- The MIT License (MIT) Copyright (c) 2017 Marvin Teichmann More details: https://github.com/MarvinTeichmann/KittiSeg/blob/master/LICENSE """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import commentjson import logging import os import sys import collections def dict_merge(dct, merge_dct): """ Recursive dict merge. Inspired by :meth:``dict.update()``, instead of updating only top-level keys, dict_merge recurses down into dicts nested to an arbitrary depth, updating keys. The ``merge_dct`` is merged into ``dct``. :param dct: dict onto which the merge is executed :param merge_dct: dct merged into dct :return: None """ for k, v in merge_dct.iteritems(): if (k in dct and isinstance(dct[k], dict) and isinstance(merge_dct[k], collections.Mapping)): dict_merge(dct[k], merge_dct[k]) else: dct[k] = merge_dct[k] # configure logging if 'TV_IS_DEV' in os.environ and os.environ['TV_IS_DEV']: logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO, stream=sys.stdout) else: logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO, stream=sys.stdout) # https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070 import numpy as np flags = tf.app.flags FLAGS = flags.FLAGS sys.path.insert(1, 'incl') import tensorvision.train as train import tensorvision.utils as utils flags.DEFINE_string('name', None, 'Append a name Tag to run.') flags.DEFINE_string('project', None, 'Append a name Tag to run.') flags.DEFINE_string('hypes', None, 'File storing model parameters.') flags.DEFINE_string('mod', None, 'Modifier for model parameters.') if 'TV_SAVE' in os.environ and os.environ['TV_SAVE']: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to save the run. In case --nosave (default) ' 'output will be saved to the folder TV_DIR_RUNS/debug, ' 'hence it will get overwritten by further runs.')) else: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to save the run. In case --nosave (default) ' 'output will be saved to the folder TV_DIR_RUNS/debug ' 'hence it will get overwritten by further runs.')) def main(_): utils.set_gpus_to_use() try: import tensorvision.train import tensorflow_fcn.utils except ImportError: logging.error("Could not import the submodules.") logging.error("Please execute:" "'git submodule update --init --recursive'") exit(1) if tf.app.flags.FLAGS.hypes is None: logging.error("No hype file is given.") logging.info("Usage: python train.py --hypes hypes/KittiClass.json") exit(1) with open(tf.app.flags.FLAGS.hypes, 'r') as f: logging.info("f: %s", f) hypes = commentjson.load(f) utils.load_plugins() if tf.app.flags.FLAGS.mod is not None: import ast mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod) dict_merge(hypes, mod_dict) if 'TV_DIR_RUNS' in os.environ: os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg') utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes) utils._add_paths_to_sys(hypes) train.maybe_download_and_extract(hypes) logging.info("Initialize training folder") train.initialize_training_folder(hypes) logging.info("Start training") train.do_training(hypes) if __name__ == '__main__': tf.app.run()
[]
[]
[ "TV_DIR_RUNS", "TV_IS_DEV", "TV_SAVE" ]
[]
["TV_DIR_RUNS", "TV_IS_DEV", "TV_SAVE"]
python
3
0
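The train.py row records three constant environment reads (TV_DIR_RUNS, TV_IS_DEV, TV_SAVE). A minimal sketch, assuming an extraction pipeline that statically scans Python sources much like this dataset appears to, shows how those names could be recovered with the standard ast module; find_env_reads is an illustrative helper written for this note, not the tool that actually produced the rows, and it assumes the Python 3.9+ AST layout.

import ast

def find_env_reads(source):
    # Collects literal keys from os.environ['NAME'] subscripts and from
    # 'NAME' in os.environ membership tests. Calls such as os.getenv('NAME')
    # or os.environ.get('NAME') are not handled here and would need an
    # additional ast.Call branch.
    names = set()
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Subscript):
            value, key = node.value, node.slice
            if (isinstance(value, ast.Attribute) and value.attr == "environ"
                    and isinstance(key, ast.Constant) and isinstance(key.value, str)):
                names.add(key.value)
        elif (isinstance(node, ast.Compare)
              and isinstance(node.left, ast.Constant) and isinstance(node.left.value, str)
              and len(node.ops) == 1 and isinstance(node.ops[0], ast.In)
              and isinstance(node.comparators[0], ast.Attribute)
              and node.comparators[0].attr == "environ"):
            names.add(node.left.value)
    return names

# Run over the train.py content above, this returns
# {'TV_DIR_RUNS', 'TV_IS_DEV', 'TV_SAVE'}, matching the row's constargjson column.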
src/utils/VideoDataloader.py
# -*- coding: utf-8 -*- """ MSR-VTT Dataset Torch Dataloader and Dataset implemention. get_video_dataloader creates a dataloader with a new MSR-VTT dataset with the specified parameters """ from __future__ import print_function import os import sys import ast import pickle import numpy as np import pandas as pd import PIL import cv2 import torch from torch.utils.data import Dataset, sampler, DataLoader DIR_NAME = os.path.dirname(os.path.realpath(__file__)) sys.path.append('/../') sys.path.append(DIR_NAME) from create_transformer import create_transformer from Vocabulary import Vocabulary from models.EncoderCNN import EncoderCNN def get_video_dataloader(mode='train', videos_path=os.environ['HOME'] + '/Database/MSR-VTT/train-video/', vocab_path='data/processed/msrvtt_vocab.pkl', captions_path='data/processed/msrvtt_captions.csv', batch_size=32, num_frames=40, max_len=30, embedding_size=2048, num_captions=20, load_features=False, load_captions=False, preload=False, model='resnet152', num_workers=0): """ Generate a dataloader with the specified parameters. Args: mode: Dataset type to load videos_path: Path to MSR-VTT videos dataset vocab_path: Path to MSR-VTT vocab file caption_size: Path to captions vocab file batch_size: Batch size for Dataloader num_frames: Number of frames per video to process max_len: Max caption length embedding_size: Size of image embedding num_captions: Number of captions per image in dataset load_features: Boolean for creating or loading image features load_captions: Boolean for creating or loading image captions preload: Boolean for either preloading data into RAM during construction model: base model for encoderCNN num_workers: Dataloader parameter Return: data_loader: A torch dataloader for the MSR-VTT dataset """ # Ensure specified mode is validate try: assert mode in ['train', 'dev', 'test'] except AssertionError: print('Invalid mode specified: {}'.format(mode)) print(' Defaulting to dev mode') mode = 'dev' # Build dataset data = VideoDataset(mode, videos_path, vocab_path, captions_path, batch_size, num_frames, max_len, embedding_size, num_captions, load_features, load_captions, preload, model) if mode == 'train': # Get all possible video indices indices = data.get_indices() # Initialize a sampler for the indices init_sampler = sampler.SubsetRandomSampler(indices=indices) # Create data loader with dataset and sampler data_loader = DataLoader(dataset=data, num_workers=num_workers, batch_sampler=sampler.BatchSampler( sampler=init_sampler, batch_size=batch_size, drop_last=False)) else: data_loader = DataLoader(dataset=data, batch_size=batch_size, shuffle=True, num_workers=num_workers) return data_loader def reset_dataloader(data_loader): """Reset sampler for dataloader.""" indices = data_loader.dataset.get_indices() new_sampler = sampler.SubsetRandomSampler(indices=indices) data_loader.batch_sampler.sampler = new_sampler class VideoDataset(Dataset): """MSR-VTT Torch Dataset (inherits from torch.utils.data.Dataset).""" def get_vocab_size(self): """Returns the size of the attached vocabulary.""" return len(self.vocab) def get_vocab(self): """Returns the vocab idx to word dictionary.""" return self.vocab.idx2word def get_idx(self): """Returns the word to idx dictionary.""" return self.vocab.word2idx def get_seq_len(self): """ Determines and returns the total number of batches per epoch. Returns: The number of batches per epoch. 
""" num_batches = int(np.floor(len(self.files) / float(self.batch_size))) if len(self.files) % self.batch_size != 0: return num_batches + 1 return num_batches def get_indices(self): """Returns idxs for all video files.""" return np.arange(0, len(self.files)).tolist() def get_references(self, ids): """Get all captions for given ids.""" return [self.df[self.df['vid_id'] == idx] ['decoded_caption'].values.tolist() for idx in ids] def __init__(self, mode='train', videos_path=os.environ['HOME'] + '/Database/MSR-VTT/train-video/', vocab_path='data/processed/msrvtt_vocab.pkl', captions_path='data/processed/msrvtt_captions.csv', batch_size=32, num_frames=40, max_len=30, embedding_size=2048, num_captions=20, load_features=True, load_captions=True, preload=False, model='resnet152'): """ Construct the VideoDataset class. Args: mode: Dataset type to load videos_path: Path to MSR-VTT videos dataset vocab_path: Path to MSR-VTT vocab file caption_size: Path to captions vocab file batch_size: Batch size for Dataloader num_frames: Number of frames per video to process max_len: Max caption length embedding_size: Size of image embedding num_captions: Number of captions per image in dataset load_features: Boolean for creating or loading image features load_captions: Boolean for creating or loading image captions preload: Boolean for either preloading data into RAM during construction model: base model for encoderCNN """ super(VideoDataset, self).__init__() try: assert(mode in ['train', 'dev', 'val', 'test']) except: print("Invalid mode specified: {}".format(mode)) print("Defaulting to train mode") mode = 'train' # Make val synonymous with dev if mode == 'val': mode = 'dev' # Declare class variables self.mode = mode self.num_frames = num_frames self.max_len = max_len self.batch_size = batch_size self.num_captions = num_captions self.videos_path = videos_path self.load_features = load_features self.preload = preload self.model = model if not self.load_features: self.transformer = create_transformer() self.encoder = EncoderCNN(model) # Move to gpu if available if torch.cuda.is_available(): self.encoder.cuda() # Set encoder in evaluation mode self.encoder.eval() # Load vocabulary with open(vocab_path, 'rb') as f: self.vocab = pickle.load(f) # Read in captions dataframe self.df = pd.read_csv(captions_path) self.df = self.df[self.df['set'] == mode] # Load or encode captions into fixed length embeddings, drop # any captions with length greater than max_len if (not load_captions or 'embedded_caption' not in self.df.columns.values): self.df['embedded_caption'] = self.df['caption'].apply( lambda x: self.vocab.encode(x, max_len + 1)) self.df = self.df[self.df['embedded_caption'].apply( lambda x: x[-1]) == self.vocab(self.vocab.pad_word)] self.df['embedded_caption'] = self.df[ 'embedded_caption'].apply(lambda x: x[:-1]) self.df['decoded_caption'] = self.df['embedded_caption'].apply( lambda x: self.vocab.decode(x, clean=True)) else: self.df['embedded_caption'] = self.df[ 'embedded_caption'].apply(ast.literal_eval) self.df['decoded_caption'] = self.df[ 'decoded_caption'].apply(ast.literal_eval) self.df = self.df[ self.df['embedded_caption'].apply( lambda x: x[max_len]) == self.vocab( self.vocab.pad_word)] self.df['embedded_caption'] = self.df[ 'embedded_caption'].apply(lambda x: x[:max_len]) self.files = self.df['vid_id'].unique() # Preload features if self.preload and self.load_features: # Create empty tensors to fill self.vid_embeddings = torch.empty( len(self.files), num_frames, embedding_size) self.cap_embeddings = 
torch.empty( len(self.files), num_captions, max_len) # Loop through unique video ids for i, vid_id in enumerate(self.files): # Load an store video feature with open(self.videos_path + vid_id + '_' + model + '.pkl', 'rb') as f: self.vid_embeddings[i] = pickle.load(f) # Get captions for video cap_embeddings = self.df[self.df['vid_id'] == vid_id][ 'embedded_caption'].values.tolist() # Randomly sampole or crop to get num_caption captions while len(cap_embeddings) < num_captions: cap_embeddings.append( cap_embeddings[ np.random.randint( 0, len(cap_embeddings))]) if len(cap_embeddings) > num_captions: cap_embeddings = cap_embeddings[:num_captions] # Append to torch tensor self.cap_embeddings[i] = torch.Tensor( np.vstack(cap_embeddings)).long() else: self.preload = False # Prevent preloading if not loading features def __getitem__(self, ix): """ Returns video id, video embedding, and captions for given \ index. If in training mode, return a random caption sample. Otherwise, return all captions for a given ix. Args: ix: Batch index """ vid_id = self.files[ix] # Load preprocessed videos/captions from memory if self.preload: # Select random caption index cap_ix = np.random.randint(0, self.num_captions) if self.mode == 'train': return vid_id, self.vid_embeddings[ ix], self.cap_embeddings[ix, cap_ix].long() return vid_id, self.vid_embeddings[ ix], self.cap_embeddings[ix].long() # Load features from file if self.load_features: with open(self.videos_path + vid_id + '_' + self.model + '.pkl', 'rb') as f: vid_array = pickle.load(f) # Generate features from raw video else: vid_array = self.get_vid_array( self.videos_path + vid_id + '.mp4') vid_array = self.encoder(vid_array) # Grab captions related to video from dataframe captions = self.df[self.df['vid_id'] == vid_id][ 'embedded_caption'].values if self.mode == 'train': # Randomly select caption cap_ix = np.random.randint(0, len(captions)) return (vid_id, vid_array, torch.Tensor(captions[cap_ix]).long()) # Select all captions for video and randomly sample # to fixed length captions = captions.tolist() while len(captions) < self.num_captions: captions.append( captions[ np.random.randint( 0, len(captions))]) if len(captions) > self.num_captions: captions = captions[:self.num_captions] return vid_id, vid_array, torch.Tensor( np.vstack(captions)).long() def __len__(self): """Get number of videos.""" return len(self.files) def get_vid_array(self, video_name): """ Read in video and create a torch array from \ (num_frames, 3, 224, 224). Args: video_name: Path to video Returns: A torch tensor of frame encodings """ try: cap = cv2.VideoCapture(video_name) except: print('Could not open %s' % (video_name)) return None # Make empty arrays to store results in vid_array = torch.zeros(self.num_frames, 3, 224, 224) if torch.cuda.is_available(): vid_array = vid_array.cuda() frame_idx = 0 # Loop through and append frames to torch array while True: ret, frame = cap.read() if not ret or frame_idx == self.num_frames: break try: frame = PIL.Image.fromarray(frame).convert('RGB') if torch.cuda.is_available(): frame = self.transformer(frame).cuda().unsqueeze(0) else: frame = self.transformer(frame).unsqueeze(0) vid_array[frame_idx] = frame frame_idx += 1 except OSError as e: print(e + ' Could not process frame in ' + video_name) cap.release() return vid_array
[]
[]
[ "HOME" ]
[]
["HOME"]
python
1
0
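In the VideoDataloader.py row the single HOME read sits inside default argument values of get_video_dataloader and VideoDataset.__init__, so os.environ['HOME'] is evaluated once at import time and raises KeyError when HOME is unset. Below is a minimal sketch of the same default-path idea with the lookup deferred to call time; MSRVTT_VIDEO_DIR and default_video_dir are hypothetical names introduced for this note, and the stub return is a placeholder, not the module's real behaviour.

import os

def default_video_dir():
    # MSRVTT_VIDEO_DIR is a hypothetical override knob; os.path.expanduser
    # avoids the KeyError that os.environ['HOME'] raises when HOME is unset.
    override = os.environ.get("MSRVTT_VIDEO_DIR")
    if override:
        return override
    return os.path.join(os.path.expanduser("~"), "Database", "MSR-VTT", "train-video")

def get_video_dataloader(mode="train", videos_path=None, **kwargs):
    # A None sentinel keeps the environment lookup out of the signature, so it
    # happens when the function is called rather than when the module loads.
    if videos_path is None:
        videos_path = default_video_dir()
    return mode, videos_path, kwargs  # placeholder; the real builder is in the row above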
src/manage.py
#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ta_portal.settings") try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
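The environment-related columns for this record are empty even though manage.py touches DJANGO_SETTINGS_MODULE. One plausible reading, which is an assumption and not something the dataset states, is that the extraction tracks reads such as `os.getenv(...)` but not `os.environ.setdefault(...)`, which only writes a fallback value. A tiny sketch of the difference:

```python
import os

# A write-with-fallback: only sets the variable if it is not already defined.
# This is the call manage.py uses to point Django at its settings module.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ta_portal.settings")

# A read: the kind of call that shows up in the environment columns of
# other records in this dump.
print(os.getenv("DJANGO_SETTINGS_MODULE"))  # -> ta_portal.settings
```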
macro_benchmark/Mask_RCNN_PyTorch/maskrcnn_benchmark/config/paths_catalog_dbcluster.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # Copyright (c) 2018-2019 NVIDIA CORPORATION. All rights reserved. """Centralized catalog of paths.""" import os class DatasetCatalog(object): DATA_DIR = os.environ['DATA_DIR'] DATASETS = { "coco_2017_train": { "img_dir": "train2017", "ann_file": "annotations/instances_train2017.json" }, "coco_2017_val": { "img_dir": "val2017", "ann_file": "annotations/instances_val2017.json" }, "coco_2014_train": { "img_dir": "coco_train2014", "ann_file": "annotations/instances_train2014.json" }, "coco_2014_val": { "img_dir": "coco_val2014", "ann_file": "annotations/instances_val2014.json" }, "coco_2014_minival": { "img_dir": "coco_val2014", "ann_file": "annotations/instances_minival2014.json" }, "coco_2014_valminusminival": { "img_dir": "coco_val2014", "ann_file": "annotations/instances_valminusminival2014.json" }, "voc_2007_train": { "data_dir": "voc/VOC2007", "split": "train" }, "voc_2007_train_cocostyle": { "img_dir": "voc/VOC2007/JPEGImages", "ann_file": "voc/VOC2007/Annotations/pascal_train2007.json" }, "voc_2007_val": { "data_dir": "voc/VOC2007", "split": "val" }, "voc_2007_val_cocostyle": { "img_dir": "voc/VOC2007/JPEGImages", "ann_file": "voc/VOC2007/Annotations/pascal_val2007.json" }, "voc_2007_test": { "data_dir": "voc/VOC2007", "split": "test" }, "voc_2007_test_cocostyle": { "img_dir": "voc/VOC2007/JPEGImages", "ann_file": "voc/VOC2007/Annotations/pascal_test2007.json" }, "voc_2012_train": { "data_dir": "voc/VOC2012", "split": "train" }, "voc_2012_train_cocostyle": { "img_dir": "voc/VOC2012/JPEGImages", "ann_file": "voc/VOC2012/Annotations/pascal_train2012.json" }, "voc_2012_val": { "data_dir": "voc/VOC2012", "split": "val" }, "voc_2012_val_cocostyle": { "img_dir": "voc/VOC2012/JPEGImages", "ann_file": "voc/VOC2012/Annotations/pascal_val2012.json" }, "voc_2012_test": { "data_dir": "voc/VOC2012", "split": "test" # PASCAL VOC2012 doesn't made the test annotations available, so there's no json annotation }, "cityscapes_fine_instanceonly_seg_train_cocostyle": { "img_dir": "cityscapes/images", "ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_train.json" }, "cityscapes_fine_instanceonly_seg_val_cocostyle": { "img_dir": "cityscapes/images", "ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_val.json" }, "cityscapes_fine_instanceonly_seg_test_cocostyle": { "img_dir": "cityscapes/images", "ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_test.json" } } @staticmethod def get(name): if "coco" in name: data_dir = DatasetCatalog.DATA_DIR attrs = DatasetCatalog.DATASETS[name] args = dict( root=os.path.join(data_dir, attrs["img_dir"]), ann_file=os.path.join(data_dir, attrs["ann_file"]), ) return dict( factory="COCODataset", args=args, ) elif "voc" in name: data_dir = DatasetCatalog.DATA_DIR attrs = DatasetCatalog.DATASETS[name] args = dict( data_dir=os.path.join(data_dir, attrs["data_dir"]), split=attrs["split"], ) return dict( factory="PascalVOCDataset", args=args, ) raise RuntimeError("Dataset not available: {}".format(name)) class ModelCatalog(object): S3_C2_DETECTRON_URL = "https://dl.fbaipublicfiles.com/detectron" C2_IMAGENET_MODELS = { "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl", "MSRA/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl", "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl", "MSRA/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl", "FAIR/20171220/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl", } C2_DETECTRON_SUFFIX = 
"output/train/coco_2014_train%3Acoco_2014_valminusminival/generalized_rcnn/model_final.pkl" C2_DETECTRON_MODELS = { "35857197/e2e_faster_rcnn_R-50-C4_1x": "01_33_49.iAX0mXvW", "35857345/e2e_faster_rcnn_R-50-FPN_1x": "01_36_30.cUF7QR7I", "35857890/e2e_faster_rcnn_R-101-FPN_1x": "01_38_50.sNxI7sX7", "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "06_31_39.5MIHi1fZ", "35858791/e2e_mask_rcnn_R-50-C4_1x": "01_45_57.ZgkA7hPB", "35858933/e2e_mask_rcnn_R-50-FPN_1x": "01_48_14.DzEQe4wC", "35861795/e2e_mask_rcnn_R-101-FPN_1x": "02_31_37.KqyEK4tT", "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "06_35_59.RZotkLKI", } @staticmethod def get(name): if name.startswith("Caffe2Detectron/COCO"): return ModelCatalog.get_c2_detectron_12_2017_baselines(name) if name.startswith("ImageNetPretrained"): return ModelCatalog.get_c2_imagenet_pretrained(name) raise RuntimeError("model not present in the catalog {}".format(name)) @staticmethod def get_c2_imagenet_pretrained(name): prefix = ModelCatalog.S3_C2_DETECTRON_URL name = name[len("ImageNetPretrained/"):] name = ModelCatalog.C2_IMAGENET_MODELS[name] url = "/".join([prefix, name]) return url @staticmethod def get_c2_detectron_12_2017_baselines(name): # Detectron C2 models are stored following the structure # prefix/<model_id>/2012_2017_baselines/<model_name>.yaml.<signature>/suffix # we use as identifiers in the catalog Caffe2Detectron/COCO/<model_id>/<model_name> prefix = ModelCatalog.S3_C2_DETECTRON_URL suffix = ModelCatalog.C2_DETECTRON_SUFFIX # remove identification prefix name = name[len("Caffe2Detectron/COCO/"):] # split in <model_id> and <model_name> model_id, model_name = name.split("/") # parsing to make it match the url address from the Caffe2 models model_name = "{}.yaml".format(model_name) signature = ModelCatalog.C2_DETECTRON_MODELS[name] unique_name = ".".join([model_name, signature]) url = "/".join([prefix, model_id, "12_2017_baselines", unique_name, suffix]) return url
[]
[]
[ "DATA_DIR" ]
[]
["DATA_DIR"]
python
1
0
cmd/migrate/main.go
package main import ( "flag" "fmt" "os" "github.com/golang-migrate/migrate/v4" _ "github.com/golang-migrate/migrate/v4/database/postgres" _ "github.com/golang-migrate/migrate/v4/source/file" "github.com/joho/godotenv" ) const migrationsPath = "file://db/migrations" func main() { if err := godotenv.Load(); err != nil { panic(err) } var isUp bool flag.BoolVar(&isUp, "up", false, "Should migrate up") var isDown bool flag.BoolVar(&isDown, "down", false, "Should migrate down") flag.Parse() m, err := migrate.New(migrationsPath, buildDatabaseURL()) if err != nil { panic(err) } if isUp { migrateUp(m) } else if isDown { migrateDown(m) } else { panic(fmt.Errorf("which direction do you want")) } } func migrateUp(m *migrate.Migrate) { if err := m.Up(); err != nil { panic(err) } } func migrateDown(m *migrate.Migrate) { if err := m.Down(); err != nil { panic(err) } } func buildDatabaseURL() string { return fmt.Sprintf( "postgresql://%s:%s@%s:%s/%s?sslmode=disable", os.Getenv("DATABASE_USER"), os.Getenv("DATABASE_PASSWORD"), os.Getenv("DATABASE_HOST"), os.Getenv("DATABASE_PORT"), os.Getenv("DATABASE_DB"), ) }
[ "\"DATABASE_USER\"", "\"DATABASE_PASSWORD\"", "\"DATABASE_HOST\"", "\"DATABASE_PORT\"", "\"DATABASE_DB\"" ]
[]
[ "DATABASE_PASSWORD", "DATABASE_DB", "DATABASE_HOST", "DATABASE_PORT", "DATABASE_USER" ]
[]
["DATABASE_PASSWORD", "DATABASE_DB", "DATABASE_HOST", "DATABASE_PORT", "DATABASE_USER"]
go
5
0
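buildDatabaseURL above assembles a libpq-style connection string from five environment variables. For illustration, here is a rough Python rendering of the same format string, with made-up values standing in for a real environment; the `postgresql://` scheme and the `sslmode=disable` suffix are taken directly from the Go code.

```python
import os

# Made-up values standing in for a real environment.
os.environ.update({
    "DATABASE_USER": "app",
    "DATABASE_PASSWORD": "secret",
    "DATABASE_HOST": "localhost",
    "DATABASE_PORT": "5432",
    "DATABASE_DB": "app_db",
})

dsn = "postgresql://{user}:{password}@{host}:{port}/{db}?sslmode=disable".format(
    user=os.environ["DATABASE_USER"],
    password=os.environ["DATABASE_PASSWORD"],
    host=os.environ["DATABASE_HOST"],
    port=os.environ["DATABASE_PORT"],
    db=os.environ["DATABASE_DB"],
)
print(dsn)  # postgresql://app:secret@localhost:5432/app_db?sslmode=disable
```

Like the Go original, this sketch does not URL-escape the credentials, so it assumes values without reserved characters.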
server/router.go
package server

import (
	"os"

	"singo/api"
	"singo/middleware"

	"github.com/gin-gonic/gin"
)

// NewRouter configures the routes
func NewRouter() *gin.Engine {
	r := gin.Default()

	// Middleware; the order must not be changed
	r.Use(middleware.Session(os.Getenv("SESSION_SECRET")))
	r.Use(middleware.Cors())
	r.Use(middleware.CurrentUser())

	// Routes
	v1 := r.Group("/api/v1")
	{
		v1.POST("ping", api.Ping)

		// User registration
		v1.POST("user/register", api.UserRegister)

		// User login
		v1.POST("user/login", api.UserLogin)

		// Routes that require an authenticated user
		auth := v1.Group("")
		auth.Use(middleware.AuthRequired())
		{
			// User Routing
			auth.GET("user/me", api.UserMe)
			auth.DELETE("user/logout", api.UserLogout)
		}
	}
	return r
}

[ "\"SESSION_SECRET\"" ]
[]
[ "SESSION_SECRET" ]
[]
["SESSION_SECRET"]
go
1
0
cmd/update-main.go
/* * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cmd import ( "bufio" "context" "crypto" "encoding/hex" "fmt" "io/ioutil" "net/http" "os" "path/filepath" "runtime" "strings" "time" "github.com/inconshreveable/go-update" "github.com/minio/cli" "github.com/minio/minio/cmd/logger" _ "github.com/minio/sha256-simd" // Needed for sha256 hash verifier. "github.com/segmentio/go-prompt" ) // Check for new software updates. var updateCmd = cli.Command{ Name: "update", Usage: "Check for a new software update.", Action: mainUpdate, Flags: []cli.Flag{ cli.BoolFlag{ Name: "quiet", Usage: "Disable any update prompt message.", }, }, CustomHelpTemplate: `Name: {{.HelpName}} - {{.Usage}} USAGE: {{.HelpName}}{{if .VisibleFlags}} [FLAGS]{{end}} {{if .VisibleFlags}} FLAGS: {{range .VisibleFlags}}{{.}} {{end}}{{end}} EXIT STATUS: 0 - You are already running the most recent version. 1 - New update was applied successfully. -1 - Error in getting update information. EXAMPLES: 1. Check and update minio: $ {{.HelpName}} `, } const ( minioReleaseTagTimeLayout = "2006-01-02T15-04-05Z" minioOSARCH = runtime.GOOS + "-" + runtime.GOARCH minioReleaseURL = "https://dl.minio.io/server/minio/release/" + minioOSARCH + "/" ) var ( // Newer official download info URLs appear earlier below. minioReleaseInfoURLs = []string{ minioReleaseURL + "minio.sha256sum", minioReleaseURL + "minio.shasum", } // For windows our files have .exe additionally. minioReleaseWindowsInfoURLs = []string{ minioReleaseURL + "minio.exe.sha256sum", minioReleaseURL + "minio.exe.shasum", } ) // minioVersionToReleaseTime - parses a standard official release // Minio version string. // // An official binary's version string is the release time formatted // with RFC3339 (in UTC) - e.g. `2017-09-29T19:16:56Z` func minioVersionToReleaseTime(version string) (releaseTime time.Time, err error) { return time.Parse(time.RFC3339, version) } // releaseTimeToReleaseTag - converts a time to a string formatted as // an official Minio release tag. // // An official minio release tag looks like: // `RELEASE.2017-09-29T19-16-56Z` func releaseTimeToReleaseTag(releaseTime time.Time) string { return "RELEASE." + releaseTime.Format(minioReleaseTagTimeLayout) } // releaseTagToReleaseTime - reverse of `releaseTimeToReleaseTag()` func releaseTagToReleaseTime(releaseTag string) (releaseTime time.Time, err error) { tagTimePart := strings.TrimPrefix(releaseTag, "RELEASE.") if tagTimePart == releaseTag { return releaseTime, fmt.Errorf("%s is not a valid release tag", releaseTag) } return time.Parse(minioReleaseTagTimeLayout, tagTimePart) } // getModTime - get the file modification time of `path` func getModTime(path string) (t time.Time, err error) { // Convert to absolute path absPath, err := filepath.Abs(path) if err != nil { return t, fmt.Errorf("Unable to get absolute path of %s. %s", path, err) } // Version is minio non-standard, we will use minio binary's // ModTime as release time. 
fi, err := os.Stat(absPath) if err != nil { return t, fmt.Errorf("Unable to get ModTime of %s. %s", absPath, err) } // Return the ModTime return fi.ModTime().UTC(), nil } // GetCurrentReleaseTime - returns this process's release time. If it // is official minio version, parsed version is returned else minio // binary's mod time is returned. func GetCurrentReleaseTime() (releaseTime time.Time, err error) { if releaseTime, err = minioVersionToReleaseTime(Version); err == nil { return releaseTime, err } // Looks like version is minio non-standard, we use minio // binary's ModTime as release time: return getModTime(os.Args[0]) } // IsDocker - returns if the environment minio is running in docker or // not. The check is a simple file existence check. // // https://github.com/moby/moby/blob/master/daemon/initlayer/setup_unix.go#L25 // // "/.dockerenv": "file", // func IsDocker() bool { _, err := os.Stat("/.dockerenv") if os.IsNotExist(err) { return false } // Log error, as we will not propagate it to caller logger.LogIf(context.Background(), err) return err == nil } // IsDCOS returns true if minio is running in DCOS. func IsDCOS() bool { // http://mesos.apache.org/documentation/latest/docker-containerizer/ // Mesos docker containerizer sets this value return os.Getenv("MESOS_CONTAINER_NAME") != "" } // IsKubernetes returns true if minio is running in kubernetes. func IsKubernetes() bool { // Kubernetes env used to validate if we are // indeed running inside a kubernetes pod // is KUBERNETES_SERVICE_HOST but in future // we might need to enhance this. return os.Getenv("KUBERNETES_SERVICE_HOST") != "" } // IsBOSH returns true if minio is deployed from a bosh package func IsBOSH() bool { // "/var/vcap/bosh" exists in BOSH deployed instance. _, err := os.Stat("/var/vcap/bosh") if os.IsNotExist(err) { return false } // Log error, as we will not propagate it to caller logger.LogIf(context.Background(), err) return err == nil } // Minio Helm chart uses DownwardAPIFile to write pod label info to /podinfo/labels // More info: https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/#store-pod-fields // Check if this is Helm package installation and report helm chart version func getHelmVersion(helmInfoFilePath string) string { // Read the file exists. helmInfoFile, err := os.Open(helmInfoFilePath) if err != nil { // Log errors and return "" as Minio can be deployed // without Helm charts as well. if !os.IsNotExist(err) { reqInfo := (&logger.ReqInfo{}).AppendTags("helmInfoFilePath", helmInfoFilePath) ctx := logger.SetReqInfo(context.Background(), reqInfo) logger.LogIf(ctx, err) } return "" } scanner := bufio.NewScanner(helmInfoFile) for scanner.Scan() { if strings.Contains(scanner.Text(), "chart=") { helmChartVersion := strings.TrimPrefix(scanner.Text(), "chart=") // remove quotes from the chart version return strings.Trim(helmChartVersion, `"`) } } return "" } // IsSourceBuild - returns if this binary is a non-official build from // source code. func IsSourceBuild() bool { _, err := minioVersionToReleaseTime(Version) return err != nil } // DO NOT CHANGE USER AGENT STYLE. // The style should be // // Minio (<OS>; <ARCH>[; <MODE>][; dcos][; kubernetes][; docker][; source]) Minio/<VERSION> Minio/<RELEASE-TAG> Minio/<COMMIT-ID> [Minio/universe-<PACKAGE-NAME>] [Minio/helm-<HELM-VERSION>] // // Any change here should be discussed by opening an issue at // https://github.com/minio/minio/issues. 
func getUserAgent(mode string) string { userAgentParts := []string{} // Helper function to concisely append a pair of strings to a // the user-agent slice. uaAppend := func(p, q string) { userAgentParts = append(userAgentParts, p, q) } uaAppend("Minio (", runtime.GOOS) uaAppend("; ", runtime.GOARCH) if mode != "" { uaAppend("; ", mode) } if IsDCOS() { uaAppend("; ", "dcos") } if IsKubernetes() { uaAppend("; ", "kubernetes") } if IsDocker() { uaAppend("; ", "docker") } if IsBOSH() { uaAppend("; ", "bosh") } if IsSourceBuild() { uaAppend("; ", "source") } uaAppend(") Minio/", Version) uaAppend(" Minio/", ReleaseTag) uaAppend(" Minio/", CommitID) if IsDCOS() { universePkgVersion := os.Getenv("MARATHON_APP_LABEL_DCOS_PACKAGE_VERSION") // On DC/OS environment try to the get universe package version. if universePkgVersion != "" { uaAppend(" Minio/universe-", universePkgVersion) } } if IsKubernetes() { // In Kubernetes environment, try to fetch the helm package version helmChartVersion := getHelmVersion("/podinfo/labels") if helmChartVersion != "" { uaAppend(" Minio/helm-", helmChartVersion) } } pcfTileVersion := os.Getenv("MINIO_PCF_TILE_VERSION") if pcfTileVersion != "" { uaAppend(" Minio/pcf-tile-", pcfTileVersion) } return strings.Join(userAgentParts, "") } func downloadReleaseURL(releaseChecksumURL string, timeout time.Duration, mode string) (content string, err error) { req, err := http.NewRequest("GET", releaseChecksumURL, nil) if err != nil { return content, err } req.Header.Set("User-Agent", getUserAgent(mode)) client := &http.Client{ Timeout: timeout, Transport: &http.Transport{ // need to close connection after usage. DisableKeepAlives: true, }, } resp, err := client.Do(req) if err != nil { return content, err } if resp == nil { return content, fmt.Errorf("No response from server to download URL %s", releaseChecksumURL) } defer CloseResponse(resp.Body) if resp.StatusCode != http.StatusOK { return content, fmt.Errorf("Error downloading URL %s. Response: %v", releaseChecksumURL, resp.Status) } contentBytes, err := ioutil.ReadAll(resp.Body) if err != nil { return content, fmt.Errorf("Error reading response. %s", err) } return string(contentBytes), err } // DownloadReleaseData - downloads release data from minio official server. func DownloadReleaseData(timeout time.Duration, mode string) (data string, err error) { releaseURLs := minioReleaseInfoURLs if runtime.GOOS == globalWindowsOSName { releaseURLs = minioReleaseWindowsInfoURLs } return func() (data string, err error) { for _, url := range releaseURLs { data, err = downloadReleaseURL(url, timeout, mode) if err == nil { return data, nil } } return data, fmt.Errorf("Failed to fetch release URL - last error: %s", err) }() } // parseReleaseData - parses release info file content fetched from // official minio download server. // // The expected format is a single line with two words like: // // fbe246edbd382902db9a4035df7dce8cb441357d minio.RELEASE.2016-10-07T01-16-39Z // // The second word must be `minio.` appended to a standard release tag. 
func parseReleaseData(data string) (sha256Hex string, releaseTime time.Time, err error) { fields := strings.Fields(data) if len(fields) != 2 { err = fmt.Errorf("Unknown release data `%s`", data) return sha256Hex, releaseTime, err } sha256Hex = fields[0] releaseInfo := fields[1] fields = strings.SplitN(releaseInfo, ".", 2) if len(fields) != 2 { err = fmt.Errorf("Unknown release information `%s`", releaseInfo) return sha256Hex, releaseTime, err } if fields[0] != "minio" { err = fmt.Errorf("Unknown release `%s`", releaseInfo) return sha256Hex, releaseTime, err } releaseTime, err = releaseTagToReleaseTime(fields[1]) if err != nil { err = fmt.Errorf("Unknown release tag format. %s", err) } return sha256Hex, releaseTime, err } func getLatestReleaseTime(timeout time.Duration, mode string) (sha256Hex string, releaseTime time.Time, err error) { data, err := DownloadReleaseData(timeout, mode) if err != nil { return sha256Hex, releaseTime, err } return parseReleaseData(data) } const ( // Kubernetes deployment doc link. kubernetesDeploymentDoc = "https://docs.minio.io/docs/deploy-minio-on-kubernetes" // Mesos deployment doc link. mesosDeploymentDoc = "https://docs.minio.io/docs/deploy-minio-on-dc-os" ) func getDownloadURL(releaseTag string) (downloadURL string) { // Check if we are in DCOS environment, return // deployment guide for update procedures. if IsDCOS() { return mesosDeploymentDoc } // Check if we are in kubernetes environment, return // deployment guide for update procedures. if IsKubernetes() { return kubernetesDeploymentDoc } // Check if we are docker environment, return docker update command if IsDocker() { // Construct release tag name. return fmt.Sprintf("docker pull minio/minio:%s", releaseTag) } // For binary only installations, we return link to the latest binary. if runtime.GOOS == "windows" { return minioReleaseURL + "minio.exe" } return minioReleaseURL + "minio" } func getUpdateInfo(timeout time.Duration, mode string) (updateMsg string, sha256Hex string, currentReleaseTime, latestReleaseTime time.Time, err error) { currentReleaseTime, err = GetCurrentReleaseTime() if err != nil { return updateMsg, sha256Hex, currentReleaseTime, latestReleaseTime, err } sha256Hex, latestReleaseTime, err = getLatestReleaseTime(timeout, mode) if err != nil { return updateMsg, sha256Hex, currentReleaseTime, latestReleaseTime, err } var older time.Duration var downloadURL string if latestReleaseTime.After(currentReleaseTime) { older = latestReleaseTime.Sub(currentReleaseTime) downloadURL = getDownloadURL(releaseTimeToReleaseTag(latestReleaseTime)) } return prepareUpdateMessage(downloadURL, older), sha256Hex, currentReleaseTime, latestReleaseTime, nil } func doUpdate(sha256Hex string, latestReleaseTime time.Time, ok bool) (updateStatusMsg string, err error) { if !ok { updateStatusMsg = colorGreenBold("Minio update to version RELEASE.%s canceled.", latestReleaseTime.Format(minioReleaseTagTimeLayout)) return updateStatusMsg, nil } var sha256Sum []byte sha256Sum, err = hex.DecodeString(sha256Hex) if err != nil { return updateStatusMsg, err } resp, err := http.Get(getDownloadURL(releaseTimeToReleaseTag(latestReleaseTime))) if err != nil { return updateStatusMsg, err } defer CloseResponse(resp.Body) // FIXME: add support for gpg verification as well. 
if err = update.Apply(resp.Body, update.Options{ Hash: crypto.SHA256, Checksum: sha256Sum, }, ); err != nil { return updateStatusMsg, err } return colorGreenBold("Minio updated to version RELEASE.%s successfully.", latestReleaseTime.Format(minioReleaseTagTimeLayout)), nil } func shouldUpdate(quiet bool, sha256Hex string, latestReleaseTime time.Time) (ok bool) { ok = true if !quiet { ok = prompt.Confirm(colorGreenBold("Update to RELEASE.%s [%s]", latestReleaseTime.Format(minioReleaseTagTimeLayout), "yes")) } return ok } func mainUpdate(ctx *cli.Context) { if len(ctx.Args()) != 0 { cli.ShowCommandHelpAndExit(ctx, "update", -1) } handleCommonEnvVars() quiet := ctx.Bool("quiet") || ctx.GlobalBool("quiet") if quiet { logger.EnableQuiet() } minioMode := "" updateMsg, sha256Hex, _, latestReleaseTime, err := getUpdateInfo(10*time.Second, minioMode) if err != nil { logger.Info(err.Error()) os.Exit(-1) } // Nothing to update running the latest release. if updateMsg == "" { logger.Info(colorGreenBold("You are already running the most recent version of ‘minio’.")) os.Exit(0) } logger.Info(updateMsg) // if the in-place update is disabled then we shouldn't ask the // user to update the binaries. if strings.Contains(updateMsg, minioReleaseURL) && !globalInplaceUpdateDisabled { var updateStatusMsg string updateStatusMsg, err = doUpdate(sha256Hex, latestReleaseTime, shouldUpdate(quiet, sha256Hex, latestReleaseTime)) if err != nil { logger.Info(colorRedBold("Unable to update ‘minio’.")) logger.Info(err.Error()) os.Exit(-1) } logger.Info(updateStatusMsg) os.Exit(1) } }
[ "\"MESOS_CONTAINER_NAME\"", "\"KUBERNETES_SERVICE_HOST\"", "\"MARATHON_APP_LABEL_DCOS_PACKAGE_VERSION\"", "\"MINIO_PCF_TILE_VERSION\"" ]
[]
[ "MESOS_CONTAINER_NAME", "KUBERNETES_SERVICE_HOST", "MARATHON_APP_LABEL_DCOS_PACKAGE_VERSION", "MINIO_PCF_TILE_VERSION" ]
[]
["MESOS_CONTAINER_NAME", "KUBERNETES_SERVICE_HOST", "MARATHON_APP_LABEL_DCOS_PACKAGE_VERSION", "MINIO_PCF_TILE_VERSION"]
go
4
0
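parseReleaseData and releaseTagToReleaseTime above define the checksum-file format ("&lt;hash&gt; minio.RELEASE.&lt;timestamp&gt;") and the tag time layout 2006-01-02T15-04-05Z. A small Python sketch of the same parsing, using the example line from the code comment; the strptime format string is my translation of that Go layout.

```python
from datetime import datetime

# Example line in the format described by parseReleaseData above.
data = "fbe246edbd382902db9a4035df7dce8cb441357d minio.RELEASE.2016-10-07T01-16-39Z"

sha256_hex, release_info = data.split()
prefix, release_tag = release_info.split(".", 1)
assert prefix == "minio"

# Go layout "2006-01-02T15-04-05Z" corresponds to this strptime format.
release_time = datetime.strptime(release_tag[len("RELEASE."):],
                                 "%Y-%m-%dT%H-%M-%SZ")
print(sha256_hex, release_time.isoformat())
```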
stripe/stripe.go
package stripe import ( "encoding/json" "errors" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" "strings" ) // enable logging to print the request and reponses to stdout var _log bool // the API Key used to authenticate all Stripe API requests var _key string // the default URL for all Stripe API requests var _url string = "https://api.stripe.com" const apiVersion = "2013-08-13" // SetUrl will override the default Stripe API URL. This is primarily used // for unit testing. func SetUrl(url string) { _url = url } // SetKey will set the default Stripe API key used to authenticate all Stripe // API requests. func SetKey(key string) { _key = key } // Available APIs var ( Charges = new(ChargeClient) Coupons = new(CouponClient) Customers = new(CustomerClient) Invoices = new(InvoiceClient) InvoiceItems = new(InvoiceItemClient) Plans = new(PlanClient) Subscriptions = new(SubscriptionClient) Tokens = new(TokenClient) ) // SetKeyEnv retrieves the Stripe API key using the STRIPE_API_KEY environment // variable. func SetKeyEnv() (err error) { _key = os.Getenv("STRIPE_API_KEY") if _key == "" { err = errors.New("STRIPE_API_KEY not found in environment") } return } // query submits an http.Request and parses the JSON-encoded http.Response, // storing the result in the value pointed to by v. func query(method, path string, values url.Values, v interface{}) error { // parse the stripe URL endpoint, err := url.Parse(_url) if err != nil { return err } // set the endpoint for the specific API endpoint.Path = path endpoint.User = url.User(_key) // if this is an http GET, add the url.Values to the endpoint if method == "GET" { endpoint.RawQuery = values.Encode() } // else if this is not a GET, encode the url.Values in the body. var reqBody io.Reader if method != "GET" && values != nil { reqBody = strings.NewReader(values.Encode()) } // Log request if logging enabled if _log { fmt.Println("REQUEST: ", method, endpoint.String()) fmt.Println(values.Encode()) } // create the request req, err := http.NewRequest(method, endpoint.String(), reqBody) if err != nil { return err } req.Header.Set("Stripe-Version", apiVersion) // submit the http request r, err := http.DefaultClient.Do(req) if err != nil { return err } // read the body of the http message into a byte array body, err := ioutil.ReadAll(r.Body) defer r.Body.Close() if err != nil { return err } // Log response if logging enabled if _log { fmt.Println("RESPONSE: ", r.StatusCode) fmt.Println(string(body)) } // is this an error? if r.StatusCode != 200 { error := Error{} json.Unmarshal(body, &error) return &error } //parse the JSON response into the response object return json.Unmarshal(body, v) } // Response to a Deletion request. type DeleteResp struct { // ID of the Object that was deleted Id string `json:"id"` // Boolean value indicating object was successfully deleted. Deleted bool `json:"deleted"` }
[ "\"STRIPE_API_KEY\"" ]
[]
[ "STRIPE_API_KEY" ]
[]
["STRIPE_API_KEY"]
go
1
0
qa/rpc-tests/maxblocksinflight.py
#!/usr/bin/env python2 # # Distributed under the MIT/X11 software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * import logging ''' In this test we connect to one node over p2p, send it numerous inv's, and compare the resulting number of getdata requests to a max allowed value. We test for exceeding 128 blocks in flight, which was the limit an 0.9 client will reach. [0.10 clients shouldn't request more than 16 from a single peer.] ''' MAX_REQUESTS = 128 class TestManager(NodeConnCB): # set up NodeConnCB callbacks, overriding base class def on_getdata(self, conn, message): self.log.debug("got getdata %s" % repr(message)) # Log the requests for inv in message.inv: if inv.hash not in self.blockReqCounts: self.blockReqCounts[inv.hash] = 0 self.blockReqCounts[inv.hash] += 1 def on_close(self, conn): if not self.disconnectOkay: raise EarlyDisconnectError(0) def __init__(self): NodeConnCB.__init__(self) self.log = logging.getLogger("BlockRelayTest") def add_new_connection(self, connection): self.connection = connection self.blockReqCounts = {} self.disconnectOkay = False def run(self): self.connection.rpc.generate(1) # Leave IBD numBlocksToGenerate = [8, 16, 128, 1024] for count in range(len(numBlocksToGenerate)): current_invs = [] for i in range(numBlocksToGenerate[count]): current_invs.append(CInv(2, random.randrange(0, 1 << 256))) if len(current_invs) >= 50000: self.connection.send_message(msg_inv(current_invs)) current_invs = [] if len(current_invs) > 0: self.connection.send_message(msg_inv(current_invs)) # Wait and see how many blocks were requested time.sleep(2) total_requests = 0 with mininode_lock: for key in self.blockReqCounts: total_requests += self.blockReqCounts[key] if self.blockReqCounts[key] > 1: raise AssertionError("Error, test failed: block %064x requested more than once" % key) if total_requests > MAX_REQUESTS: raise AssertionError("Error, too many blocks (%d) requested" % total_requests) print "Round %d: success (total requests: %d)" % (count, total_requests) self.disconnectOkay = True self.connection.disconnect_node() class MaxBlocksInFlightTest(BitcoinTestFramework): def add_options(self, parser): parser.add_option("--testbinary", dest="testbinary", default=os.getenv("CPAYD", "cpayd"), help="Binary to test max block requests behavior") def setup_chain(self): print "Initializing test directory "+self.options.tmpdir initialize_chain_clean(self.options.tmpdir, 1) def setup_network(self): self.nodes = start_nodes(1, self.options.tmpdir, extra_args=[['-debug', '-whitelist=127.0.0.1']], binary=[self.options.testbinary]) def run_test(self): test = TestManager() test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test)) NetworkThread().start() # Start up network handling in another thread test.run() if __name__ == '__main__': MaxBlocksInFlightTest().main()
[]
[]
[ "CPAYD" ]
[]
["CPAYD"]
python
1
0
example/main.go
// Copyright Red Hat, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "fmt" "log" "os" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/clientcmd" "maistra.io/api/client/versioned" corev1 "maistra.io/api/core/v1" ) func createClientSet() *versioned.Clientset { kubeconfig := os.Getenv("KUBECONFIG") if len(kubeconfig) == 0 { log.Fatalf("Environment variable KUBECONFIG needs to be set") } restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig) if err != nil { log.Fatalf("Failed to create k8s rest client: %s", err) } return versioned.NewForConfigOrDie(restConfig) } func listExtensions(cs *versioned.Clientset) { fmt.Printf("Listing Extensions in all namespaces:\n") list, err := cs.CoreV1().ServiceMeshExtensions(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) if err != nil { fmt.Printf("error listing: %v\n", err) return } if len(list.Items) == 0 { fmt.Printf("No extensions found.\n") return } for _, e := range list.Items { fmt.Printf("- Found extension %s/%s (ready: %v)\n", e.Namespace, e.Name, e.Status.Deployment.Ready) fmt.Printf("\tConfig for this extension:\n") // e is of type v1.ServiceMeshExtension // the copy below is not needed, it just ilustrates an use of the api var copy *corev1.ServiceMeshExtension = e.DeepCopy() config, err := copy.Spec.Config.MarshalJSON() if err != nil { fmt.Printf("\terror getting extension config: %v\n", err) } else { fmt.Printf("\t%s\n", config) } } } func listControlPlanes(cs *versioned.Clientset) { fmt.Printf("Listing Control planes in all namespaces:\n") list, err := cs.CoreV2().ServiceMeshControlPlanes(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) if err != nil { fmt.Printf("error listing: %v\n", err) return } if len(list.Items) == 0 { fmt.Printf("No control planes found.") return } for _, cp := range list.Items { fmt.Printf("- Found control plane %s/%s: (version: %s)\n", cp.Namespace, cp.Name, cp.Status.ChartVersion) memberroll, err := cs.CoreV1().ServiceMeshMemberRolls(cp.Namespace).Get(context.TODO(), "default", metav1.GetOptions{}) if err != nil { fmt.Printf("could not get the `default' SMMR in the %s namespace: %v\n", cp.Namespace, err) continue } fmt.Printf("\tMembers of this control plane:\n") for _, member := range memberroll.Spec.Members { fmt.Printf("\t\t- %s\n", member) } } } func main() { cs := createClientSet() listExtensions(cs) fmt.Printf("\n") listControlPlanes(cs) }
[ "\"KUBECONFIG\"" ]
[]
[ "KUBECONFIG" ]
[]
["KUBECONFIG"]
go
1
0
cmd/setwallpaper/main.go
package main import ( "errors" "fmt" "io/ioutil" "net/http" "net/url" "os" "path/filepath" "strings" "github.com/urfave/cli" "github.com/xyproto/wallutils" ) // exists checks if the given path exists func exists(path string) bool { _, err := os.Stat(path) return err == nil } // download can download a file to the given filename. // Set redownload to true for downloading again even if it exists. func download(url, filename string, verbose, redownload bool) error { // Check if the file exists (and that force is not enabled) if exists(filename) && !redownload { // The file already exists. This is fine, skip the download return nil } // Prepare the client var client http.Client resp, err := client.Get(url) if err != nil { return err } defer resp.Body.Close() if verbose { if verbose { fmt.Println("Downloading " + url) } } // Download the file b, err := ioutil.ReadAll(resp.Body) if err != nil { return err } // Write the file return ioutil.WriteFile(filename, b, 0644) } func setWallpaperAction(c *cli.Context) error { if c.NArg() == 0 { return errors.New("please specify an image filename or URL") } imageFilename := c.Args().Get(0) // Retrieve flags from the context verbose := c.IsSet("verbose") mode := c.String("mode") downloadDir := c.String("download") if !exists(downloadDir) { // Last resort downloadDir = "/tmp" } if !exists(downloadDir) { return errors.New("could not find: " + downloadDir) } // Check if the argument is an URL that uses the http or https protocol if strings.HasPrefix(imageFilename, "http://") || strings.HasPrefix(imageFilename, "https://") { u, err := url.Parse(imageFilename) if err == nil { // no error downloadFilename := filepath.Join(downloadDir, filepath.Base(imageFilename)) if err := download(u.String(), downloadFilename, verbose, false); err != nil { return err } // Use the downloaded image imageFilename = downloadFilename } } // Find the absolute path absImageFilename, err := filepath.Abs(imageFilename) if err == nil { imageFilename = absImageFilename } // Set the desktop wallpaper if err := wallutils.SetWallpaperCustom(imageFilename, mode, verbose); err != nil { return fmt.Errorf("could not set wallpaper: %s", err) } return nil } func expanduser(path string) string { if strings.HasPrefix(path, "~") { path = strings.Replace(path, "~", os.Getenv("HOME"), 1) } else if strings.HasPrefix(path, "$HOME") { path = strings.Replace(path, "$HOME", os.Getenv("HOME"), 1) } return path } func downloadDirectory() string { // Check if $XDG_DOWNLOAD_DIR is set path := os.Getenv("XDG_DOWNLOAD_DIR") if exists(path) { return path } // Check if XDG_DOWNLOAD_DIR is defined in ~/.config/user-dirs.dirs dirfileContents, err := ioutil.ReadFile(expanduser("~/.config/user-dirs.dirs")) if err == nil { for _, line := range strings.Split(string(dirfileContents), "\n") { if strings.HasPrefix(line, "XDG_DOWNLOAD_DIR") { elements := strings.SplitN(line, "=", 2) path = strings.TrimSpace(elements[1]) if exists(path) { return path } break } } } // Check if ~/downloads exists path = expanduser("~/downloads") if exists(path) { return path } // Check if ~/download exists path = expanduser("~/download") if exists(path) { return path } // Use the home directory, if it exists path = expanduser("~") if exists(path) { return path } // Using /tmp is the last resort return "/tmp" } func main() { app := cli.NewApp() app.Name = "setwallpaper" app.Usage = "set the desktop wallpaper" app.UsageText = "setwallpaper [options] [path or URL to JPEG or PNG image]" app.Version = wallutils.VersionString app.HideHelp = true 
cli.VersionFlag = cli.BoolFlag{ Name: "version, V", Usage: "output version information", } app.Flags = []cli.Flag{ cli.BoolFlag{ Name: "verbose, v", Usage: "verbose output", }, cli.StringFlag{ Name: "mode, m", Value: "stretch", // the default value Usage: "wallpaper mode (stretch | center | tile | scale) \n\t+ modes specific to the currently running DE/WM", }, cli.StringFlag{ Name: "download, d", Value: downloadDirectory(), // the default value Usage: "download directory", }, } app.Action = setWallpaperAction if err := app.Run(os.Args); err != nil { wallutils.Quit(err) } }
[ "\"HOME\"", "\"HOME\"", "\"XDG_DOWNLOAD_DIR\"" ]
[]
[ "HOME", "XDG_DOWNLOAD_DIR" ]
[]
["HOME", "XDG_DOWNLOAD_DIR"]
go
2
0
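downloadDirectory above is a fallback chain: $XDG_DOWNLOAD_DIR, then the XDG_DOWNLOAD_DIR entry in ~/.config/user-dirs.dirs, then ~/downloads, ~/download, the home directory, and finally /tmp. A condensed Python sketch of the same idea, with the user-dirs.dirs parsing left out for brevity:

```python
import os

def download_directory():
    """Return the first existing candidate directory, falling back to /tmp."""
    candidates = [
        os.getenv("XDG_DOWNLOAD_DIR", ""),
        os.path.expanduser("~/downloads"),
        os.path.expanduser("~/download"),
        os.path.expanduser("~"),
    ]
    for path in candidates:
        if path and os.path.isdir(path):
            return path
    return "/tmp"

print(download_directory())
```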
testsuite/tests/service_mesh/auth/test_user_key_app_id.py
""" Auth tests for user_key and app_id/app_key authentication modes for Service Mesh Service Mesh by allows both query and headers location to be used """ import pytest from threescale_api.resources import Service from testsuite.capabilities import Capability pytestmark = pytest.mark.required_capabilities(Capability.SERVICE_MESH) @pytest.fixture(scope="module", params=[ pytest.param(Service.AUTH_USER_KEY, id="user-key"), pytest.param(Service.AUTH_APP_ID_KEY, id="app-id") ]) def service_settings(request, service_settings): "Set auth mode to user key" service_settings.update(backend_version=request.param) return service_settings @pytest.fixture def invalid_auth(service_settings): """Returns different invalid credentials for user_key and app_id/app_key authentication modes""" if service_settings["backend_version"] == Service.AUTH_APP_ID_KEY: return {"app_id": ":invalid_id", "app_key": ":invalid_key"} return {"user_key": "invalid_key"} @pytest.mark.parametrize("credentials_location", ["query", "headers"]) def test_request_with_auth(api_client, credentials_location): """Check valid credentials passed in query and headers, should return 200""" # pylint: disable=protected-access client = api_client() client.auth.location = credentials_location response = client.get("/get") assert response.status_code == 200 @pytest.mark.parametrize("credentials_location", ["params", "headers"]) def test_request_with_wrong_auth(api_client, invalid_auth, credentials_location): """Check wrong credentials passed in query (params) or headers, should fail with 403 """ # pylint: disable=protected-access client = api_client() client.auth = None auth = {credentials_location: invalid_auth} response = client.get("/get", **auth) assert response.status_code == 403 def test_request_without_auth(api_client, no_auth_status_code): """Forbid access if no credentials are provided, should fail with 401""" # pylint: disable=protected-access client = api_client() client.auth = None response = client.get("/get") assert response.status_code == no_auth_status_code
[]
[]
[]
[]
[]
python
null
null
null
timeweb/timeweb/wsgi.py
""" WSGI config for timeweb project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'timeweb.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
pkg/network/kube_proxy.go
package network import ( "net" "os" "path/filepath" "time" "github.com/pkg/errors" uns "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" operv1 "github.com/openshift/api/operator/v1" "github.com/openshift/cluster-network-operator/pkg/render" k8sutil "github.com/openshift/cluster-network-operator/pkg/util/k8s" ) // ShouldDeployKubeProxy determines if the desired network type should // install kube-proxy. // openshift-sdn deploys its own kube-proxy. ovn-kubernetes and // Kuryr-Kubernetes handle services on their own. All other // network providers are assumed to require kube-proxy func ShouldDeployKubeProxy(conf *operv1.NetworkSpec) bool { switch conf.DefaultNetwork.Type { case operv1.NetworkTypeOpenShiftSDN: return false case operv1.NetworkTypeOVNKubernetes: return false case operv1.NetworkTypeKuryr: return false default: return true } } // kubeProxyConfiguration builds the (yaml text of) the kube-proxy config object // It merges multiple sources of arguments. The precedence order is: // - pluginDefaults // - conf.KubeProxyConfig.ProxyArguments // - pluginOverrides func kubeProxyConfiguration(pluginDefaults map[string]operv1.ProxyArgumentList, conf *operv1.NetworkSpec, pluginOverrides map[string]operv1.ProxyArgumentList) (string, error) { p := conf.KubeProxyConfig args := map[string]operv1.ProxyArgumentList{} args["bind-address"] = []string{p.BindAddress} if len(conf.ClusterNetwork) == 1 { args["cluster-cidr"] = []string{conf.ClusterNetwork[0].CIDR} } args["iptables-sync-period"] = []string{p.IptablesSyncPeriod} args = k8sutil.MergeKubeProxyArguments(args, pluginDefaults) args = k8sutil.MergeKubeProxyArguments(args, p.ProxyArguments) args = k8sutil.MergeKubeProxyArguments(args, pluginOverrides) return k8sutil.GenerateKubeProxyConfiguration(args) } // ValidateStandaloneKubeProxy validates the kube-proxy configuration if // installation is requested. func ValidateStandaloneKubeProxy(conf *operv1.NetworkSpec) []error { if ShouldDeployKubeProxy(conf) { return validateKubeProxy(conf) } return nil } // validateKubeProxy checks that the kube-proxy specific configuration // is basically sane. // This is called either if DeployKubeProxy is true *or* by openshift-sdn func validateKubeProxy(conf *operv1.NetworkSpec) []error { out := []error{} p := conf.KubeProxyConfig if p == nil { return out } if p.IptablesSyncPeriod != "" { _, err := time.ParseDuration(p.IptablesSyncPeriod) if err != nil { out = append(out, errors.Errorf("IptablesSyncPeriod is not a valid duration (%v)", err)) } } if p.BindAddress != "" { if net.ParseIP(p.BindAddress) == nil { out = append(out, errors.Errorf("BindAddress must be a valid IP address")) } } // Don't allow ports to be overridden if p.ProxyArguments != nil { if val, ok := p.ProxyArguments["metrics-port"]; ok { if !(len(val) == 1 && val[0] == "9101") { out = append(out, errors.Errorf("kube-proxy --metrics-port must be 9101")) } } if val, ok := p.ProxyArguments["healthz-port"]; ok { if !(len(val) == 1 && val[0] == "10256") { out = append(out, errors.Errorf("kube-proxy --healthz-port must be 10256")) } } } return out } // FillKubeProxyDefaults inserts kube-proxy defaults, but only if // kube-proxy will be deployed explicitly. 
func FillKubeProxyDefaults(conf, previous *operv1.NetworkSpec) { if conf.DeployKubeProxy == nil { v := ShouldDeployKubeProxy(conf) conf.DeployKubeProxy = &v } if !*conf.DeployKubeProxy { return } if conf.KubeProxyConfig == nil { conf.KubeProxyConfig = &operv1.ProxyConfig{} } if conf.KubeProxyConfig.BindAddress == "" { // TODO: this will probably need to change for dual stack ip, _, err := net.ParseCIDR(conf.ClusterNetwork[0].CIDR) if err != nil { // this should not happen return } if ip.To4() != nil { conf.KubeProxyConfig.BindAddress = "0.0.0.0" } else { conf.KubeProxyConfig.BindAddress = "::" } } } // IsKubeProxyChangeSafe is noop, but it would check if the proposed kube-proxy // change is safe. func IsKubeProxyChangeSafe(prev, next *operv1.NetworkSpec) []error { // At present, all kube-proxy changes are safe to deploy return nil } // RenderStandaloneKubeProxy renders the standalone kube-proxy if installation was // requested. func RenderStandaloneKubeProxy(conf *operv1.NetworkSpec, manifestDir string) ([]*uns.Unstructured, error) { if !*conf.DeployKubeProxy { return nil, nil } kpcDefaults := map[string]operv1.ProxyArgumentList{ "metrics-bind-address": {"0.0.0.0"}, "metrics-port": {"9101"}, "healthz-port": {"10256"}, "proxy-mode": {"iptables"}, } kpc, err := kubeProxyConfiguration(kpcDefaults, conf, nil) if err != nil { return nil, errors.Wrapf(err, "failed to generate kube-proxy configuration file") } data := render.MakeRenderData() data.Data["ReleaseVersion"] = os.Getenv("RELEASE_VERSION") data.Data["KubeProxyImage"] = os.Getenv("KUBE_PROXY_IMAGE") data.Data["KUBERNETES_SERVICE_HOST"] = os.Getenv("KUBERNETES_SERVICE_HOST") data.Data["KUBERNETES_SERVICE_PORT"] = os.Getenv("KUBERNETES_SERVICE_PORT") data.Data["KubeProxyConfig"] = kpc manifests, err := render.RenderDir(filepath.Join(manifestDir, "kube-proxy"), &data) if err != nil { return nil, errors.Wrap(err, "failed to render kube-proxy manifests") } return manifests, nil }
[ "\"RELEASE_VERSION\"", "\"KUBE_PROXY_IMAGE\"", "\"KUBERNETES_SERVICE_HOST\"", "\"KUBERNETES_SERVICE_PORT\"" ]
[]
[ "KUBERNETES_SERVICE_HOST", "KUBE_PROXY_IMAGE", "RELEASE_VERSION", "KUBERNETES_SERVICE_PORT" ]
[]
["KUBERNETES_SERVICE_HOST", "KUBE_PROXY_IMAGE", "RELEASE_VERSION", "KUBERNETES_SERVICE_PORT"]
go
4
0
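kubeProxyConfiguration above documents a merge precedence of pluginDefaults, then conf.KubeProxyConfig.ProxyArguments, then pluginOverrides, with later sources winning. A toy Python sketch of that layering, assuming a simple replace-on-conflict merge, which may differ from the exact behavior of k8sutil.MergeKubeProxyArguments:

```python
# Later dicts win, mirroring the precedence order described above:
# pluginDefaults < conf.KubeProxyConfig.ProxyArguments < pluginOverrides.
plugin_defaults = {"proxy-mode": ["iptables"], "metrics-port": ["9101"]}
proxy_arguments = {"proxy-mode": ["ipvs"]}
plugin_overrides = {"metrics-port": ["9101"]}

args = {**plugin_defaults, **proxy_arguments, **plugin_overrides}
print(args)  # {'proxy-mode': ['ipvs'], 'metrics-port': ['9101']}
```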
config/sentry.go
package config

import "os"

// SentryConfig is the set of Sentry settings read from the environment
type SentryConfig struct {
	DSN     string
	ENV     string
	Release string
}

// NewSentryConfig is the SentryConfig constructor; it reads SENTRY_DSN,
// SENTRY_ENV and SENTRY_RELEASE from the environment
func NewSentryConfig() *SentryConfig {
	return &SentryConfig{
		DSN:     os.Getenv("SENTRY_DSN"),
		ENV:     os.Getenv("SENTRY_ENV"),
		Release: os.Getenv("SENTRY_RELEASE"),
	}
}
[ "\"SENTRY_DSN\"", "\"SENTRY_ENV\"", "\"SENTRY_RELEASE\"" ]
[]
[ "SENTRY_DSN", "SENTRY_RELEASE", "SENTRY_ENV" ]
[]
["SENTRY_DSN", "SENTRY_RELEASE", "SENTRY_ENV"]
go
3
0
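The constarg/constargjson columns for a Go record like this one could, in principle, be reproduced by scanning the source for string literals passed to os.Getenv. This is only an assumption about how such columns might be derived, not the dataset's actual tooling; note the record lists the three names in a different order than they appear in the source.

```python
import json
import re

go_source = '''
DSN:     os.Getenv("SENTRY_DSN"),
ENV:     os.Getenv("SENTRY_ENV"),
Release: os.Getenv("SENTRY_RELEASE"),
'''

# Collect unique string literals passed to os.Getenv, in first-seen order.
names = []
for name in re.findall(r'os\.Getenv\("([^"]+)"\)', go_source):
    if name not in names:
        names.append(name)

print(json.dumps(names))  # ["SENTRY_DSN", "SENTRY_ENV", "SENTRY_RELEASE"]
print(len(names))         # 3, matching this record's constargcount
```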
tests/conftest.py
# """ # PyTest Fixtures. # """ # import pytest # @pytest.fixture(scope="function") # def tmp(request): # """ # Create a `tmp` object that geneates a unique temporary directory, and file # for each test function that requires it. # """ # t = fs.Tmp() # yield t # t.remove()
[]
[]
[]
[]
[]
python
null
null
null
sdks/python/apache_beam/runners/dataflow/dataflow_runner.py
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """A runner implementation that submits a job for remote execution. The runner will create a JSON description of the job graph and then submit it to the Dataflow Service for remote execution by a worker. """ # pytype: skip-file from __future__ import absolute_import from __future__ import division import base64 import logging import os import subprocess import sys import threading import time import traceback import urllib from builtins import hex from collections import defaultdict from typing import TYPE_CHECKING from typing import List from future.utils import iteritems import apache_beam as beam from apache_beam import coders from apache_beam import error from apache_beam import pvalue from apache_beam.internal import pickler from apache_beam.internal.gcp import json_value from apache_beam.options.pipeline_options import DebugOptions from apache_beam.options.pipeline_options import GoogleCloudOptions from apache_beam.options.pipeline_options import SetupOptions from apache_beam.options.pipeline_options import StandardOptions from apache_beam.options.pipeline_options import TestOptions from apache_beam.options.pipeline_options import WorkerOptions from apache_beam.portability import common_urns from apache_beam.portability.api import beam_runner_api_pb2 from apache_beam.pvalue import AsSideInput from apache_beam.runners.common import DoFnSignature from apache_beam.runners.dataflow.internal import names from apache_beam.runners.dataflow.internal.clients import dataflow as dataflow_api from apache_beam.runners.dataflow.internal.names import PropertyNames from apache_beam.runners.dataflow.internal.names import TransformNames from apache_beam.runners.runner import PipelineResult from apache_beam.runners.runner import PipelineRunner from apache_beam.runners.runner import PipelineState from apache_beam.runners.runner import PValueCache from apache_beam.transforms import window from apache_beam.transforms.core import RunnerAPIPTransformHolder from apache_beam.transforms.display import DisplayData from apache_beam.transforms.sideinputs import SIDE_INPUT_PREFIX from apache_beam.typehints import typehints from apache_beam.utils import processes from apache_beam.utils import proto_utils from apache_beam.utils.interactive_utils import is_in_notebook from apache_beam.utils.plugin import BeamPlugin if TYPE_CHECKING: from apache_beam.pipeline import PTransformOverride if sys.version_info[0] > 2: unquote_to_bytes = urllib.parse.unquote_to_bytes quote = urllib.parse.quote else: unquote_to_bytes = urllib.unquote # pylint: disable=deprecated-urllib-function quote = urllib.quote # pylint: disable=deprecated-urllib-function __all__ = ['DataflowRunner'] _LOGGER = logging.getLogger(__name__) BQ_SOURCE_UW_ERROR = ( 'The Read(BigQuerySource(...)) 
transform is not supported with newer stack ' 'features (Fn API, Dataflow Runner V2, etc). Please use the transform ' 'apache_beam.io.gcp.bigquery.ReadFromBigQuery instead.') class DataflowRunner(PipelineRunner): """A runner that creates job graphs and submits them for remote execution. Every execution of the run() method will submit an independent job for remote execution that consists of the nodes reachable from the passed in node argument or entire graph if node is None. The run() method returns after the service created the job and will not wait for the job to finish if blocking is set to False. """ # A list of PTransformOverride objects to be applied before running a pipeline # using DataflowRunner. # Currently this only works for overrides where the input and output types do # not change. # For internal SDK use only. This should not be updated by Beam pipeline # authors. # Imported here to avoid circular dependencies. # TODO: Remove the apache_beam.pipeline dependency in CreatePTransformOverride from apache_beam.runners.dataflow.ptransform_overrides import CombineValuesPTransformOverride from apache_beam.runners.dataflow.ptransform_overrides import CreatePTransformOverride from apache_beam.runners.dataflow.ptransform_overrides import JrhReadPTransformOverride from apache_beam.runners.dataflow.ptransform_overrides import ReadPTransformOverride from apache_beam.runners.dataflow.ptransform_overrides import NativeReadPTransformOverride # These overrides should be applied before the proto representation of the # graph is created. _PTRANSFORM_OVERRIDES = [ CombineValuesPTransformOverride(), NativeReadPTransformOverride(), ] # type: List[PTransformOverride] _JRH_PTRANSFORM_OVERRIDES = [ JrhReadPTransformOverride(), ] # type: List[PTransformOverride] # These overrides should be applied after the proto representation of the # graph is created. _NON_PORTABLE_PTRANSFORM_OVERRIDES = [ CreatePTransformOverride(), ReadPTransformOverride(), ] # type: List[PTransformOverride] def __init__(self, cache=None): # Cache of CloudWorkflowStep protos generated while the runner # "executes" a pipeline. self._cache = cache if cache is not None else PValueCache() self._unique_step_id = 0 self._default_environment = None def is_fnapi_compatible(self): return False def apply(self, transform, input, options): self._maybe_add_unified_worker_missing_options(options) return super(DataflowRunner, self).apply(transform, input, options) def _get_unique_step_name(self): self._unique_step_id += 1 return 's%s' % self._unique_step_id @staticmethod def poll_for_job_completion(runner, result, duration): """Polls for the specified job to finish running (successfully or not). Updates the result with the new job information before returning. Args: runner: DataflowRunner instance to use for polling job state. result: DataflowPipelineResult instance used for job information. duration (int): The time to wait (in milliseconds) for job to finish. If it is set to :data:`None`, it will wait indefinitely until the job is finished. """ last_message_time = None current_seen_messages = set() last_error_rank = float('-inf') last_error_msg = None last_job_state = None # How long to wait after pipeline failure for the error # message to show up giving the reason for the failure. # It typically takes about 30 seconds. final_countdown_timer_secs = 50.0 sleep_secs = 5.0 # Try to prioritize the user-level traceback, if any. 
def rank_error(msg): if 'work item was attempted' in msg: return -1 elif 'Traceback' in msg: return 1 return 0 if duration: start_secs = time.time() duration_secs = duration // 1000 job_id = result.job_id() while True: response = runner.dataflow_client.get_job(job_id) # If get() is called very soon after Create() the response may not contain # an initialized 'currentState' field. if response.currentState is not None: if response.currentState != last_job_state: _LOGGER.info('Job %s is in state %s', job_id, response.currentState) last_job_state = response.currentState if str(response.currentState) != 'JOB_STATE_RUNNING': # Stop checking for new messages on timeout, explanatory # message received, success, or a terminal job state caused # by the user that therefore doesn't require explanation. if (final_countdown_timer_secs <= 0.0 or last_error_msg is not None or str(response.currentState) == 'JOB_STATE_DONE' or str(response.currentState) == 'JOB_STATE_CANCELLED' or str(response.currentState) == 'JOB_STATE_UPDATED' or str(response.currentState) == 'JOB_STATE_DRAINED'): break # Check that job is in a post-preparation state before starting the # final countdown. if (str(response.currentState) not in ('JOB_STATE_PENDING', 'JOB_STATE_QUEUED')): # The job has failed; ensure we see any final error messages. sleep_secs = 1.0 # poll faster during the final countdown final_countdown_timer_secs -= sleep_secs time.sleep(sleep_secs) # Get all messages since beginning of the job run or since last message. page_token = None while True: messages, page_token = runner.dataflow_client.list_messages( job_id, page_token=page_token, start_time=last_message_time) for m in messages: message = '%s: %s: %s' % (m.time, m.messageImportance, m.messageText) if not last_message_time or m.time > last_message_time: last_message_time = m.time current_seen_messages = set() if message in current_seen_messages: # Skip the message if it has already been seen at the current # time. This could be the case since the list_messages API is # queried starting at last_message_time. continue else: current_seen_messages.add(message) # Skip empty messages. if m.messageImportance is None: continue _LOGGER.info(message) if str(m.messageImportance) == 'JOB_MESSAGE_ERROR': if rank_error(m.messageText) >= last_error_rank: last_error_rank = rank_error(m.messageText) last_error_msg = m.messageText if not page_token: break if duration: passed_secs = time.time() - start_secs if passed_secs > duration_secs: _LOGGER.warning( 'Timing out on waiting for job %s after %d seconds', job_id, passed_secs) break result._job = response runner.last_error_msg = last_error_msg @staticmethod def _only_element(iterable): # type: (Iterable[T]) -> T element, = iterable return element @staticmethod def group_by_key_input_visitor(): # Imported here to avoid circular dependencies. from apache_beam.pipeline import PipelineVisitor class GroupByKeyInputVisitor(PipelineVisitor): """A visitor that replaces `Any` element type for input `PCollection` of a `GroupByKey` with a `KV` type. TODO(BEAM-115): Once Python SDk is compatible with the new Runner API, we could directly replace the coder instead of mutating the element type. """ def enter_composite_transform(self, transform_node): self.visit_transform(transform_node) def visit_transform(self, transform_node): # Imported here to avoid circular dependencies. 
# pylint: disable=wrong-import-order, wrong-import-position from apache_beam.transforms.core import GroupByKey if isinstance(transform_node.transform, GroupByKey): pcoll = transform_node.inputs[0] pcoll.element_type = typehints.coerce_to_kv_type( pcoll.element_type, transform_node.full_label) key_type, value_type = pcoll.element_type.tuple_types if transform_node.outputs: key = DataflowRunner._only_element(transform_node.outputs.keys()) transform_node.outputs[key].element_type = typehints.KV[ key_type, typehints.Iterable[value_type]] return GroupByKeyInputVisitor() @staticmethod def _set_pdone_visitor(pipeline): # Imported here to avoid circular dependencies. from apache_beam.pipeline import PipelineVisitor class SetPDoneVisitor(PipelineVisitor): def __init__(self, pipeline): self._pipeline = pipeline @staticmethod def _maybe_fix_output(transform_node, pipeline): if not transform_node.outputs: pval = pvalue.PDone(pipeline) pval.producer = transform_node transform_node.outputs = {None: pval} def enter_composite_transform(self, transform_node): SetPDoneVisitor._maybe_fix_output(transform_node, self._pipeline) def visit_transform(self, transform_node): SetPDoneVisitor._maybe_fix_output(transform_node, self._pipeline) return SetPDoneVisitor(pipeline) @staticmethod def side_input_visitor(use_unified_worker=False, use_fn_api=False): # Imported here to avoid circular dependencies. # pylint: disable=wrong-import-order, wrong-import-position from apache_beam.pipeline import PipelineVisitor from apache_beam.transforms.core import ParDo class SideInputVisitor(PipelineVisitor): """Ensures input `PCollection` used as a side inputs has a `KV` type. TODO(BEAM-115): Once Python SDK is compatible with the new Runner API, we could directly replace the coder instead of mutating the element type. """ def visit_transform(self, transform_node): if isinstance(transform_node.transform, ParDo): new_side_inputs = [] for ix, side_input in enumerate(transform_node.side_inputs): access_pattern = side_input._side_input_data().access_pattern if access_pattern == common_urns.side_inputs.ITERABLE.urn: if use_unified_worker or not use_fn_api: # TODO(BEAM-9173): Stop patching up the access pattern to # appease Dataflow when using the UW and hardcode the output # type to be Any since the Dataflow JSON and pipeline proto # can differ in coders which leads to encoding/decoding issues # within the runner. side_input.pvalue.element_type = typehints.Any new_side_input = _DataflowIterableSideInput(side_input) else: # Add a map to ('', value) as Dataflow currently only handles # keyed side inputs when using the JRH. pipeline = side_input.pvalue.pipeline new_side_input = _DataflowIterableAsMultimapSideInput( side_input) new_side_input.pvalue = beam.pvalue.PCollection( pipeline, element_type=typehints.KV[bytes, side_input.pvalue.element_type], is_bounded=side_input.pvalue.is_bounded) parent = transform_node.parent or pipeline._root_transform() map_to_void_key = beam.pipeline.AppliedPTransform( pipeline, beam.Map(lambda x: (b'', x)), transform_node.full_label + '/MapToVoidKey%s' % ix, (side_input.pvalue, )) new_side_input.pvalue.producer = map_to_void_key map_to_void_key.add_output(new_side_input.pvalue, None) parent.add_part(map_to_void_key) elif access_pattern == common_urns.side_inputs.MULTIMAP.urn: # Ensure the input coder is a KV coder and patch up the # access pattern to appease Dataflow. 
side_input.pvalue.element_type = typehints.coerce_to_kv_type( side_input.pvalue.element_type, transform_node.full_label) new_side_input = _DataflowMultimapSideInput(side_input) else: raise ValueError( 'Unsupported access pattern for %r: %r' % (transform_node.full_label, access_pattern)) new_side_inputs.append(new_side_input) if use_fn_api: transform_node.side_inputs = new_side_inputs transform_node.transform.side_inputs = new_side_inputs return SideInputVisitor() @staticmethod def flatten_input_visitor(): # Imported here to avoid circular dependencies. from apache_beam.pipeline import PipelineVisitor class FlattenInputVisitor(PipelineVisitor): """A visitor that replaces the element type for input ``PCollections``s of a ``Flatten`` transform with that of the output ``PCollection``. """ def visit_transform(self, transform_node): # Imported here to avoid circular dependencies. # pylint: disable=wrong-import-order, wrong-import-position from apache_beam import Flatten if isinstance(transform_node.transform, Flatten): output_pcoll = DataflowRunner._only_element( transform_node.outputs.values()) for input_pcoll in transform_node.inputs: input_pcoll.element_type = output_pcoll.element_type return FlattenInputVisitor() def _check_for_unsupported_fnapi_features(self, pipeline_proto): components = pipeline_proto.components for windowing_strategy in components.windowing_strategies.values(): if (windowing_strategy.merge_status == beam_runner_api_pb2.MergeStatus.NEEDS_MERGE and windowing_strategy.window_fn.urn not in ( common_urns.session_windows.urn, )): raise RuntimeError( 'Unsupported merging windowing strategy: %s' % windowing_strategy.window_fn.urn) elif components.coders[ windowing_strategy.window_coder_id].spec.urn not in ( common_urns.coders.GLOBAL_WINDOW.urn, common_urns.coders.INTERVAL_WINDOW.urn): raise RuntimeError( 'Unsupported window coder %s for window fn %s' % ( components.coders[windowing_strategy.window_coder_id].spec.urn, windowing_strategy.window_fn.urn)) def _adjust_pipeline_for_dataflow_v2(self, pipeline): # Dataflow runner requires a KV type for GBK inputs, hence we enforce that # here. pipeline.visit(self.group_by_key_input_visitor()) def run_pipeline(self, pipeline, options): """Remotely executes entire pipeline or parts reachable from node.""" # Label goog-dataflow-notebook if job is started from notebook. if is_in_notebook(): notebook_version = ( 'goog-dataflow-notebook=' + beam.version.__version__.replace('.', '_')) if options.view_as(GoogleCloudOptions).labels: options.view_as(GoogleCloudOptions).labels.append(notebook_version) else: options.view_as(GoogleCloudOptions).labels = [notebook_version] # Import here to avoid adding the dependency for local running scenarios. try: # pylint: disable=wrong-import-order, wrong-import-position from apache_beam.runners.dataflow.internal import apiclient except ImportError: raise ImportError( 'Google Cloud Dataflow runner not available, ' 'please install apache_beam[gcp]') self._maybe_add_unified_worker_missing_options(options) # Convert all side inputs into a form acceptable to Dataflow. pipeline.visit( self.side_input_visitor( apiclient._use_unified_worker(options), apiclient._use_fnapi(options))) # Performing configured PTransform overrides. Note that this is currently # done before Runner API serialization, since the new proto needs to contain # any added PTransforms. 
pipeline.replace_all(DataflowRunner._PTRANSFORM_OVERRIDES) from apache_beam.runners.dataflow.ptransform_overrides import WriteToBigQueryPTransformOverride pipeline.replace_all([WriteToBigQueryPTransformOverride(pipeline, options)]) if (apiclient._use_fnapi(options) and not apiclient._use_unified_worker(options)): pipeline.replace_all(DataflowRunner._JRH_PTRANSFORM_OVERRIDES) use_fnapi = apiclient._use_fnapi(options) from apache_beam.transforms import environments self._default_environment = ( environments.DockerEnvironment.from_container_image( apiclient.get_container_image_from_options(options), artifacts=environments.python_sdk_dependencies(options))) # This has to be performed before pipeline proto is constructed to make sure # that the changes are reflected in the portable job submission path. self._adjust_pipeline_for_dataflow_v2(pipeline) # Snapshot the pipeline in a portable proto. self.proto_pipeline, self.proto_context = pipeline.to_runner_api( return_context=True, default_environment=self._default_environment) if use_fnapi: self._check_for_unsupported_fnapi_features(self.proto_pipeline) # Cross language transform require using a pipeline object constructed # from the full pipeline proto to make sure that expanded version of # external transforms are reflected in the Pipeline job graph. # TODO(chamikara): remove following pipeline and pipeline proto recreation # after portable job submission path is fully in place. from apache_beam import Pipeline pipeline = Pipeline.from_runner_api( self.proto_pipeline, pipeline.runner, options, allow_proto_holders=True) # Pipelines generated from proto do not have output set to PDone set for # leaf elements. pipeline.visit(self._set_pdone_visitor(pipeline)) # We need to generate a new context that maps to the new pipeline object. self.proto_pipeline, self.proto_context = pipeline.to_runner_api( return_context=True, default_environment=self._default_environment) else: # Performing configured PTransform overrides which should not be reflected # in the proto representation of the graph. pipeline.replace_all(DataflowRunner._NON_PORTABLE_PTRANSFORM_OVERRIDES) # Add setup_options for all the BeamPlugin imports setup_options = options.view_as(SetupOptions) plugins = BeamPlugin.get_all_plugin_paths() if setup_options.beam_plugins is not None: plugins = list(set(plugins + setup_options.beam_plugins)) setup_options.beam_plugins = plugins # Elevate "min_cpu_platform" to pipeline option, but using the existing # experiment. debug_options = options.view_as(DebugOptions) worker_options = options.view_as(WorkerOptions) if worker_options.min_cpu_platform: debug_options.add_experiment( 'min_cpu_platform=' + worker_options.min_cpu_platform) # Elevate "enable_streaming_engine" to pipeline option, but using the # existing experiment. 
google_cloud_options = options.view_as(GoogleCloudOptions) if google_cloud_options.enable_streaming_engine: debug_options.add_experiment("enable_windmill_service") debug_options.add_experiment("enable_streaming_engine") elif (apiclient._use_fnapi(options) and apiclient._use_unified_worker(options) and options.view_as(StandardOptions).streaming): debug_options.add_experiment("enable_windmill_service") debug_options.add_experiment("enable_streaming_engine") else: if (debug_options.lookup_experiment("enable_windmill_service") or debug_options.lookup_experiment("enable_streaming_engine")): raise ValueError( """Streaming engine both disabled and enabled: --enable_streaming_engine flag is not set, but enable_windmill_service and/or enable_streaming_engine experiments are present. It is recommended you only set the --enable_streaming_engine flag.""") dataflow_worker_jar = getattr(worker_options, 'dataflow_worker_jar', None) if dataflow_worker_jar is not None: if not apiclient._use_fnapi(options): _LOGGER.warning( 'Typical end users should not use this worker jar feature. ' 'It can only be used when FnAPI is enabled.') else: debug_options.add_experiment('use_staged_dataflow_worker_jar') # Make Dataflow workers use FastAvro on Python 3 unless use_avro experiment # is set. Note that use_avro is only interpreted by the Dataflow runner # at job submission and is not interpreted by Dataflow service or workers, # which by default use avro library unless use_fastavro experiment is set. if sys.version_info[0] > 2 and ( not debug_options.lookup_experiment('use_avro')): debug_options.add_experiment('use_fastavro') self.job = apiclient.Job(options, self.proto_pipeline) # Dataflow Runner v1 requires output type of the Flatten to be the same as # the inputs, hence we enforce that here. Dataflow Runner v2 does not # require this. pipeline.visit(self.flatten_input_visitor()) # Trigger a traversal of all reachable nodes. self.visit_transforms(pipeline, options) test_options = options.view_as(TestOptions) # If it is a dry run, return without submitting the job. if test_options.dry_run: result = PipelineResult(PipelineState.DONE) result.wait_until_finish = lambda duration=None: None return result # Get a Dataflow API client and set its options self.dataflow_client = apiclient.DataflowApplicationClient(options) # Create the job description and send a request to the service. The result # can be None if there is no need to send a request to the service (e.g. # template creation). If a request was sent and failed then the call will # raise an exception. result = DataflowPipelineResult( self.dataflow_client.create_job(self.job), self) # TODO(BEAM-4274): Circular import runners-metrics. Requires refactoring. from apache_beam.runners.dataflow.dataflow_metrics import DataflowMetrics self._metrics = DataflowMetrics(self.dataflow_client, result, self.job) result.metric_results = self._metrics return result def _maybe_add_unified_worker_missing_options(self, options): # set default beam_fn_api experiment if use unified # worker experiment flag exists, no-op otherwise. 
debug_options = options.view_as(DebugOptions) from apache_beam.runners.dataflow.internal import apiclient if apiclient._use_unified_worker(options): if not debug_options.lookup_experiment('beam_fn_api'): debug_options.add_experiment('beam_fn_api') def _get_typehint_based_encoding(self, typehint, window_coder): """Returns an encoding based on a typehint object.""" return self._get_cloud_encoding( self._get_coder(typehint, window_coder=window_coder)) @staticmethod def _get_coder(typehint, window_coder): """Returns a coder based on a typehint object.""" if window_coder: return coders.WindowedValueCoder( coders.registry.get_coder(typehint), window_coder=window_coder) return coders.registry.get_coder(typehint) def _get_cloud_encoding(self, coder, unused=None): """Returns an encoding based on a coder object.""" if not isinstance(coder, coders.Coder): raise TypeError( 'Coder object must inherit from coders.Coder: %s.' % str(coder)) return coder.as_cloud_object(self.proto_context.coders) def _get_side_input_encoding(self, input_encoding): """Returns an encoding for the output of a view transform. Args: input_encoding: encoding of current transform's input. Side inputs need this because the service will check that input and output types match. Returns: An encoding that matches the output and input encoding. This is essential for the View transforms introduced to produce side inputs to a ParDo. """ return { '@type': 'kind:stream', 'component_encodings': [input_encoding], 'is_stream_like': { 'value': True }, } def _get_encoded_output_coder( self, transform_node, window_value=True, output_tag=None): """Returns the cloud encoding of the coder for the output of a transform.""" is_external_transform = isinstance( transform_node.transform, RunnerAPIPTransformHolder) if output_tag in transform_node.outputs: element_type = transform_node.outputs[output_tag].element_type elif len(transform_node.outputs) == 1: output_tag = DataflowRunner._only_element(transform_node.outputs.keys()) # TODO(robertwb): Handle type hints for multi-output transforms. element_type = transform_node.outputs[output_tag].element_type elif is_external_transform: raise ValueError( 'For external transforms, output_tag must be specified ' 'since we cannot fallback to a Python only coder.') else: # TODO(silviuc): Remove this branch (and assert) when typehints are # propagated everywhere. Returning an 'Any' as type hint will trigger # usage of the fallback coder (i.e., cPickler). element_type = typehints.Any if window_value: # All outputs have the same windowing. So getting the coder from an # arbitrary window is fine. output_tag = next(iter(transform_node.outputs.keys())) window_coder = ( transform_node.outputs[output_tag].windowing.windowfn. get_window_coder()) else: window_coder = None return self._get_typehint_based_encoding(element_type, window_coder) def _add_step(self, step_kind, step_label, transform_node, side_tags=()): """Creates a Step object and adds it to the cache.""" # Import here to avoid adding the dependency for local running scenarios. # pylint: disable=wrong-import-order, wrong-import-position from apache_beam.runners.dataflow.internal import apiclient step = apiclient.Step(step_kind, self._get_unique_step_name()) self.job.proto.steps.append(step.proto) step.add_property(PropertyNames.USER_NAME, step_label) # Cache the node/step association for the main output of the transform node. # Main output key of external transforms can be ambiguous, so we only tag if # there's only one tag instead of None. 
output_tag = ( DataflowRunner._only_element(transform_node.outputs.keys()) if len( transform_node.outputs.keys()) == 1 else None) self._cache.cache_output(transform_node, output_tag, step) # If side_tags is not () then this is a multi-output transform node and we # need to cache the (node, tag, step) for each of the tags used to access # the outputs. This is essential because the keys used to search in the # cache always contain the tag. for tag in side_tags: self._cache.cache_output(transform_node, tag, step) # Finally, we add the display data items to the pipeline step. # If the transform contains no display data then an empty list is added. step.add_property( PropertyNames.DISPLAY_DATA, [ item.get_dict() for item in DisplayData.create_from(transform_node.transform).items ]) return step def _add_singleton_step( self, label, full_label, tag, input_step, windowing_strategy, access_pattern): """Creates a CollectionToSingleton step used to handle ParDo side inputs.""" # Import here to avoid adding the dependency for local running scenarios. from apache_beam.runners.dataflow.internal import apiclient step = apiclient.Step(TransformNames.COLLECTION_TO_SINGLETON, label) self.job.proto.steps.append(step.proto) step.add_property(PropertyNames.USER_NAME, full_label) step.add_property( PropertyNames.PARALLEL_INPUT, { '@type': 'OutputReference', PropertyNames.STEP_NAME: input_step.proto.name, PropertyNames.OUTPUT_NAME: input_step.get_output(tag) }) step.encoding = self._get_side_input_encoding(input_step.encoding) output_info = { PropertyNames.USER_NAME: '%s.%s' % (full_label, PropertyNames.OUTPUT), PropertyNames.ENCODING: step.encoding, PropertyNames.OUTPUT_NAME: PropertyNames.OUT } if common_urns.side_inputs.MULTIMAP.urn == access_pattern: output_info[PropertyNames.USE_INDEXED_FORMAT] = True step.add_property(PropertyNames.OUTPUT_INFO, [output_info]) step.add_property( PropertyNames.WINDOWING_STRATEGY, self.serialize_windowing_strategy( windowing_strategy, self._default_environment)) return step def run_Impulse(self, transform_node, options): standard_options = options.view_as(StandardOptions) debug_options = options.view_as(DebugOptions) use_fn_api = ( debug_options.experiments and 'beam_fn_api' in debug_options.experiments) use_streaming_engine = ( debug_options.experiments and 'enable_streaming_engine' in debug_options.experiments and 'enable_windmill_service' in debug_options.experiments) step = self._add_step( TransformNames.READ, transform_node.full_label, transform_node) if (standard_options.streaming and (not use_fn_api or not use_streaming_engine)): step.add_property(PropertyNames.FORMAT, 'pubsub') step.add_property(PropertyNames.PUBSUB_SUBSCRIPTION, '_starting_signal/') else: step.add_property(PropertyNames.FORMAT, 'impulse') encoded_impulse_element = coders.WindowedValueCoder( coders.BytesCoder(), coders.coders.GlobalWindowCoder()).get_impl().encode_nested( window.GlobalWindows.windowed_value(b'')) if use_fn_api: encoded_impulse_as_str = self.byte_array_to_json_string( encoded_impulse_element) else: encoded_impulse_as_str = base64.b64encode( encoded_impulse_element).decode('ascii') step.add_property(PropertyNames.IMPULSE_ELEMENT, encoded_impulse_as_str) step.encoding = self._get_encoded_output_coder(transform_node) step.add_property( PropertyNames.OUTPUT_INFO, [{ PropertyNames.USER_NAME: ( '%s.%s' % (transform_node.full_label, PropertyNames.OUT)), PropertyNames.ENCODING: step.encoding, PropertyNames.OUTPUT_NAME: PropertyNames.OUT }]) def run_Flatten(self, transform_node, options): step 
= self._add_step( TransformNames.FLATTEN, transform_node.full_label, transform_node) inputs = [] for one_input in transform_node.inputs: input_step = self._cache.get_pvalue(one_input) inputs.append({ '@type': 'OutputReference', PropertyNames.STEP_NAME: input_step.proto.name, PropertyNames.OUTPUT_NAME: input_step.get_output(one_input.tag) }) step.add_property(PropertyNames.INPUTS, inputs) step.encoding = self._get_encoded_output_coder(transform_node) step.add_property( PropertyNames.OUTPUT_INFO, [{ PropertyNames.USER_NAME: ( '%s.%s' % (transform_node.full_label, PropertyNames.OUT)), PropertyNames.ENCODING: step.encoding, PropertyNames.OUTPUT_NAME: PropertyNames.OUT }]) # TODO(srohde): Remove this after internal usages have been removed. def apply_GroupByKey(self, transform, pcoll, options): return transform.expand(pcoll) def _verify_gbk_coders(self, transform, pcoll): # Infer coder of parent. # # TODO(ccy): make Coder inference and checking less specialized and more # comprehensive. parent = pcoll.producer if parent: # Skip the check because we can assume that any x-lang transform is # properly formed (the onus is on the expansion service to construct # transforms correctly). if isinstance(parent.transform, RunnerAPIPTransformHolder): return coder = parent.transform._infer_output_coder() # pylint: disable=protected-access if not coder: coder = self._get_coder(pcoll.element_type or typehints.Any, None) if not coder.is_kv_coder(): raise ValueError(( 'Coder for the GroupByKey operation "%s" is not a ' 'key-value coder: %s.') % (transform.label, coder)) # TODO(robertwb): Update the coder itself if it changed. coders.registry.verify_deterministic( coder.key_coder(), 'GroupByKey operation "%s"' % transform.label) def run_GroupByKey(self, transform_node, options): input_tag = transform_node.inputs[0].tag input_step = self._cache.get_pvalue(transform_node.inputs[0]) # Verify that the GBK's parent has a KV coder. self._verify_gbk_coders(transform_node.transform, transform_node.inputs[0]) step = self._add_step( TransformNames.GROUP, transform_node.full_label, transform_node) step.add_property( PropertyNames.PARALLEL_INPUT, { '@type': 'OutputReference', PropertyNames.STEP_NAME: input_step.proto.name, PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag) }) step.encoding = self._get_encoded_output_coder(transform_node) step.add_property( PropertyNames.OUTPUT_INFO, [{ PropertyNames.USER_NAME: ( '%s.%s' % (transform_node.full_label, PropertyNames.OUT)), PropertyNames.ENCODING: step.encoding, PropertyNames.OUTPUT_NAME: PropertyNames.OUT }]) windowing = transform_node.transform.get_windowing(transform_node.inputs) step.add_property( PropertyNames.SERIALIZED_FN, self.serialize_windowing_strategy(windowing, self._default_environment)) def run_RunnerAPIPTransformHolder(self, transform_node, options): """Adding Dataflow runner job description for transform holder objects. These holder transform objects are generated for some of the transforms that become available after a cross-language transform expansion, usually if the corresponding transform object cannot be generated in Python SDK (for example, a python `ParDo` transform cannot be generated without a serialized Python `DoFn` object). """ urn = transform_node.transform.proto().urn assert urn # TODO(chamikara): support other transforms that requires holder objects in # Python SDk. 
if common_urns.primitives.PAR_DO.urn == urn: self.run_ParDo(transform_node, options) else: raise NotImplementedError( '%s uses unsupported URN: %s' % (transform_node.full_label, urn)) def run_ParDo(self, transform_node, options): transform = transform_node.transform input_tag = transform_node.inputs[0].tag input_step = self._cache.get_pvalue(transform_node.inputs[0]) is_external_transform = isinstance(transform, RunnerAPIPTransformHolder) # Attach side inputs. si_dict = {} all_input_labels = transform_node.input_tags_to_preserve si_labels = {} full_label_counts = defaultdict(int) lookup_label = lambda side_pval: si_labels[side_pval] named_inputs = transform_node.named_inputs() label_renames = {} for ix, side_pval in enumerate(transform_node.side_inputs): assert isinstance(side_pval, AsSideInput) step_name = 'SideInput-' + self._get_unique_step_name() si_label = ((SIDE_INPUT_PREFIX + '%d-%s') % (ix, transform_node.full_label) if side_pval.pvalue not in all_input_labels else all_input_labels[side_pval.pvalue]) old_label = (SIDE_INPUT_PREFIX + '%d') % ix if not is_external_transform: label_renames[old_label] = si_label assert old_label in named_inputs pcollection_label = '%s.%s' % ( side_pval.pvalue.producer.full_label.split('/')[-1], side_pval.pvalue.tag if side_pval.pvalue.tag else 'out') si_full_label = '%s/%s(%s.%s)' % ( transform_node.full_label, side_pval.__class__.__name__, pcollection_label, full_label_counts[pcollection_label]) # Count the number of times the same PCollection is a side input # to the same ParDo. full_label_counts[pcollection_label] += 1 self._add_singleton_step( step_name, si_full_label, side_pval.pvalue.tag, self._cache.get_pvalue(side_pval.pvalue), side_pval.pvalue.windowing, side_pval._side_input_data().access_pattern) si_dict[si_label] = { '@type': 'OutputReference', PropertyNames.STEP_NAME: step_name, PropertyNames.OUTPUT_NAME: PropertyNames.OUT } si_labels[side_pval] = si_label # Now create the step for the ParDo transform being handled. transform_name = transform_node.full_label.rsplit('/', 1)[-1] step = self._add_step( TransformNames.DO, transform_node.full_label + ('/{}'.format(transform_name) if transform_node.side_inputs else ''), transform_node, transform_node.transform.output_tags) # Import here to avoid adding the dependency for local running scenarios. # pylint: disable=wrong-import-order, wrong-import-position from apache_beam.runners.dataflow.internal import apiclient transform_proto = self.proto_context.transforms.get_proto(transform_node) transform_id = self.proto_context.transforms.get_id(transform_node) use_fnapi = apiclient._use_fnapi(options) use_unified_worker = apiclient._use_unified_worker(options) # Patch side input ids to be unique across a given pipeline. if (label_renames and transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn): # Patch PTransform proto. for old, new in iteritems(label_renames): transform_proto.inputs[new] = transform_proto.inputs[old] del transform_proto.inputs[old] # Patch ParDo proto. proto_type, _ = beam.PTransform._known_urns[transform_proto.spec.urn] proto = proto_utils.parse_Bytes(transform_proto.spec.payload, proto_type) for old, new in iteritems(label_renames): proto.side_inputs[new].CopyFrom(proto.side_inputs[old]) del proto.side_inputs[old] transform_proto.spec.payload = proto.SerializeToString() # We need to update the pipeline proto. 
      del self.proto_pipeline.components.transforms[transform_id]
      (
          self.proto_pipeline.components.transforms[transform_id].CopyFrom(
              transform_proto))

    # The data transmitted in SERIALIZED_FN is different depending on whether
    # this is a fnapi pipeline or not.
    if (use_fnapi and
        (transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn or
         use_unified_worker)):
      serialized_data = transform_id
    else:
      serialized_data = pickler.dumps(
          self._pardo_fn_data(transform_node, lookup_label))
    step.add_property(PropertyNames.SERIALIZED_FN, serialized_data)
    # TODO(BEAM-8882): Enable once dataflow service doesn't reject this.
    # step.add_property(PropertyNames.PIPELINE_PROTO_TRANSFORM_ID, transform_id)
    step.add_property(
        PropertyNames.PARALLEL_INPUT,
        {
            '@type': 'OutputReference',
            PropertyNames.STEP_NAME: input_step.proto.name,
            PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)
        })
    # Add side inputs if any.
    step.add_property(PropertyNames.NON_PARALLEL_INPUTS, si_dict)

    # Generate description for the outputs. The output names
    # will be 'None' for main output and '<tag>' for a tagged output.
    outputs = []

    all_output_tags = list(transform_proto.outputs.keys())

    # Some external transforms require output tags to not be modified.
    # So we randomly select one of the output tags as the main output and
    # leave others as side outputs. Transform execution should not change
    # depending on which output tag we choose as the main output here.
    # Also, some SDKs do not work correctly if output tags are modified. So for
    # external transforms, we leave tags unmodified.
    #
    # Python SDK uses 'None' as the tag of the main output.
    main_output_tag = (all_output_tags[0] if is_external_transform else 'None')

    step.encoding = self._get_encoded_output_coder(
        transform_node, output_tag=main_output_tag)

    side_output_tags = set(all_output_tags).difference({main_output_tag})

    # Add the main output to the description.
    outputs.append({
        PropertyNames.USER_NAME: (
            '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
        PropertyNames.ENCODING: step.encoding,
        PropertyNames.OUTPUT_NAME: main_output_tag
    })
    for side_tag in side_output_tags:
      # The assumption here is that all outputs will have the same typehint
      # and coder as the main output. This is certainly the case right now
      # but conceivably it could change in the future.
      encoding = self._get_encoded_output_coder(
          transform_node, output_tag=side_tag)
      outputs.append({
          PropertyNames.USER_NAME: (
              '%s.%s' % (transform_node.full_label, side_tag)),
          PropertyNames.ENCODING: encoding,
          PropertyNames.OUTPUT_NAME: side_tag
      })
    step.add_property(PropertyNames.OUTPUT_INFO, outputs)

    # Add the restriction encoding if we are a splittable DoFn
    # and are using the Fn API on the unified worker.
restriction_coder = transform.get_restriction_coder() if restriction_coder: step.add_property( PropertyNames.RESTRICTION_ENCODING, self._get_cloud_encoding(restriction_coder)) if options.view_as(StandardOptions).streaming: is_stateful_dofn = ( transform.is_pardo_with_stateful_dofn if is_external_transform else DoFnSignature(transform.dofn).is_stateful_dofn()) if is_stateful_dofn: step.add_property(PropertyNames.USES_KEYED_STATE, 'true') @staticmethod def _pardo_fn_data(transform_node, get_label): transform = transform_node.transform si_tags_and_types = [ # pylint: disable=protected-access (get_label(side_pval), side_pval.__class__, side_pval._view_options()) for side_pval in transform_node.side_inputs] return ( transform.fn, transform.args, transform.kwargs, si_tags_and_types, transform_node.inputs[0].windowing) def run_CombineValuesReplacement(self, transform_node, options): transform = transform_node.transform.transform input_tag = transform_node.inputs[0].tag input_step = self._cache.get_pvalue(transform_node.inputs[0]) step = self._add_step( TransformNames.COMBINE, transform_node.full_label, transform_node) transform_id = self.proto_context.transforms.get_id(transform_node.parent) # The data transmitted in SERIALIZED_FN is different depending on whether # this is a fnapi pipeline or not. from apache_beam.runners.dataflow.internal import apiclient use_fnapi = apiclient._use_fnapi(options) if use_fnapi: # Fnapi pipelines send the transform ID of the CombineValues transform's # parent composite because Dataflow expects the ID of a CombinePerKey # transform. serialized_data = transform_id else: # Combiner functions do not take deferred side-inputs (i.e. PValues) and # therefore the code to handle extra args/kwargs is simpler than for the # DoFn's of the ParDo transform. In the last, empty argument is where # side inputs information would go. serialized_data = pickler.dumps( (transform.fn, transform.args, transform.kwargs, ())) step.add_property(PropertyNames.SERIALIZED_FN, serialized_data) # TODO(BEAM-8882): Enable once dataflow service doesn't reject this. # step.add_property(PropertyNames.PIPELINE_PROTO_TRANSFORM_ID, transform_id) step.add_property( PropertyNames.PARALLEL_INPUT, { '@type': 'OutputReference', PropertyNames.STEP_NAME: input_step.proto.name, PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag) }) # Note that the accumulator must not have a WindowedValue encoding, while # the output of this step does in fact have a WindowedValue encoding. accumulator_encoding = self._get_cloud_encoding( transform.fn.get_accumulator_coder()) output_encoding = self._get_encoded_output_coder(transform_node) step.encoding = output_encoding step.add_property(PropertyNames.ENCODING, accumulator_encoding) # Generate description for main output 'out.' outputs = [] # Add the main output to the description. outputs.append({ PropertyNames.USER_NAME: ( '%s.%s' % (transform_node.full_label, PropertyNames.OUT)), PropertyNames.ENCODING: step.encoding, PropertyNames.OUTPUT_NAME: PropertyNames.OUT }) step.add_property(PropertyNames.OUTPUT_INFO, outputs) def run_Read(self, transform_node, options): transform = transform_node.transform step = self._add_step( TransformNames.READ, transform_node.full_label, transform_node) # TODO(mairbek): refactor if-else tree to use registerable functions. # Initialize the source specific properties. standard_options = options.view_as(StandardOptions) if not hasattr(transform.source, 'format'): # If a format is not set, we assume the source to be a custom source. 
source_dict = {} source_dict['spec'] = { '@type': names.SOURCE_TYPE, names.SERIALIZED_SOURCE_KEY: pickler.dumps(transform.source) } try: source_dict['metadata'] = { 'estimated_size_bytes': json_value.get_typed_value_descriptor( transform.source.estimate_size()) } except error.RuntimeValueProviderError: # Size estimation is best effort, and this error is by value provider. _LOGGER.info( 'Could not estimate size of source %r due to ' + \ 'RuntimeValueProviderError', transform.source) except Exception: # pylint: disable=broad-except # Size estimation is best effort. So we log the error and continue. _LOGGER.info( 'Could not estimate size of source %r due to an exception: %s', transform.source, traceback.format_exc()) step.add_property(PropertyNames.SOURCE_STEP_INPUT, source_dict) elif transform.source.format == 'text': step.add_property(PropertyNames.FILE_PATTERN, transform.source.path) elif transform.source.format == 'bigquery': if standard_options.streaming: raise ValueError( 'BigQuery source is not currently available for use ' 'in streaming pipelines.') debug_options = options.view_as(DebugOptions) use_fn_api = ( debug_options.experiments and 'beam_fn_api' in debug_options.experiments) if use_fn_api: raise ValueError(BQ_SOURCE_UW_ERROR) step.add_property(PropertyNames.BIGQUERY_EXPORT_FORMAT, 'FORMAT_AVRO') # TODO(silviuc): Add table validation if transform.source.validate. if transform.source.table_reference is not None: step.add_property( PropertyNames.BIGQUERY_DATASET, transform.source.table_reference.datasetId) step.add_property( PropertyNames.BIGQUERY_TABLE, transform.source.table_reference.tableId) # If project owning the table was not specified then the project owning # the workflow (current project) will be used. if transform.source.table_reference.projectId is not None: step.add_property( PropertyNames.BIGQUERY_PROJECT, transform.source.table_reference.projectId) elif transform.source.query is not None: step.add_property(PropertyNames.BIGQUERY_QUERY, transform.source.query) step.add_property( PropertyNames.BIGQUERY_USE_LEGACY_SQL, transform.source.use_legacy_sql) step.add_property( PropertyNames.BIGQUERY_FLATTEN_RESULTS, transform.source.flatten_results) else: raise ValueError( 'BigQuery source %r must specify either a table or' ' a query' % transform.source) if transform.source.kms_key is not None: step.add_property( PropertyNames.BIGQUERY_KMS_KEY, transform.source.kms_key) elif transform.source.format == 'pubsub': if not standard_options.streaming: raise ValueError( 'Cloud Pub/Sub is currently available for use ' 'only in streaming pipelines.') # Only one of topic or subscription should be set. if transform.source.full_subscription: step.add_property( PropertyNames.PUBSUB_SUBSCRIPTION, transform.source.full_subscription) elif transform.source.full_topic: step.add_property( PropertyNames.PUBSUB_TOPIC, transform.source.full_topic) if transform.source.id_label: step.add_property( PropertyNames.PUBSUB_ID_LABEL, transform.source.id_label) if transform.source.with_attributes: # Setting this property signals Dataflow runner to return full # PubsubMessages instead of just the data part of the payload. step.add_property(PropertyNames.PUBSUB_SERIALIZED_ATTRIBUTES_FN, '') if transform.source.timestamp_attribute is not None: step.add_property( PropertyNames.PUBSUB_TIMESTAMP_ATTRIBUTE, transform.source.timestamp_attribute) else: raise ValueError( 'Source %r has unexpected format %s.' 
% (transform.source, transform.source.format)) if not hasattr(transform.source, 'format'): step.add_property(PropertyNames.FORMAT, names.SOURCE_FORMAT) else: step.add_property(PropertyNames.FORMAT, transform.source.format) # Wrap coder in WindowedValueCoder: this is necessary as the encoding of a # step should be the type of value outputted by each step. Read steps # automatically wrap output values in a WindowedValue wrapper, if necessary. # This is also necessary for proper encoding for size estimation. # Using a GlobalWindowCoder as a place holder instead of the default # PickleCoder because GlobalWindowCoder is known coder. # TODO(robertwb): Query the collection for the windowfn to extract the # correct coder. coder = coders.WindowedValueCoder( coders.registry.get_coder(transform_node.outputs[None].element_type), coders.coders.GlobalWindowCoder()) step.encoding = self._get_cloud_encoding(coder) step.add_property( PropertyNames.OUTPUT_INFO, [{ PropertyNames.USER_NAME: ( '%s.%s' % (transform_node.full_label, PropertyNames.OUT)), PropertyNames.ENCODING: step.encoding, PropertyNames.OUTPUT_NAME: PropertyNames.OUT }]) def run__NativeWrite(self, transform_node, options): transform = transform_node.transform input_tag = transform_node.inputs[0].tag input_step = self._cache.get_pvalue(transform_node.inputs[0]) step = self._add_step( TransformNames.WRITE, transform_node.full_label, transform_node) # TODO(mairbek): refactor if-else tree to use registerable functions. # Initialize the sink specific properties. if transform.sink.format == 'text': # Note that it is important to use typed properties (@type/value dicts) # for non-string properties and also for empty strings. For example, # in the code below the num_shards must have type and also # file_name_suffix and shard_name_template (could be empty strings). step.add_property( PropertyNames.FILE_NAME_PREFIX, transform.sink.file_name_prefix, with_type=True) step.add_property( PropertyNames.FILE_NAME_SUFFIX, transform.sink.file_name_suffix, with_type=True) step.add_property( PropertyNames.SHARD_NAME_TEMPLATE, transform.sink.shard_name_template, with_type=True) if transform.sink.num_shards > 0: step.add_property( PropertyNames.NUM_SHARDS, transform.sink.num_shards, with_type=True) # TODO(silviuc): Implement sink validation. step.add_property(PropertyNames.VALIDATE_SINK, False, with_type=True) elif transform.sink.format == 'bigquery': # TODO(silviuc): Add table validation if transform.sink.validate. step.add_property( PropertyNames.BIGQUERY_DATASET, transform.sink.table_reference.datasetId) step.add_property( PropertyNames.BIGQUERY_TABLE, transform.sink.table_reference.tableId) # If project owning the table was not specified then the project owning # the workflow (current project) will be used. 
if transform.sink.table_reference.projectId is not None: step.add_property( PropertyNames.BIGQUERY_PROJECT, transform.sink.table_reference.projectId) step.add_property( PropertyNames.BIGQUERY_CREATE_DISPOSITION, transform.sink.create_disposition) step.add_property( PropertyNames.BIGQUERY_WRITE_DISPOSITION, transform.sink.write_disposition) if transform.sink.table_schema is not None: step.add_property( PropertyNames.BIGQUERY_SCHEMA, transform.sink.schema_as_json()) if transform.sink.kms_key is not None: step.add_property( PropertyNames.BIGQUERY_KMS_KEY, transform.sink.kms_key) elif transform.sink.format == 'pubsub': standard_options = options.view_as(StandardOptions) if not standard_options.streaming: raise ValueError( 'Cloud Pub/Sub is currently available for use ' 'only in streaming pipelines.') step.add_property(PropertyNames.PUBSUB_TOPIC, transform.sink.full_topic) if transform.sink.id_label: step.add_property( PropertyNames.PUBSUB_ID_LABEL, transform.sink.id_label) if transform.sink.with_attributes: # Setting this property signals Dataflow runner that the PCollection # contains PubsubMessage objects instead of just raw data. step.add_property(PropertyNames.PUBSUB_SERIALIZED_ATTRIBUTES_FN, '') if transform.sink.timestamp_attribute is not None: step.add_property( PropertyNames.PUBSUB_TIMESTAMP_ATTRIBUTE, transform.sink.timestamp_attribute) else: raise ValueError( 'Sink %r has unexpected format %s.' % (transform.sink, transform.sink.format)) step.add_property(PropertyNames.FORMAT, transform.sink.format) # Wrap coder in WindowedValueCoder: this is necessary for proper encoding # for size estimation. Using a GlobalWindowCoder as a place holder instead # of the default PickleCoder because GlobalWindowCoder is known coder. # TODO(robertwb): Query the collection for the windowfn to extract the # correct coder. coder = coders.WindowedValueCoder( transform.sink.coder, coders.coders.GlobalWindowCoder()) step.encoding = self._get_cloud_encoding(coder) step.add_property(PropertyNames.ENCODING, step.encoding) step.add_property( PropertyNames.PARALLEL_INPUT, { '@type': 'OutputReference', PropertyNames.STEP_NAME: input_step.proto.name, PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag) }) def run_TestStream(self, transform_node, options): from apache_beam.testing.test_stream import ElementEvent from apache_beam.testing.test_stream import ProcessingTimeEvent from apache_beam.testing.test_stream import WatermarkEvent standard_options = options.view_as(StandardOptions) if not standard_options.streaming: raise ValueError( 'TestStream is currently available for use ' 'only in streaming pipelines.') transform = transform_node.transform step = self._add_step( TransformNames.READ, transform_node.full_label, transform_node) step.add_property( PropertyNames.SERIALIZED_FN, self.proto_context.transforms.get_id(transform_node)) step.add_property(PropertyNames.FORMAT, 'test_stream') test_stream_payload = beam_runner_api_pb2.TestStreamPayload() # TestStream source doesn't do any decoding of elements, # so we won't set test_stream_payload.coder_id. 
output_coder = transform._infer_output_coder() # pylint: disable=protected-access for event in transform._events: new_event = test_stream_payload.events.add() if isinstance(event, ElementEvent): for tv in event.timestamped_values: element = new_event.element_event.elements.add() element.encoded_element = output_coder.encode(tv.value) element.timestamp = tv.timestamp.micros elif isinstance(event, ProcessingTimeEvent): new_event.processing_time_event.advance_duration = ( event.advance_by.micros) elif isinstance(event, WatermarkEvent): new_event.watermark_event.new_watermark = event.new_watermark.micros serialized_payload = self.byte_array_to_json_string( test_stream_payload.SerializeToString()) step.add_property(PropertyNames.SERIALIZED_TEST_STREAM, serialized_payload) step.encoding = self._get_encoded_output_coder(transform_node) step.add_property( PropertyNames.OUTPUT_INFO, [{ PropertyNames.USER_NAME: ( '%s.%s' % (transform_node.full_label, PropertyNames.OUT)), PropertyNames.ENCODING: step.encoding, PropertyNames.OUTPUT_NAME: PropertyNames.OUT }]) # We must mark this method as not a test or else its name is a matcher for # nosetest tests. run_TestStream.__test__ = False # type: ignore[attr-defined] @classmethod def serialize_windowing_strategy(cls, windowing, default_environment): from apache_beam.runners import pipeline_context context = pipeline_context.PipelineContext( default_environment=default_environment) windowing_proto = windowing.to_runner_api(context) return cls.byte_array_to_json_string( beam_runner_api_pb2.MessageWithComponents( components=context.to_runner_api(), windowing_strategy=windowing_proto).SerializeToString()) @classmethod def deserialize_windowing_strategy(cls, serialized_data): # Imported here to avoid circular dependencies. # pylint: disable=wrong-import-order, wrong-import-position from apache_beam.runners import pipeline_context from apache_beam.transforms.core import Windowing proto = beam_runner_api_pb2.MessageWithComponents() proto.ParseFromString(cls.json_string_to_byte_array(serialized_data)) return Windowing.from_runner_api( proto.windowing_strategy, pipeline_context.PipelineContext(proto.components)) @staticmethod def byte_array_to_json_string(raw_bytes): """Implements org.apache.beam.sdk.util.StringUtils.byteArrayToJsonString.""" return quote(raw_bytes) @staticmethod def json_string_to_byte_array(encoded_string): """Implements org.apache.beam.sdk.util.StringUtils.jsonStringToByteArray.""" return unquote_to_bytes(encoded_string) def get_default_gcp_region(self): """Get a default value for Google Cloud region according to https://cloud.google.com/compute/docs/gcloud-compute/#default-properties. If no default can be found, returns None. """ environment_region = os.environ.get('CLOUDSDK_COMPUTE_REGION') if environment_region: _LOGGER.info( 'Using default GCP region %s from $CLOUDSDK_COMPUTE_REGION', environment_region) return environment_region try: cmd = ['gcloud', 'config', 'get-value', 'compute/region'] # Use subprocess.DEVNULL in Python 3.3+. 
if hasattr(subprocess, 'DEVNULL'): DEVNULL = subprocess.DEVNULL else: DEVNULL = open(os.devnull, 'ab') raw_output = processes.check_output(cmd, stderr=DEVNULL) formatted_output = raw_output.decode('utf-8').strip() if formatted_output: _LOGGER.info( 'Using default GCP region %s from `%s`', formatted_output, ' '.join(cmd)) return formatted_output except RuntimeError: pass return None class _DataflowSideInput(beam.pvalue.AsSideInput): """Wraps a side input as a dataflow-compatible side input.""" def _view_options(self): return { 'data': self._data, } def _side_input_data(self): return self._data class _DataflowIterableAsMultimapSideInput(_DataflowSideInput): """Wraps an iterable side input as dataflow-compatible side input.""" def __init__(self, side_input): # pylint: disable=protected-access side_input_data = side_input._side_input_data() assert ( side_input_data.access_pattern == common_urns.side_inputs.ITERABLE.urn) iterable_view_fn = side_input_data.view_fn self._data = beam.pvalue.SideInputData( common_urns.side_inputs.MULTIMAP.urn, side_input_data.window_mapping_fn, lambda multimap: iterable_view_fn(multimap[b''])) class _DataflowIterableSideInput(_DataflowSideInput): """Wraps an iterable side input as dataflow-compatible side input.""" def __init__(self, side_input): # pylint: disable=protected-access self.pvalue = side_input.pvalue side_input_data = side_input._side_input_data() assert ( side_input_data.access_pattern == common_urns.side_inputs.ITERABLE.urn) self._data = beam.pvalue.SideInputData( common_urns.side_inputs.ITERABLE.urn, side_input_data.window_mapping_fn, side_input_data.view_fn) class _DataflowMultimapSideInput(_DataflowSideInput): """Wraps a multimap side input as dataflow-compatible side input.""" def __init__(self, side_input): # pylint: disable=protected-access self.pvalue = side_input.pvalue side_input_data = side_input._side_input_data() assert ( side_input_data.access_pattern == common_urns.side_inputs.MULTIMAP.urn) self._data = beam.pvalue.SideInputData( common_urns.side_inputs.MULTIMAP.urn, side_input_data.window_mapping_fn, side_input_data.view_fn) class DataflowPipelineResult(PipelineResult): """Represents the state of a pipeline run on the Dataflow service.""" def __init__(self, job, runner): """Initialize a new DataflowPipelineResult instance. Args: job: Job message from the Dataflow API. Could be :data:`None` if a job request was not sent to Dataflow service (e.g. template jobs). runner: DataflowRunner instance. """ self._job = job self._runner = runner self.metric_results = None def _update_job(self): # We need the job id to be able to update job information. There is no need # to update the job if we are in a known terminal state. if self.has_job and not self.is_in_terminal_state(): self._job = self._runner.dataflow_client.get_job(self.job_id()) def job_id(self): return self._job.id def metrics(self): return self.metric_results @property def has_job(self): return self._job is not None def _get_job_state(self): values_enum = dataflow_api.Job.CurrentStateValueValuesEnum # Ordered by the enum values. Values that may be introduced in # future versions of Dataflow API are considered UNRECOGNIZED by the SDK. 
    api_jobstate_map = defaultdict(
        lambda: PipelineState.UNRECOGNIZED,
        {
            values_enum.JOB_STATE_UNKNOWN: PipelineState.UNKNOWN,
            values_enum.JOB_STATE_STOPPED: PipelineState.STOPPED,
            values_enum.JOB_STATE_RUNNING: PipelineState.RUNNING,
            values_enum.JOB_STATE_DONE: PipelineState.DONE,
            values_enum.JOB_STATE_FAILED: PipelineState.FAILED,
            values_enum.JOB_STATE_CANCELLED: PipelineState.CANCELLED,
            values_enum.JOB_STATE_UPDATED: PipelineState.UPDATED,
            values_enum.JOB_STATE_DRAINING: PipelineState.DRAINING,
            values_enum.JOB_STATE_DRAINED: PipelineState.DRAINED,
            values_enum.JOB_STATE_PENDING: PipelineState.PENDING,
            values_enum.JOB_STATE_CANCELLING: PipelineState.CANCELLING,
        })

    return (
        api_jobstate_map[self._job.currentState]
        if self._job.currentState else PipelineState.UNKNOWN)

  @property
  def state(self):
    """Return the current state of the remote job.

    Returns:
      A PipelineState object.
    """
    if not self.has_job:
      return PipelineState.UNKNOWN

    self._update_job()

    return self._get_job_state()

  def is_in_terminal_state(self):
    if not self.has_job:
      return True

    return PipelineState.is_terminal(self._get_job_state())

  def wait_until_finish(self, duration=None):
    if not self.is_in_terminal_state():
      if not self.has_job:
        raise IOError('Failed to get the Dataflow job id.')

      thread = threading.Thread(
          target=DataflowRunner.poll_for_job_completion,
          args=(self._runner, self, duration))

      # Mark the thread as a daemon thread so a keyboard interrupt on the main
      # thread will terminate everything. This is also the reason we will not
      # use thread.join() to wait for the polling thread.
      thread.daemon = True
      thread.start()
      while thread.is_alive():
        time.sleep(5.0)

      # TODO: Merge the termination code in poll_for_job_completion and
      # is_in_terminal_state.
      terminated = self.is_in_terminal_state()
      assert duration or terminated, (
          'Job did not reach a terminal state after waiting indefinitely.')

      if terminated and self.state != PipelineState.DONE:
        # TODO(BEAM-1290): Consider converting this to an error log based on
        # the resolution of the issue.
        raise DataflowRuntimeException(
            'Dataflow pipeline failed. State: %s, Error:\n%s' %
            (self.state, getattr(self._runner, 'last_error_msg', None)),
            self)
    return self.state

  def cancel(self):
    if not self.has_job:
      raise IOError('Failed to get the Dataflow job id.')

    self._update_job()
    if self.is_in_terminal_state():
      _LOGGER.warning(
          'Cancel failed because job %s is already terminated in state %s.',
          self.job_id(),
          self.state)
    else:
      if not self._runner.dataflow_client.modify_job_state(
          self.job_id(), 'JOB_STATE_CANCELLED'):
        cancel_failed_message = (
            'Failed to cancel job %s, please go to the Developers Console to '
            'cancel it manually.') % self.job_id()
        _LOGGER.error(cancel_failed_message)
        raise DataflowRuntimeException(cancel_failed_message, self)

    return self.state

  def __str__(self):
    return '<%s %s %s>' % (self.__class__.__name__, self.job_id(), self.state)

  def __repr__(self):
    return '<%s %s at %s>' % (self.__class__.__name__, self._job, hex(id(self)))


class DataflowRuntimeException(Exception):
  """Indicates an error has occurred in running this pipeline."""
  def __init__(self, msg, result):
    super(DataflowRuntimeException, self).__init__(msg)
    self.result = result
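# --- Editor's illustrative sketch (not part of the original module) ---
# The byte_array_to_json_string / json_string_to_byte_array helpers above are
# thin wrappers around percent-encoding, so arbitrary bytes can travel inside
# a JSON string. A minimal, self-contained Python 3 round-trip using
# urllib.parse is sketched below; the helper names here are examples only.
from urllib.parse import quote, unquote_to_bytes


def _encode_bytes_for_json(raw_bytes):
  # Percent-encode the raw bytes into a plain ASCII string.
  return quote(raw_bytes)


def _decode_bytes_from_json(encoded_string):
  # Reverse the percent-encoding back into the original bytes.
  return unquote_to_bytes(encoded_string)


round_trip = _decode_bytes_for_json = _decode_bytes_from_json(
    _encode_bytes_for_json(b'\x00\xffpayload'))
assert round_trip == b'\x00\xffpayload'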
[]
[]
[ "CLOUDSDK_COMPUTE_REGION" ]
[]
["CLOUDSDK_COMPUTE_REGION"]
python
1
0
cmd/uidp/uidpsec/main.go
package main

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"log"
	"os"

	"math/big"
	"strings"

	"inspr.dev/inspr/pkg/controller/client"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func generatePassword() string {
	chars := []rune("ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
		"abcdefghijklmnopqrstuvwxyz" +
		"0123456789")
	length := 20
	var b strings.Builder
	for i := 0; i < length; i++ {
		index, _ := rand.Int(rand.Reader, big.NewInt(int64(len(chars))))
		b.WriteRune(chars[index.Int64()])
	}
	str := b.String()
	return str
}

var clientSet kubernetes.Interface

func initInsprd() (string, error) {
	cont := client.NewControllerClient(client.ControllerConfig{
		URL: os.Getenv("INSPRD_URL"),
	})

	token, err := cont.Authorization().Init(context.Background(), os.Getenv("INSPRD_INIT_KEY"))
	return token, err
}

// initKube initializes a k8s operator with in cluster configuration
func initKube() error {
	config, err := rest.InClusterConfig()
	if err != nil {
		return err
	}

	clientSet, err = kubernetes.NewForConfig(config)
	if err != nil {
		return err
	}
	return nil
}

func main() {
	ctx := context.Background()
	namespace := os.Getenv("K8S_NAMESPACE")
	secretName := os.Getenv("SECRET_NAME")

	// Abort early if the in-cluster client cannot be built; otherwise the
	// calls below would panic on a nil clientSet.
	if err := initKube(); err != nil {
		log.Fatal(err.Error())
	}

	secret, err := clientSet.CoreV1().Secrets(namespace).Get(ctx, secretName, v1.GetOptions{})
	if err != nil {
		panic(err)
	}

	if _, exists := secret.Data["REFRESH_KEY"]; !exists {
		bytes := make([]byte, 32) //generate a random 32 byte key for AES-256
		if _, err := rand.Read(bytes); err != nil {
			panic(err.Error())
		}
		key := hex.EncodeToString(bytes)
		privateKeyBytes := []byte(key)

		secret.Data["REFRESH_KEY"] = privateKeyBytes

		if os.Getenv("INIT_INSPRD") == "true" {
			token, err := initInsprd()
			if err != nil {
				panic(err)
			}
			secret.Data["ADMIN_TOKEN"] = []byte(token)
		}

		if os.Getenv("ADMIN_PASSWORD_GENERATE") == "true" {
			secret.Data["ADMIN_PASSWORD"] = []byte(generatePassword())
		}

		_, err = clientSet.CoreV1().Secrets(namespace).Update(ctx, secret, v1.UpdateOptions{})
		if err != nil {
			log.Fatal(err.Error())
		}
	}
}
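// --- Editor's illustrative sketch (not part of the original file) ---
// generatePassword above silently discards the error returned by rand.Int.
// A standalone variant that surfaces entropy-source failures to the caller
// could look like the function below; the package and function names are
// examples only, not part of uidpsec.
package example

import (
	"crypto/rand"
	"math/big"
	"strings"
)

// RandomString draws length runes uniformly from chars using crypto/rand and
// returns any error from the underlying entropy source instead of ignoring it.
func RandomString(chars []rune, length int) (string, error) {
	var b strings.Builder
	for i := 0; i < length; i++ {
		idx, err := rand.Int(rand.Reader, big.NewInt(int64(len(chars))))
		if err != nil {
			return "", err
		}
		b.WriteRune(chars[idx.Int64()])
	}
	return b.String(), nil
}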
[ "\"INSPRD_URL\"", "\"INSPRD_INIT_KEY\"", "\"K8S_NAMESPACE\"", "\"SECRET_NAME\"", "\"INIT_INSPRD\"", "\"ADMIN_PASSWORD_GENERATE\"" ]
[]
[ "K8S_NAMESPACE", "SECRET_NAME", "ADMIN_PASSWORD_GENERATE", "INIT_INSPRD", "INSPRD_INIT_KEY", "INSPRD_URL" ]
[]
["K8S_NAMESPACE", "SECRET_NAME", "ADMIN_PASSWORD_GENERATE", "INIT_INSPRD", "INSPRD_INIT_KEY", "INSPRD_URL"]
go
6
0
aagent/watchers/nagioswatcher/nagios.go
// Copyright (c) 2020-2021, R.I. Pienaar and the Choria Project contributors
//
// SPDX-License-Identifier: Apache-2.0

package nagioswatcher

import (
	"bytes"
	"context"
	"fmt"
	"html/template"
	"math/rand"
	"os"
	"os/exec"
	"strings"
	"sync"
	"time"

	"github.com/choria-io/go-choria/aagent/model"
	"github.com/google/shlex"
	"github.com/tidwall/gjson"

	"github.com/choria-io/go-choria/aagent/util"
	"github.com/choria-io/go-choria/aagent/watchers/event"
	"github.com/choria-io/go-choria/aagent/watchers/watcher"
	iu "github.com/choria-io/go-choria/internal/util"
)

type State int

const (
	OK State = iota
	WARNING
	CRITICAL
	UNKNOWN
	SKIPPED
	NOTCHECKED

	wtype   = "nagios"
	version = "v1"
)

var stateNames = map[State]string{
	OK:       "OK",
	WARNING:  "WARNING",
	CRITICAL: "CRITICAL",
	UNKNOWN:  "UNKNOWN",

	// these are internal states that doesnt cause prom updates
	// or matching state transitions, they are there to force transitions
	// to unknown on the first time and to avoid immediate double checks
	// when transitioning between states
	SKIPPED:    "SKIPPED",
	NOTCHECKED: "NOTCHECKED",
}

var intStates = map[int]State{
	int(OK):         OK,
	int(WARNING):    WARNING,
	int(CRITICAL):   CRITICAL,
	int(UNKNOWN):    UNKNOWN,
	int(SKIPPED):    SKIPPED,
	int(NOTCHECKED): NOTCHECKED,
}

// StateName returns friendly name for a state
func StateName(s int) string {
	state, ok := intStates[s]
	if !ok {
		return stateNames[UNKNOWN]
	}

	return stateNames[state]
}

type properties struct {
	Annotations map[string]string
	Plugin      string
	Gossfile    string
	Builtin     string
	Timeout     time.Duration
	LastMessage time.Duration `mapstructure:"last_message"`
	CertExpiry  time.Duration `mapstructure:"pubcert_expire"`
}

type Execution struct {
	Executed time.Time       `json:"execute"`
	Status   int             `json:"status"`
	PerfData []util.PerfData `json:"perfdata,omitempty"`
}

type Watcher struct {
	*watcher.Watcher

	properties       *properties
	name             string
	machine          model.Machine
	interval         time.Duration
	previousRunTime  time.Duration
	previousOutput   string
	previousPerfData []util.PerfData
	previousCheck    time.Time
	previousPlugin   string
	previous         State
	force            bool
	history          []*Execution
	machineName      string
	textFileDir      string
	watching         bool

	mu *sync.Mutex
}

func New(machine model.Machine, name string, states []string, failEvent string, successEvent string, interval string, ai time.Duration, properties map[string]interface{}) (interface{}, error) {
	var err error

	nw := &Watcher{
		machineName: machine.Name(),
		textFileDir: machine.TextFileDirectory(),
		name:        name,
		machine:     machine,
		previous:    NOTCHECKED,
		history:     []*Execution{},
		mu:          &sync.Mutex{},
	}

	nw.Watcher, err = watcher.NewWatcher(name, wtype, ai, states, machine, failEvent, successEvent)
	if err != nil {
		return nil, err
	}

	err = nw.setProperties(properties)
	if err != nil {
		return nil, fmt.Errorf("could not set properties: %s", err)
	}

	if interval != "" {
		nw.interval, err = iu.ParseDuration(interval)
		if err != nil {
			return nil, fmt.Errorf("invalid interval: %s", err)
		}

		if nw.interval < 500*time.Millisecond {
			return nil, fmt.Errorf("interval %v is too small", nw.interval)
		}
	}

	updatePromState(nw.machineName, UNKNOWN, machine.TextFileDirectory(), nw)

	return nw, err
}

// Delete stops the watcher and remove it from the prom state after the check was removed from disk
func (w *Watcher) Delete() {
	w.mu.Lock()
	defer w.mu.Unlock()

	// suppress next check and set state to unknown
	w.previousCheck = time.Now()
	deletePromState(w.machineName, w.textFileDir, w)
}

func (w *Watcher) CurrentState() interface{} {
	w.mu.Lock()
	defer w.mu.Unlock()

	s := &StateNotification{
		Event:       event.New(w.name, wtype, version, w.machine),
		Plugin:      w.previousPlugin,
		Status:      stateNames[w.previous],
		StatusCode:  int(w.previous),
		Output:      w.previousOutput,
		PerfData:    w.previousPerfData,
		RunTime:     w.previousRunTime.Seconds(),
		History:     w.history,
		Annotations: w.properties.Annotations,
		CheckTime:   w.previousCheck.Unix(),
	}

	if !w.previousCheck.IsZero() {
		s.CheckTime = w.previousCheck.Unix()
	}

	return s
}

func (w *Watcher) validate() error {
	if w.properties.Builtin != "" && w.properties.Plugin != "" {
		return fmt.Errorf("cannot set plugin and builtin")
	}

	if w.properties.Builtin == "" && w.properties.Plugin == "" {
		return fmt.Errorf("plugin or builtin is required")
	}

	if w.properties.Builtin == "goss" && w.properties.Gossfile == "" {
		return fmt.Errorf("gossfile property is required for the goss builtin check")
	}

	if w.properties.Builtin == "choria_status" && w.properties.LastMessage == 0 {
		return fmt.Errorf("last_message property is required for the choria_status builtin check")
	}

	if w.properties.Timeout == 0 {
		w.properties.Timeout = time.Second
	}

	return nil
}

func (w *Watcher) setProperties(props map[string]interface{}) error {
	if w.properties == nil {
		w.properties = &properties{
			Annotations: make(map[string]string),
			Timeout:     time.Second,
		}
	}

	err := util.ParseMapStructure(props, &w.properties)
	if err != nil {
		return err
	}

	return w.validate()
}

func (w *Watcher) NotifyStateChance() {
	var s State

	switch w.machine.State() {
	case "OK":
		s = OK
	case "WARNING":
		s = WARNING
	case "CRITICAL":
		s = CRITICAL
	case "UNKNOWN":
		s = UNKNOWN
	case "FORCE_CHECK":
		w.Infof("Forcing a check of %s", w.machineName)
		w.force = true
		w.StateChangeC() <- struct{}{}

		return
	}

	w.mu.Lock()
	w.previous = s
	w.mu.Unlock()

	err := updatePromState(w.machineName, s, w.textFileDir, w)
	if err != nil {
		w.Errorf("Could not update prometheus: %s", err)
	}
}

func (w *Watcher) Run(ctx context.Context, wg *sync.WaitGroup) {
	defer wg.Done()

	if w.textFileDir != "" {
		w.Infof("nagios watcher starting, updating prometheus in %s", w.textFileDir)
	} else {
		w.Infof("nagios watcher starting, prometheus integration disabled")
	}

	if w.interval != 0 {
		wg.Add(1)
		go w.intervalWatcher(ctx, wg)
	}

	for {
		select {
		case <-w.StateChangeC():
			w.performWatch(ctx)

		case <-ctx.Done():
			w.Infof("Stopping on context interrupt")
			return
		}
	}
}

func (w *Watcher) intervalWatcher(ctx context.Context, wg *sync.WaitGroup) {
	defer wg.Done()

	splay := time.Duration(rand.Intn(int(w.interval.Seconds()))) * time.Second
	w.Infof("Splaying first check by %v", splay)

	select {
	case <-time.NewTimer(splay).C:
	case <-ctx.Done():
		return
	}

	tick := time.NewTicker(w.interval)

	for {
		select {
		case <-tick.C:
			w.performWatch(ctx)

		case <-ctx.Done():
			tick.Stop()
			return
		}
	}
}

func (w *Watcher) performWatch(ctx context.Context) {
	if w.isWatching() {
		return
	}

	start := time.Now().UTC()
	state, err := w.watch(ctx)
	err = w.handleCheck(start, state, false, err)
	if err != nil {
		w.Errorf("could not handle watcher event: %s", err)
	}
}

func (w *Watcher) handleCheck(start time.Time, s State, external bool, err error) error {
	if s == SKIPPED || s == NOTCHECKED {
		return nil
	}

	w.Debugf("handling check for %s %s %v", w.properties.Plugin, stateNames[s], err)

	w.mu.Lock()
	w.previous = s

	if len(w.history) >= 15 {
		w.history = w.history[1:]
	}
	w.history = append(w.history, &Execution{Executed: start, Status: int(s), PerfData: w.previousPerfData})
	w.mu.Unlock()

	// dont notify if we are externally transitioning because probably notifications were already sent
	if !external {
		w.NotifyWatcherState(w.CurrentState())
	}

	w.Debugf("Notifying prometheus")

	err = updatePromState(w.machineName, s, w.textFileDir, w)
	if err != nil {
		w.Errorf("Could not update prometheus: %s", err)
	}

	if external {
		return nil
	}

	return w.Transition(stateNames[s])
}

func (w *Watcher) processOverrides(c string) (string, error) {
	res, err := template.New(w.name).Funcs(w.funcMap()).Parse(c)
	if err != nil {
		return c, err
	}

	wr := new(bytes.Buffer)
	err = res.Execute(wr, struct{}{})
	if err != nil {
		return c, err
	}

	return wr.String(), nil
}

func (w *Watcher) funcMap() template.FuncMap {
	return template.FuncMap{
		"o": func(path string, dflt interface{}) string {
			overrides, err := w.machine.OverrideData()
			if err != nil {
				return fmt.Sprintf("%v", dflt)
			}

			if len(overrides) == 0 {
				return fmt.Sprintf("%v", dflt)
			}

			r := gjson.GetBytes(overrides, w.machineName+"."+path)
			if !r.Exists() {
				return fmt.Sprintf("%v", dflt)
			}

			return r.String()
		},
	}
}

func (w *Watcher) watchUsingPlugin(ctx context.Context) (state State, output string, err error) {
	timeoutCtx, cancel := context.WithTimeout(ctx, w.properties.Timeout)
	defer cancel()

	plugin, err := w.processOverrides(w.properties.Plugin)
	if err != nil {
		w.Errorf("could not process overrides for plugin command: %s", err)
		return UNKNOWN, "", err
	}

	w.Infof("Running %s", w.properties.Plugin)

	splitcmd, err := shlex.Split(plugin)
	if err != nil {
		w.Errorf("Exec watcher %s failed: %s", plugin, err)
		return UNKNOWN, "", err
	}

	w.previousPlugin = plugin

	cmd := exec.CommandContext(timeoutCtx, splitcmd[0], splitcmd[1:]...)
	cmd.Env = append(cmd.Env, fmt.Sprintf("MACHINE_WATCHER_NAME=%s", w.name))
	cmd.Env = append(cmd.Env, fmt.Sprintf("MACHINE_NAME=%s", w.machineName))
	cmd.Env = append(cmd.Env, fmt.Sprintf("PATH=%s%s%s", os.Getenv("PATH"), string(os.PathListSeparator), w.machine.Directory()))
	cmd.Dir = w.machine.Directory()

	var pstate *os.ProcessState

	outb, err := cmd.CombinedOutput()
	if err != nil {
		eerr, ok := err.(*exec.ExitError)
		if ok {
			pstate = eerr.ProcessState
		} else {
			w.Errorf("Exec watcher %s failed: %s", w.properties.Plugin, err)
			w.previousOutput = err.Error()

			return UNKNOWN, "", err
		}
	} else {
		pstate = cmd.ProcessState
	}

	output = string(outb)

	w.Debugf("Output from %s: %s", w.properties.Plugin, output)

	s, ok := intStates[pstate.ExitCode()]
	if ok {
		return s, output, nil
	}

	return UNKNOWN, output, nil
}

func (w *Watcher) watchUsingBuiltin(_ context.Context) (state State, output string, err error) {
	switch {
	case w.properties.Builtin == "heartbeat":
		return w.builtinHeartbeat()
	case strings.HasPrefix(w.properties.Builtin, "goss"):
		return w.watchUsingGoss()
	case w.properties.Builtin == "choria_status":
		return w.watchUsingChoria()
	default:
		return UNKNOWN, "", fmt.Errorf("unsupported builtin %q", w.properties.Builtin)
	}
}

func (w *Watcher) startWatching() {
	w.mu.Lock()
	w.watching = true
	w.mu.Unlock()
}

func (w *Watcher) isWatching() bool {
	w.mu.Lock()
	defer w.mu.Unlock()

	return w.watching
}

func (w *Watcher) stopWatching() {
	w.mu.Lock()
	w.watching = false
	w.mu.Unlock()
}

func (w *Watcher) watch(ctx context.Context) (state State, err error) {
	if !w.ShouldWatch() {
		return SKIPPED, nil
	}

	w.startWatching()
	defer w.stopWatching()

	start := time.Now()
	w.previousCheck = start
	defer func() {
		w.mu.Lock()
		w.previousRunTime = time.Since(start)
		w.mu.Unlock()
	}()

	var output string

	switch {
	case w.properties.Plugin != "":
		state, output, err = w.watchUsingPlugin(ctx)
	case w.properties.Builtin != "":
		state, output, err = w.watchUsingBuiltin(ctx)
	default:
		state = UNKNOWN
		err = fmt.Errorf("command or builtin required")
	}

	w.previousOutput = strings.TrimSpace(output)
	w.previousPerfData = util.ParsePerfData(output)

	return state, err
}

func (w *Watcher) ShouldWatch() bool {
	if w.force {
		w.force = false
		return true
	}

	since := time.Since(w.previousCheck)
	if !w.previousCheck.IsZero() && since < w.interval-time.Second {
		w.Debugf("Skipping check due to previous check being %v sooner than interval %v", since, w.interval)
		return false
	}

	return w.Watcher.ShouldWatch()
}
[ "\"PATH\"" ]
[]
[ "PATH" ]
[]
["PATH"]
go
1
0
conans/test/functional/generators/xcode_gcc_vs_test.py
import os
import re
import unittest

from conans.model.graph_info import GRAPH_INFO_FILE
from conans.model.graph_lock import LOCKFILE
from conans.model.ref import ConanFileReference, PackageReference
from conans.paths import (BUILD_INFO, BUILD_INFO_CMAKE, BUILD_INFO_GCC, BUILD_INFO_VISUAL_STUDIO,
                          BUILD_INFO_XCODE, CONANFILE_TXT, CONANINFO)
from conans.test.utils.tools import TestClient
from conans.util.files import load


class VSXCodeGeneratorsTest(unittest.TestCase):

    def generators_test(self):
        ref = ConanFileReference.loads("Hello/0.1@lasote/stable")
        client = TestClient()
        client.save({"conanfile.py": """from conans import ConanFile
import os
class Pkg(ConanFile):
    def package(self):
        os.makedirs(os.path.join(self.package_folder, "lib"))
        os.makedirs(os.path.join(self.package_folder, "include"))
    def package_info(self):
        self.cpp_info.libs = ["hello"]
        self.cpp_info.cxxflags = ["-some_cxx_compiler_flag"]
        self.cpp_info.cflags = ["-some_c_compiler_flag"]
"""})
        client.run("export . Hello/0.1@lasote/stable")
        conanfile_txt = '''[requires]
Hello/0.1@lasote/stable # My req comment
[generators]
gcc # I need this generator for..
cmake
visual_studio
xcode
'''
        client.save({"conanfile.txt": conanfile_txt}, clean_first=True)

        # Install requirements
        client.run('install . --build missing')
        self.assertEqual(sorted([CONANFILE_TXT, BUILD_INFO_GCC, BUILD_INFO_CMAKE,
                                 BUILD_INFO_VISUAL_STUDIO, BUILD_INFO, BUILD_INFO_XCODE,
                                 CONANINFO, GRAPH_INFO_FILE, LOCKFILE]),
                         sorted(os.listdir(client.current_folder)))
        cmake = load(os.path.join(client.current_folder, BUILD_INFO_CMAKE))
        gcc = load(os.path.join(client.current_folder, BUILD_INFO_GCC))

        self.assertIn("CONAN_INCLUDE_DIRS", cmake)
        self.assertIn("CONAN_LIB_DIRS", cmake)
        self.assertIn("CONAN_LIBS", cmake)

        self.assertIn("CONAN_INCLUDE_DIRS", cmake)
        self.assertIn("CONAN_LIB_DIRS", cmake)
        self.assertIn("/data/Hello/0.1/lasote/stable/package", cmake)

        self.assertIn("-L", gcc)
        self.assertIn("-l", gcc)
        self.assertIn("-I", gcc)

        self.assertIn("/data/Hello/0.1/lasote/stable/package", gcc)

        # CHECK VISUAL STUDIO GENERATOR
        from xml.dom import minidom
        xmldoc = minidom.parse(os.path.join(client.current_folder, BUILD_INFO_VISUAL_STUDIO))
        definition_group = xmldoc.getElementsByTagName('ItemDefinitionGroup')[0]
        compiler = definition_group.getElementsByTagName("ClCompile")[0]
        linker = definition_group.getElementsByTagName("Link")[0]

        def element_content(node):
            return node.firstChild.data if node.firstChild else ""

        include_dirs = element_content(xmldoc.getElementsByTagName("ConanIncludeDirectories")[0])
        definitions = element_content(xmldoc.getElementsByTagName("ConanPreprocessorDefinitions")[0])
        lib_dirs = element_content(xmldoc.getElementsByTagName("ConanLibraryDirectories")[0])
        libs = element_content(linker.getElementsByTagName("AdditionalDependencies")[0])

        package_id = os.listdir(client.cache.package_layout(ref).packages())[0]
        pref = PackageReference(ref, package_id)
        package_path = client.cache.package_layout(pref.ref).package(pref)

        replaced_path = re.sub(os.getenv("USERPROFILE", "not user profile").replace("\\", "\\\\"),
                               "$(USERPROFILE)", package_path, flags=re.I)
        expected_lib_dirs = os.path.join(replaced_path, "lib")
        expected_include_dirs = os.path.join(replaced_path, "include")

        self.assertIn(expected_lib_dirs, lib_dirs)
        self.assertEqual("$(ConanLibraries)%(AdditionalDependencies)", libs)
        self.assertEqual("", definitions)
        self.assertIn(expected_include_dirs, include_dirs)

        # CHECK XCODE GENERATOR
        xcode = load(os.path.join(client.current_folder, BUILD_INFO_XCODE))

        expected_c_flags = '-some_c_compiler_flag'
        expected_cpp_flags = '-some_cxx_compiler_flag'
        expected_lib_dirs = os.path.join(package_path, "lib").replace("\\", "/")
        expected_include_dirs = os.path.join(package_path, "include").replace("\\", "/")

        self.assertIn('LIBRARY_SEARCH_PATHS = $(inherited) "%s"' % expected_lib_dirs, xcode)
        self.assertIn('HEADER_SEARCH_PATHS = $(inherited) "%s"' % expected_include_dirs, xcode)
        self.assertIn("GCC_PREPROCESSOR_DEFINITIONS = $(inherited)", xcode)
        self.assertIn('OTHER_CFLAGS = $(inherited) %s' % expected_c_flags, xcode)
        self.assertIn('OTHER_CPLUSPLUSFLAGS = $(inherited) %s' % expected_cpp_flags, xcode)
        self.assertIn('FRAMEWORK_SEARCH_PATHS = $(inherited) "%s"' % package_path.replace("\\", "/"),
                      xcode)
[]
[]
[ "USERPROFILE" ]
[]
["USERPROFILE"]
python
1
0
conans/client/command.py
import argparse import inspect import json import os import sys from argparse import ArgumentError from difflib import get_close_matches from six.moves import input as raw_input from conans import __version__ as client_version from conans.client.cmd.uploader import UPLOAD_POLICY_FORCE, \ UPLOAD_POLICY_NO_OVERWRITE, UPLOAD_POLICY_NO_OVERWRITE_RECIPE, UPLOAD_POLICY_SKIP from conans.client.conan_api import (Conan, default_manifest_folder, _make_abs_path) from conans.client.conan_command_output import CommandOutputer from conans.client.output import Color from conans.client.printer import Printer from conans.errors import ConanException, ConanInvalidConfiguration, NoRemoteAvailable, \ ConanMigrationError from conans.model.ref import ConanFileReference, PackageReference, get_reference_fields, \ check_valid_ref from conans.unicode import get_cwd from conans.util.config_parser import get_bool_from_text from conans.util.files import exception_message_safe from conans.util.files import save from conans.util.log import logger # Exit codes for conan command: SUCCESS = 0 # 0: Success (done) ERROR_GENERAL = 1 # 1: General ConanException error (done) ERROR_MIGRATION = 2 # 2: Migration error USER_CTRL_C = 3 # 3: Ctrl+C USER_CTRL_BREAK = 4 # 4: Ctrl+Break ERROR_SIGTERM = 5 # 5: SIGTERM ERROR_INVALID_CONFIGURATION = 6 # 6: Invalid configuration (done) class Extender(argparse.Action): """Allows using the same flag several times in command and creates a list with the values. For example: conan install MyPackage/1.2@user/channel -o qt:value -o mode:2 -s cucumber:true It creates: options = ['qt:value', 'mode:2'] settings = ['cucumber:true'] """ def __call__(self, parser, namespace, values, option_strings=None): # @UnusedVariable # Need None here incase `argparse.SUPPRESS` was supplied for `dest` dest = getattr(namespace, self.dest, None) if not hasattr(dest, 'extend') or dest == self.default: dest = [] setattr(namespace, self.dest, dest) # if default isn't set to None, this method might be called # with the default as `values` for other arguments which # share this destination. parser.set_defaults(**{self.dest: None}) if isinstance(values, str): dest.append(values) elif values: try: dest.extend(values) except ValueError: dest.append(values) class OnceArgument(argparse.Action): """Allows declaring a parameter that can have only one value, by default argparse takes the latest declared and it's very confusing. """ def __call__(self, parser, namespace, values, option_string=None): if getattr(namespace, self.dest) is not None and self.default is None: msg = '{o} can only be specified once'.format(o=option_string) raise argparse.ArgumentError(None, msg) setattr(namespace, self.dest, values) class SmartFormatter(argparse.HelpFormatter): def _fill_text(self, text, width, indent): import textwrap text = textwrap.dedent(text) return ''.join(indent + line for line in text.splitlines(True)) _QUERY_EXAMPLE = "os=Windows AND (arch=x86 OR compiler=gcc)" _PATTERN_EXAMPLE = "boost/*" _REFERENCE_EXAMPLE = "MyPackage/1.2@user/channel" _PREF_EXAMPLE = "MyPackage/1.2@user/channel:af7901d8bdfde621d086181aa1c495c25a17b137" _BUILD_FOLDER_HELP = ("Directory for the build process. Defaulted to the current directory. A " "relative path to the current directory can also be specified") _INSTALL_FOLDER_HELP = ("Directory containing the conaninfo.txt and conanbuildinfo.txt files " "(from previous 'conan install'). 
Defaulted to --build-folder") _KEEP_SOURCE_HELP = ("Do not remove the source folder in the local cache, even if the recipe changed. " "Use this for testing purposes only") _PATTERN_OR_REFERENCE_HELP = ("Pattern or package recipe reference, e.g., '%s', " "'%s'" % (_PATTERN_EXAMPLE, _REFERENCE_EXAMPLE)) _PATTERN_REF_OR_PREF_HELP = ("Pattern, recipe reference or package reference e.g., '%s', " "'%s', '%s'" % (_PATTERN_EXAMPLE, _REFERENCE_EXAMPLE, _PREF_EXAMPLE)) _REF_OR_PREF_HELP = ("Recipe reference or package reference e.g., '%s', " "'%s'" % (_REFERENCE_EXAMPLE, _PREF_EXAMPLE)) _PATH_HELP = ("Path to a folder containing a conanfile.py or to a recipe file " "e.g., my_folder/conanfile.py") _QUERY_HELP = ("Packages query: '%s'. The 'pattern_or_reference' parameter has " "to be a reference: %s" % (_QUERY_EXAMPLE, _REFERENCE_EXAMPLE)) _SOURCE_FOLDER_HELP = ("Directory containing the sources. Defaulted to the conanfile's directory. A" " relative path to the current directory can also be specified") class Command(object): """A single command of the conan application, with all the first level commands. Manages the parsing of parameters and delegates functionality in collaborators. It can also show the help of the tool. """ def __init__(self, conan_api): assert isinstance(conan_api, Conan) self._conan = conan_api self._out = conan_api.out @property def _outputer(self): # FIXME, this access to the cache for output is ugly, should be removed return CommandOutputer(self._out, self._conan.app.cache) def help(self, *args): """ Shows help for a specific command. """ parser = argparse.ArgumentParser(description=self.help.__doc__, prog="conan help", formatter_class=SmartFormatter) parser.add_argument("command", help='command', nargs="?") args = parser.parse_args(*args) if not args.command: self._show_help() return try: commands = self._commands() method = commands[args.command] self._warn_python_version() method(["--help"]) except KeyError: raise ConanException("Unknown command '%s'" % args.command) def new(self, *args): """ Creates a new package recipe template with a 'conanfile.py' and optionally, 'test_package' testing files. """ parser = argparse.ArgumentParser(description=self.new.__doc__, prog="conan new", formatter_class=SmartFormatter) parser.add_argument("name", help='Package name, e.g.: "poco/1.9.4" or complete reference' ' for CI scripts: "poco/1.9.4@user/channel"') parser.add_argument("-t", "--test", action='store_true', default=False, help='Create test_package skeleton to test package') parser.add_argument("-i", "--header", action='store_true', default=False, help='Create a headers only package template') parser.add_argument("-c", "--pure-c", action='store_true', default=False, help='Create a C language package only package, ' 'deleting "self.settings.compiler.libcxx" setting ' 'in the configure method') parser.add_argument("-s", "--sources", action='store_true', default=False, help='Create a package with embedded sources in "src" folder, ' 'using "exports_sources" instead of retrieving external code with ' 'the "source()" method') parser.add_argument("-b", "--bare", action='store_true', default=False, help='Create the minimum package recipe, without build() method. 
' 'Useful in combination with "export-pkg" command') parser.add_argument("-m", "--template", help='Use the given template from the local cache for conanfile.py') parser.add_argument("-cis", "--ci-shared", action='store_true', default=False, help='Package will have a "shared" option to be used in CI') parser.add_argument("-cilg", "--ci-travis-gcc", action='store_true', default=False, help='Generate travis-ci files for linux gcc') parser.add_argument("-cilc", "--ci-travis-clang", action='store_true', default=False, help='Generate travis-ci files for linux clang') parser.add_argument("-cio", "--ci-travis-osx", action='store_true', default=False, help='Generate travis-ci files for OSX apple-clang') parser.add_argument("-ciw", "--ci-appveyor-win", action='store_true', default=False, help='Generate appveyor files for Appveyor ' 'Visual Studio') parser.add_argument("-ciglg", "--ci-gitlab-gcc", action='store_true', default=False, help='Generate GitLab files for linux gcc') parser.add_argument("-ciglc", "--ci-gitlab-clang", action='store_true', default=False, help='Generate GitLab files for linux clang') parser.add_argument("-ciccg", "--ci-circleci-gcc", action='store_true', default=False, help='Generate CircleCI files for linux gcc') parser.add_argument("-ciccc", "--ci-circleci-clang", action='store_true', default=False, help='Generate CircleCI files for linux clang') parser.add_argument("-cicco", "--ci-circleci-osx", action='store_true', default=False, help='Generate CircleCI files for OSX apple-clang') parser.add_argument("-gi", "--gitignore", action='store_true', default=False, help='Generate a .gitignore with the known patterns to excluded') parser.add_argument("-ciu", "--ci-upload-url", help='Define URL of the repository to upload') args = parser.parse_args(*args) self._warn_python_version() self._conan.new(args.name, header=args.header, pure_c=args.pure_c, test=args.test, exports_sources=args.sources, bare=args.bare, visual_versions=args.ci_appveyor_win, linux_gcc_versions=args.ci_travis_gcc, linux_clang_versions=args.ci_travis_clang, gitignore=args.gitignore, osx_clang_versions=args.ci_travis_osx, shared=args.ci_shared, upload_url=args.ci_upload_url, gitlab_gcc_versions=args.ci_gitlab_gcc, gitlab_clang_versions=args.ci_gitlab_clang, circleci_gcc_versions=args.ci_circleci_gcc, circleci_clang_versions=args.ci_circleci_clang, circleci_osx_versions=args.ci_circleci_osx, template=args.template) def inspect(self, *args): """ Displays conanfile attributes, like name, version, and options. Works locally, in local cache and remote. """ parser = argparse.ArgumentParser(description=self.inspect.__doc__, prog="conan inspect", formatter_class=SmartFormatter) parser.add_argument("path_or_reference", help="Path to a folder containing a recipe" " (conanfile.py) or to a recipe file. e.g., " "./my_project/conanfile.py. 
It could also be a reference") parser.add_argument("-a", "--attribute", help='The attribute to be displayed, e.g "name"', nargs="?", action=Extender) parser.add_argument("-r", "--remote", help='look in the specified remote server', action=OnceArgument) parser.add_argument("-j", "--json", default=None, action=OnceArgument, help='json output file') parser.add_argument('--raw', default=None, action=OnceArgument, help='Print just the value of the requested attribute') args = parser.parse_args(*args) if args.raw and args.attribute: raise ConanException("Argument '--raw' is incompatible with '-a'") if args.raw and args.json: raise ConanException("Argument '--raw' is incompatible with '--json'") attributes = [args.raw, ] if args.raw else args.attribute quiet = bool(args.raw) result = self._conan.inspect(args.path_or_reference, attributes, args.remote, quiet=quiet) Printer(self._out).print_inspect(result, raw=args.raw) if args.json: json_output = json.dumps(result) if not os.path.isabs(args.json): json_output_file = os.path.join(get_cwd(), args.json) else: json_output_file = args.json save(json_output_file, json_output) def test(self, *args): """ Tests a package consuming it from a conanfile.py with a test() method. This command installs the conanfile dependencies (including the tested package), calls a 'conan build' to build test apps and finally executes the test() method. The testing recipe does not require name or version, neither definition of package() or package_info() methods. The package to be tested must exist in the local cache or any configured remote. """ parser = argparse.ArgumentParser(description=self.test.__doc__, prog="conan test", formatter_class=SmartFormatter) parser.add_argument("path", help='Path to the "testing" folder containing a conanfile.py or' ' to a recipe file with test() method' 'e.g. conan test_package/conanfile.py pkg/version@user/channel') parser.add_argument("reference", help='pkg/version@user/channel of the package to be tested') parser.add_argument("-tbf", "--test-build-folder", action=OnceArgument, help="Working directory of the build process.") _add_common_install_arguments(parser, build_help=_help_build_policies.format("never")) args = parser.parse_args(*args) self._warn_python_version() return self._conan.test(args.path, args.reference, args.profile, args.settings, args.options, args.env, args.remote, args.update, build_modes=args.build, test_build_folder=args.test_build_folder, lockfile=args.lockfile) def create(self, *args): """ Builds a binary package for a recipe (conanfile.py). Uses the specified configuration in a profile or in -s settings, -o options, etc. If a 'test_package' folder (the name can be configured with -tf) is found, the command will run the consumer project to ensure that the package has been created correctly. Check 'conan test' command to know more about 'test_folder' project. 
""" parser = argparse.ArgumentParser(description=self.create.__doc__, prog="conan create", formatter_class=SmartFormatter) parser.add_argument("path", help=_PATH_HELP) parser.add_argument("reference", nargs='?', default=None, help='user/channel, version@user/channel or pkg/version@user/channel ' '(if name or version declared in conanfile.py, they should match)') parser.add_argument("-j", "--json", default=None, action=OnceArgument, help='json file path where the install information will be written to') parser.add_argument('-k', '-ks', '--keep-source', default=False, action='store_true', help=_KEEP_SOURCE_HELP) parser.add_argument('-kb', '--keep-build', default=False, action='store_true', help='Do not remove the build folder in local cache. ' 'Implies --keep-source. ' 'Use this for testing purposes only') parser.add_argument("-ne", "--not-export", default=False, action='store_true', help='Do not export the conanfile.py') parser.add_argument("-tbf", "--test-build-folder", action=OnceArgument, help='Working directory for the build of the test project.') parser.add_argument("-tf", "--test-folder", action=OnceArgument, help='Alternative test folder name. By default it is "test_package". ' 'Use "None" to skip the test stage') parser.add_argument("--ignore-dirty", default=False, action='store_true', help='When using the "scm" feature with "auto" values, capture the' ' revision and url even if there are uncommitted changes') _add_manifests_arguments(parser) _add_common_install_arguments(parser, build_help=_help_build_policies.format("package name")) args = parser.parse_args(*args) self._warn_python_version() name, version, user, channel, _ = get_reference_fields(args.reference, user_channel_input=True) if any([user, channel]) and not all([user, channel]): # Or user/channel or nothing, but not partial raise ConanException("Invalid parameter '%s', " "specify the full reference or user/channel" % args.reference) if args.test_folder == "None": # Now if parameter --test-folder=None (string None) we have to skip tests args.test_folder = False cwd = get_cwd() info = None try: info = self._conan.create(args.path, name, version, user, channel, args.profile, args.settings, args.options, args.env, args.test_folder, args.not_export, args.build, args.keep_source, args.keep_build, args.verify, args.manifests, args.manifests_interactive, args.remote, args.update, test_build_folder=args.test_build_folder, lockfile=args.lockfile, ignore_dirty=args.ignore_dirty) except ConanException as exc: info = exc.info raise finally: if args.json and info: self._outputer.json_output(info, args.json, cwd) def download(self, *args): """ Downloads recipe and binaries to the local cache, without using settings. It works specifying the recipe reference and package ID to be installed. Not transitive, requirements of the specified reference will NOT be retrieved. Useful together with 'conan copy' to automate the promotion of packages to a different user/channel. Only if a reference is specified, it will download all packages from the specified remote. If no remote is specified, it will use the default remote. 
""" parser = argparse.ArgumentParser(description=self.download.__doc__, prog="conan download", formatter_class=SmartFormatter) parser.add_argument("reference", help='pkg/version@user/channel') parser.add_argument("-p", "--package", nargs=1, action=Extender, help='Force install specified package ID (ignore settings/options)' ' [DEPRECATED: use full reference instead]') parser.add_argument("-r", "--remote", help='look in the specified remote server', action=OnceArgument) parser.add_argument("-re", "--recipe", help='Downloads only the recipe', default=False, action="store_true") args = parser.parse_args(*args) try: pref = PackageReference.loads(args.reference, validate=True) except ConanException: reference = args.reference packages_list = args.package if packages_list: self._out.warn("Usage of `--package` argument is deprecated." " Use a full reference instead: " "`conan download [...] {}:{}`".format(reference, packages_list[0])) else: reference = repr(pref.ref) if pref.ref.user is None: if pref.ref.revision: reference = "%s/%s@#%s" % (pref.ref.name, pref.ref.version, pref.ref.revision) else: reference += "@" pkgref = "{}#{}".format(pref.id, pref.revision) if pref.revision else pref.id packages_list = [pkgref] if args.package: raise ConanException("Use a full package reference (preferred) or the `--package`" " command argument, but not both.") self._warn_python_version() return self._conan.download(reference=reference, packages=packages_list, remote_name=args.remote, recipe=args.recipe) def install(self, *args): """ Installs the requirements specified in a recipe (conanfile.py or conanfile.txt). It can also be used to install a concrete package specifying a reference. If any requirement is not found in the local cache, it will retrieve the recipe from a remote, looking for it sequentially in the configured remotes. When the recipes have been downloaded it will try to download a binary package matching the specified settings, only from the remote from which the recipe was retrieved. If no binary package is found, it can be built from sources using the '--build' option. When the package is installed, Conan will write the files for the specified generators. """ parser = argparse.ArgumentParser(description=self.install.__doc__, prog="conan install", formatter_class=SmartFormatter) parser.add_argument("path_or_reference", help="Path to a folder containing a recipe" " (conanfile.py or conanfile.txt) or to a recipe file. e.g., " "./my_project/conanfile.txt. It could also be a reference") parser.add_argument("reference", nargs="?", help='Reference for the conanfile path of the first argument: ' 'user/channel, version@user/channel or pkg/version@user/channel' '(if name or version declared in conanfile.py, they should match)') parser.add_argument("-g", "--generator", nargs=1, action=Extender, help='Generators to use') parser.add_argument("-if", "--install-folder", action=OnceArgument, help='Use this directory as the directory where to put the generator' 'files. 
e.g., conaninfo/conanbuildinfo.txt') _add_manifests_arguments(parser) parser.add_argument("--no-imports", action='store_true', default=False, help='Install specified packages but avoid running imports') parser.add_argument("-j", "--json", default=None, action=OnceArgument, help='Path to a json file where the install information will be ' 'written') _add_common_install_arguments(parser, build_help=_help_build_policies.format("never")) args = parser.parse_args(*args) cwd = get_cwd() # We need @ otherwise it could be a path, so check strict path_is_reference = check_valid_ref(args.path_or_reference) info = None try: if not path_is_reference: name, version, user, channel, _ = get_reference_fields(args.reference, user_channel_input=True) info = self._conan.install(path=args.path_or_reference, name=name, version=version, user=user, channel=channel, settings=args.settings, options=args.options, env=args.env, remote_name=args.remote, verify=args.verify, manifests=args.manifests, manifests_interactive=args.manifests_interactive, build=args.build, profile_names=args.profile, update=args.update, generators=args.generator, no_imports=args.no_imports, install_folder=args.install_folder, lockfile=args.lockfile) else: if args.reference: raise ConanException("A full reference was provided as first argument, second " "argument not allowed") ref = ConanFileReference.loads(args.path_or_reference, validate=False) manifest_interactive = args.manifests_interactive info = self._conan.install_reference(ref, settings=args.settings, options=args.options, env=args.env, remote_name=args.remote, verify=args.verify, manifests=args.manifests, manifests_interactive=manifest_interactive, build=args.build, profile_names=args.profile, update=args.update, generators=args.generator, install_folder=args.install_folder, lockfile=args.lockfile) except ConanException as exc: info = exc.info raise finally: if args.json and info: self._outputer.json_output(info, args.json, cwd) def config(self, *args): """ Manages Conan configuration. Used to edit conan.conf, or install config files. 
""" parser = argparse.ArgumentParser(description=self.config.__doc__, prog="conan config", formatter_class=SmartFormatter) subparsers = parser.add_subparsers(dest='subcommand', help='sub-command help') subparsers.required = True get_subparser = subparsers.add_parser('get', help='Get the value of configuration item') home_subparser = subparsers.add_parser('home', help='Retrieve the Conan home directory') install_subparser = subparsers.add_parser('install', help='Install a full configuration ' 'from a local or remote zip file') rm_subparser = subparsers.add_parser('rm', help='Remove an existing config element') set_subparser = subparsers.add_parser('set', help='Set a value for a configuration item') get_subparser.add_argument("item", nargs="?", help="Item to print") home_subparser.add_argument("-j", "--json", default=None, action=OnceArgument, help='json file path where the config home will be written to') install_subparser.add_argument("item", nargs="?", help="git repository, local folder or zip file (local or " "http) where the configuration is stored") install_subparser.add_argument("--verify-ssl", nargs="?", default="True", help='Verify SSL connection when downloading file') install_subparser.add_argument("--type", "-t", choices=["git"], help='Type of remote config') install_subparser.add_argument("--args", "-a", help='String with extra arguments for "git clone"') install_subparser.add_argument("-sf", "--source-folder", help='Install files only from a source subfolder from the ' 'specified origin') install_subparser.add_argument("-tf", "--target-folder", help='Install to that path in the conan cache') rm_subparser.add_argument("item", help="Item to remove") set_subparser.add_argument("item", help="'item=value' to set") args = parser.parse_args(*args) if args.subcommand == "set": try: key, value = args.item.split("=", 1) except ValueError: if "hooks." in args.item: key, value = args.item.split("=", 1)[0], None else: raise ConanException("Please specify 'key=value'") return self._conan.config_set(key, value) elif args.subcommand == "get": return self._conan.config_get(args.item) elif args.subcommand == "rm": return self._conan.config_rm(args.item) elif args.subcommand == "home": conan_home = self._conan.config_home() self._out.info(conan_home) if args.json: self._outputer.json_output({"home": conan_home}, args.json, os.getcwd()) return conan_home elif args.subcommand == "install": verify_ssl = get_bool_from_text(args.verify_ssl) return self._conan.config_install(args.item, verify_ssl, args.type, args.args, source_folder=args.source_folder, target_folder=args.target_folder) def info(self, *args): """ Gets information about the dependency graph of a recipe. It can be used with a recipe or a reference for any existing package in your local cache. """ info_only_options = ["id", "build_id", "remote", "url", "license", "requires", "update", "required", "date", "author", "description", "None"] path_only_options = ["export_folder", "build_folder", "package_folder", "source_folder"] str_path_only_options = ", ".join(['"%s"' % field for field in path_only_options]) str_only_options = ", ".join(['"%s"' % field for field in info_only_options]) parser = argparse.ArgumentParser(description=self.info.__doc__, prog="conan info", formatter_class=SmartFormatter) parser.add_argument("path_or_reference", help="Path to a folder containing a recipe" " (conanfile.py or conanfile.txt) or to a recipe file. e.g., " "./my_project/conanfile.txt. 
It could also be a reference") parser.add_argument("--paths", action='store_true', default=False, help='Show package paths in local cache') parser.add_argument("-bo", "--build-order", help="given a modified reference, return an ordered list to build (CI)." " [DEPRECATED: use 'conan graph build-order ...' instead]", nargs=1, action=Extender) parser.add_argument("-g", "--graph", action=OnceArgument, help='Creates file with project dependencies graph. It will generate ' 'a DOT or HTML file depending on the filename extension') parser.add_argument("-if", "--install-folder", action=OnceArgument, help="local folder containing the conaninfo.txt and conanbuildinfo.txt " "files (from a previous conan install execution). Defaulted to " "current folder, unless --profile, -s or -o is specified. If you " "specify both install-folder and any setting/option " "it will raise an error.") parser.add_argument("-j", "--json", nargs='?', const="1", type=str, help='Path to a json file where the information will be written') parser.add_argument("-n", "--only", nargs=1, action=Extender, help="Show only the specified fields: %s. '--paths' information can " "also be filtered with options %s. Use '--only None' to show only " "references." % (str_only_options, str_path_only_options)) parser.add_argument("--package-filter", nargs='?', help='Print information only for packages that match the filter pattern' ' e.g., MyPackage/1.2@user/channel or MyPackage*') dry_build_help = ("Apply the --build argument to output the information, " "as it would be done by the install command") parser.add_argument("-db", "--dry-build", action=Extender, nargs="?", help=dry_build_help) build_help = ("Given a build policy, return an ordered list of packages that would be built" " from sources during the install command") _add_common_install_arguments(parser, build_help=build_help) args = parser.parse_args(*args) if args.build_order: self._out.warn("Usage of `--build-order` argument is deprecated and can return" " wrong results. 
Use `conan graph build-order ...` instead.") if args.install_folder and (args.profile or args.settings or args.options or args.env): raise ArgumentError(None, "--install-folder cannot be used together with -s, -o, -e or -pr") if args.build_order and args.graph: raise ArgumentError(None, "--build-order cannot be used together with --graph") # BUILD ORDER ONLY if args.build_order: ret = self._conan.info_build_order(args.path_or_reference, settings=args.settings, options=args.options, env=args.env, profile_names=args.profile, remote_name=args.remote, build_order=args.build_order, check_updates=args.update, install_folder=args.install_folder) if args.json: json_arg = True if args.json == "1" else args.json self._outputer.json_build_order(ret, json_arg, get_cwd()) else: self._outputer.build_order(ret) # INSTALL SIMULATION, NODES TO INSTALL elif args.build is not None: nodes, _ = self._conan.info_nodes_to_build(args.path_or_reference, build_modes=args.build, settings=args.settings, options=args.options, env=args.env, profile_names=args.profile, remote_name=args.remote, check_updates=args.update, install_folder=args.install_folder) if args.json: json_arg = True if args.json == "1" else args.json self._outputer.json_nodes_to_build(nodes, json_arg, get_cwd()) else: self._outputer.nodes_to_build(nodes) # INFO ABOUT DEPS OF CURRENT PROJECT OR REFERENCE else: data = self._conan.info(args.path_or_reference, remote_name=args.remote, settings=args.settings, options=args.options, env=args.env, profile_names=args.profile, update=args.update, install_folder=args.install_folder, build=args.dry_build, lockfile=args.lockfile) deps_graph, _ = data only = args.only if args.only == ["None"]: only = [] if only and args.paths and (set(only) - set(path_only_options)): raise ConanException("Invalid --only value '%s' with --path specified, allowed " "values: [%s]." % (only, str_path_only_options)) elif only and not args.paths and (set(only) - set(info_only_options)): raise ConanException("Invalid --only value '%s', allowed values: [%s].\n" "Use --only=None to show only the references." % (only, str_only_options)) if args.graph: self._outputer.info_graph(args.graph, deps_graph, get_cwd()) if args.json: json_arg = True if args.json == "1" else args.json self._outputer.json_info(deps_graph, json_arg, get_cwd(), show_paths=args.paths) if not args.graph and not args.json: self._outputer.info(deps_graph, only, args.package_filter, args.paths) def source(self, *args): """ Calls your local conanfile.py 'source()' method. Usually downloads and uncompresses the package sources. """ parser = argparse.ArgumentParser(description=self.source.__doc__, prog="conan source", formatter_class=SmartFormatter) parser.add_argument("path", help=_PATH_HELP) parser.add_argument("-sf", "--source-folder", action=OnceArgument, help='Destination directory. Defaulted to current directory') parser.add_argument("-if", "--install-folder", action=OnceArgument, help=_INSTALL_FOLDER_HELP + " Optional, source method will run without " "the information retrieved from the conaninfo.txt and " "conanbuildinfo.txt, only required when using conditional source() " "based on settings, options, env_info and user_info") args = parser.parse_args(*args) try: if "@" in args.path and ConanFileReference.loads(args.path): raise ArgumentError(None, "'conan source' doesn't accept a reference anymore. 
" "If you were using it as a concurrency workaround, " "you can call 'conan install' simultaneously from several " "different processes, the concurrency is now natively supported" ". The path parameter should be a folder containing a " "conanfile.py file.") except ConanException: pass self._warn_python_version() return self._conan.source(args.path, args.source_folder, args.install_folder) def build(self, *args): """ Calls your local conanfile.py 'build()' method. The recipe will be built in the local directory specified by --build-folder, reading the sources from --source-folder. If you are using a build helper, like CMake(), the --package-folder will be configured as the destination folder for the install step. """ parser = argparse.ArgumentParser(description=self.build.__doc__, prog="conan build", formatter_class=SmartFormatter) parser.add_argument("path", help=_PATH_HELP) parser.add_argument("-b", "--build", default=None, action="store_true", help="Execute the build step (variable should_build=True). When " "specified, configure/install/test won't run unless " "--configure/--install/--test specified") parser.add_argument("-bf", "--build-folder", action=OnceArgument, help=_BUILD_FOLDER_HELP) parser.add_argument("-c", "--configure", default=None, action="store_true", help="Execute the configuration step (variable should_configure=True). " "When specified, build/install/test won't run unless " "--build/--install/--test specified") parser.add_argument("-i", "--install", default=None, action="store_true", help="Execute the install step (variable should_install=True). When " "specified, configure/build/test won't run unless " "--configure/--build/--test specified") parser.add_argument("-t", "--test", default=None, action="store_true", help="Execute the test step (variable should_test=True). When " "specified, configure/build/install won't run unless " "--configure/--build/--install specified") parser.add_argument("-if", "--install-folder", action=OnceArgument, help=_INSTALL_FOLDER_HELP) parser.add_argument("-pf", "--package-folder", action=OnceArgument, help="Directory to install the package (when the build system or " "build() method does it). Defaulted to the '{build_folder}/package' " "folder. A relative path can be specified, relative to the current " "folder. Also an absolute path is allowed.") parser.add_argument("-sf", "--source-folder", action=OnceArgument, help=_SOURCE_FOLDER_HELP) args = parser.parse_args(*args) self._warn_python_version() if args.build or args.configure or args.install or args.test: build, config, install, test = (bool(args.build), bool(args.configure), bool(args.install), bool(args.test)) else: build = config = install = test = True return self._conan.build(conanfile_path=args.path, source_folder=args.source_folder, package_folder=args.package_folder, build_folder=args.build_folder, install_folder=args.install_folder, should_configure=config, should_build=build, should_install=install, should_test=test) def package(self, *args): """ Calls your local conanfile.py 'package()' method. This command works in the user space and it will copy artifacts from the --build-folder and --source-folder folder to the --package-folder one. It won't create a new package in the local cache, if you want to do it, use 'conan create' or 'conan export-pkg' after a 'conan build' command. 
""" parser = argparse.ArgumentParser(description=self.package.__doc__, prog="conan package", formatter_class=SmartFormatter) parser.add_argument("path", help=_PATH_HELP) parser.add_argument("-bf", "--build-folder", action=OnceArgument, help=_BUILD_FOLDER_HELP) parser.add_argument("-if", "--install-folder", action=OnceArgument, help=_INSTALL_FOLDER_HELP) parser.add_argument("-pf", "--package-folder", action=OnceArgument, help="folder to install the package. Defaulted to the " "'{build_folder}/package' folder. A relative path can be specified" " (relative to the current directory). Also an absolute path" " is allowed.") parser.add_argument("-sf", "--source-folder", action=OnceArgument, help=_SOURCE_FOLDER_HELP) args = parser.parse_args(*args) try: if "@" in args.path and ConanFileReference.loads(args.path): raise ArgumentError(None, "'conan package' doesn't accept a reference anymore. " "The path parameter should be a conanfile.py or a folder " "containing one. If you were using the 'conan package' " "command for development purposes we recommend to use " "the local development commands: 'conan build' + " "'conan package' and finally 'conan create' to regenerate the " "package, or 'conan export_package' to store the already built " "binaries in the local cache without rebuilding them.") except ConanException: pass self._warn_python_version() return self._conan.package(path=args.path, build_folder=args.build_folder, package_folder=args.package_folder, source_folder=args.source_folder, install_folder=args.install_folder) def imports(self, *args): """ Calls your local conanfile.py or conanfile.txt 'imports' method. It requires to have been previously installed and have a conanbuildinfo.txt generated file in the --install-folder (defaulted to the current directory). """ parser = argparse.ArgumentParser(description=self.imports.__doc__, prog="conan imports", formatter_class=SmartFormatter) parser.add_argument("path", help=_PATH_HELP + " With --undo option, this parameter is the folder " "containing the conan_imports_manifest.txt file generated in a previous" " execution. e.g.: conan imports ./imported_files --undo ") parser.add_argument("-if", "--install-folder", action=OnceArgument, help=_INSTALL_FOLDER_HELP) parser.add_argument("-imf", "--import-folder", action=OnceArgument, help="Directory to copy the artifacts to. By default it will be the" " current directory") parser.add_argument("-u", "--undo", default=False, action="store_true", help="Undo imports. Remove imported files") args = parser.parse_args(*args) if args.undo: return self._conan.imports_undo(args.path) try: if "@" in args.path and ConanFileReference.loads(args.path): raise ArgumentError(None, "Parameter 'path' cannot be a reference. Use a folder " "containing a conanfile.py or conanfile.txt file.") except ConanException: pass self._warn_python_version() return self._conan.imports(args.path, args.import_folder, args.install_folder) def export_pkg(self, *args): """ Exports a recipe, then creates a package from local source and build folders. If '--package-folder' is provided it will copy the files from there, otherwise, it will execute package() method over '--source-folder' and '--build-folder' to create the binary package. 
""" parser = argparse.ArgumentParser(description=self.export_pkg.__doc__, prog="conan export-pkg", formatter_class=SmartFormatter) parser.add_argument("path", help=_PATH_HELP) parser.add_argument("reference", nargs='?', default=None, help="user/channel or pkg/version@user/channel " "(if name and version are not declared in the " "conanfile.py)") parser.add_argument("-bf", "--build-folder", action=OnceArgument, help=_BUILD_FOLDER_HELP) parser.add_argument("-e", "--env", nargs=1, action=Extender, help='Environment variables that will be set during the package build, ' '-e CXX=/usr/bin/clang++') parser.add_argument('-f', '--force', default=False, action='store_true', help='Overwrite existing package if existing') parser.add_argument("-if", "--install-folder", action=OnceArgument, help=_INSTALL_FOLDER_HELP + " If these files are found in the specified" " folder and any of '-e', '-o', '-pr' or '-s' arguments are used, it " "will raise an error.") parser.add_argument("-o", "--options", nargs=1, action=Extender, help='Define options values, e.g., -o pkg:with_qt=True') parser.add_argument("-pr", "--profile", action=Extender, help='Profile for this package') parser.add_argument("-pf", "--package-folder", action=OnceArgument, help="folder containing a locally created package. If a value is given," " it won't call the recipe 'package()' method, and will run a copy" " of the provided folder.") parser.add_argument("-s", "--settings", nargs=1, action=Extender, help='Define settings values, e.g., -s compiler=gcc') parser.add_argument("-sf", "--source-folder", action=OnceArgument, help=_SOURCE_FOLDER_HELP) parser.add_argument("-j", "--json", default=None, action=OnceArgument, help='Path to a json file where the install information will be ' 'written') parser.add_argument("-l", "--lockfile", action=OnceArgument, nargs='?', const=".", help="Path to a lockfile or folder containing 'conan.lock' file. " "Lockfile will be updated with the exported package") parser.add_argument("--ignore-dirty", default=False, action='store_true', help='When using the "scm" feature with "auto" values, capture the' ' revision and url even if there are uncommitted changes') args = parser.parse_args(*args) self._warn_python_version() name, version, user, channel, _ = get_reference_fields(args.reference, user_channel_input=True) cwd = os.getcwd() info = None try: info = self._conan.export_pkg(conanfile_path=args.path, name=name, version=version, source_folder=args.source_folder, build_folder=args.build_folder, package_folder=args.package_folder, install_folder=args.install_folder, profile_names=args.profile, env=args.env, settings=args.settings, options=args.options, force=args.force, user=user, channel=channel, lockfile=args.lockfile, ignore_dirty=args.ignore_dirty) except ConanException as exc: info = exc.info raise finally: if args.json and info: self._outputer.json_output(info, args.json, cwd) def export(self, *args): """ Copies the recipe (conanfile.py & associated files) to your local cache. Use the 'reference' param to specify a user and channel where to export it. Once the recipe is in the local cache it can be shared, reused and to any remote with the 'conan upload' command. 
""" parser = argparse.ArgumentParser(description=self.export.__doc__, prog="conan export", formatter_class=SmartFormatter) parser.add_argument("path", help=_PATH_HELP) parser.add_argument("reference", nargs='?', default=None, help="user/channel, or Pkg/version@user/channel (if name " "and version are not declared in the conanfile.py") parser.add_argument('-k', '-ks', '--keep-source', default=False, action='store_true', help=_KEEP_SOURCE_HELP) parser.add_argument("-l", "--lockfile", action=OnceArgument, nargs='?', const=".", help="Path to a lockfile or folder containing 'conan.lock' file. " "Lockfile will be updated with the exported package") parser.add_argument("--ignore-dirty", default=False, action='store_true', help='When using the "scm" feature with "auto" values, capture the' ' revision and url even if there are uncommitted changes') args = parser.parse_args(*args) self._warn_python_version() name, version, user, channel, _ = get_reference_fields(args.reference, user_channel_input=True) if any([user, channel]) and not all([user, channel]): # Or user/channel or nothing, but not partial raise ConanException("Invalid parameter '%s', " "specify the full reference or user/channel" % args.reference) return self._conan.export(path=args.path, name=name, version=version, user=user, channel=channel, keep_source=args.keep_source, lockfile=args.lockfile, ignore_dirty=args.ignore_dirty) def remove(self, *args): """ Removes packages or binaries matching pattern from local cache or remote. It can also be used to remove the temporary source or build folders in the local conan cache. If no remote is specified, the removal will be done by default in the local conan cache. """ parser = argparse.ArgumentParser(description=self.remove.__doc__, prog="conan remove", formatter_class=SmartFormatter) parser.add_argument('pattern_or_reference', nargs="?", help=_PATTERN_OR_REFERENCE_HELP) parser.add_argument('-b', '--builds', nargs="*", action=Extender, help=("By default, remove all the build folders or select one, " "specifying the package ID")) parser.add_argument('-f', '--force', default=False, action='store_true', help='Remove without requesting a confirmation') parser.add_argument("-l", "--locks", default=False, action="store_true", help="Remove locks") parser.add_argument("-o", "--outdated", default=False, action="store_true", help="Remove only outdated from recipe packages. 
" "This flag can only be used with a reference") parser.add_argument('-p', '--packages', nargs="*", action=Extender, help="Remove all packages of the specified reference if " "no specific package ID is provided") parser.add_argument('-q', '--query', default=None, action=OnceArgument, help=_QUERY_HELP) parser.add_argument('-r', '--remote', action=OnceArgument, help='Will remove from the specified remote') parser.add_argument('-s', '--src', default=False, action="store_true", help='Remove source folders') parser.add_argument('-t', '--system-reqs', default=False, action="store_true", help='Remove system_reqs folders') args = parser.parse_args(*args) self._warn_python_version() if args.packages is not None and args.query: raise ConanException("'-q' and '-p' parameters can't be used at the same time") if args.builds is not None and args.query: raise ConanException("'-q' and '-b' parameters can't be used at the same time") if args.outdated and not args.pattern_or_reference: raise ConanException("'--outdated' argument can only be used with a reference") if args.locks: if args.pattern_or_reference: raise ConanException("Specifying a pattern is not supported when removing locks") self._conan.remove_locks() self._out.info("Cache locks removed") return elif args.system_reqs: if args.packages: raise ConanException("'-t' and '-p' parameters can't be used at the same time") if not args.pattern_or_reference: raise ConanException("Please specify a valid pattern or reference to be cleaned") if check_valid_ref(args.pattern_or_reference): return self._conan.remove_system_reqs(args.pattern_or_reference) return self._conan.remove_system_reqs_by_pattern(args.pattern_or_reference) else: if not args.pattern_or_reference: raise ConanException('Please specify a pattern to be removed ("*" for all)') return self._conan.remove(pattern=args.pattern_or_reference, query=args.query, packages=args.packages, builds=args.builds, src=args.src, force=args.force, remote_name=args.remote, outdated=args.outdated) def copy(self, *args): """ Copies conan recipes and packages to another user/channel. Useful to promote packages (e.g. from "beta" to "stable") or transfer them from one user to another. """ parser = argparse.ArgumentParser(description=self.copy.__doc__, prog="conan copy", formatter_class=SmartFormatter) parser.add_argument("reference", default="", help='package reference. e.g., MyPackage/1.2@user/channel') parser.add_argument("user_channel", default="", help='Destination user/channel. e.g., lasote/testing') parser.add_argument("-p", "--package", nargs=1, action=Extender, help='copy specified package ID ' '[DEPRECATED: use full reference instead]') parser.add_argument("--all", action='store_true', default=False, help='Copy all packages from the specified package recipe') parser.add_argument("--force", action='store_true', default=False, help='Override destination packages and the package recipe') args = parser.parse_args(*args) try: pref = PackageReference.loads(args.reference, validate=True) except ConanException: reference = args.reference packages_list = args.package if packages_list: self._out.warn("Usage of `--package` argument is deprecated." " Use a full reference instead: " "`conan copy [...] 
{}:{}`".format(reference, packages_list[0])) if args.all and packages_list: raise ConanException("Cannot specify both --all and --package") else: reference = repr(pref.ref) packages_list = [pref.id] if args.package: raise ConanException("Use a full package reference (preferred) or the `--package`" " command argument, but not both.") if args.all: raise ConanException("'--all' argument cannot be used together with full reference") self._warn_python_version() return self._conan.copy(reference=reference, user_channel=args.user_channel, force=args.force, packages=packages_list or args.all) def user(self, *args): """ Authenticates against a remote with user/pass, caching the auth token. Useful to avoid the user and password being requested later. e.g. while you're uploading a package. You can have one user for each remote. Changing the user, or introducing the password is only necessary to perform changes in remote packages. """ # FIXME: Difficult and confusing CLI. Better with: # - conan user clean -> clean users # - conan user list ('remote') -> list users (of a remote) # - conan user auth 'remote' ('user') ('password') -> login a remote (w/o user or pass) # - conan user set 'user' 'remote' -> set user for a remote (not login) necessary?? parser = argparse.ArgumentParser(description=self.user.__doc__, prog="conan user", formatter_class=SmartFormatter) parser.add_argument("name", nargs='?', default=None, help='Username you want to use. If no name is provided it will show the' ' current user') parser.add_argument('-c', '--clean', default=False, action='store_true', help='Remove user and tokens for all remotes') parser.add_argument("-p", "--password", nargs='?', const="", type=str, action=OnceArgument, help='User password. Use double quotes if password with spacing, ' 'and escape quotes if existing. If empty, the password is ' 'requested interactively (not exposed)') parser.add_argument("-r", "--remote", help='Use the specified remote server', action=OnceArgument) parser.add_argument("-j", "--json", default=None, action=OnceArgument, help='json file path where the user list will be written to') parser.add_argument("-s", "--skip-auth", default=False, action='store_true', help='Skips the authentication with the server if there are local ' 'stored credentials. 
It doesn\'t check if the ' 'current credentials are valid or not') args = parser.parse_args(*args) if args.clean and any((args.name, args.remote, args.password, args.json, args.skip_auth)): raise ConanException("'--clean' argument cannot be used together with 'name', " "'--password', '--remote', '--json' or '--skip.auth'") elif args.json and any((args.name, args.password)): raise ConanException("'--json' cannot be used together with 'name' or '--password'") cwd = os.getcwd() info = None try: if args.clean: # clean users self._conan.users_clean() elif not args.name and args.password is None: # list users info = self._conan.users_list(args.remote) self._outputer.print_user_list(info) elif args.password is None: # set user for remote (no password indicated) remote_name, prev_user, user = self._conan.user_set(args.name, args.remote) self._outputer.print_user_set(remote_name, prev_user, user) else: # login a remote remote_name = args.remote or self._conan.get_default_remote().name name = args.name password = args.password remote_name, prev_user, user = self._conan.authenticate(name, remote_name=remote_name, password=password, skip_auth=args.skip_auth) self._outputer.print_user_set(remote_name, prev_user, user) except ConanException as exc: info = exc.info raise finally: if args.json and info: self._outputer.json_output(info, args.json, cwd) def search(self, *args): """ Searches package recipes and binaries in the local cache or a remote. If you provide a pattern, then it will search for existing package recipes matching it. If a full reference is provided (pkg/0.1@user/channel) then the existing binary packages for that reference will be displayed. The default remote is ignored, if no remote is specified, the search will be done in the local cache. Search is case sensitive, the exact case has to be used. For case insensitive file systems, like Windows, case sensitive search can be forced with '--case-sensitive'. """ parser = argparse.ArgumentParser(description=self.search.__doc__, prog="conan search", formatter_class=SmartFormatter) parser.add_argument('pattern_or_reference', nargs='?', help=_PATTERN_OR_REFERENCE_HELP) parser.add_argument('-o', '--outdated', default=False, action='store_true', help="Show only outdated from recipe packages. " "This flag can only be used with a reference") parser.add_argument('-q', '--query', default=None, action=OnceArgument, help=_QUERY_HELP) parser.add_argument('-r', '--remote', action=OnceArgument, help="Remote to search in. '-r all' searches all remotes") parser.add_argument('--case-sensitive', default=False, action='store_true', help='Make a case-sensitive search. Use it to guarantee ' 'case-sensitive ' 'search in Windows or other case-insensitive file systems') parser.add_argument('--raw', default=False, action='store_true', help='Print just the list of recipes') parser.add_argument('--table', action=OnceArgument, help="Outputs html file with a table of binaries. 
Only valid for a " "reference search") parser.add_argument("-j", "--json", default=None, action=OnceArgument, help='json file path where the search information will be written to') parser.add_argument("-rev", "--revisions", default=False, action='store_true', help='Get a list of revisions for a reference or a ' 'package reference.') args = parser.parse_args(*args) if args.table and args.json: raise ConanException("'--table' argument cannot be used together with '--json'") # Searching foo/bar is considered a pattern (FIXME: 2.0) so use strict mode to disambiguate is_reference = check_valid_ref(args.pattern_or_reference) if is_reference: ref = ConanFileReference.loads(args.pattern_or_reference) else: ref = None if args.query: raise ConanException("-q parameter only allowed with a valid recipe reference, " "not with a pattern") cwd = os.getcwd() info = None try: if args.revisions: # Show revisions of a ref if ref: info = self._conan.get_recipe_revisions(repr(ref), remote_name=args.remote) self._outputer.print_revisions(ref, info, args.raw, remote_name=args.remote) return # Show revisions of pref try: pref = PackageReference.loads(args.pattern_or_reference) except (TypeError, ConanException, AttributeError): pass else: info = self._conan.get_package_revisions(repr(pref), remote_name=args.remote) self._outputer.print_revisions(ref, info, args.raw, remote_name=args.remote) return # A pattern: Listing references by pattern but showing revisions if args.remote: exc_msg = "With --revision, specify a reference (e.g {ref}) " \ "a valid pattern " \ "or a package reference with " \ "recipe revision (e.g {ref}#3453453453:" \ "d50a0d523d98c15bb147b18f" \ "a7d203887c38be8b)".format(ref=_REFERENCE_EXAMPLE) raise ConanException(exc_msg) info = self._conan.search_recipes(args.pattern_or_reference, remote_name=None, case_sensitive=args.case_sensitive, fill_revisions=True) self._outputer.print_search_references(info["results"], args.pattern_or_reference, args.raw, all_remotes_search=None) return if ref: info = self._conan.search_packages(repr(ref), query=args.query, remote_name=args.remote, outdated=args.outdated) # search is done for one reference self._outputer.print_search_packages(info["results"], ref, args.query, args.table, args.raw, outdated=args.outdated) else: if args.table: raise ConanException("'--table' argument can only be used with a reference") elif args.outdated: raise ConanException("'--outdated' argument can only be used with a reference") info = self._conan.search_recipes(args.pattern_or_reference, remote_name=args.remote, case_sensitive=args.case_sensitive) # Deprecate 2.0: Dirty check if search is done for all remotes or for remote "all" try: remote_all = self._conan.get_remote_by_name("all") except NoRemoteAvailable: remote_all = None all_remotes_search = (remote_all is None and args.remote == "all") self._outputer.print_search_references(info["results"], args.pattern_or_reference, args.raw, all_remotes_search) except ConanException as exc: info = exc.info raise finally: if args.json and info: self._outputer.json_output(info, args.json, cwd) def upload(self, *args): """ Uploads a recipe and binary packages to a remote. If no remote is specified, the first configured remote (by default conan-center, use 'conan remote list' to list the remotes) will be used. 
""" parser = argparse.ArgumentParser(description=self.upload.__doc__, prog="conan upload", formatter_class=SmartFormatter) parser.add_argument('pattern_or_reference', help=_PATTERN_REF_OR_PREF_HELP) parser.add_argument("-p", "--package", default=None, help="Package ID [DEPRECATED: use full reference instead]", action=OnceArgument) parser.add_argument('-q', '--query', default=None, action=OnceArgument, help="Only upload packages matching a specific query. " + _QUERY_HELP) parser.add_argument("-r", "--remote", action=OnceArgument, help='upload to this specific remote') parser.add_argument("--all", action='store_true', default=False, help='Upload both package recipe and packages') parser.add_argument("--skip-upload", action='store_true', default=False, help='Do not upload anything, just run the checks and the compression') parser.add_argument("--force", action='store_true', default=False, help='Do not check conan recipe date, override remote with local') parser.add_argument("--check", action='store_true', default=False, help='Perform an integrity check, using the manifests, before upload') parser.add_argument('-c', '--confirm', default=False, action='store_true', help='Upload all matching recipes without confirmation') parser.add_argument('--retry', default=None, type=int, action=OnceArgument, help="In case of fail retries to upload again the specified times.") parser.add_argument('--retry-wait', default=None, type=int, action=OnceArgument, help='Waits specified seconds before retry again') parser.add_argument("-no", "--no-overwrite", nargs="?", type=str, choices=["all", "recipe"], action=OnceArgument, const="all", help="Uploads package only if recipe is the same as the remote one") parser.add_argument("-j", "--json", default=None, action=OnceArgument, help='json file path where the upload information will be written to') parser.add_argument("--parallel", action='store_true', default=False, help='Upload files in parallel using multiple threads ' 'The default number of launched threads is 8') args = parser.parse_args(*args) try: pref = PackageReference.loads(args.pattern_or_reference, validate=True) except ConanException: reference = args.pattern_or_reference package_id = args.package if package_id: self._out.warn("Usage of `--package` argument is deprecated." " Use a full reference instead: " "`conan upload [...] 
{}:{}`".format(reference, package_id)) if args.query and package_id: raise ConanException("'--query' argument cannot be used together with '--package'") else: reference = repr(pref.ref) package_id = "{}#{}".format(pref.id, pref.revision) if pref.revision else pref.id if args.package: raise ConanException("Use a full package reference (preferred) or the `--package`" " command argument, but not both.") if args.query: raise ConanException("'--query' argument cannot be used together with " "full reference") if args.force and args.no_overwrite: raise ConanException("'--no-overwrite' argument cannot be used together with '--force'") if args.force and args.skip_upload: raise ConanException("'--skip-upload' argument cannot be used together with '--force'") if args.no_overwrite and args.skip_upload: raise ConanException("'--skip-upload' argument cannot be used together " "with '--no-overwrite'") self._warn_python_version() if args.force: policy = UPLOAD_POLICY_FORCE elif args.no_overwrite == "all": policy = UPLOAD_POLICY_NO_OVERWRITE elif args.no_overwrite == "recipe": policy = UPLOAD_POLICY_NO_OVERWRITE_RECIPE elif args.skip_upload: policy = UPLOAD_POLICY_SKIP else: policy = None info = None try: info = self._conan.upload(pattern=reference, package=package_id, query=args.query, remote_name=args.remote, all_packages=args.all, policy=policy, confirm=args.confirm, retry=args.retry, retry_wait=args.retry_wait, integrity_check=args.check, parallel_upload=args.parallel) except ConanException as exc: info = exc.info raise finally: if args.json and info: self._outputer.json_output(info, args.json, os.getcwd()) def remote(self, *args): """ Manages the remote list and the package recipes associated with a remote. """ parser = argparse.ArgumentParser(description=self.remote.__doc__, prog="conan remote", formatter_class=SmartFormatter) subparsers = parser.add_subparsers(dest='subcommand', help='sub-command help') subparsers.required = True # create the parser for the "a" command parser_list = subparsers.add_parser('list', help='List current remotes') parser_list.add_argument("-raw", "--raw", action='store_true', default=False, help='Raw format. Valid for "remotes.txt" file for ' '"conan config install"') parser_add = subparsers.add_parser('add', help='Add a remote') parser_add.add_argument('remote', help='Name of the remote') parser_add.add_argument('url', help='URL of the remote') parser_add.add_argument('verify_ssl', nargs="?", default="True", help='Verify SSL certificated. Default True') parser_add.add_argument("-i", "--insert", nargs="?", const=0, type=int, action=OnceArgument, help="insert remote at specific index") parser_add.add_argument("-f", "--force", default=False, action='store_true', help="Force addition, will update if existing") parser_rm = subparsers.add_parser('remove', help='Remove a remote') parser_rm.add_argument('remote', help='Name of the remote') parser_upd = subparsers.add_parser('update', help='Update the remote url') parser_upd.add_argument('remote', help='Name of the remote') parser_upd.add_argument('url', help='URL') parser_upd.add_argument('verify_ssl', nargs="?", default="True", help='Verify SSL certificated. 
Default True') parser_upd.add_argument("-i", "--insert", nargs="?", const=0, type=int, action=OnceArgument, help="Insert remote at specific index") parser_rename = subparsers.add_parser('rename', help='Update the remote name') parser_rename.add_argument('remote', help='The old remote name') parser_rename.add_argument('new_remote', help='The new remote name') subparsers.add_parser('list_ref', help='List the package recipes and their associated remotes') parser_padd = subparsers.add_parser('add_ref', help="Associate a recipe's reference to a remote") parser_padd.add_argument('reference', help='Package recipe reference') parser_padd.add_argument('remote', help='Name of the remote') parser_prm = subparsers.add_parser('remove_ref', help="Dissociate a recipe's reference and its remote") parser_prm.add_argument('reference', help='Package recipe reference') parser_pupd = subparsers.add_parser('update_ref', help="Update the remote associated with " "a package recipe") parser_pupd.add_argument('reference', help='Package recipe reference') parser_pupd.add_argument('remote', help='Name of the remote') list_pref = subparsers.add_parser('list_pref', help='List the package binaries and ' 'their associated remotes') list_pref.add_argument('reference', help='Package recipe reference') add_pref = subparsers.add_parser('add_pref', help="Associate a package reference to a remote") add_pref.add_argument('package_reference', help='Binary package reference') add_pref.add_argument('remote', help='Name of the remote') remove_pref = subparsers.add_parser('remove_pref', help="Dissociate a package's reference " "and its remote") remove_pref.add_argument('package_reference', help='Binary package reference') update_pref = subparsers.add_parser('update_pref', help="Update the remote associated with " "a binary package") update_pref.add_argument('package_reference', help='Binary package reference') update_pref.add_argument('remote', help='Name of the remote') subparsers.add_parser('clean', help="Clean the list of remotes and all " "recipe-remote associations") parser_enable = subparsers.add_parser('enable', help='Enable a remote') parser_enable.add_argument('remote', help='Name of the remote') parser_disable = subparsers.add_parser('disable', help='Disable a remote') parser_disable.add_argument('remote', help='Name of the remote') args = parser.parse_args(*args) reference = args.reference if hasattr(args, 'reference') else None package_reference = args.package_reference if hasattr(args, 'package_reference') else None verify_ssl = get_bool_from_text(args.verify_ssl) if hasattr(args, 'verify_ssl') else False remote_name = args.remote if hasattr(args, 'remote') else None new_remote = args.new_remote if hasattr(args, 'new_remote') else None url = args.url if hasattr(args, 'url') else None if args.subcommand == "list": remotes = self._conan.remote_list() self._outputer.remote_list(remotes, args.raw) elif args.subcommand == "add": return self._conan.remote_add(remote_name, url, verify_ssl, args.insert, args.force) elif args.subcommand == "remove": return self._conan.remote_remove(remote_name) elif args.subcommand == "rename": return self._conan.remote_rename(remote_name, new_remote) elif args.subcommand == "update": return self._conan.remote_update(remote_name, url, verify_ssl, args.insert) elif args.subcommand == "list_ref": refs = self._conan.remote_list_ref() self._outputer.remote_ref_list(refs) elif args.subcommand == "add_ref": return self._conan.remote_add_ref(reference, remote_name) elif args.subcommand == "remove_ref":
return self._conan.remote_remove_ref(reference) elif args.subcommand == "update_ref": return self._conan.remote_update_ref(reference, remote_name) elif args.subcommand == "list_pref": refs = self._conan.remote_list_pref(reference) self._outputer.remote_pref_list(refs) elif args.subcommand == "add_pref": return self._conan.remote_add_pref(package_reference, remote_name) elif args.subcommand == "remove_pref": return self._conan.remote_remove_pref(package_reference) elif args.subcommand == "update_pref": return self._conan.remote_update_pref(package_reference, remote_name) elif args.subcommand == "clean": return self._conan.remote_clean() elif args.subcommand == "enable": return self._conan.remote_set_disabled_state(remote_name, False) elif args.subcommand == "disable": return self._conan.remote_set_disabled_state(remote_name, True) def profile(self, *args): """ Lists profiles in the '.conan/profiles' folder, or shows profile details. The 'list' subcommand will always use the default user 'conan/profiles' folder. But the 'show' subcommand can resolve absolute and relative paths, as well as to map names to '.conan/profiles' folder, in the same way as the '--profile' install argument. """ parser = argparse.ArgumentParser(description=self.profile.__doc__, prog="conan profile", formatter_class=SmartFormatter) subparsers = parser.add_subparsers(dest='subcommand') subparsers.required = True # create the parser for the "profile" command parser_list = subparsers.add_parser('list', help='List current profiles') parser_list.add_argument("-j", "--json", default=None, action=OnceArgument, help='json file path where the profile list will be written to') parser_show = subparsers.add_parser('show', help='Show the values defined for a profile') parser_show.add_argument('profile', help="name of the profile in the '.conan/profiles' " "folder or path to a profile file") parser_new = subparsers.add_parser('new', help='Creates a new empty profile') parser_new.add_argument('profile', help="Name for the profile in the '.conan/profiles' " "folder or path and name for a profile file") parser_new.add_argument("--detect", action='store_true', default=False, help='Autodetect settings and fill [settings] section') parser_new.add_argument("--force", action='store_true', default=False, help='Overwrite existing profile if existing') parser_update = subparsers.add_parser('update', help='Update a profile with desired value') parser_update.add_argument('item', help="'item=value' to update. 
e.g., settings.compiler=gcc") parser_update.add_argument('profile', help="Name of the profile in the '.conan/profiles' " "folder or path to a profile file") parser_get = subparsers.add_parser('get', help='Get a profile key') parser_get.add_argument('item', help='Key of the value to get, e.g.: settings.compiler') parser_get.add_argument('profile', help="Name of the profile in the '.conan/profiles' " "folder or path to a profile file") parser_remove = subparsers.add_parser('remove', help='Remove a profile key') parser_remove.add_argument('item', help='key, e.g.: settings.compiler') parser_remove.add_argument('profile', help="Name of the profile in the '.conan/profiles' " "folder or path to a profile file") args = parser.parse_args(*args) profile = args.profile if hasattr(args, 'profile') else None if args.subcommand == "list": profiles = self._conan.profile_list() self._outputer.profile_list(profiles) if args.json: self._outputer.json_output(profiles, args.json, os.getcwd()) elif args.subcommand == "show": profile_text = self._conan.read_profile(profile) self._outputer.print_profile(profile, profile_text) elif args.subcommand == "new": self._conan.create_profile(profile, args.detect, args.force) elif args.subcommand == "update": try: key, value = args.item.split("=", 1) except ValueError: raise ConanException("Please specify key=value") self._conan.update_profile(profile, key, value) elif args.subcommand == "get": key = args.item self._out.writeln(self._conan.get_profile_key(profile, key)) elif args.subcommand == "remove": self._conan.delete_profile_key(profile, args.item) def get(self, *args): """ Gets a file or list a directory of a given reference or package. """ parser = argparse.ArgumentParser(description=self.get.__doc__, prog="conan get", formatter_class=SmartFormatter) parser.add_argument('reference', help=_REF_OR_PREF_HELP) parser.add_argument('path', help='Path to the file or directory. If not specified will get the ' 'conanfile if only a reference is specified and a conaninfo.txt ' 'file contents if the package is also specified', default=None, nargs="?") parser.add_argument("-p", "--package", default=None, help="Package ID [DEPRECATED: use full reference instead]", action=OnceArgument) parser.add_argument("-r", "--remote", action=OnceArgument, help='Get from this specific remote') parser.add_argument("-raw", "--raw", action='store_true', default=False, help='Do not decorate the text') args = parser.parse_args(*args) try: pref = PackageReference.loads(args.reference, validate=True) except ConanException: reference = args.reference package_id = args.package if package_id: self._out.warn("Usage of `--package` argument is deprecated." " Use a full reference instead: " "`conan get [...] {}:{}`".format(reference, package_id)) else: reference = repr(pref.ref) package_id = pref.id if args.package: raise ConanException("Use a full package reference (preferred) or the `--package`" " command argument, but not both.") ret, path = self._conan.get_path(reference, package_id, args.path, args.remote) if isinstance(ret, list): self._outputer.print_dir_list(ret, path, args.raw) else: self._outputer.print_file_contents(ret, path, args.raw) def alias(self, *args): """ Creates and exports an 'alias package recipe'. An "alias" package is a symbolic name (reference) for another package (target). When some package depends on an alias, the target one will be retrieved and used instead, so the alias reference, the symbolic name, does not appear in the final dependency graph. 
""" parser = argparse.ArgumentParser(description=self.alias.__doc__, prog="conan alias", formatter_class=SmartFormatter) parser.add_argument('reference', help='Alias reference. e.g.: mylib/1.X@user/channel') parser.add_argument('target', help='Target reference. e.g.: mylib/1.12@user/channel') args = parser.parse_args(*args) self._warn_python_version() self._conan.export_alias(args.reference, args.target) def workspace(self, *args): """ Manages a workspace (a set of packages consumed from the user workspace that belongs to the same project). Use this command to manage a Conan workspace, use the subcommand 'install' to create the workspace from a file. """ parser = argparse.ArgumentParser(description=self.workspace.__doc__, prog="conan workspace", formatter_class=SmartFormatter) subparsers = parser.add_subparsers(dest='subcommand', help='sub-command help') subparsers.required = True install_parser = subparsers.add_parser('install', help='same as a "conan install" command' ' but using the workspace data from the file. ' 'If no file is provided, it will look for a ' 'file named "conanws.yml"') install_parser.add_argument('path', help='path to workspace definition file (it will look' ' for a "conanws.yml" inside if a directory is' ' given)') _add_common_install_arguments(install_parser, build_help=_help_build_policies.format("never")) install_parser.add_argument("-if", "--install-folder", action=OnceArgument, help="Folder where the workspace files will be created" " (default to current working directory)") args = parser.parse_args(*args) if args.subcommand == "install": self._conan.workspace_install(args.path, args.settings, args.options, args.env, args.remote, args.build, args.profile, args.update, install_folder=args.install_folder) def editable(self, *args): """ Manages editable packages (packages that reside in the user workspace, but are consumed as if they were in the cache). Use the subcommands 'add', 'remove' and 'list' to create, remove or list packages currently installed in this mode. """ parser = argparse.ArgumentParser(description=self.editable.__doc__, prog="conan editable", formatter_class=SmartFormatter) subparsers = parser.add_subparsers(dest='subcommand', help='sub-command help') subparsers.required = True add_parser = subparsers.add_parser('add', help='Put a package in editable mode') add_parser.add_argument('path', help='Path to the package folder in the user workspace') add_parser.add_argument('reference', help='Package reference e.g.: mylib/1.X@user/channel') add_parser.add_argument("-l", "--layout", help='Relative or absolute path to a file containing the layout.' 
' Relative paths will be resolved first relative to current dir, ' 'then to local cache "layouts" folder') remove_parser = subparsers.add_parser('remove', help='Disable editable mode for a package') remove_parser.add_argument('reference', help='Package reference e.g.: mylib/1.X@user/channel') subparsers.add_parser('list', help='List packages in editable mode') args = parser.parse_args(*args) self._warn_python_version() if args.subcommand == "add": self._conan.editable_add(args.path, args.reference, args.layout, cwd=os.getcwd()) self._out.success("Reference '{}' in editable mode".format(args.reference)) elif args.subcommand == "remove": ret = self._conan.editable_remove(args.reference) if ret: self._out.success("Removed editable mode for reference '{}'".format(args.reference)) else: self._out.warn("Reference '{}' was not installed " "as editable".format(args.reference)) elif args.subcommand == "list": for k, v in self._conan.editable_list().items(): self._out.info("%s" % k) self._out.writeln(" Path: %s" % v["path"]) self._out.writeln(" Layout: %s" % v["layout"]) def graph(self, *args): """ Generates and manipulates lock files. """ parser = argparse.ArgumentParser(description=self.graph.__doc__, prog="conan graph", formatter_class=SmartFormatter) subparsers = parser.add_subparsers(dest='subcommand', help='sub-command help') subparsers.required = True # create the parser for the "a" command merge_cmd = subparsers.add_parser('update-lock', help='merge two lockfiles') merge_cmd.add_argument('old_lockfile', help='path to previous lockfile') merge_cmd.add_argument('new_lockfile', help='path to modified lockfile') build_order_cmd = subparsers.add_parser('build-order', help='Returns build-order') build_order_cmd.add_argument('lockfile', help='lockfile folder') build_order_cmd.add_argument("-b", "--build", action=Extender, nargs="?", help=_help_build_policies.format("never")) build_order_cmd.add_argument("--json", action=OnceArgument, help="generate output file in json format") clean_cmd = subparsers.add_parser('clean-modified', help='Clean modified') clean_cmd.add_argument('lockfile', help='lockfile folder') lock_cmd = subparsers.add_parser('lock', help='create a lockfile') lock_cmd.add_argument("path_or_reference", help="Path to a folder containing a recipe" " (conanfile.py or conanfile.txt) or to a recipe file. e.g., " "./my_project/conanfile.txt. It could also be a reference") lock_cmd.add_argument("-l", "--lockfile", action=OnceArgument, help="Path to lockfile to be created. If not specified 'conan.lock'" " will be created in current folder") _add_common_install_arguments(lock_cmd, build_help="Packages to build from source", lockfile=False) args = parser.parse_args(*args) self._warn_python_version() if args.subcommand == "update-lock": self._conan.update_lock(args.old_lockfile, args.new_lockfile) elif args.subcommand == "build-order": build_order = self._conan.build_order(args.lockfile, args.build) self._out.writeln(build_order) if args.json: json_file = _make_abs_path(args.json) save(json_file, json.dumps(build_order, indent=True)) elif args.subcommand == "clean-modified": self._conan.lock_clean_modified(args.lockfile) elif args.subcommand == "lock": self._conan.create_lock(args.path_or_reference, remote_name=args.remote, settings=args.settings, options=args.options, env=args.env, profile_names=args.profile, update=args.update, lockfile=args.lockfile, build=args.build) def _show_help(self): """ Prints a summary of all commands. 
""" grps = [("Consumer commands", ("install", "config", "get", "info", "search")), ("Creator commands", ("new", "create", "upload", "export", "export-pkg", "test")), ("Package development commands", ("source", "build", "package", "editable", "workspace")), ("Misc commands", ("profile", "remote", "user", "imports", "copy", "remove", "alias", "download", "inspect", "help", "graph"))] def check_all_commands_listed(): """Keep updated the main directory, raise if don't""" all_commands = self._commands() all_in_grps = [command for _, command_list in grps for command in command_list] if set(all_in_grps) != set(all_commands): diff = set(all_commands) - set(all_in_grps) raise Exception("Some command is missing in the main help: %s" % ",".join(diff)) return all_commands commands = check_all_commands_listed() max_len = max((len(c) for c in commands)) + 1 fmt = ' %-{}s'.format(max_len) for group_name, comm_names in grps: self._out.writeln(group_name, Color.BRIGHT_MAGENTA) for name in comm_names: # future-proof way to ensure tabular formatting self._out.write(fmt % name, Color.GREEN) # Help will be all the lines up to the first empty one docstring_lines = commands[name].__doc__.split('\n') start = False data = [] for line in docstring_lines: line = line.strip() if not line: if start: break start = True continue data.append(line) import textwrap txt = textwrap.fill(' '.join(data), 80, subsequent_indent=" "*(max_len+2)) self._out.writeln(txt) self._out.writeln("") self._out.writeln('Conan commands. Type "conan <command> -h" for help', Color.BRIGHT_YELLOW) def _commands(self): """ Returns a list of available commands. """ result = {} for m in inspect.getmembers(self, predicate=inspect.ismethod): method_name = m[0] if not method_name.startswith('_'): if "export_pkg" == method_name: method_name = "export-pkg" method = m[1] if method.__doc__ and not method.__doc__.startswith('HIDDEN'): result[method_name] = method return result def _print_similar(self, command): """ Looks for similar commands and prints them if found. """ matches = get_close_matches( word=command, possibilities=self._commands().keys(), n=5, cutoff=0.75) if len(matches) == 0: return if len(matches) > 1: self._out.writeln("The most similar commands are") else: self._out.writeln("The most similar command is") for match in matches: self._out.writeln(" %s" % match) self._out.writeln("") def _warn_python_version(self): import textwrap width = 70 version = sys.version_info if version.major == 2: self._out.writeln("*"*width, front=Color.BRIGHT_RED) self._out.writeln(textwrap.fill("Python 2 is deprecated as of 01/01/2020 and Conan has" " stopped supporting it officially. We strongly recommend" " you to use Python >= 3.5. Conan will completely stop" " working with Python 2 in the following releases", width), front=Color.BRIGHT_RED) self._out.writeln("*"*width, front=Color.BRIGHT_RED) if os.environ.get('USE_UNSUPPORTED_CONAN_WITH_PYTHON_2', 0): # IMPORTANT: This environment variable is not a silver buller. Python 2 is currently deprecated # and some libraries we use as dependencies have stopped supporting it. Conan might fail to run # and we are no longer fixing errors related to Python 2. 
self._out.writeln(textwrap.fill("Python 2 deprecation notice has been bypassed" " by envvar 'USE_UNSUPPORTED_CONAN_WITH_PYTHON_2'", width)) else: self._out.writeln(textwrap.fill("If you really need to run Conan with Python 2 in your" " CI without this interactive input, please contact us" " at [email protected]", width), front=Color.BRIGHT_RED) self._out.writeln("*" * width, front=Color.BRIGHT_RED) self._out.write(textwrap.fill("Understood the risk, keep going [y/N]: ", width, drop_whitespace=False), front=Color.BRIGHT_RED) ret = raw_input().lower() if ret not in ["yes", "ye", "y"]: self._out.writeln(textwrap.fill("Wise choice. Stopping here!", width)) sys.exit(0) elif version.minor == 4: self._out.writeln("*"*width, front=Color.BRIGHT_RED) self._out.writeln(textwrap.fill("Python 3.4 support has been dropped. It is strongly " "recommended to use Python >= 3.5 with Conan", width), front=Color.BRIGHT_RED) self._out.writeln("*"*width, front=Color.BRIGHT_RED) def run(self, *args): """HIDDEN: entry point for executing commands, dispatcher to class methods """ ret_code = SUCCESS try: try: command = args[0][0] except IndexError: # No parameters self._show_help() return False try: commands = self._commands() method = commands[command] except KeyError as exc: if command in ["-v", "--version"]: self._out.success("Conan version %s" % client_version) return False self._warn_python_version() if command in ["-h", "--help"]: self._show_help() return False self._out.writeln( "'%s' is not a Conan command. See 'conan --help'." % command) self._out.writeln("") self._print_similar(command) raise ConanException("Unknown command %s" % str(exc)) method(args[0][1:]) except KeyboardInterrupt as exc: logger.error(exc) ret_code = SUCCESS except SystemExit as exc: if exc.code != 0: logger.error(exc) self._out.error("Exiting with code: %d" % exc.code) ret_code = exc.code except ConanInvalidConfiguration as exc: ret_code = ERROR_INVALID_CONFIGURATION self._out.error(exc) except ConanException as exc: ret_code = ERROR_GENERAL self._out.error(exc) except Exception as exc: import traceback print(traceback.format_exc()) ret_code = ERROR_GENERAL msg = exception_message_safe(exc) self._out.error(msg) return ret_code def _add_manifests_arguments(parser): parser.add_argument("-m", "--manifests", const=default_manifest_folder, nargs="?", help='Install dependencies manifests in folder for later verify.' ' Default folder is .conan_manifests, but can be changed', action=OnceArgument) parser.add_argument("-mi", "--manifests-interactive", const=default_manifest_folder, nargs="?", help='Install dependencies manifests in folder for later verify, ' 'asking user for confirmation. 
' 'Default folder is .conan_manifests, but can be changed', action=OnceArgument) parser.add_argument("-v", "--verify", const=default_manifest_folder, nargs="?", help='Verify dependencies manifests against stored ones', action=OnceArgument) def _add_common_install_arguments(parser, build_help, lockfile=True): if build_help: parser.add_argument("-b", "--build", action=Extender, nargs="?", help=build_help) parser.add_argument("-e", "--env", nargs=1, action=Extender, help='Environment variables that will be set during the package build, ' '-e CXX=/usr/bin/clang++') parser.add_argument("-o", "--options", nargs=1, action=Extender, help='Define options values, e.g., -o Pkg:with_qt=True') parser.add_argument("-pr", "--profile", default=None, action=Extender, help='Apply the specified profile to the install command') parser.add_argument("-r", "--remote", action=OnceArgument, help='Look in the specified remote server') parser.add_argument("-s", "--settings", nargs=1, action=Extender, help='Settings to build the package, overwriting the defaults. e.g., ' '-s compiler=gcc') parser.add_argument("-u", "--update", action='store_true', default=False, help="Check updates exist from upstream remotes") if lockfile: parser.add_argument("-l", "--lockfile", action=OnceArgument, nargs='?', const=".", help="Path to a lockfile or folder containing 'conan.lock' file. " "Lockfile can be updated if packages change") _help_build_policies = '''Optional, specify which packages to build from source. Combining multiple '--build' options on one command line is allowed. For dependencies, the optional 'build_policy' attribute in their conanfile.py takes precedence over the command line parameter. Possible parameters: --build Force build for all packages, do not use binary packages. --build=never Disallow build for all packages, use binary packages or fail if a binary package is not found. Cannot be combined with other '--build' options. --build=missing Build packages from source whose binary package is not found. --build=outdated Build packages from source whose binary package was not generated from the latest recipe or is not found. --build=cascade Build packages from source that have at least one dependency being built from source. --build=[pattern] Build packages from source whose package reference matches the pattern. The pattern uses 'fnmatch' style wildcards. Default behavior: If you omit the '--build' option, the 'build_policy' attribute in conanfile.py will be used if it exists, otherwise the behavior is like '--build={}'. 
''' def main(args): """ main entry point of the conan application, using a Command to parse parameters Exit codes for conan command: 0: Success (done) 1: General ConanException error (done) 2: Migration error 3: Ctrl+C 4: Ctrl+Break 5: SIGTERM 6: Invalid configuration (done) """ try: conan_api, _, _ = Conan.factory() except ConanMigrationError: # Error migrating sys.exit(ERROR_MIGRATION) except ConanException as e: sys.stderr.write("Error in Conan initialization: {}".format(e)) sys.exit(ERROR_GENERAL) command = Command(conan_api) current_dir = get_cwd() try: import signal def ctrl_c_handler(_, __): print('You pressed Ctrl+C!') sys.exit(USER_CTRL_C) def sigterm_handler(_, __): print('Received SIGTERM!') sys.exit(ERROR_SIGTERM) def ctrl_break_handler(_, __): print('You pressed Ctrl+Break!') sys.exit(USER_CTRL_BREAK) signal.signal(signal.SIGINT, ctrl_c_handler) signal.signal(signal.SIGTERM, sigterm_handler) if sys.platform == 'win32': signal.signal(signal.SIGBREAK, ctrl_break_handler) error = command.run(args) finally: os.chdir(current_dir) sys.exit(error)
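The command-module content above resolves the first CLI token to a public method through `_commands()` (built with `inspect.getmembers`) and suggests near-misses via `_print_similar()` using `difflib.get_close_matches`. Below is a minimal, self-contained sketch of that dispatch pattern; the `MiniCLI` class, its toy `install`/`export_pkg` commands and the underscore-to-dash mapping rule are assumptions made for this illustration and are not code from the file above.

import inspect
import sys
from difflib import get_close_matches


class MiniCLI:
    """Toy command runner mirroring the dispatch pattern of the file above."""

    def install(self, *args):
        """Installs packages (toy stand-in that just prints its arguments)."""
        print("install called with", args)

    def export_pkg(self, *args):
        """Exports a package (toy stand-in that just prints its arguments)."""
        print("export-pkg called with", args)

    def _commands(self):
        # Public, documented methods become commands; underscores map to
        # dashes, mirroring the "export_pkg" -> "export-pkg" special case.
        table = {}
        for name, method in inspect.getmembers(self, predicate=inspect.ismethod):
            if name.startswith("_") or not method.__doc__:
                continue
            table[name.replace("_", "-")] = method
        return table

    def run(self, argv):
        commands = self._commands()
        if not argv:
            print("available commands:", ", ".join(sorted(commands)))
            return 0
        name, rest = argv[0], argv[1:]
        method = commands.get(name)
        if method is None:
            # Suggest near-misses instead of failing with a bare error.
            hints = get_close_matches(name, commands, n=3, cutoff=0.6)
            print("unknown command %r, the most similar are: %s" % (name, ", ".join(hints)))
            return 1
        method(*rest)
        return 0


if __name__ == "__main__":
    sys.exit(MiniCLI().run(sys.argv[1:]))

Saved as, say, mini_cli.py, running it with a misspelled command such as "instal" prints the near-miss suggestion and exits non-zero, which is the same user experience the dispatcher above aims for.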
[]
[]
[ "USE_UNSUPPORTED_CONAN_WITH_PYTHON_2" ]
[]
["USE_UNSUPPORTED_CONAN_WITH_PYTHON_2"]
python
1
0
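The bracketed lists, language tag and counters that close this row appear to pair the source file with the environment variable it reads (`USE_UNSUPPORTED_CONAN_WITH_PYTHON_2`, accessed via `os.environ.get` in `_warn_python_version`), counted once as a constant argument. The sketch below is only a guess at how such a list could be derived; the helper name `env_reads` and the restriction to literal `os.environ.get`/`os.getenv` calls are assumptions of this example, not part of the dataset tooling.

import ast


def env_reads(source: str):
    """Collect names passed as string literals to os.environ.get / os.getenv."""
    found = []
    for node in ast.walk(ast.parse(source)):
        if not isinstance(node, ast.Call):
            continue
        func = node.func
        is_environ_get = (
            isinstance(func, ast.Attribute)
            and func.attr == "get"
            and isinstance(func.value, ast.Attribute)
            and func.value.attr == "environ"
        )
        is_getenv = isinstance(func, ast.Attribute) and func.attr == "getenv"
        if (is_environ_get or is_getenv) and node.args:
            first = node.args[0]
            if isinstance(first, ast.Constant) and isinstance(first.value, str):
                found.append(first.value)
    return found


sample = "import os\nflag = os.environ.get('USE_UNSUPPORTED_CONAN_WITH_PYTHON_2', 0)\n"
print(env_reads(sample))  # ['USE_UNSUPPORTED_CONAN_WITH_PYTHON_2']

Subscript access such as os.environ["X"] is deliberately ignored here to keep the sketch short, so a real extractor would need a few more cases.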
integration/client/container_linux_test.go
/* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package client import ( "bytes" "context" "fmt" "io" "os" "path/filepath" "runtime" "strings" "sync" "syscall" "testing" "time" "github.com/containerd/cgroups" cgroupsv2 "github.com/containerd/cgroups/v2" . "github.com/containerd/containerd" "github.com/containerd/containerd/cio" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/oci" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/runtime/linux/runctypes" "github.com/containerd/containerd/runtime/v2/runc/options" "github.com/containerd/containerd/sys" "github.com/opencontainers/runtime-spec/specs-go" exec "golang.org/x/sys/execabs" "golang.org/x/sys/unix" ) const testUserNSImage = "ghcr.io/containerd/alpine:3.14.0" func TestTaskUpdate(t *testing.T) { t.Parallel() client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( ctx, cancel = testContext(t) id = t.Name() ) defer cancel() image, err := client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } limit := int64(32 * 1024 * 1024) memory := func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { s.Linux.Resources.Memory = &specs.LinuxMemory{ Limit: &limit, } return nil } container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30"), memory)) if err != nil { t.Fatal(err) } defer container.Delete(ctx, WithSnapshotCleanup) task, err := container.NewTask(ctx, empty()) if err != nil { t.Fatal(err) } defer task.Delete(ctx) statusC, err := task.Wait(ctx) if err != nil { t.Fatal(err) } var ( cgroup cgroups.Cgroup cgroup2 *cgroupsv2.Manager ) // check that the task has a limit of 32mb if cgroups.Mode() == cgroups.Unified { groupPath, err := cgroupsv2.PidGroupPath(int(task.Pid())) if err != nil { t.Fatal(err) } cgroup2, err = cgroupsv2.LoadManager("/sys/fs/cgroup", groupPath) if err != nil { t.Fatal(err) } stat, err := cgroup2.Stat() if err != nil { t.Fatal(err) } if int64(stat.Memory.UsageLimit) != limit { t.Fatalf("expected memory limit to be set to %d but received %d", limit, stat.Memory.UsageLimit) } } else { cgroup, err = cgroups.Load(cgroups.V1, cgroups.PidPath(int(task.Pid()))) if err != nil { t.Fatal(err) } stat, err := cgroup.Stat(cgroups.IgnoreNotExist) if err != nil { t.Fatal(err) } if int64(stat.Memory.Usage.Limit) != limit { t.Fatalf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit) } } limit = 64 * 1024 * 1024 if err := task.Update(ctx, WithResources(&specs.LinuxResources{ Memory: &specs.LinuxMemory{ Limit: &limit, }, })); err != nil { t.Error(err) } // check that the task has a limit of 64mb if cgroups.Mode() == cgroups.Unified { stat, err := cgroup2.Stat() if err != nil { t.Fatal(err) } if int64(stat.Memory.UsageLimit) != limit { t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.UsageLimit) } } else { stat, err 
:= cgroup.Stat(cgroups.IgnoreNotExist) if err != nil { t.Fatal(err) } if int64(stat.Memory.Usage.Limit) != limit { t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit) } } if err := task.Kill(ctx, unix.SIGKILL); err != nil { t.Fatal(err) } <-statusC } func TestShimInCgroup(t *testing.T) { t.Parallel() client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( ctx, cancel = testContext(t) id = t.Name() ) defer cancel() image, err := client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), oci.WithProcessArgs("sleep", "30"))) if err != nil { t.Fatal(err) } defer container.Delete(ctx, WithSnapshotCleanup) // create a cgroup for the shim to use path := "/containerd/shim" var ( cg cgroups.Cgroup cg2 *cgroupsv2.Manager ) if cgroups.Mode() == cgroups.Unified { cg2, err = cgroupsv2.NewManager("/sys/fs/cgroup", path, &cgroupsv2.Resources{}) if err != nil { t.Fatal(err) } defer cg2.Delete() } else { cg, err = cgroups.New(cgroups.V1, cgroups.StaticPath(path), &specs.LinuxResources{}) if err != nil { t.Fatal(err) } defer cg.Delete() } task, err := container.NewTask(ctx, empty(), WithShimCgroup(path)) if err != nil { t.Fatal(err) } defer task.Delete(ctx) statusC, err := task.Wait(ctx) if err != nil { t.Fatal(err) } // check to see if the shim is inside the cgroup if cgroups.Mode() == cgroups.Unified { processes, err := cg2.Procs(false) if err != nil { t.Fatal(err) } if len(processes) == 0 { t.Errorf("created cgroup should have at least one process inside: %d", len(processes)) } } else { processes, err := cg.Processes(cgroups.Devices, false) if err != nil { t.Fatal(err) } if len(processes) == 0 { t.Errorf("created cgroup should have at least one process inside: %d", len(processes)) } } if err := task.Kill(ctx, unix.SIGKILL); err != nil { t.Fatal(err) } <-statusC } func TestShimDoesNotLeakPipes(t *testing.T) { containerdPid := ctrd.cmd.Process.Pid initialPipes, err := numPipes(containerdPid) if err != nil { t.Fatal(err) } client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( image Image ctx, cancel = testContext(t) id = t.Name() ) defer cancel() image, err = client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30"))) if err != nil { t.Fatal(err) } task, err := container.NewTask(ctx, empty()) if err != nil { t.Fatal(err) } exitChannel, err := task.Wait(ctx) if err != nil { t.Fatal(err) } if err := task.Start(ctx); err != nil { t.Fatal(err) } if err := task.Kill(ctx, syscall.SIGKILL); err != nil { t.Fatal(err) } <-exitChannel if _, err := task.Delete(ctx); err != nil { t.Fatal(err) } if err := container.Delete(ctx, WithSnapshotCleanup); err != nil { t.Fatal(err) } currentPipes, err := numPipes(containerdPid) if err != nil { t.Fatal(err) } if initialPipes != currentPipes { t.Errorf("Pipes have leaked after container has been deleted. 
Initially there were %d pipes, after container deletion there were %d pipes", initialPipes, currentPipes) } } func numPipes(pid int) (int, error) { cmd := exec.Command("sh", "-c", fmt.Sprintf("lsof -p %d | grep FIFO", pid)) var stdout bytes.Buffer cmd.Stdout = &stdout if err := cmd.Run(); err != nil { return 0, err } return strings.Count(stdout.String(), "\n"), nil } func TestDaemonReconnectsToShimIOPipesOnRestart(t *testing.T) { client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( image Image ctx, cancel = testContext(t) id = t.Name() ) defer cancel() image, err = client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30"))) if err != nil { t.Fatal(err) } defer container.Delete(ctx, WithSnapshotCleanup) task, err := container.NewTask(ctx, empty()) if err != nil { t.Fatal(err) } defer task.Delete(ctx) _, err = task.Wait(ctx) if err != nil { t.Fatal(err) } if err := task.Start(ctx); err != nil { t.Fatal(err) } if err := ctrd.Restart(nil); err != nil { t.Fatal(err) } waitCtx, waitCancel := context.WithTimeout(ctx, 2*time.Second) serving, err := client.IsServing(waitCtx) waitCancel() if !serving { t.Fatalf("containerd did not start within 2s: %v", err) } // After we restarted containerd we write some messages to the log pipes, simulating shim writing stuff there. // Then we make sure that these messages are available on the containerd log thus proving that the server reconnected to the log pipes runtimeVersion := getRuntimeVersion() logDirPath := getLogDirPath(runtimeVersion, id) switch runtimeVersion { case "v1": writeToFile(t, filepath.Join(logDirPath, "shim.stdout.log"), fmt.Sprintf("%s writing to stdout\n", id)) writeToFile(t, filepath.Join(logDirPath, "shim.stderr.log"), fmt.Sprintf("%s writing to stderr\n", id)) case "v2": writeToFile(t, filepath.Join(logDirPath, "log"), fmt.Sprintf("%s writing to log\n", id)) } statusC, err := task.Wait(ctx) if err != nil { t.Fatal(err) } if err := task.Kill(ctx, syscall.SIGKILL); err != nil { t.Fatal(err) } <-statusC stdioContents, err := os.ReadFile(ctrdStdioFilePath) if err != nil { t.Fatal(err) } switch runtimeVersion { case "v1": if !strings.Contains(string(stdioContents), fmt.Sprintf("%s writing to stdout", id)) { t.Fatal("containerd did not connect to the shim stdout pipe") } if !strings.Contains(string(stdioContents), fmt.Sprintf("%s writing to stderr", id)) { t.Fatal("containerd did not connect to the shim stderr pipe") } case "v2": if !strings.Contains(string(stdioContents), fmt.Sprintf("%s writing to log", id)) { t.Fatal("containerd did not connect to the shim log pipe") } } } func writeToFile(t *testing.T, filePath, message string) { writer, err := os.OpenFile(filePath, os.O_WRONLY, 0600) if err != nil { t.Fatal(err) } if _, err := writer.WriteString(message); err != nil { t.Fatal(err) } if err := writer.Close(); err != nil { t.Fatal(err) } } func getLogDirPath(runtimeVersion, id string) string { switch runtimeVersion { case "v1": return filepath.Join(defaultRoot, plugin.RuntimeLinuxV1, testNamespace, id) case "v2": return filepath.Join(defaultState, "io.containerd.runtime.v2.task", testNamespace, id) default: panic(fmt.Errorf("Unsupported runtime version %s", runtimeVersion)) } } func getRuntimeVersion() string { switch rt := os.Getenv("TEST_RUNTIME"); rt { case plugin.RuntimeLinuxV1: return "v1" default: return "v2" } } func TestContainerAttach(t 
*testing.T) { t.Parallel() if runtime.GOOS == "windows" { // On windows, closing the write side of the pipe closes the read // side, sending an EOF to it and preventing reopening it. // Hence this test will always fails on windows t.Skip("invalid logic on windows") } client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( image Image ctx, cancel = testContext(t) id = t.Name() ) defer cancel() image, err = client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withCat())) if err != nil { t.Fatal(err) } defer container.Delete(ctx, WithSnapshotCleanup) expected := "hello" + newLine direct, err := newDirectIO(ctx, false) if err != nil { t.Fatal(err) } defer direct.Delete() var ( wg sync.WaitGroup buf = bytes.NewBuffer(nil) ) wg.Add(1) go func() { defer wg.Done() io.Copy(buf, direct.Stdout) }() task, err := container.NewTask(ctx, direct.IOCreate) if err != nil { t.Fatal(err) } defer task.Delete(ctx) status, err := task.Wait(ctx) if err != nil { t.Error(err) } if err := task.Start(ctx); err != nil { t.Fatal(err) } if _, err := fmt.Fprint(direct.Stdin, expected); err != nil { t.Error(err) } // load the container and re-load the task if container, err = client.LoadContainer(ctx, id); err != nil { t.Fatal(err) } if task, err = container.Task(ctx, direct.IOAttach); err != nil { t.Fatal(err) } if _, err := fmt.Fprint(direct.Stdin, expected); err != nil { t.Error(err) } direct.Stdin.Close() if err := task.CloseIO(ctx, WithStdinCloser); err != nil { t.Error(err) } <-status wg.Wait() if _, err := task.Delete(ctx); err != nil { t.Error(err) } output := buf.String() // we wrote the same thing after attach expected = expected + expected if output != expected { t.Errorf("expected output %q but received %q", expected, output) } } func TestContainerUser(t *testing.T) { t.Parallel() t.Run("UserNameAndGroupName", func(t *testing.T) { testContainerUser(t, "www-data:www-data", "33:33") }) t.Run("UserIDAndGroupName", func(t *testing.T) { testContainerUser(t, "1001:www-data", "1001:33") }) t.Run("UserNameAndGroupID", func(t *testing.T) { testContainerUser(t, "www-data:1002", "33:1002") }) t.Run("UserIDAndGroupID", func(t *testing.T) { testContainerUser(t, "1001:1002", "1001:1002") }) } func testContainerUser(t *testing.T, userstr, expectedOutput string) { client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( image Image ctx, cancel = testContext(t) id = strings.Replace(t.Name(), "/", "_", -1) ) defer cancel() image, err = client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } direct, err := newDirectIO(ctx, false) if err != nil { t.Fatal(err) } defer direct.Delete() var ( wg sync.WaitGroup buf = bytes.NewBuffer(nil) ) wg.Add(1) go func() { defer wg.Done() io.Copy(buf, direct.Stdout) }() container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), oci.WithUser(userstr), oci.WithProcessArgs("sh", "-c", "echo $(id -u):$(id -g)")), ) if err != nil { t.Fatal(err) } defer container.Delete(ctx, WithSnapshotCleanup) task, err := container.NewTask(ctx, direct.IOCreate) if err != nil { t.Fatal(err) } defer task.Delete(ctx) statusC, err := task.Wait(ctx) if err != nil { t.Fatal(err) } if err := task.Start(ctx); err != nil { t.Fatal(err) } <-statusC wg.Wait() output := strings.TrimSuffix(buf.String(), "\n") if output != expectedOutput { t.Errorf("expected uid:gid to 
be %q, but received %q", expectedOutput, output) } } func TestContainerAttachProcess(t *testing.T) { t.Parallel() if runtime.GOOS == "windows" { // On windows, closing the write side of the pipe closes the read // side, sending an EOF to it and preventing reopening it. // Hence this test will always fails on windows t.Skip("invalid logic on windows") } client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( image Image ctx, cancel = testContext(t) id = t.Name() ) defer cancel() image, err = client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "100"))) if err != nil { t.Fatal(err) } defer container.Delete(ctx, WithSnapshotCleanup) expected := "hello" + newLine // creating IO early for easy resource cleanup direct, err := newDirectIO(ctx, false) if err != nil { t.Fatal(err) } defer direct.Delete() var ( wg sync.WaitGroup buf = bytes.NewBuffer(nil) ) wg.Add(1) go func() { defer wg.Done() io.Copy(buf, direct.Stdout) }() task, err := container.NewTask(ctx, empty()) if err != nil { t.Fatal(err) } defer task.Delete(ctx) status, err := task.Wait(ctx) if err != nil { t.Error(err) } if err := task.Start(ctx); err != nil { t.Fatal(err) } spec, err := container.Spec(ctx) if err != nil { t.Fatal(err) } processSpec := spec.Process processSpec.Args = []string{"cat"} execID := t.Name() + "_exec" process, err := task.Exec(ctx, execID, processSpec, direct.IOCreate) if err != nil { t.Fatal(err) } processStatusC, err := process.Wait(ctx) if err != nil { t.Fatal(err) } if err := process.Start(ctx); err != nil { t.Fatal(err) } if _, err := fmt.Fprint(direct.Stdin, expected); err != nil { t.Error(err) } if process, err = task.LoadProcess(ctx, execID, direct.IOAttach); err != nil { t.Fatal(err) } if _, err := fmt.Fprint(direct.Stdin, expected); err != nil { t.Error(err) } direct.Stdin.Close() if err := process.CloseIO(ctx, WithStdinCloser); err != nil { t.Error(err) } <-processStatusC wg.Wait() if err := task.Kill(ctx, syscall.SIGKILL); err != nil { t.Error(err) } output := buf.String() // we wrote the same thing after attach expected = expected + expected if output != expected { t.Errorf("expected output %q but received %q", expected, output) } <-status } func TestContainerLoadUnexistingProcess(t *testing.T) { t.Parallel() if runtime.GOOS == "windows" { // On windows, closing the write side of the pipe closes the read // side, sending an EOF to it and preventing reopening it. 
// Hence this test will always fails on windows t.Skip("invalid logic on windows") } client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( image Image ctx, cancel = testContext(t) id = t.Name() ) defer cancel() image, err = client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "100"))) if err != nil { t.Fatal(err) } defer container.Delete(ctx, WithSnapshotCleanup) // creating IO early for easy resource cleanup direct, err := newDirectIO(ctx, false) if err != nil { t.Fatal(err) } defer direct.Delete() task, err := container.NewTask(ctx, empty()) if err != nil { t.Fatal(err) } defer task.Delete(ctx) status, err := task.Wait(ctx) if err != nil { t.Error(err) } if err := task.Start(ctx); err != nil { t.Fatal(err) } if _, err = task.LoadProcess(ctx, "this-process-does-not-exist", direct.IOAttach); err == nil { t.Fatal("an error should have occurred when loading a process that does not exist") } if !errdefs.IsNotFound(err) { t.Fatalf("an error of type NotFound should have been returned when loading a process that does not exist, got %#v instead ", err) } if err := task.Kill(ctx, syscall.SIGKILL); err != nil { t.Error(err) } <-status } func TestContainerUserID(t *testing.T) { t.Parallel() client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( image Image ctx, cancel = testContext(t) id = t.Name() ) defer cancel() image, err = client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } direct, err := newDirectIO(ctx, false) if err != nil { t.Fatal(err) } defer direct.Delete() var ( wg sync.WaitGroup buf = bytes.NewBuffer(nil) ) wg.Add(1) go func() { defer wg.Done() io.Copy(buf, direct.Stdout) }() // sys user in the busybox image has a uid and gid of 3. 
container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), oci.WithUserID(3), oci.WithProcessArgs("sh", "-c", "echo $(id -u):$(id -g)")), ) if err != nil { t.Fatal(err) } defer container.Delete(ctx, WithSnapshotCleanup) task, err := container.NewTask(ctx, direct.IOCreate) if err != nil { t.Fatal(err) } defer task.Delete(ctx) statusC, err := task.Wait(ctx) if err != nil { t.Fatal(err) } if err := task.Start(ctx); err != nil { t.Fatal(err) } <-statusC wg.Wait() output := strings.TrimSuffix(buf.String(), "\n") if output != "3:3" { t.Errorf("expected uid:gid to be 3:3, but received %q", output) } } func TestContainerKillAll(t *testing.T) { t.Parallel() client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( image Image ctx, cancel = testContext(t) id = t.Name() ) defer cancel() image, err = client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sh", "-c", "top"), oci.WithHostNamespace(specs.PIDNamespace), ), ) if err != nil { t.Fatal(err) } defer container.Delete(ctx, WithSnapshotCleanup) stdout := bytes.NewBuffer(nil) task, err := container.NewTask(ctx, cio.NewCreator(withByteBuffers(stdout))) if err != nil { t.Fatal(err) } defer task.Delete(ctx) statusC, err := task.Wait(ctx) if err != nil { t.Fatal(err) } if err := task.Start(ctx); err != nil { t.Fatal(err) } if err := task.Kill(ctx, syscall.SIGKILL, WithKillAll); err != nil { t.Error(err) } <-statusC if _, err := task.Delete(ctx); err != nil { t.Fatal(err) } } func TestDaemonRestartWithRunningShim(t *testing.T) { client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( image Image ctx, cancel = testContext(t) id = t.Name() ) defer cancel() image, err = client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), oci.WithProcessArgs("sleep", "100"))) if err != nil { t.Fatal(err) } defer container.Delete(ctx, WithSnapshotCleanup) task, err := container.NewTask(ctx, empty()) if err != nil { t.Fatal(err) } defer task.Delete(ctx) statusC, err := task.Wait(ctx) if err != nil { t.Error(err) } pid := task.Pid() if pid < 1 { t.Fatalf("invalid task pid %d", pid) } if err := task.Start(ctx); err != nil { t.Fatal(err) } var exitStatus ExitStatus if err := ctrd.Restart(func() { exitStatus = <-statusC }); err != nil { t.Fatal(err) } if exitStatus.Error() == nil { t.Errorf(`first task.Wait() should have failed with "transport is closing"`) } waitCtx, cancel := context.WithTimeout(ctx, 1*time.Second) c, err := ctrd.waitForStart(waitCtx) cancel() if err != nil { t.Fatal(err) } c.Close() statusC, err = task.Wait(ctx) if err != nil { t.Error(err) } if err := task.Kill(ctx, syscall.SIGKILL); err != nil { t.Fatal(err) } <-statusC if err := unix.Kill(int(pid), 0); err != unix.ESRCH { t.Errorf("pid %d still exists", pid) } } func TestContainerRuntimeOptionsv1(t *testing.T) { t.Parallel() client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( image Image ctx, cancel = testContext(t) id = t.Name() ) defer cancel() image, err = client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } container, err := client.NewContainer( ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withExitStatus(7)), 
WithRuntime(plugin.RuntimeLinuxV1, &runctypes.RuncOptions{Runtime: "no-runc"}), ) if err != nil { t.Fatal(err) } defer container.Delete(ctx, WithSnapshotCleanup) task, err := container.NewTask(ctx, empty()) if err == nil { t.Errorf("task creation should have failed") task.Delete(ctx) return } if !strings.Contains(err.Error(), `"no-runc"`) { t.Errorf("task creation should have failed because of lack of executable. Instead failed with: %v", err.Error()) } } func TestContainerRuntimeOptionsv2(t *testing.T) { t.Parallel() client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( image Image ctx, cancel = testContext(t) id = t.Name() ) defer cancel() image, err = client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } container, err := client.NewContainer( ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withExitStatus(7)), WithRuntime(plugin.RuntimeRuncV1, &options.Options{BinaryName: "no-runc"}), ) if err != nil { t.Fatal(err) } defer container.Delete(ctx, WithSnapshotCleanup) task, err := container.NewTask(ctx, empty()) if err == nil { t.Errorf("task creation should have failed") task.Delete(ctx) return } if !strings.Contains(err.Error(), `"no-runc"`) { t.Errorf("task creation should have failed because of lack of executable. Instead failed with: %v", err.Error()) } } func TestContainerKillInitPidHost(t *testing.T) { initContainerAndCheckChildrenDieOnKill(t, oci.WithHostNamespace(specs.PIDNamespace)) } func TestUserNamespaces(t *testing.T) { t.Parallel() t.Run("WritableRootFS", func(t *testing.T) { testUserNamespaces(t, false) }) // see #1373 and runc#1572 t.Run("ReadonlyRootFS", func(t *testing.T) { testUserNamespaces(t, true) }) } func checkUserNS(t *testing.T) { cmd := exec.Command("true") cmd.SysProcAttr = &syscall.SysProcAttr{ Cloneflags: syscall.CLONE_NEWUSER, } if err := cmd.Run(); err != nil { t.Skip("User namespaces are unavailable") } } func testUserNamespaces(t *testing.T, readonlyRootFS bool) { checkUserNS(t) client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( image Image ctx, cancel = testContext(t) id = strings.Replace(t.Name(), "/", "-", -1) ) defer cancel() image, err = client.Pull(ctx, testUserNSImage, WithPullUnpack) if err != nil { t.Fatal(err) } opts := []NewContainerOpts{WithNewSpec(oci.WithImageConfig(image), withExitStatus(7), oci.WithUserNamespace([]specs.LinuxIDMapping{ { ContainerID: 0, HostID: 1000, Size: 10000, }, }, []specs.LinuxIDMapping{ { ContainerID: 0, HostID: 2000, Size: 10000, }, }), )} if readonlyRootFS { opts = append([]NewContainerOpts{WithRemappedSnapshotView(id, image, 1000, 2000)}, opts...) } else { opts = append([]NewContainerOpts{WithRemappedSnapshot(id, image, 1000, 2000)}, opts...) } container, err := client.NewContainer(ctx, id, opts...) 
if err != nil { t.Fatal(err) } defer container.Delete(ctx, WithSnapshotCleanup) var copts interface{} if CheckRuntime(client.Runtime(), "io.containerd.runc") { copts = &options.Options{ IoUid: 1000, IoGid: 2000, } } else { copts = &runctypes.CreateOptions{ IoUid: 1000, IoGid: 2000, } } task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStdio), func(_ context.Context, client *Client, r *TaskInfo) error { r.Options = copts return nil }) if err != nil { t.Fatal(err) } defer task.Delete(ctx) statusC, err := task.Wait(ctx) if err != nil { t.Fatal(err) } if pid := task.Pid(); pid < 1 { t.Errorf("invalid task pid %d", pid) } if err := task.Start(ctx); err != nil { t.Error(err) task.Delete(ctx) return } status := <-statusC code, _, err := status.Result() if err != nil { t.Fatal(err) } if code != 7 { t.Errorf("expected status 7 from wait but received %d", code) } deleteStatus, err := task.Delete(ctx) if err != nil { t.Fatal(err) } if ec := deleteStatus.ExitCode(); ec != 7 { t.Errorf("expected status 7 from delete but received %d", ec) } } func TestUIDNoGID(t *testing.T) { t.Parallel() ctx, cancel := testContext(t) defer cancel() id := t.Name() client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() image, err := client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithUserID(1000))) if err != nil { t.Fatal(err) } defer container.Delete(ctx) spec, err := container.Spec(ctx) if err != nil { t.Fatal(err) } if uid := spec.Process.User.UID; uid != 1000 { t.Fatalf("expected uid 1000 but received %d", uid) } if gid := spec.Process.User.GID; gid != 0 { t.Fatalf("expected gid 0 but received %d", gid) } } func TestBindLowPortNonRoot(t *testing.T) { t.Parallel() client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( image Image ctx, cancel = testContext(t) id = t.Name() ) defer cancel() image, err = client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("nc", "-l", "-p", "80"), oci.WithUIDGID(1000, 1000)), ) if err != nil { t.Fatal(err) } defer container.Delete(ctx, WithSnapshotCleanup) task, err := container.NewTask(ctx, empty()) if err != nil { t.Fatal(err) } defer task.Delete(ctx) statusC, err := task.Wait(ctx) if err != nil { t.Fatal(err) } if err := task.Start(ctx); err != nil { t.Fatal(err) } status := <-statusC code, _, err := status.Result() if err != nil { t.Fatal(err) } if code != 1 { t.Errorf("expected status 1 from wait but received %d", code) } if _, err := task.Delete(ctx); err != nil { t.Fatal(err) } } func TestBindLowPortNonOpt(t *testing.T) { t.Parallel() client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( image Image ctx, cancel = testContext(t) id = t.Name() ) defer cancel() image, err = client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("nc", "-l", "-p", "80"), oci.WithUIDGID(1000, 1000), oci.WithAmbientCapabilities([]string{"CAP_NET_BIND_SERVICE"})), ) if err != nil { t.Fatal(err) } defer container.Delete(ctx, WithSnapshotCleanup) task, err := container.NewTask(ctx, empty()) if err != nil { t.Fatal(err) } defer task.Delete(ctx) statusC, err := task.Wait(ctx) if err != nil { t.Fatal(err) } if 
err := task.Start(ctx); err != nil { t.Fatal(err) } go func() { time.Sleep(2 * time.Second) task.Kill(ctx, unix.SIGTERM) }() status := <-statusC code, _, err := status.Result() if err != nil { t.Fatal(err) } // 128 + sigterm if code != 143 { t.Errorf("expected status 143 from wait but received %d", code) } if _, err := task.Delete(ctx); err != nil { t.Fatal(err) } } func TestShimOOMScore(t *testing.T) { containerdPid := ctrd.cmd.Process.Pid containerdScore, err := sys.GetOOMScoreAdj(containerdPid) if err != nil { t.Fatal(err) } client, err := newClient(t, address) if err != nil { t.Fatal(err) } defer client.Close() var ( image Image ctx, cancel = testContext(t) id = t.Name() ) defer cancel() path := "/containerd/oomshim" var ( cg cgroups.Cgroup cg2 *cgroupsv2.Manager ) if cgroups.Mode() == cgroups.Unified { cg2, err = cgroupsv2.NewManager("/sys/fs/cgroup", path, &cgroupsv2.Resources{}) if err != nil { t.Fatal(err) } defer cg2.Delete() } else { cg, err = cgroups.New(cgroups.V1, cgroups.StaticPath(path), &specs.LinuxResources{}) if err != nil { t.Fatal(err) } defer cg.Delete() } image, err = client.GetImage(ctx, testImage) if err != nil { t.Fatal(err) } container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30"))) if err != nil { t.Fatal(err) } defer container.Delete(ctx, WithSnapshotCleanup) task, err := container.NewTask(ctx, empty(), WithShimCgroup(path)) if err != nil { t.Fatal(err) } defer task.Delete(ctx) statusC, err := task.Wait(ctx) if err != nil { t.Fatal(err) } expectedScore := containerdScore + 1 if expectedScore > sys.OOMScoreAdjMax { expectedScore = sys.OOMScoreAdjMax } // find the shim's pid if cgroups.Mode() == cgroups.Unified { processes, err := cg2.Procs(false) if err != nil { t.Fatal(err) } for _, pid := range processes { score, err := sys.GetOOMScoreAdj(int(pid)) if err != nil { t.Fatal(err) } if score != expectedScore { t.Errorf("expected score %d but got %d for shim process", expectedScore, score) } } } else { processes, err := cg.Processes(cgroups.Devices, false) if err != nil { t.Fatal(err) } for _, p := range processes { score, err := sys.GetOOMScoreAdj(p.Pid) if err != nil { t.Fatal(err) } if score != expectedScore { t.Errorf("expected score %d but got %d for shim process", expectedScore, score) } } } if err := task.Kill(ctx, unix.SIGKILL); err != nil { t.Fatal(err) } <-statusC }
[ "\"TEST_RUNTIME\"" ]
[]
[ "TEST_RUNTIME" ]
[]
["TEST_RUNTIME"]
go
1
0
MagE/asgi.py
""" ASGI config for MagE project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MagE.settings') application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
cmd/alertmanager/main.go
// Copyright 2015 Prometheus Team // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "fmt" "net" "net/http" "net/url" "os" "os/signal" "path/filepath" "runtime" "strings" "sync" "syscall" "time" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/model" "github.com/prometheus/common/promlog" promlogflag "github.com/prometheus/common/promlog/flag" "github.com/prometheus/common/route" "github.com/prometheus/common/version" "github.com/prometheus/exporter-toolkit/web" webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag" "gopkg.in/alecthomas/kingpin.v2" "github.com/prometheus/alertmanager/api" "github.com/prometheus/alertmanager/cluster" "github.com/prometheus/alertmanager/config" "github.com/prometheus/alertmanager/dispatch" "github.com/prometheus/alertmanager/inhibit" "github.com/prometheus/alertmanager/nflog" "github.com/prometheus/alertmanager/notify" "github.com/prometheus/alertmanager/notify/email" "github.com/prometheus/alertmanager/notify/newrelic" "github.com/prometheus/alertmanager/notify/opsgenie" "github.com/prometheus/alertmanager/notify/pagerduty" "github.com/prometheus/alertmanager/notify/pushover" "github.com/prometheus/alertmanager/notify/slack" "github.com/prometheus/alertmanager/notify/victorops" "github.com/prometheus/alertmanager/notify/webhook" "github.com/prometheus/alertmanager/notify/wechat" "github.com/prometheus/alertmanager/provider/mem" "github.com/prometheus/alertmanager/silence" "github.com/prometheus/alertmanager/template" "github.com/prometheus/alertmanager/timeinterval" "github.com/prometheus/alertmanager/types" "github.com/prometheus/alertmanager/ui" ) var ( requestDuration = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "alertmanager_http_request_duration_seconds", Help: "Histogram of latencies for HTTP requests.", Buckets: []float64{.05, 0.1, .25, .5, .75, 1, 2, 5, 20, 60}, }, []string{"handler", "method"}, ) responseSize = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "alertmanager_http_response_size_bytes", Help: "Histogram of response size for HTTP requests.", Buckets: prometheus.ExponentialBuckets(100, 10, 7), }, []string{"handler", "method"}, ) clusterEnabled = prometheus.NewGauge( prometheus.GaugeOpts{ Name: "alertmanager_cluster_enabled", Help: "Indicates whether the clustering is enabled or not.", }, ) configuredReceivers = prometheus.NewGauge( prometheus.GaugeOpts{ Name: "alertmanager_receivers", Help: "Number of configured receivers.", }, ) configuredIntegrations = prometheus.NewGauge( prometheus.GaugeOpts{ Name: "alertmanager_integrations", Help: "Number of configured integrations.", }, ) promlogConfig = promlog.Config{} ) func init() { prometheus.MustRegister(requestDuration) prometheus.MustRegister(responseSize) prometheus.MustRegister(clusterEnabled) prometheus.MustRegister(configuredReceivers) 
prometheus.MustRegister(configuredIntegrations) prometheus.MustRegister(version.NewCollector("alertmanager")) } func instrumentHandler(handlerName string, handler http.HandlerFunc) http.HandlerFunc { handlerLabel := prometheus.Labels{"handler": handlerName} return promhttp.InstrumentHandlerDuration( requestDuration.MustCurryWith(handlerLabel), promhttp.InstrumentHandlerResponseSize( responseSize.MustCurryWith(handlerLabel), handler, ), ) } const defaultClusterAddr = "0.0.0.0:9094" // buildReceiverIntegrations builds a list of integration notifiers off of a // receiver config. func buildReceiverIntegrations(nc *config.Receiver, tmpl *template.Template, logger log.Logger) ([]notify.Integration, error) { var ( errs types.MultiError integrations []notify.Integration add = func(name string, i int, rs notify.ResolvedSender, f func(l log.Logger) (notify.Notifier, error)) { n, err := f(log.With(logger, "integration", name)) if err != nil { errs.Add(err) return } integrations = append(integrations, notify.NewIntegration(n, rs, name, i)) } ) for i, c := range nc.WebhookConfigs { add("webhook", i, c, func(l log.Logger) (notify.Notifier, error) { return webhook.New(c, tmpl, l) }) } for i, c := range nc.EmailConfigs { add("email", i, c, func(l log.Logger) (notify.Notifier, error) { return email.New(c, tmpl, l), nil }) } for i, c := range nc.PagerdutyConfigs { add("pagerduty", i, c, func(l log.Logger) (notify.Notifier, error) { return pagerduty.New(c, tmpl, l) }) } for i, c := range nc.OpsGenieConfigs { add("opsgenie", i, c, func(l log.Logger) (notify.Notifier, error) { return opsgenie.New(c, tmpl, l) }) } for i, c := range nc.WechatConfigs { add("wechat", i, c, func(l log.Logger) (notify.Notifier, error) { return wechat.New(c, tmpl, l) }) } for i, c := range nc.SlackConfigs { add("slack", i, c, func(l log.Logger) (notify.Notifier, error) { return slack.New(c, tmpl, l) }) } for i, c := range nc.VictorOpsConfigs { add("victorops", i, c, func(l log.Logger) (notify.Notifier, error) { return victorops.New(c, tmpl, l) }) } for i, c := range nc.PushoverConfigs { add("pushover", i, c, func(l log.Logger) (notify.Notifier, error) { return pushover.New(c, tmpl, l) }) } for i, c := range nc.NewRelicConfigs { add("newrelic", i, c, func(l log.Logger) (notify.Notifier, error) { return newrelic.New(c, tmpl, l) }) } if errs.Len() > 0 { return nil, &errs } return integrations, nil } func main() { os.Exit(run()) } func run() int { if os.Getenv("DEBUG") != "" { runtime.SetBlockProfileRate(20) runtime.SetMutexProfileFraction(20) } var ( configFile = kingpin.Flag("config.file", "Alertmanager configuration file name.").Default("alertmanager.yml").String() dataDir = kingpin.Flag("storage.path", "Base path for data storage.").Default("data/").String() retention = kingpin.Flag("data.retention", "How long to keep data for.").Default("120h").Duration() alertGCInterval = kingpin.Flag("alerts.gc-interval", "Interval between alert GC.").Default("30m").Duration() webConfig = webflag.AddFlags(kingpin.CommandLine) externalURL = kingpin.Flag("web.external-url", "The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy). Used for generating relative and absolute links back to Alertmanager itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Alertmanager. If omitted, relevant URL components will be derived automatically.").String() routePrefix = kingpin.Flag("web.route-prefix", "Prefix for the internal routes of web endpoints. 
Defaults to path of --web.external-url.").String() listenAddress = kingpin.Flag("web.listen-address", "Address to listen on for the web interface and API.").Default(":9093").String() getConcurrency = kingpin.Flag("web.get-concurrency", "Maximum number of GET requests processed concurrently. If negative or zero, the limit is GOMAXPROC or 8, whichever is larger.").Default("0").Int() httpTimeout = kingpin.Flag("web.timeout", "Timeout for HTTP requests. If negative or zero, no timeout is set.").Default("0").Duration() clusterBindAddr = kingpin.Flag("cluster.listen-address", "Listen address for cluster. Set to empty string to disable HA mode."). Default(defaultClusterAddr).String() clusterAdvertiseAddr = kingpin.Flag("cluster.advertise-address", "Explicit address to advertise in cluster.").String() peers = kingpin.Flag("cluster.peer", "Initial peers (may be repeated).").Strings() peerTimeout = kingpin.Flag("cluster.peer-timeout", "Time to wait between peers to send notifications.").Default("15s").Duration() gossipInterval = kingpin.Flag("cluster.gossip-interval", "Interval between sending gossip messages. By lowering this value (more frequent) gossip messages are propagated across the cluster more quickly at the expense of increased bandwidth.").Default(cluster.DefaultGossipInterval.String()).Duration() pushPullInterval = kingpin.Flag("cluster.pushpull-interval", "Interval for gossip state syncs. Setting this interval lower (more frequent) will increase convergence speeds across larger clusters at the expense of increased bandwidth usage.").Default(cluster.DefaultPushPullInterval.String()).Duration() tcpTimeout = kingpin.Flag("cluster.tcp-timeout", "Timeout for establishing a stream connection with a remote node for a full state sync, and for stream read and write operations.").Default(cluster.DefaultTcpTimeout.String()).Duration() probeTimeout = kingpin.Flag("cluster.probe-timeout", "Timeout to wait for an ack from a probed node before assuming it is unhealthy. This should be set to 99-percentile of RTT (round-trip time) on your network.").Default(cluster.DefaultProbeTimeout.String()).Duration() probeInterval = kingpin.Flag("cluster.probe-interval", "Interval between random node probes. 
Setting this lower (more frequent) will cause the cluster to detect failed nodes more quickly at the expense of increased bandwidth usage.").Default(cluster.DefaultProbeInterval.String()).Duration() settleTimeout = kingpin.Flag("cluster.settle-timeout", "Maximum time to wait for cluster connections to settle before evaluating notifications.").Default(cluster.DefaultPushPullInterval.String()).Duration() reconnectInterval = kingpin.Flag("cluster.reconnect-interval", "Interval between attempting to reconnect to lost peers.").Default(cluster.DefaultReconnectInterval.String()).Duration() peerReconnectTimeout = kingpin.Flag("cluster.reconnect-timeout", "Length of time to attempt to reconnect to a lost peer.").Default(cluster.DefaultReconnectTimeout.String()).Duration() ) promlogflag.AddFlags(kingpin.CommandLine, &promlogConfig) kingpin.CommandLine.UsageWriter(os.Stdout) kingpin.Version(version.Print("alertmanager")) kingpin.CommandLine.GetFlag("help").Short('h') kingpin.Parse() logger := promlog.New(&promlogConfig) level.Info(logger).Log("msg", "Starting Alertmanager", "version", version.Info()) level.Info(logger).Log("build_context", version.BuildContext()) err := os.MkdirAll(*dataDir, 0777) if err != nil { level.Error(logger).Log("msg", "Unable to create data directory", "err", err) return 1 } var peer *cluster.Peer if *clusterBindAddr != "" { peer, err = cluster.Create( log.With(logger, "component", "cluster"), prometheus.DefaultRegisterer, *clusterBindAddr, *clusterAdvertiseAddr, *peers, true, *pushPullInterval, *gossipInterval, *tcpTimeout, *probeTimeout, *probeInterval, ) if err != nil { level.Error(logger).Log("msg", "unable to initialize gossip mesh", "err", err) return 1 } clusterEnabled.Set(1) } stopc := make(chan struct{}) var wg sync.WaitGroup wg.Add(1) notificationLogOpts := []nflog.Option{ nflog.WithRetention(*retention), nflog.WithSnapshot(filepath.Join(*dataDir, "nflog")), nflog.WithMaintenance(15*time.Minute, stopc, wg.Done), nflog.WithMetrics(prometheus.DefaultRegisterer), nflog.WithLogger(log.With(logger, "component", "nflog")), } notificationLog, err := nflog.New(notificationLogOpts...) if err != nil { level.Error(logger).Log("err", err) return 1 } if peer != nil { c := peer.AddState("nfl", notificationLog, prometheus.DefaultRegisterer) notificationLog.SetBroadcast(c.Broadcast) } marker := types.NewMarker(prometheus.DefaultRegisterer) silenceOpts := silence.Options{ SnapshotFile: filepath.Join(*dataDir, "silences"), Retention: *retention, Logger: log.With(logger, "component", "silences"), Metrics: prometheus.DefaultRegisterer, } silences, err := silence.New(silenceOpts) if err != nil { level.Error(logger).Log("err", err) return 1 } if peer != nil { c := peer.AddState("sil", silences, prometheus.DefaultRegisterer) silences.SetBroadcast(c.Broadcast) } // Start providers before router potentially sends updates. wg.Add(1) go func() { silences.Maintenance(15*time.Minute, filepath.Join(*dataDir, "silences"), stopc) wg.Done() }() defer func() { close(stopc) wg.Wait() }() // Peer state listeners have been registered, now we can join and get the initial state. 
if peer != nil { err = peer.Join( *reconnectInterval, *peerReconnectTimeout, ) if err != nil { level.Warn(logger).Log("msg", "unable to join gossip mesh", "err", err) } ctx, cancel := context.WithTimeout(context.Background(), *settleTimeout) defer func() { cancel() if err := peer.Leave(10 * time.Second); err != nil { level.Warn(logger).Log("msg", "unable to leave gossip mesh", "err", err) } }() go peer.Settle(ctx, *gossipInterval*10) } alerts, err := mem.NewAlerts(context.Background(), marker, *alertGCInterval, nil, logger) if err != nil { level.Error(logger).Log("err", err) return 1 } defer alerts.Close() var disp *dispatch.Dispatcher defer disp.Stop() groupFn := func(routeFilter func(*dispatch.Route) bool, alertFilter func(*types.Alert, time.Time) bool) (dispatch.AlertGroups, map[model.Fingerprint][]string) { return disp.Groups(routeFilter, alertFilter) } // An interface value that holds a nil concrete value is non-nil. // Therefore we explicly pass an empty interface, to detect if the // cluster is not enabled in notify. var clusterPeer cluster.ClusterPeer if peer != nil { clusterPeer = peer } api, err := api.New(api.Options{ Alerts: alerts, Silences: silences, StatusFunc: marker.Status, Peer: clusterPeer, Timeout: *httpTimeout, Concurrency: *getConcurrency, Logger: log.With(logger, "component", "api"), Registry: prometheus.DefaultRegisterer, GroupFunc: groupFn, }) if err != nil { level.Error(logger).Log("err", errors.Wrap(err, "failed to create API")) return 1 } amURL, err := extURL(logger, os.Hostname, *listenAddress, *externalURL) if err != nil { level.Error(logger).Log("msg", "failed to determine external URL", "err", err) return 1 } level.Debug(logger).Log("externalURL", amURL.String()) waitFunc := func() time.Duration { return 0 } if peer != nil { waitFunc = clusterWait(peer, *peerTimeout) } timeoutFunc := func(d time.Duration) time.Duration { if d < notify.MinTimeout { d = notify.MinTimeout } return d + waitFunc() } var ( inhibitor *inhibit.Inhibitor tmpl *template.Template ) dispMetrics := dispatch.NewDispatcherMetrics(false, prometheus.DefaultRegisterer) pipelineBuilder := notify.NewPipelineBuilder(prometheus.DefaultRegisterer) configLogger := log.With(logger, "component", "configuration") configCoordinator := config.NewCoordinator( *configFile, prometheus.DefaultRegisterer, configLogger, ) configCoordinator.Subscribe(func(conf *config.Config) error { tmpl, err = template.FromGlobs(conf.Templates...) if err != nil { return errors.Wrap(err, "failed to parse templates") } tmpl.ExternalURL = amURL // Build the routing tree and record which receivers are used. routes := dispatch.NewRoute(conf.Route, nil) activeReceivers := make(map[string]struct{}) routes.Walk(func(r *dispatch.Route) { activeReceivers[r.RouteOpts.Receiver] = struct{}{} }) // Build the map of receiver to integrations. receivers := make(map[string][]notify.Integration, len(activeReceivers)) var integrationsNum int for _, rcv := range conf.Receivers { if _, found := activeReceivers[rcv.Name]; !found { // No need to build a receiver if no route is using it. level.Info(configLogger).Log("msg", "skipping creation of receiver not referenced by any route", "receiver", rcv.Name) continue } integrations, err := buildReceiverIntegrations(rcv, tmpl, logger) if err != nil { return err } // rcv.Name is guaranteed to be unique across all receivers. receivers[rcv.Name] = integrations integrationsNum += len(integrations) } // Build the map of time interval names to mute time definitions. 
muteTimes := make(map[string][]timeinterval.TimeInterval, len(conf.MuteTimeIntervals)) for _, ti := range conf.MuteTimeIntervals { muteTimes[ti.Name] = ti.TimeIntervals } inhibitor.Stop() disp.Stop() inhibitor = inhibit.NewInhibitor(alerts, conf.InhibitRules, marker, logger) silencer := silence.NewSilencer(silences, marker, logger) // An interface value that holds a nil concrete value is non-nil. // Therefore we explicly pass an empty interface, to detect if the // cluster is not enabled in notify. var pipelinePeer notify.Peer if peer != nil { pipelinePeer = peer } pipeline := pipelineBuilder.New( receivers, waitFunc, inhibitor, silencer, muteTimes, notificationLog, pipelinePeer, ) configuredReceivers.Set(float64(len(activeReceivers))) configuredIntegrations.Set(float64(integrationsNum)) api.Update(conf, func(labels model.LabelSet) { inhibitor.Mutes(labels) silencer.Mutes(labels) }) disp = dispatch.NewDispatcher(alerts, routes, pipeline, marker, timeoutFunc, nil, logger, dispMetrics) routes.Walk(func(r *dispatch.Route) { if r.RouteOpts.RepeatInterval > *retention { level.Warn(configLogger).Log( "msg", "repeat_interval is greater than the data retention period. It can lead to notifications being repeated more often than expected.", "repeat_interval", r.RouteOpts.RepeatInterval, "retention", *retention, "route", r.Key(), ) } }) go disp.Run() go inhibitor.Run() return nil }) if err := configCoordinator.Reload(); err != nil { return 1 } // Make routePrefix default to externalURL path if empty string. if *routePrefix == "" { *routePrefix = amURL.Path } *routePrefix = "/" + strings.Trim(*routePrefix, "/") level.Debug(logger).Log("routePrefix", *routePrefix) router := route.New().WithInstrumentation(instrumentHandler) if *routePrefix != "/" { router.Get("/", func(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, *routePrefix, http.StatusFound) }) router = router.WithPrefix(*routePrefix) } webReload := make(chan chan error) ui.Register(router, webReload, logger) mux := api.Register(router, *routePrefix) srv := &http.Server{Addr: *listenAddress, Handler: mux} srvc := make(chan struct{}) go func() { level.Info(logger).Log("msg", "Listening", "address", *listenAddress) if err := web.ListenAndServe(srv, *webConfig, logger); err != http.ErrServerClosed { level.Error(logger).Log("msg", "Listen error", "err", err) close(srvc) } defer func() { if err := srv.Close(); err != nil { level.Error(logger).Log("msg", "Error on closing the server", "err", err) } }() }() var ( hup = make(chan os.Signal, 1) hupReady = make(chan bool) term = make(chan os.Signal, 1) ) signal.Notify(hup, syscall.SIGHUP) signal.Notify(term, os.Interrupt, syscall.SIGTERM) go func() { <-hupReady for { select { case <-hup: // ignore error, already logged in `reload()` _ = configCoordinator.Reload() case errc := <-webReload: errc <- configCoordinator.Reload() } } }() // Wait for reload or termination signals. close(hupReady) // Unblock SIGHUP handler. for { select { case <-term: level.Info(logger).Log("msg", "Received SIGTERM, exiting gracefully...") return 0 case <-srvc: return 1 } } } // clusterWait returns a function that inspects the current peer state and returns // a duration of one base timeout for each peer with a higher ID than ourselves. 
func clusterWait(p *cluster.Peer, timeout time.Duration) func() time.Duration { return func() time.Duration { return time.Duration(p.Position()) * timeout } } func extURL(logger log.Logger, hostnamef func() (string, error), listen, external string) (*url.URL, error) { if external == "" { hostname, err := hostnamef() if err != nil { return nil, err } _, port, err := net.SplitHostPort(listen) if err != nil { return nil, err } if port == "" { level.Warn(logger).Log("msg", "no port found for listen address", "address", listen) } external = fmt.Sprintf("http://%s:%s/", hostname, port) } u, err := url.Parse(external) if err != nil { return nil, err } if u.Scheme != "http" && u.Scheme != "https" { return nil, errors.Errorf("%q: invalid %q scheme, only 'http' and 'https' are supported", u.String(), u.Scheme) } ppref := strings.TrimRight(u.Path, "/") if ppref != "" && !strings.HasPrefix(ppref, "/") { ppref = "/" + ppref } u.Path = ppref return u, nil }
[ "\"DEBUG\"" ]
[]
[ "DEBUG" ]
[]
["DEBUG"]
go
1
0
main.go
package main

import (
	"errors"
	"log"
	"net/http"
	"os"
	"os/signal"
	"runtime"
	"strconv"
	"syscall"
)

type Exchange struct {
	anx           ANX
	btcc          BTCC
	bitstamp      Bitstamp
	bitfinex      Bitfinex
	brightonpeak  BrightonPeak
	btce          BTCE
	btcmarkets    BTCMarkets
	gdax          GDAX
	gemini        Gemini
	okcoinChina   OKCoin
	okcoinIntl    OKCoin
	itbit         ItBit
	lakebtc       LakeBTC
	localbitcoins LocalBitcoins
	poloniex      Poloniex
	huobi         HUOBI
	kraken        Kraken
}

type Bot struct {
	config     Config
	exchange   Exchange
	exchanges  []IBotExchange
	tickers    []Ticker
	portfolio  Portfolio
	tickerChan chan Ticker
	shutdown   chan bool
}

var bot Bot

func setupBotExchanges() {
	for _, exch := range bot.config.Exchanges {
		for i := 0; i < len(bot.exchanges); i++ {
			if bot.exchanges[i] != nil {
				if bot.exchanges[i].GetName() == exch.Name {
					bot.exchanges[i].Setup(exch)
					if bot.exchanges[i].IsEnabled() {
						log.Printf("%s: Exchange support: %s (Authenticated API support: %s - Verbose mode: %s).\n", exch.Name, IsEnabled(exch.Enabled), IsEnabled(exch.AuthenticatedAPISupport), IsEnabled(exch.Verbose))
						bot.exchanges[i].Start()
					} else {
						log.Printf("%s: Exchange support: %s\n", exch.Name, IsEnabled(exch.Enabled))
					}
				}
			}
		}
	}
}

func main() {
	HandleInterrupt()
	log.Println("Loading config file config.json..")
	err := errors.New("")
	bot.config, err = ReadConfig()
	if err != nil {
		log.Printf("Fatal error opening config.json file. Error: %s", err)
		return
	}
	log.Println("Config file loaded. Checking settings.. ")
	err = CheckExchangeConfigValues()
	if err != nil {
		log.Println("Fatal error checking config values. Error:", err)
		return
	}
	log.Printf("Bot '%s' started.\n", bot.config.Name)
	AdjustGoMaxProcs()

	if bot.config.SMS.Enabled {
		err = CheckSMSGlobalConfigValues()
		if err != nil {
			log.Println(err) // non fatal event
			bot.config.SMS.Enabled = false
		} else {
			log.Printf("SMS support enabled. Number of SMS contacts %d.\n", GetEnabledSMSContacts())
		}
	} else {
		log.Println("SMS support disabled.")
	}

	log.Printf("Available Exchanges: %d. Enabled Exchanges: %d.\n", len(bot.config.Exchanges), GetEnabledExchanges())
	log.Println("Bot Exchange support:")

	bot.exchanges = []IBotExchange{
		new(ANX),
		new(Kraken),
		new(BTCC),
		new(Bitstamp),
		new(BrightonPeak),
		new(Bitfinex),
		new(BTCE),
		new(BTCMarkets),
		new(GDAX),
		new(Gemini),
		new(OKCoin),
		new(OKCoin),
		new(ItBit),
		new(LakeBTC),
		new(LocalBitcoins),
		new(Poloniex),
		new(HUOBI),
	}

	for i := 0; i < len(bot.exchanges); i++ {
		if bot.exchanges[i] != nil {
			bot.exchanges[i].SetDefaults()
			log.Printf("Exchange %s successfully set default settings.\n", bot.exchanges[i].GetName())
		}
	}

	setupBotExchanges()

	err = RetrieveConfigCurrencyPairs(bot.config)
	if err != nil {
		log.Println("Fatal error retrieving config currency AvailablePairs. Error: ", err)
	}

	if bot.config.Webserver.Enabled {
		err := CheckWebserverValues()
		if err != nil {
			log.Println(err) // non fatal event
			//bot.config.Webserver.Enabled = false
		} else {
			listenAddr := bot.config.Webserver.ListenAddress
			log.Printf("HTTP Webserver support enabled. Listen URL: http://%s:%d/\n", ExtractHost(listenAddr), ExtractPort(listenAddr))
			router := NewRouter(bot.exchanges)
			log.Fatal(http.ListenAndServe(listenAddr, router))
		}
	}
	if !bot.config.Webserver.Enabled {
		log.Println("HTTP Webserver support disabled.")
	}

	<-bot.shutdown
	Shutdown()
}

func AdjustGoMaxProcs() {
	log.Println("Adjusting bot runtime performance..")
	maxProcsEnv := os.Getenv("GOMAXPROCS")
	maxProcs := runtime.NumCPU()
	log.Println("Number of CPU's detected:", maxProcs)

	if maxProcsEnv != "" {
		log.Println("GOMAXPROCS env =", maxProcsEnv)
		env, err := strconv.Atoi(maxProcsEnv)
		if err != nil {
			log.Println("Unable to convert GOMAXPROCS to int, using", maxProcs)
		} else {
			maxProcs = env
		}
	}
	log.Println("Set GOMAXPROCS to:", maxProcs)
	runtime.GOMAXPROCS(maxProcs)
}

func HandleInterrupt() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	go func() {
		sig := <-c
		log.Printf("Captured %v.", sig)
		Shutdown()
	}()
}

func Shutdown() {
	log.Println("Bot shutting down..")
	err := SaveConfig()
	if err != nil {
		log.Println("Unable to save config.")
	} else {
		log.Println("Config file saved successfully.")
	}
	log.Println("Exiting.")
	os.Exit(1)
}
[ "\"GOMAXPROCS\"" ]
[]
[ "GOMAXPROCS" ]
[]
["GOMAXPROCS"]
go
1
0
gitlint/cli.py
# pylint: disable=bad-option-value,wrong-import-position # We need to disable the import position checks because of the windows check that we need to do below import copy import logging import os import platform import stat import sys import click # Error codes MAX_VIOLATION_ERROR_CODE = 252 # noqa USAGE_ERROR_CODE = 253 # noqa GIT_CONTEXT_ERROR_CODE = 254 # noqa CONFIG_ERROR_CODE = 255 # noqa import gitlint from gitlint.lint import GitLinter from gitlint.config import LintConfigBuilder, LintConfigError, LintConfigGenerator from gitlint.git import GitContext, GitContextError, git_version from gitlint import hooks from gitlint.shell import shell from gitlint.utils import ustr, LOG_FORMAT, IS_PY2 DEFAULT_CONFIG_FILE = ".gitlint" DEFAULT_COMMIT_MSG_EDITOR = "vim" # Since we use the return code to denote the amount of errors, we need to change the default click usage error code click.UsageError.exit_code = USAGE_ERROR_CODE # We don't use logging.getLogger(__main__) here because that will cause DEBUG output to be lost # when invoking gitlint as a python module (python -m gitlint.cli) LOG = logging.getLogger("gitlint.cli") class GitLintUsageError(Exception): """ Exception indicating there is an issue with how gitlint is used. """ pass def setup_logging(): """ Setup gitlint logging """ root_log = logging.getLogger("gitlint") root_log.propagate = False # Don't propagate to child loggers, the gitlint root logger handles everything handler = logging.StreamHandler() formatter = logging.Formatter(LOG_FORMAT) handler.setFormatter(formatter) root_log.addHandler(handler) root_log.setLevel(logging.ERROR) def log_system_info(): LOG.debug("Platform: %s", platform.platform()) LOG.debug("Python version: %s", sys.version) LOG.debug("Git version: %s", git_version()) LOG.debug("Gitlint version: %s", gitlint.__version__) LOG.debug("GITLINT_USE_SH_LIB: %s", os.environ.get("GITLINT_USE_SH_LIB", "[NOT SET]")) LOG.debug("DEFAULT_ENCODING: %s", gitlint.utils.DEFAULT_ENCODING) def build_config( # pylint: disable=too-many-arguments target, config_path, c, extra_path, ignore, contrib, ignore_stdin, staged, verbose, silent, debug ): """ Creates a LintConfig object based on a set of commandline parameters. """ config_builder = LintConfigBuilder() # Config precedence: # First, load default config or config from configfile if config_path: config_builder.set_from_config_file(config_path) elif os.path.exists(DEFAULT_CONFIG_FILE): config_builder.set_from_config_file(DEFAULT_CONFIG_FILE) # Then process any commandline configuration flags config_builder.set_config_from_string_list(c) # Finally, overwrite with any convenience commandline flags if ignore: config_builder.set_option('general', 'ignore', ignore) if contrib: config_builder.set_option('general', 'contrib', contrib) if ignore_stdin: config_builder.set_option('general', 'ignore-stdin', ignore_stdin) if silent: config_builder.set_option('general', 'verbosity', 0) elif verbose > 0: config_builder.set_option('general', 'verbosity', verbose) if extra_path: config_builder.set_option('general', 'extra-path', extra_path) if target: config_builder.set_option('general', 'target', target) if debug: config_builder.set_option('general', 'debug', debug) if staged: config_builder.set_option('general', 'staged', staged) config = config_builder.build() return config, config_builder def get_stdin_data(): """ Helper function that returns data send to stdin or False if nothing is send """ # STDIN can only be 3 different types of things ("modes") # 1. An interactive terminal device (i.e. 
a TTY -> sys.stdin.isatty() or stat.S_ISCHR) # 2. A (named) pipe (stat.S_ISFIFO) # 3. A regular file (stat.S_ISREG) # Technically, STDIN can also be other device type like a named unix socket (stat.S_ISSOCK), but we don't # support that in gitlint (at least not today). # # Now, the behavior that we want is the following: # If someone sends something directly to gitlint via a pipe or a regular file, read it. If not, read from the # local repository. # Note that we don't care about whether STDIN is a TTY or not, we only care whether data is via a pipe or regular # file. # However, in case STDIN is not a TTY, it HAS to be one of the 2 other things (pipe or regular file), even if # no-one is actually sending anything to gitlint over them. In this case, we still want to read from the local # repository. # To support this use-case (which is common in CI runners such as Jenkins and Gitlab), we need to actually attempt # to read from STDIN in case it's a pipe or regular file. In case that fails, then we'll fall back to reading # from the local repo. mode = os.fstat(sys.stdin.fileno()).st_mode stdin_is_pipe_or_file = stat.S_ISFIFO(mode) or stat.S_ISREG(mode) if stdin_is_pipe_or_file: input_data = sys.stdin.read() # Only return the input data if there's actually something passed # i.e. don't consider empty piped data if input_data: return ustr(input_data) return False def build_git_context(lint_config, msg_filename, refspec): """ Builds a git context based on passed parameters and order of precedence """ # Determine which GitContext method to use if a custom message is passed from_commit_msg = GitContext.from_commit_msg if lint_config.staged: LOG.debug("Fetching additional meta-data from staged commit") from_commit_msg = lambda message: GitContext.from_staged_commit(message, lint_config.target) # noqa # Order of precedence: # 1. Any data specified via --msg-filename if msg_filename: LOG.debug("Using --msg-filename.") return from_commit_msg(ustr(msg_filename.read())) # 2. Any data sent to stdin (unless stdin is being ignored) if not lint_config.ignore_stdin: stdin_input = get_stdin_data() if stdin_input: LOG.debug("Stdin data: '%s'", stdin_input) LOG.debug("Stdin detected and not ignored. Using as input.") return from_commit_msg(stdin_input) if lint_config.staged: raise GitLintUsageError(u"The 'staged' option (--staged) can only be used when using '--msg-filename' or " u"when piping data to gitlint via stdin.") # 3. Fallback to reading from local repository LOG.debug("No --msg-filename flag, no or empty data passed to stdin. Using the local repo.") return GitContext.from_local_repository(lint_config.target, refspec) class ContextObj(object): """ Simple class to hold data that is passed between Click commands via the Click context. """ def __init__(self, config, config_builder, refspec, msg_filename, gitcontext=None): self.config = config self.config_builder = config_builder self.refspec = refspec self.msg_filename = msg_filename self.gitcontext = gitcontext @click.group(invoke_without_command=True, context_settings={'max_content_width': 120}, epilog="When no COMMAND is specified, gitlint defaults to 'gitlint lint'.") @click.option('--target', type=click.Path(exists=True, resolve_path=True, file_okay=False, readable=True), help="Path of the target git repository. 
[default: current working directory]") @click.option('-C', '--config', type=click.Path(exists=True, dir_okay=False, readable=True, resolve_path=True), help="Config file location [default: {0}]".format(DEFAULT_CONFIG_FILE)) @click.option('-c', multiple=True, help="Config flags in format <rule>.<option>=<value> (e.g.: -c T1.line-length=80). " + "Flag can be used multiple times to set multiple config values.") # pylint: disable=bad-continuation @click.option('--commits', default=None, help="The range of commits to lint. [default: HEAD]") @click.option('-e', '--extra-path', help="Path to a directory or python module with extra user-defined rules", type=click.Path(exists=True, resolve_path=True, readable=True)) @click.option('--ignore', default="", help="Ignore rules (comma-separated by id or name).") @click.option('--contrib', default="", help="Contrib rules to enable (comma-separated by id or name).") @click.option('--msg-filename', type=click.File(), help="Path to a file containing a commit-msg.") @click.option('--ignore-stdin', is_flag=True, help="Ignore any stdin data. Useful for running in CI server.") @click.option('--staged', is_flag=True, help="Read staged commit meta-info from the local repository.") @click.option('-v', '--verbose', count=True, default=0, help="Verbosity, more v's for more verbose output (e.g.: -v, -vv, -vvv). [default: -vvv]", ) @click.option('-s', '--silent', help="Silent mode (no output). Takes precedence over -v, -vv, -vvv.", is_flag=True) @click.option('-d', '--debug', help="Enable debugging output.", is_flag=True) @click.version_option(version=gitlint.__version__) @click.pass_context def cli( # pylint: disable=too-many-arguments ctx, target, config, c, commits, extra_path, ignore, contrib, msg_filename, ignore_stdin, staged, verbose, silent, debug, ): """ Git lint tool, checks your git commit messages for styling issues Documentation: http://jorisroovers.github.io/gitlint """ try: if debug: logging.getLogger("gitlint").setLevel(logging.DEBUG) LOG.debug("To report issues, please visit https://github.com/jorisroovers/gitlint/issues") log_system_info() # Get the lint config from the commandline parameters and # store it in the context (click allows storing an arbitrary object in ctx.obj). config, config_builder = build_config(target, config, c, extra_path, ignore, contrib, ignore_stdin, staged, verbose, silent, debug) LOG.debug(u"Configuration\n%s", ustr(config)) ctx.obj = ContextObj(config, config_builder, commits, msg_filename) # If no subcommand is specified, then just lint if ctx.invoked_subcommand is None: ctx.invoke(lint) except GitContextError as e: click.echo(ustr(e)) ctx.exit(GIT_CONTEXT_ERROR_CODE) except GitLintUsageError as e: click.echo(u"Error: {0}".format(ustr(e))) ctx.exit(USAGE_ERROR_CODE) except LintConfigError as e: click.echo(u"Config Error: {0}".format(ustr(e))) ctx.exit(CONFIG_ERROR_CODE) @cli.command("lint") @click.pass_context def lint(ctx): """ Lints a git repository [default command] """ lint_config = ctx.obj.config refspec = ctx.obj.refspec msg_filename = ctx.obj.msg_filename gitcontext = build_git_context(lint_config, msg_filename, refspec) # Set gitcontext in the click context, so we can use it in command that are ran after this # in particular, this is used by run-hook ctx.obj.gitcontext = gitcontext number_of_commits = len(gitcontext.commits) # Exit if we don't have commits in the specified range. 
Use a 0 exit code, since a popular use-case is one # where users are using --commits in a check job to check the commit messages inside a CI job. By returning 0, we # ensure that these jobs don't fail if for whatever reason the specified commit range is empty. if number_of_commits == 0: LOG.debug(u'No commits in range "%s"', refspec) ctx.exit(0) LOG.debug(u'Linting %d commit(s)', number_of_commits) general_config_builder = ctx.obj.config_builder last_commit = gitcontext.commits[-1] # Let's get linting! first_violation = True exit_code = 0 for commit in gitcontext.commits: # Build a config_builder taking into account the commit specific config (if any) config_builder = general_config_builder.clone() config_builder.set_config_from_commit(commit) # Create a deepcopy from the original config, so we have a unique config object per commit # This is important for configuration rules to be able to modifying the config on a per commit basis commit_config = config_builder.build(copy.deepcopy(lint_config)) # Actually do the linting linter = GitLinter(commit_config) violations = linter.lint(commit) # exit code equals the total number of violations in all commits exit_code += len(violations) if violations: # Display the commit hash & new lines intelligently if number_of_commits > 1 and commit.sha: linter.display.e(u"{0}Commit {1}:".format( "\n" if not first_violation or commit is last_commit else "", commit.sha[:10] )) linter.print_violations(violations) first_violation = False # cap actual max exit code because bash doesn't like exit codes larger than 255: # http://tldp.org/LDP/abs/html/exitcodes.html exit_code = min(MAX_VIOLATION_ERROR_CODE, exit_code) LOG.debug("Exit Code = %s", exit_code) ctx.exit(exit_code) @cli.command("install-hook") @click.pass_context def install_hook(ctx): """ Install gitlint as a git commit-msg hook. """ try: hooks.GitHookInstaller.install_commit_msg_hook(ctx.obj.config) hook_path = hooks.GitHookInstaller.commit_msg_hook_path(ctx.obj.config) click.echo(u"Successfully installed gitlint commit-msg hook in {0}".format(hook_path)) ctx.exit(0) except hooks.GitHookInstallerError as e: click.echo(ustr(e), err=True) ctx.exit(GIT_CONTEXT_ERROR_CODE) @cli.command("uninstall-hook") @click.pass_context def uninstall_hook(ctx): """ Uninstall gitlint commit-msg hook. """ try: hooks.GitHookInstaller.uninstall_commit_msg_hook(ctx.obj.config) hook_path = hooks.GitHookInstaller.commit_msg_hook_path(ctx.obj.config) click.echo(u"Successfully uninstalled gitlint commit-msg hook from {0}".format(hook_path)) ctx.exit(0) except hooks.GitHookInstallerError as e: click.echo(ustr(e), err=True) ctx.exit(GIT_CONTEXT_ERROR_CODE) @cli.command("run-hook") @click.pass_context def run_hook(ctx): """ Runs the gitlint commit-msg hook. """ exit_code = 1 while exit_code > 0: try: click.echo(u"gitlint: checking commit message...") ctx.invoke(lint) click.echo(u"gitlint: " + click.style("OK", fg='green') + u" (no violations in commit message)") except click.exceptions.Exit as e: click.echo(u"-----------------------------------------------") click.echo(u"gitlint: " + click.style("Your commit message contains the above violations.", fg='red')) value = None while value not in ["y", "n", "e"]: click.echo("Continue with commit anyways (this keeps the current commit message)? " "[y(es)/n(no)/e(dit)] ", nl=False) # Ideally, we'd want to use click.getchar() or click.prompt() to get user's input here instead of # input(). However, those functions currently don't support getting answers from stdin. 
# This wouldn't be a huge issue since this is unlikely to occur in the real world, # were it not that we use a stdin to pipe answers into gitlint in our integration tests. # If that ever changes, we can revisit this. # Related click pointers: # - https://github.com/pallets/click/issues/1370 # - https://github.com/pallets/click/pull/1372 # - From https://click.palletsprojects.com/en/7.x/utils/#getting-characters-from-terminal # Note that this function will always read from the terminal, even if stdin is instead a pipe. # # We also need a to use raw_input() in Python2 as input() is unsafe (and raw_input() doesn't exist in # Python3). See https://stackoverflow.com/a/4960216/381010 input_func = input if IS_PY2: input_func = raw_input # noqa pylint: disable=undefined-variable value = input_func() if value == "y": LOG.debug("run-hook: commit message accepted") ctx.exit(0) elif value == "e": LOG.debug("run-hook: editing commit message") msg_filename = ctx.obj.msg_filename if msg_filename: msg_filename.seek(0) editor = os.environ.get("EDITOR", DEFAULT_COMMIT_MSG_EDITOR) msg_filename_path = os.path.realpath(msg_filename.name) LOG.debug("run-hook: %s %s", editor, msg_filename_path) shell([editor, msg_filename_path]) else: click.echo(u"Editing only possible when --msg-filename is specified.") ctx.exit(e.exit_code) elif value == "n": LOG.debug("run-hook: commit message declined") click.echo(u"Commit aborted.") click.echo(u"Your commit message: ") click.echo(u"-----------------------------------------------") click.echo(ctx.obj.gitcontext.commits[0].message.full) click.echo(u"-----------------------------------------------") ctx.exit(e.exit_code) exit_code = e.exit_code @cli.command("generate-config") @click.pass_context def generate_config(ctx): """ Generates a sample gitlint config file. """ path = click.prompt('Please specify a location for the sample gitlint config file', default=DEFAULT_CONFIG_FILE) path = os.path.realpath(path) dir_name = os.path.dirname(path) if not os.path.exists(dir_name): click.echo(u"Error: Directory '{0}' does not exist.".format(dir_name), err=True) ctx.exit(USAGE_ERROR_CODE) elif os.path.exists(path): click.echo(u"Error: File \"{0}\" already exists.".format(path), err=True) ctx.exit(USAGE_ERROR_CODE) LintConfigGenerator.generate_config(path) click.echo(u"Successfully generated {0}".format(path)) ctx.exit(0) # Let's Party! setup_logging() if __name__ == "__main__": # pylint: disable=no-value-for-parameter cli() # pragma: no cover
[]
[]
[ "EDITOR", "GITLINT_USE_SH_LIB" ]
[]
["EDITOR", "GITLINT_USE_SH_LIB"]
python
2
0
test/client_test.go
// // DISCLAIMER // // Copyright 2017 ArangoDB GmbH, Cologne, Germany // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Copyright holder is ArangoDB GmbH, Cologne, Germany // // Author Ewout Prangsma // package test import ( "context" "crypto/tls" "log" httplib "net/http" "os" "strconv" "strings" "sync" "testing" "time" "github.com/arangodb/go-driver/util/connection/wrappers" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" _ "net/http/pprof" driver "github.com/arangodb/go-driver" "github.com/arangodb/go-driver/http" "github.com/arangodb/go-driver/jwt" "github.com/arangodb/go-driver/vst" "github.com/arangodb/go-driver/vst/protocol" ) var ( logEndpointsOnce sync.Once runPProfServerOnce sync.Once ) // skipBetweenVersion skips the test if the current server version is less than // the min version or higher/equal max version func skipBetweenVersion(c driver.Client, minVersion, maxVersion driver.Version, t *testing.T) driver.VersionInfo { x, err := c.Version(nil) if err != nil { t.Fatalf("Failed to get version info: %s", describe(err)) } if x.Version.CompareTo(minVersion) < 0 { t.Skipf("Skipping below version '%s', got version '%s'", minVersion, x.Version) } if x.Version.CompareTo(maxVersion) >= 0 { t.Skipf("Skipping above version '%s', got version '%s'", maxVersion, x.Version) } return x } // skipBelowVersion skips the test if the current server version is less than // the given version. func skipBelowVersion(c driver.Client, version driver.Version, t *testing.T) driver.VersionInfo { x, err := c.Version(nil) if err != nil { t.Fatalf("Failed to get version info: %s", describe(err)) } if x.Version.CompareTo(version) < 0 { t.Skipf("Skipping below version '%s', got version '%s'", version, x.Version) } return x } // getEndpointsFromEnv returns the endpoints specified in the TEST_ENDPOINTS // environment variable. func getEndpointsFromEnv(t testEnv) []string { eps := strings.Split(os.Getenv("TEST_ENDPOINTS"), ",") if len(eps) == 0 { t.Fatal("No endpoints found in environment variable TEST_ENDPOINTS") } return eps } // getContentTypeFromEnv returns the content-type specified in the TEST_CONTENT_TYPE // environment variable (json|vpack). func getContentTypeFromEnv(t testEnv) driver.ContentType { switch ct := os.Getenv("TEST_CONTENT_TYPE"); ct { case "vpack": return driver.ContentTypeVelocypack case "json", "": return driver.ContentTypeJSON default: t.Fatalf("Unknown content type '%s'", ct) return 0 } } // createAuthenticationFromEnv initializes an authentication specified in the TEST_AUTHENTICATION // environment variable. 
func createAuthenticationFromEnv(t testEnv) driver.Authentication { authSpec := os.Getenv("TEST_AUTHENTICATION") if authSpec == "" { return nil } parts := strings.Split(authSpec, ":") switch parts[0] { case "basic": if len(parts) != 3 { t.Fatalf("Expected username & password for basic authentication") } return driver.BasicAuthentication(parts[1], parts[2]) case "jwt": if len(parts) != 3 { t.Fatalf("Expected username & password for jwt authentication") } return driver.JWTAuthentication(parts[1], parts[2]) case "super": if len(parts) != 2 { t.Fatalf("Expected 'super' and jwt secret") } header, err := jwt.CreateArangodJwtAuthorizationHeader(parts[1], "arangodb") if err != nil { t.Fatalf("Could not create JWT authentication header: %s", describe(err)) } return driver.RawAuthentication(header) default: t.Fatalf("Unknown authentication: '%s'", parts[0]) return nil } } // createConnectionFromEnvWitLog initializes a Connection from information specified in environment variables with logger. func createConnectionFromEnvWitLog(t testEnv, logger zerolog.Logger) driver.Connection { conn := createConnectionFromEnv(t) return wrappers.NewLoggerConnection(conn, wrappers.NewZeroLogLogger(logger), true) } // createConnectionFromEnv initializes a Connection from information specified in environment variables. func createConnectionFromEnv(t testEnv) driver.Connection { connSpec := os.Getenv("TEST_CONNECTION") connVer := os.Getenv("TEST_CVERSION") switch connSpec { case "vst": var version protocol.Version switch connVer { case "1.0", "": version = protocol.Version1_0 case "1.1": version = protocol.Version1_1 default: t.Fatalf("Unknown connection version '%s'", connVer) } config := vst.ConnectionConfig{ Endpoints: getEndpointsFromEnv(t), TLSConfig: &tls.Config{InsecureSkipVerify: true}, Transport: protocol.TransportConfig{ Version: version, }, } conn, err := vst.NewConnection(config) if err != nil { t.Fatalf("Failed to create new vst connection: %s", describe(err)) } return conn case "http", "": config := http.ConnectionConfig{ Endpoints: getEndpointsFromEnv(t), TLSConfig: &tls.Config{InsecureSkipVerify: true}, ContentType: getContentTypeFromEnv(t), } conn, err := http.NewConnection(config) if err != nil { t.Fatalf("Failed to create new http connection: %s", describe(err)) } return conn default: t.Fatalf("Unknown connection type: '%s'", connSpec) return nil } } // createClientFromEnv initializes a Client from information specified in environment variables. 
func createClientFromEnv(t testEnv, waitUntilReady bool) driver.Client { runPProfServerOnce.Do(func() { if os.Getenv("TEST_PPROF") != "" { go func() { // Start pprof server on port 6060 // To use it in the test, run a command like: // docker exec -it go-driver-test sh -c "apk add -U curl && curl http://localhost:6060/debug/pprof/goroutine?debug=1" log.Println(httplib.ListenAndServe("localhost:6060", nil)) }() } }) conn := createConnectionFromEnv(t) if os.Getenv("TEST_REQUEST_LOG") != "" { conn = WrapLogger(t, conn) } c, err := driver.NewClient(driver.ClientConfig{ Connection: conn, Authentication: createAuthenticationFromEnv(t), }) if err != nil { t.Fatalf("Failed to create new client: %s", describe(err)) } if waitUntilReady { timeout := time.Minute ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() if up := waitUntilServerAvailable(ctx, c, t); up != nil { t.Fatalf("Connection is not available in %s: %s", timeout, describe(up)) } if testModeSingle != getTestMode() { // Synchronize endpoints if err := waitUntilEndpointSynchronized(ctx, c, "", t); err != nil { t.Errorf("Failed to synchronize endpoints: %s", describe(err)) } else { logEndpointsOnce.Do(func() { t.Logf("Found endpoints: %v", conn.Endpoints()) }) } } } return c } // waitUntilServerAvailable keeps waiting until the server/cluster that the client is addressing is available. func waitUntilServerAvailable(ctx context.Context, c driver.Client, t testEnv) error { return driverErrorCheck(ctx, c, func(ctx context.Context, client driver.Client) error { if getTestMode() != testModeSingle { // Refresh endpoints if err := client.SynchronizeEndpoints2(ctx, "_system"); err != nil { return err } } if _, err := client.Version(ctx); err != nil { return err } if _, err := client.Databases(ctx); err != nil { return err } return nil }, func(err error) (bool, error) { if err == nil { return true, nil } if driver.IsNoLeaderOrOngoing(err) { t.Logf("Retry. Waiting for leader: %s", describe(err)) return false, nil } if driver.IsArangoErrorWithCode(err, 503) { t.Logf("Retry. Service not ready: %s", describe(err)) return false, nil } t.Logf("Retry. Unknown error: %s", describe(err)) return false, nil }).Retry(3*time.Second, time.Minute) } // waitUntilClusterHealthy keeps waiting until the servers are healthy func waitUntilClusterHealthy(c driver.Client) error { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() if _, err := c.Cluster(ctx); err != nil { if driver.IsPreconditionFailed(err) { // only in cluster mode return nil } return err } return retry(time.Second, time.Minute, func() error { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() cluster, err := c.Cluster(ctx) if err != nil { return err } health, err := cluster.Health(ctx) if err != nil { return err } for _, h := range health.Health { if h.Status != driver.ServerStatusGood { return nil } } return interrupt{} }) } // waitUntilEndpointSynchronized keeps waiting until the endpoints are synchronized. leadership might be ongoing. 
func waitUntilEndpointSynchronized(ctx context.Context, c driver.Client, dbname string, t testEnv) error { return driverErrorCheck(ctx, c, func(ctx context.Context, client driver.Client) error { callCtx, cancel := context.WithTimeout(ctx, time.Second*5) defer cancel() if err := c.SynchronizeEndpoints2(callCtx, dbname); err != nil { return err } return nil }, func(err error) (bool, error) { if err == nil { return true, nil } else { return false, nil } }).Retry(3*time.Second, time.Minute) } // TestCreateClientHttpConnection creates an HTTP connection to the environment specified // endpoints and creates a client for that. func TestCreateClientHttpConnection(t *testing.T) { conn, err := http.NewConnection(http.ConnectionConfig{ Endpoints: getEndpointsFromEnv(t), Transport: NewConnectionTransport(), }) if err != nil { t.Fatalf("Failed to create new http connection: %s", describe(err)) } _, err = driver.NewClient(driver.ClientConfig{ Connection: conn, Authentication: createAuthenticationFromEnv(t), }) if err != nil { t.Fatalf("Failed to create new client: %s", describe(err)) } } // TestResponseHeader checks the Response.Header function. func TestResponseHeader(t *testing.T) { c := createClientFromEnv(t, true) ctx := context.Background() version, err := c.Version(nil) if err != nil { t.Fatalf("Version failed: %s", describe(err)) } isv33p := version.Version.CompareTo("3.3") >= 0 if !isv33p { t.Skip("This test requires version 3.3") } else { var resp driver.Response db := ensureDatabase(ctx, c, "_system", nil, t) col := ensureCollection(ctx, db, "response_header_test", nil, t) defer clean(t, ctx, col) // `ETag` header must contain the `_rev` of the new document in quotes. doc := map[string]string{ "Test": "TestResponseHeader", "Intent": "Check Response.Header", } meta, err := col.CreateDocument(driver.WithResponse(ctx, &resp), doc) if err != nil { t.Fatalf("CreateDocument failed: %s", describe(err)) } expectedETag := strconv.Quote(meta.Rev) if x := resp.Header("ETag"); x != expectedETag { t.Errorf("Unexpected result from Header('ETag'), got '%s', expected '%s'", x, expectedETag) } if x := resp.Header("Etag"); x != expectedETag { t.Errorf("Unexpected result from Header('Etag'), got '%s', expected '%s'", x, expectedETag) } if x := resp.Header("etag"); x != expectedETag { t.Errorf("Unexpected result from Header('etag'), got '%s', expected '%s'", x, expectedETag) } if x := resp.Header("ETAG"); x != expectedETag { t.Errorf("Unexpected result from Header('ETAG'), got '%s', expected '%s'", x, expectedETag) } } } type dummyRequestRepeat struct { counter int } func (r *dummyRequestRepeat) Repeat(conn driver.Connection, resp driver.Response, err error) bool { r.counter++ if r.counter == 2 { return false } return true } func TestCreateClientHttpRepeatConnection(t *testing.T) { if getTestMode() != testModeSingle { t.Skipf("Not a single") } createClientFromEnv(t, true) requestRepeat := dummyRequestRepeat{} conn := createConnectionFromEnv(t) c, err := driver.NewClient(driver.ClientConfig{ Connection: http.NewRepeatConnection(conn, &requestRepeat), Authentication: createAuthenticationFromEnv(t), }) _, err = c.Connection().SetAuthentication(createAuthenticationFromEnv(t)) assert.Equal(t, http.ErrAuthenticationNotChanged, err) _, err = c.Databases(nil) require.NoError(t, err) assert.Equal(t, 2, requestRepeat.counter) }
[ "\"TEST_ENDPOINTS\"", "\"TEST_CONTENT_TYPE\"", "\"TEST_AUTHENTICATION\"", "\"TEST_CONNECTION\"", "\"TEST_CVERSION\"", "\"TEST_PPROF\"", "\"TEST_REQUEST_LOG\"" ]
[]
[ "TEST_CVERSION", "TEST_CONTENT_TYPE", "TEST_PPROF", "TEST_ENDPOINTS", "TEST_REQUEST_LOG", "TEST_AUTHENTICATION", "TEST_CONNECTION" ]
[]
["TEST_CVERSION", "TEST_CONTENT_TYPE", "TEST_PPROF", "TEST_ENDPOINTS", "TEST_REQUEST_LOG", "TEST_AUTHENTICATION", "TEST_CONNECTION"]
go
7
0
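The row above captures a Go test helper that switches optional behaviour (a pprof listener, request logging) on environment variables read with os.Getenv. The sketch below is illustrative only and is not part of the dataset row: it reduces that gating pattern to a self-contained program. The names maybeStartPProf and the 6060 port mirror the row but are otherwise hypothetical; only the TEST_PPROF and TEST_REQUEST_LOG variable names come from the data.

package main

import (
	"fmt"
	"log"
	"net/http"
	_ "net/http/pprof" // registers the pprof handlers on the default mux
	"os"
	"sync"
)

var pprofOnce sync.Once

// maybeStartPProf starts a localhost debug server only when TEST_PPROF is
// set, mirroring how the test helper above guards optional diagnostics.
func maybeStartPProf() {
	pprofOnce.Do(func() {
		if os.Getenv("TEST_PPROF") != "" {
			go func() {
				// Errors are only logged; the caller keeps running.
				log.Println(http.ListenAndServe("localhost:6060", nil))
			}()
		}
	})
}

func main() {
	maybeStartPProf()
	if os.Getenv("TEST_REQUEST_LOG") != "" {
		fmt.Println("request logging enabled")
	}
	fmt.Println("client configured")
}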
manage.py
#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mtaani.settings") try: from django.core.management import execute_from_command_line except ImportError: # The above import may fail for some other reason. Ensure that the # issue is really that Django is missing to avoid masking other # exceptions on Python 2. try: import django except ImportError: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) raise execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
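manage.py above relies on os.environ.setdefault to supply DJANGO_SETTINGS_MODULE only when the caller has not already set it. As a hedged, cross-language sketch (not part of the dataset), the same "set only if absent" behaviour can be written in Go; the setdefault helper is hypothetical and only the variable name and fallback value come from the row.

package main

import (
	"fmt"
	"os"
)

// setdefault mirrors Python's os.environ.setdefault: the value is written
// only when the key is not present in the environment at all.
func setdefault(key, value string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v
	}
	if err := os.Setenv(key, value); err != nil {
		fmt.Println("could not set", key, "-", err)
	}
	return value
}

func main() {
	settings := setdefault("DJANGO_SETTINGS_MODULE", "mtaani.settings")
	fmt.Println("settings module:", settings)
}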
shared/version/api.go
package version import ( "os" "strconv" ) // APIVersion contains the API base version. Only bumped for backward incompatible changes. var APIVersion = "1.0" // APIExtensions is the list of all API extensions in the order they were added. // // The following kind of changes come with a new extensions: // // - New configuration key // - New valid values for a configuration key // - New REST API endpoint // - New argument inside an existing REST API call // - New HTTPs authentication mechanisms or protocols // // This list is used mainly by the LXD server code, but it's in the shared // package as well for reference. var APIExtensions = []string{ "storage_zfs_remove_snapshots", "container_host_shutdown_timeout", "container_stop_priority", "container_syscall_filtering", "auth_pki", "container_last_used_at", "etag", "patch", "usb_devices", "https_allowed_credentials", "image_compression_algorithm", "directory_manipulation", "container_cpu_time", "storage_zfs_use_refquota", "storage_lvm_mount_options", "network", "profile_usedby", "container_push", "container_exec_recording", "certificate_update", "container_exec_signal_handling", "gpu_devices", "container_image_properties", "migration_progress", "id_map", "network_firewall_filtering", "network_routes", "storage", "file_delete", "file_append", "network_dhcp_expiry", "storage_lvm_vg_rename", "storage_lvm_thinpool_rename", "network_vlan", "image_create_aliases", "container_stateless_copy", "container_only_migration", "storage_zfs_clone_copy", "unix_device_rename", "storage_lvm_use_thinpool", "storage_rsync_bwlimit", "network_vxlan_interface", "storage_btrfs_mount_options", "entity_description", "image_force_refresh", "storage_lvm_lv_resizing", "id_map_base", "file_symlinks", "container_push_target", "network_vlan_physical", "storage_images_delete", "container_edit_metadata", "container_snapshot_stateful_migration", "storage_driver_ceph", "storage_ceph_user_name", "resource_limits", "storage_volatile_initial_source", "storage_ceph_force_osd_reuse", "storage_block_filesystem_btrfs", "resources", "kernel_limits", "storage_api_volume_rename", "macaroon_authentication", "network_sriov", "console", "restrict_devlxd", "migration_pre_copy", "infiniband", "maas_network", "devlxd_events", "proxy", "network_dhcp_gateway", "file_get_symlink", "network_leases", "unix_device_hotplug", "storage_api_local_volume_handling", "operation_description", "clustering", "event_lifecycle", "storage_api_remote_volume_handling", "nvidia_runtime", "container_mount_propagation", "container_backup", "devlxd_images", "container_local_cross_pool_handling", "proxy_unix", "proxy_udp", "clustering_join", "proxy_tcp_udp_multi_port_handling", "network_state", "proxy_unix_dac_properties", "container_protection_delete", "unix_priv_drop", "pprof_http", "proxy_haproxy_protocol", "network_hwaddr", "proxy_nat", "network_nat_order", "container_full", "candid_authentication", "backup_compression", "candid_config", "nvidia_runtime_config", "storage_api_volume_snapshots", "storage_unmapped", "projects", "candid_config_key", "network_vxlan_ttl", "container_incremental_copy", "usb_optional_vendorid", "snapshot_scheduling", "container_copy_project", "clustering_server_address", "clustering_image_replication", "container_protection_shift", "snapshot_expiry", "container_backup_override_pool", "snapshot_expiry_creation", "network_leases_location", "resources_cpu_socket", "resources_gpu", "resources_numa", "kernel_features", "id_map_current", "event_location", "storage_api_remote_volume_snapshots", 
"network_nat_address", "container_nic_routes", "rbac", "cluster_internal_copy", "seccomp_notify", "lxc_features", "container_nic_ipvlan", "network_vlan_sriov", "storage_cephfs", "container_nic_ipfilter", "resources_v2", "container_exec_user_group_cwd", "container_syscall_intercept", "container_disk_shift", "storage_shifted", "resources_infiniband", "daemon_storage", "instances", "image_types", "resources_disk_sata", "clustering_roles", "images_expiry", "resources_network_firmware", "backup_compression_algorithm", "ceph_data_pool_name", "container_syscall_intercept_mount", "compression_squashfs", "container_raw_mount", "container_nic_routed", "container_syscall_intercept_mount_fuse", "container_disk_ceph", "virtual-machines", "image_profiles", "clustering_architecture", "resources_disk_id", "storage_lvm_stripes", "vm_boot_priority", "unix_hotplug_devices", "api_filtering", "instance_nic_network", "clustering_sizing", "firewall_driver", "projects_limits", "container_syscall_intercept_hugetlbfs", "limits_hugepages", "container_nic_routed_gateway", "projects_restrictions", "custom_volume_snapshot_expiry", "volume_snapshot_scheduling", "trust_ca_certificates", "snapshot_disk_usage", "clustering_edit_roles", "container_nic_routed_host_address", "container_nic_ipvlan_gateway", "resources_usb_pci", "resources_cpu_threads_numa", "resources_cpu_core_die", "api_os", "container_nic_routed_host_table", "container_nic_ipvlan_host_table", "container_nic_ipvlan_mode", "resources_system", "images_push_relay", "network_dns_search", "container_nic_routed_limits", "instance_nic_bridged_vlan", "network_state_bond_bridge", "usedby_consistency", "custom_block_volumes", "clustering_failure_domains", "resources_gpu_mdev", "console_vga_type", "projects_limits_disk", "network_type_macvlan", "network_type_sriov", "container_syscall_intercept_bpf_devices", "network_type_ovn", "projects_networks", "projects_networks_restricted_uplinks", "custom_volume_backup", "backup_override_name", "storage_rsync_compression", "network_type_physical", "network_ovn_external_subnets", "network_ovn_nat", "network_ovn_external_routes_remove", "tpm_device_type", "storage_zfs_clone_copy_rebase", "gpu_mdev", "resources_pci_iommu", "resources_network_usb", "resources_disk_address", "network_physical_ovn_ingress_mode", "network_ovn_dhcp", "network_physical_routes_anycast", "projects_limits_instances", "network_state_vlan", "instance_nic_bridged_port_isolation", "instance_bulk_state_change", } // APIExtensionsCount returns the number of available API extensions. func APIExtensionsCount() int { count := len(APIExtensions) // This environment variable is an internal one to force the code // to believe that we have an API extensions count greater than we // actually have. It's used by integration tests to exercise the // cluster upgrade process. artificialBump := os.Getenv("LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS") if artificialBump != "" { n, err := strconv.Atoi(artificialBump) if err == nil { count += n } } return count }
[ "\"LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS\"" ]
[]
[ "LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS" ]
[]
["LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS"]
go
1
0
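APIExtensionsCount in the row above adds an artificial bump, parsed from LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS, to the real extension count so integration tests can exercise the cluster upgrade path. The following is a minimal, self-contained sketch of that parsing pattern, assuming nothing beyond the standard library; extensionsCount is a hypothetical name and the base value in main is an arbitrary example, not the real number of extensions.

package main

import (
	"fmt"
	"os"
	"strconv"
)

// extensionsCount adds an optional integer bump, read from the environment,
// to the base count; missing or malformed values are silently ignored,
// matching the behaviour of APIExtensionsCount in the row above.
func extensionsCount(base int) int {
	count := base
	if bump := os.Getenv("LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS"); bump != "" {
		if n, err := strconv.Atoi(bump); err == nil {
			count += n
		}
	}
	return count
}

func main() {
	// 200 is an arbitrary example base for illustration only.
	fmt.Println(extensionsCount(200))
}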
cmd/executor/cmd/root.go
/* Copyright 2018 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cmd import ( "fmt" "os" "path/filepath" "regexp" "strings" "time" "github.com/GoogleContainerTools/kaniko/pkg/buildcontext" "github.com/GoogleContainerTools/kaniko/pkg/config" "github.com/GoogleContainerTools/kaniko/pkg/constants" "github.com/GoogleContainerTools/kaniko/pkg/executor" "github.com/GoogleContainerTools/kaniko/pkg/logging" "github.com/GoogleContainerTools/kaniko/pkg/timing" "github.com/GoogleContainerTools/kaniko/pkg/util" "github.com/genuinetools/amicontained/container" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/pflag" ) var ( opts = &config.KanikoOptions{} force bool logLevel string logFormat string ) func init() { RootCmd.PersistentFlags().StringVarP(&logLevel, "verbosity", "v", logging.DefaultLevel, "Log level (debug, info, warn, error, fatal, panic") RootCmd.PersistentFlags().StringVar(&logFormat, "log-format", logging.FormatColor, "Log format (text, color, json)") RootCmd.PersistentFlags().BoolVarP(&force, "force", "", false, "Force building outside of a container") addKanikoOptionsFlags() addHiddenFlags(RootCmd) } // RootCmd is the kaniko command that is run var RootCmd = &cobra.Command{ Use: "executor", PersistentPreRunE: func(cmd *cobra.Command, args []string) error { if cmd.Use == "executor" { resolveEnvironmentBuildArgs(opts.BuildArgs, os.Getenv) if err := logging.Configure(logLevel, logFormat); err != nil { return err } if !opts.NoPush && len(opts.Destinations) == 0 { return errors.New("You must provide --destination, or use --no-push") } if err := cacheFlagsValid(); err != nil { return errors.Wrap(err, "cache flags invalid") } if err := resolveSourceContext(); err != nil { return errors.Wrap(err, "error resolving source context") } if err := resolveDockerfilePath(); err != nil { return errors.Wrap(err, "error resolving dockerfile path") } if len(opts.Destinations) == 0 && opts.ImageNameDigestFile != "" { return errors.New("You must provide --destination if setting ImageNameDigestFile") } // Update whitelisted paths util.UpdateWhitelist(opts.WhitelistVarRun) } return nil }, Run: func(cmd *cobra.Command, args []string) { if !checkContained() { if !force { exit(errors.New("kaniko should only be run inside of a container, run with the --force flag if you are sure you want to continue")) } logrus.Warn("kaniko is being run outside of a container. 
This can have dangerous effects on your system") } if err := executor.CheckPushPermissions(opts); err != nil { exit(errors.Wrap(err, "error checking push permissions -- make sure you entered the correct tag name, and that you are authenticated correctly, and try again")) } if err := resolveRelativePaths(); err != nil { exit(errors.Wrap(err, "error resolving relative paths to absolute paths")) } if err := os.Chdir("/"); err != nil { exit(errors.Wrap(err, "error changing to root dir")) } image, err := executor.DoBuild(opts) if err != nil { exit(errors.Wrap(err, "error building image")) } if err := executor.DoPush(image, opts); err != nil { exit(errors.Wrap(err, "error pushing image")) } benchmarkFile := os.Getenv("BENCHMARK_FILE") // false is a keyword for integration tests to turn off benchmarking if benchmarkFile != "" && benchmarkFile != "false" { f, err := os.Create(benchmarkFile) if err != nil { logrus.Warnf("Unable to create benchmarking file %s: %s", benchmarkFile, err) } defer f.Close() s, err := timing.JSON() if err != nil { logrus.Warnf("Unable to write benchmark file: %s", err) } f.WriteString(s) } }, } // addKanikoOptionsFlags configures opts func addKanikoOptionsFlags() { RootCmd.PersistentFlags().StringVarP(&opts.DockerfilePath, "dockerfile", "f", "Dockerfile", "Path to the dockerfile to be built.") RootCmd.PersistentFlags().StringVarP(&opts.SrcContext, "context", "c", "/workspace/", "Path to the dockerfile build context.") RootCmd.PersistentFlags().StringVarP(&opts.Bucket, "bucket", "b", "", "Name of the GCS bucket from which to access build context as tarball.") RootCmd.PersistentFlags().VarP(&opts.Destinations, "destination", "d", "Registry the final image should be pushed to. Set it repeatedly for multiple destinations.") RootCmd.PersistentFlags().StringVarP(&opts.SnapshotMode, "snapshotMode", "", "full", "Change the file attributes inspected during snapshotting") RootCmd.PersistentFlags().VarP(&opts.BuildArgs, "build-arg", "", "This flag allows you to pass in ARG values at build time. 
Set it repeatedly for multiple values.") RootCmd.PersistentFlags().BoolVarP(&opts.Insecure, "insecure", "", false, "Push to insecure registry using plain HTTP") RootCmd.PersistentFlags().BoolVarP(&opts.SkipTLSVerify, "skip-tls-verify", "", false, "Push to insecure registry ignoring TLS verify") RootCmd.PersistentFlags().BoolVarP(&opts.InsecurePull, "insecure-pull", "", false, "Pull from insecure registry using plain HTTP") RootCmd.PersistentFlags().BoolVarP(&opts.SkipTLSVerifyPull, "skip-tls-verify-pull", "", false, "Pull from insecure registry ignoring TLS verify") RootCmd.PersistentFlags().StringVarP(&opts.TarPath, "tarPath", "", "", "Path to save the image in as a tarball instead of pushing") RootCmd.PersistentFlags().BoolVarP(&opts.SingleSnapshot, "single-snapshot", "", false, "Take a single snapshot at the end of the build.") RootCmd.PersistentFlags().BoolVarP(&opts.Reproducible, "reproducible", "", false, "Strip timestamps out of the image to make it reproducible") RootCmd.PersistentFlags().StringVarP(&opts.Target, "target", "", "", "Set the target build stage to build") RootCmd.PersistentFlags().BoolVarP(&opts.NoPush, "no-push", "", false, "Do not push the image to the registry") RootCmd.PersistentFlags().StringVarP(&opts.CacheRepo, "cache-repo", "", "", "Specify a repository to use as a cache, otherwise one will be inferred from the destination provided") RootCmd.PersistentFlags().StringVarP(&opts.CacheDir, "cache-dir", "", "/cache", "Specify a local directory to use as a cache.") RootCmd.PersistentFlags().StringVarP(&opts.DigestFile, "digest-file", "", "", "Specify a file to save the digest of the built image to.") RootCmd.PersistentFlags().StringVarP(&opts.ImageNameDigestFile, "image-name-with-digest-file", "", "", "Specify a file to save the image name w/ digest of the built image to.") RootCmd.PersistentFlags().StringVarP(&opts.OCILayoutPath, "oci-layout-path", "", "", "Path to save the OCI image layout of the built image.") RootCmd.PersistentFlags().BoolVarP(&opts.Cache, "cache", "", false, "Use cache when building image") RootCmd.PersistentFlags().BoolVarP(&opts.Cleanup, "cleanup", "", false, "Clean the filesystem at the end") RootCmd.PersistentFlags().DurationVarP(&opts.CacheTTL, "cache-ttl", "", time.Hour*336, "Cache timeout in hours. Defaults to two weeks.") RootCmd.PersistentFlags().VarP(&opts.InsecureRegistries, "insecure-registry", "", "Insecure registry using plain HTTP to push and pull. Set it repeatedly for multiple registries.") RootCmd.PersistentFlags().VarP(&opts.SkipTLSVerifyRegistries, "skip-tls-verify-registry", "", "Insecure registry ignoring TLS verify to push and pull. Set it repeatedly for multiple registries.") opts.RegistriesCertificates = make(map[string]string) RootCmd.PersistentFlags().VarP(&opts.RegistriesCertificates, "registry-certificate", "", "Use the provided certificate for TLS communication with the given registry. Expected format is 'my.registry.url=/path/to/the/server/certificate'.") RootCmd.PersistentFlags().StringVarP(&opts.RegistryMirror, "registry-mirror", "", "", "Registry mirror to use has pull-through cache instead of docker.io.") RootCmd.PersistentFlags().BoolVarP(&opts.WhitelistVarRun, "whitelist-var-run", "", true, "Ignore /var/run directory when taking image snapshot. Set it to false to preserve /var/run/ in destination image. (Default true).") RootCmd.PersistentFlags().VarP(&opts.Labels, "label", "", "Set metadata for an image. 
Set it repeatedly for multiple labels.") } // addHiddenFlags marks certain flags as hidden from the executor help text func addHiddenFlags(cmd *cobra.Command) { // This flag is added in a vendored directory, hide so that it doesn't come up via --help pflag.CommandLine.MarkHidden("azure-container-registry-config") // Hide this flag as we want to encourage people to use the --context flag instead cmd.PersistentFlags().MarkHidden("bucket") } func checkContained() bool { _, err := container.DetectRuntime() return err == nil } // cacheFlagsValid makes sure the flags passed in related to caching are valid func cacheFlagsValid() error { if !opts.Cache { return nil } // If --cache=true and --no-push=true, then cache repo must be provided // since cache can't be inferred from destination if opts.CacheRepo == "" && opts.NoPush { return errors.New("if using cache with --no-push, specify cache repo with --cache-repo") } return nil } // resolveDockerfilePath resolves the Dockerfile path to an absolute path func resolveDockerfilePath() error { if isURL(opts.DockerfilePath) { return nil } if util.FilepathExists(opts.DockerfilePath) { abs, err := filepath.Abs(opts.DockerfilePath) if err != nil { return errors.Wrap(err, "getting absolute path for dockerfile") } opts.DockerfilePath = abs return copyDockerfile() } // Otherwise, check if the path relative to the build context exists if util.FilepathExists(filepath.Join(opts.SrcContext, opts.DockerfilePath)) { abs, err := filepath.Abs(filepath.Join(opts.SrcContext, opts.DockerfilePath)) if err != nil { return errors.Wrap(err, "getting absolute path for src context/dockerfile path") } opts.DockerfilePath = abs return copyDockerfile() } return errors.New("please provide a valid path to a Dockerfile within the build context with --dockerfile") } // resolveEnvironmentBuildArgs replace build args without value by the same named environment variable func resolveEnvironmentBuildArgs(arguments []string, resolver func(string) string) { for index, argument := range arguments { i := strings.Index(argument, "=") if i < 0 { value := resolver(argument) arguments[index] = fmt.Sprintf("%s=%s", argument, value) } } } // copy Dockerfile to /kaniko/Dockerfile so that if it's specified in the .dockerignore // it won't be copied into the image func copyDockerfile() error { if _, err := util.CopyFile(opts.DockerfilePath, constants.DockerfilePath, "", util.DoNotChangeUID, util.DoNotChangeGID); err != nil { return errors.Wrap(err, "copying dockerfile") } opts.DockerfilePath = constants.DockerfilePath return nil } // resolveSourceContext unpacks the source context if it is a tar in a bucket or in kaniko container // it resets srcContext to be the path to the unpacked build context within the image func resolveSourceContext() error { if opts.SrcContext == "" && opts.Bucket == "" { return errors.New("please specify a path to the build context with the --context flag or a bucket with the --bucket flag") } if opts.SrcContext != "" && !strings.Contains(opts.SrcContext, "://") { return nil } if opts.Bucket != "" { if !strings.Contains(opts.Bucket, "://") { // if no prefix use Google Cloud Storage as default for backwards compatibility opts.SrcContext = constants.GCSBuildContextPrefix + opts.Bucket } else { opts.SrcContext = opts.Bucket } } contextExecutor, err := buildcontext.GetBuildContext(opts.SrcContext) if err != nil { return err } logrus.Debugf("Getting source context from %s", opts.SrcContext) opts.SrcContext, err = contextExecutor.UnpackTarFromBuildContext() if err != nil { return 
err } logrus.Debugf("Build context located at %s", opts.SrcContext) return nil } func resolveRelativePaths() error { optsPaths := []*string{ &opts.DockerfilePath, &opts.SrcContext, &opts.CacheDir, &opts.TarPath, &opts.DigestFile, &opts.ImageNameDigestFile, } for _, p := range optsPaths { if path := *p; shdSkip(path) { logrus.Debugf("Skip resolving path %s", path) continue } // Resolve relative path to absolute path var err error relp := *p // save original relative path if *p, err = filepath.Abs(*p); err != nil { return errors.Wrapf(err, "Couldn't resolve relative path %s to an absolute path", *p) } logrus.Debugf("Resolved relative path %s to %s", relp, *p) } return nil } func exit(err error) { fmt.Println(err) os.Exit(1) } func isURL(path string) bool { if match, _ := regexp.MatchString("^https?://", path); match { return true } return false } func shdSkip(path string) bool { return path == "" || isURL(path) || filepath.IsAbs(path) }
[ "\"BENCHMARK_FILE\"" ]
[]
[ "BENCHMARK_FILE" ]
[]
["BENCHMARK_FILE"]
go
1
0
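The kaniko executor above writes its timing data to the path named by BENCHMARK_FILE, skipping the write when the variable is empty or set to the literal keyword "false" (used by its integration tests to turn benchmarking off). The sketch below is an illustrative reduction of that logic with simplified error handling; writeBenchmark and the JSON payload in main are hypothetical stand-ins.

package main

import (
	"log"
	"os"
)

// writeBenchmark writes a report to the file named by BENCHMARK_FILE.
// An empty value or the keyword "false" disables the write, as in the
// kaniko command above; failures are logged rather than fatal.
func writeBenchmark(report string) {
	benchmarkFile := os.Getenv("BENCHMARK_FILE")
	if benchmarkFile == "" || benchmarkFile == "false" {
		return
	}
	f, err := os.Create(benchmarkFile)
	if err != nil {
		log.Printf("unable to create benchmark file %s: %s", benchmarkFile, err)
		return
	}
	defer f.Close()
	if _, err := f.WriteString(report); err != nil {
		log.Printf("unable to write benchmark file: %s", err)
	}
}

func main() {
	// Hypothetical payload standing in for the kaniko timing JSON.
	writeBenchmark(`{"timings": {}}`)
}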
sdks/python/apache_beam/runners/dataflow/dataflow_runner.py
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """A runner implementation that submits a job for remote execution. The runner will create a JSON description of the job graph and then submit it to the Dataflow Service for remote execution by a worker. """ # pytype: skip-file from __future__ import absolute_import from __future__ import division import base64 import json import logging import os import subprocess import sys import threading import time import traceback import urllib from builtins import hex from collections import defaultdict from typing import TYPE_CHECKING from typing import List from future.utils import iteritems import apache_beam as beam from apache_beam import coders from apache_beam import error from apache_beam import pvalue from apache_beam.internal import pickler from apache_beam.internal.gcp import json_value from apache_beam.options.pipeline_options import DebugOptions from apache_beam.options.pipeline_options import GoogleCloudOptions from apache_beam.options.pipeline_options import SetupOptions from apache_beam.options.pipeline_options import StandardOptions from apache_beam.options.pipeline_options import TestOptions from apache_beam.options.pipeline_options import WorkerOptions from apache_beam.portability import common_urns from apache_beam.portability.api import beam_runner_api_pb2 from apache_beam.pvalue import AsSideInput from apache_beam.runners.common import DoFnSignature from apache_beam.runners.dataflow.internal import names from apache_beam.runners.dataflow.internal.clients import dataflow as dataflow_api from apache_beam.runners.dataflow.internal.names import PropertyNames from apache_beam.runners.dataflow.internal.names import TransformNames from apache_beam.runners.runner import PipelineResult from apache_beam.runners.runner import PipelineRunner from apache_beam.runners.runner import PipelineState from apache_beam.runners.runner import PValueCache from apache_beam.transforms import window from apache_beam.transforms.core import RunnerAPIPTransformHolder from apache_beam.transforms.display import DisplayData from apache_beam.transforms.sideinputs import SIDE_INPUT_PREFIX from apache_beam.typehints import typehints from apache_beam.utils import processes from apache_beam.utils import proto_utils from apache_beam.utils.interactive_utils import is_in_notebook from apache_beam.utils.plugin import BeamPlugin if TYPE_CHECKING: from apache_beam.pipeline import PTransformOverride if sys.version_info[0] > 2: unquote_to_bytes = urllib.parse.unquote_to_bytes quote = urllib.parse.quote else: unquote_to_bytes = urllib.unquote # pylint: disable=deprecated-urllib-function quote = urllib.quote # pylint: disable=deprecated-urllib-function __all__ = ['DataflowRunner'] _LOGGER = logging.getLogger(__name__) BQ_SOURCE_UW_ERROR = ( 'The 
Read(BigQuerySource(...)) transform is not supported with newer stack ' 'features (Fn API, Dataflow Runner V2, etc). Please use the transform ' 'apache_beam.io.gcp.bigquery.ReadFromBigQuery instead.') class DataflowRunner(PipelineRunner): """A runner that creates job graphs and submits them for remote execution. Every execution of the run() method will submit an independent job for remote execution that consists of the nodes reachable from the passed in node argument or entire graph if node is None. The run() method returns after the service created the job and will not wait for the job to finish if blocking is set to False. """ # A list of PTransformOverride objects to be applied before running a pipeline # using DataflowRunner. # Currently this only works for overrides where the input and output types do # not change. # For internal SDK use only. This should not be updated by Beam pipeline # authors. # Imported here to avoid circular dependencies. # TODO: Remove the apache_beam.pipeline dependency in CreatePTransformOverride from apache_beam.runners.dataflow.ptransform_overrides import CombineValuesPTransformOverride from apache_beam.runners.dataflow.ptransform_overrides import CreatePTransformOverride from apache_beam.runners.dataflow.ptransform_overrides import ReadPTransformOverride from apache_beam.runners.dataflow.ptransform_overrides import JrhReadPTransformOverride # Thesse overrides should be applied before the proto representation of the # graph is created. _PTRANSFORM_OVERRIDES = [ CombineValuesPTransformOverride() ] # type: List[PTransformOverride] _JRH_PTRANSFORM_OVERRIDES = [ JrhReadPTransformOverride(), ] # type: List[PTransformOverride] # These overrides should be applied after the proto representation of the # graph is created. _NON_PORTABLE_PTRANSFORM_OVERRIDES = [ CreatePTransformOverride(), ReadPTransformOverride(), ] # type: List[PTransformOverride] def __init__(self, cache=None): # Cache of CloudWorkflowStep protos generated while the runner # "executes" a pipeline. self._cache = cache if cache is not None else PValueCache() self._unique_step_id = 0 def is_fnapi_compatible(self): return False def apply(self, transform, input, options): self._maybe_add_unified_worker_missing_options(options) return super(DataflowRunner, self).apply(transform, input, options) def _get_unique_step_name(self): self._unique_step_id += 1 return 's%s' % self._unique_step_id @staticmethod def poll_for_job_completion(runner, result, duration): """Polls for the specified job to finish running (successfully or not). Updates the result with the new job information before returning. Args: runner: DataflowRunner instance to use for polling job state. result: DataflowPipelineResult instance used for job information. duration (int): The time to wait (in milliseconds) for job to finish. If it is set to :data:`None`, it will wait indefinitely until the job is finished. """ last_message_time = None current_seen_messages = set() last_error_rank = float('-inf') last_error_msg = None last_job_state = None # How long to wait after pipeline failure for the error # message to show up giving the reason for the failure. # It typically takes about 30 seconds. final_countdown_timer_secs = 50.0 sleep_secs = 5.0 # Try to prioritize the user-level traceback, if any. 
def rank_error(msg): if 'work item was attempted' in msg: return -1 elif 'Traceback' in msg: return 1 return 0 if duration: start_secs = time.time() duration_secs = duration // 1000 job_id = result.job_id() while True: response = runner.dataflow_client.get_job(job_id) # If get() is called very soon after Create() the response may not contain # an initialized 'currentState' field. if response.currentState is not None: if response.currentState != last_job_state: _LOGGER.info('Job %s is in state %s', job_id, response.currentState) last_job_state = response.currentState if str(response.currentState) != 'JOB_STATE_RUNNING': # Stop checking for new messages on timeout, explanatory # message received, success, or a terminal job state caused # by the user that therefore doesn't require explanation. if (final_countdown_timer_secs <= 0.0 or last_error_msg is not None or str(response.currentState) == 'JOB_STATE_DONE' or str(response.currentState) == 'JOB_STATE_CANCELLED' or str(response.currentState) == 'JOB_STATE_UPDATED' or str(response.currentState) == 'JOB_STATE_DRAINED'): break # Check that job is in a post-preparation state before starting the # final countdown. if (str(response.currentState) not in ('JOB_STATE_PENDING', 'JOB_STATE_QUEUED')): # The job has failed; ensure we see any final error messages. sleep_secs = 1.0 # poll faster during the final countdown final_countdown_timer_secs -= sleep_secs time.sleep(sleep_secs) # Get all messages since beginning of the job run or since last message. page_token = None while True: messages, page_token = runner.dataflow_client.list_messages( job_id, page_token=page_token, start_time=last_message_time) for m in messages: message = '%s: %s: %s' % (m.time, m.messageImportance, m.messageText) if not last_message_time or m.time > last_message_time: last_message_time = m.time current_seen_messages = set() if message in current_seen_messages: # Skip the message if it has already been seen at the current # time. This could be the case since the list_messages API is # queried starting at last_message_time. continue else: current_seen_messages.add(message) # Skip empty messages. if m.messageImportance is None: continue _LOGGER.info(message) if str(m.messageImportance) == 'JOB_MESSAGE_ERROR': if rank_error(m.messageText) >= last_error_rank: last_error_rank = rank_error(m.messageText) last_error_msg = m.messageText if not page_token: break if duration: passed_secs = time.time() - start_secs if passed_secs > duration_secs: _LOGGER.warning( 'Timing out on waiting for job %s after %d seconds', job_id, passed_secs) break result._job = response runner.last_error_msg = last_error_msg @staticmethod def _only_element(iterable): # type: (Iterable[T]) -> T element, = iterable return element @staticmethod def group_by_key_input_visitor(): # Imported here to avoid circular dependencies. from apache_beam.pipeline import PipelineVisitor class GroupByKeyInputVisitor(PipelineVisitor): """A visitor that replaces `Any` element type for input `PCollection` of a `GroupByKey` or `_GroupByKeyOnly` with a `KV` type. TODO(BEAM-115): Once Python SDk is compatible with the new Runner API, we could directly replace the coder instead of mutating the element type. """ def enter_composite_transform(self, transform_node): self.visit_transform(transform_node) def visit_transform(self, transform_node): # Imported here to avoid circular dependencies. 
# pylint: disable=wrong-import-order, wrong-import-position from apache_beam.transforms.core import GroupByKey, _GroupByKeyOnly if isinstance(transform_node.transform, (GroupByKey, _GroupByKeyOnly)): pcoll = transform_node.inputs[0] pcoll.element_type = typehints.coerce_to_kv_type( pcoll.element_type, transform_node.full_label) key_type, value_type = pcoll.element_type.tuple_types if transform_node.outputs: key = DataflowRunner._only_element(transform_node.outputs.keys()) transform_node.outputs[key].element_type = typehints.KV[ key_type, typehints.Iterable[value_type]] return GroupByKeyInputVisitor() @staticmethod def _set_pdone_visitor(pipeline): # Imported here to avoid circular dependencies. from apache_beam.pipeline import PipelineVisitor class SetPDoneVisitor(PipelineVisitor): def __init__(self, pipeline): self._pipeline = pipeline @staticmethod def _maybe_fix_output(transform_node, pipeline): if not transform_node.outputs: pval = pvalue.PDone(pipeline) pval.producer = transform_node transform_node.outputs = {None: pval} def enter_composite_transform(self, transform_node): SetPDoneVisitor._maybe_fix_output(transform_node, self._pipeline) def visit_transform(self, transform_node): SetPDoneVisitor._maybe_fix_output(transform_node, self._pipeline) return SetPDoneVisitor(pipeline) @staticmethod def side_input_visitor(use_unified_worker=False): # Imported here to avoid circular dependencies. # pylint: disable=wrong-import-order, wrong-import-position from apache_beam.pipeline import PipelineVisitor from apache_beam.transforms.core import ParDo class SideInputVisitor(PipelineVisitor): """Ensures input `PCollection` used as a side inputs has a `KV` type. TODO(BEAM-115): Once Python SDK is compatible with the new Runner API, we could directly replace the coder instead of mutating the element type. """ def visit_transform(self, transform_node): if isinstance(transform_node.transform, ParDo): new_side_inputs = [] for ix, side_input in enumerate(transform_node.side_inputs): access_pattern = side_input._side_input_data().access_pattern if access_pattern == common_urns.side_inputs.ITERABLE.urn: if use_unified_worker: # TODO(BEAM-9173): Stop patching up the access pattern to # appease Dataflow when using the UW and hardcode the output # type to be Any since the Dataflow JSON and pipeline proto # can differ in coders which leads to encoding/decoding issues # within the runner. side_input.pvalue.element_type = typehints.Any new_side_input = _DataflowIterableSideInput(side_input) else: # Add a map to ('', value) as Dataflow currently only handles # keyed side inputs when using the JRH. pipeline = side_input.pvalue.pipeline new_side_input = _DataflowIterableAsMultimapSideInput( side_input) new_side_input.pvalue = beam.pvalue.PCollection( pipeline, element_type=typehints.KV[bytes, side_input.pvalue.element_type], is_bounded=side_input.pvalue.is_bounded) parent = transform_node.parent or pipeline._root_transform() map_to_void_key = beam.pipeline.AppliedPTransform( pipeline, beam.Map(lambda x: (b'', x)), transform_node.full_label + '/MapToVoidKey%s' % ix, (side_input.pvalue, )) new_side_input.pvalue.producer = map_to_void_key map_to_void_key.add_output(new_side_input.pvalue, None) parent.add_part(map_to_void_key) elif access_pattern == common_urns.side_inputs.MULTIMAP.urn: # Ensure the input coder is a KV coder and patch up the # access pattern to appease Dataflow. 
side_input.pvalue.element_type = typehints.coerce_to_kv_type( side_input.pvalue.element_type, transform_node.full_label) new_side_input = _DataflowMultimapSideInput(side_input) else: raise ValueError( 'Unsupported access pattern for %r: %r' % (transform_node.full_label, access_pattern)) new_side_inputs.append(new_side_input) transform_node.side_inputs = new_side_inputs transform_node.transform.side_inputs = new_side_inputs return SideInputVisitor() @staticmethod def flatten_input_visitor(): # Imported here to avoid circular dependencies. from apache_beam.pipeline import PipelineVisitor class FlattenInputVisitor(PipelineVisitor): """A visitor that replaces the element type for input ``PCollections``s of a ``Flatten`` transform with that of the output ``PCollection``. """ def visit_transform(self, transform_node): # Imported here to avoid circular dependencies. # pylint: disable=wrong-import-order, wrong-import-position from apache_beam import Flatten if isinstance(transform_node.transform, Flatten): output_pcoll = DataflowRunner._only_element( transform_node.outputs.values()) for input_pcoll in transform_node.inputs: input_pcoll.element_type = output_pcoll.element_type return FlattenInputVisitor() def _check_for_unsupported_fnapi_features(self, pipeline_proto): components = pipeline_proto.components for windowing_strategy in components.windowing_strategies.values(): if (windowing_strategy.merge_status == beam_runner_api_pb2.MergeStatus.NEEDS_MERGE and windowing_strategy.window_fn.urn not in ( common_urns.session_windows.urn, )): raise RuntimeError( 'Unsupported merging windowing strategy: %s' % windowing_strategy.window_fn.urn) elif components.coders[ windowing_strategy.window_coder_id].spec.urn not in ( common_urns.coders.GLOBAL_WINDOW.urn, common_urns.coders.INTERVAL_WINDOW.urn): raise RuntimeError( 'Unsupported window coder %s for window fn %s' % ( components.coders[windowing_strategy.window_coder_id].spec.urn, windowing_strategy.window_fn.urn)) def run_pipeline(self, pipeline, options): """Remotely executes entire pipeline or parts reachable from node.""" # Label goog-dataflow-notebook if job is started from notebook. if is_in_notebook(): notebook_version = ( 'goog-dataflow-notebook=' + beam.version.__version__.replace('.', '_')) if options.view_as(GoogleCloudOptions).labels: options.view_as(GoogleCloudOptions).labels.append(notebook_version) else: options.view_as(GoogleCloudOptions).labels = [notebook_version] # Import here to avoid adding the dependency for local running scenarios. try: # pylint: disable=wrong-import-order, wrong-import-position from apache_beam.runners.dataflow.internal import apiclient except ImportError: raise ImportError( 'Google Cloud Dataflow runner not available, ' 'please install apache_beam[gcp]') self._maybe_add_unified_worker_missing_options(options) # Convert all side inputs into a form acceptable to Dataflow. if apiclient._use_fnapi(options): pipeline.visit( self.side_input_visitor(apiclient._use_unified_worker(options))) # Performing configured PTransform overrides. Note that this is currently # done before Runner API serialization, since the new proto needs to contain # any added PTransforms. 
pipeline.replace_all(DataflowRunner._PTRANSFORM_OVERRIDES) if (apiclient._use_fnapi(options) and not apiclient._use_unified_worker(options)): pipeline.replace_all(DataflowRunner._JRH_PTRANSFORM_OVERRIDES) use_fnapi = apiclient._use_fnapi(options) from apache_beam.transforms import environments default_environment = environments.DockerEnvironment.from_container_image( apiclient.get_container_image_from_options(options)) # Snapshot the pipeline in a portable proto. self.proto_pipeline, self.proto_context = pipeline.to_runner_api( return_context=True, default_environment=default_environment) if use_fnapi: self._check_for_unsupported_fnapi_features(self.proto_pipeline) # Cross language transform require using a pipeline object constructed # from the full pipeline proto to make sure that expanded version of # external transforms are reflected in the Pipeline job graph. from apache_beam import Pipeline pipeline = Pipeline.from_runner_api( self.proto_pipeline, pipeline.runner, options, allow_proto_holders=True) # Pipelines generated from proto do not have output set to PDone set for # leaf elements. pipeline.visit(self._set_pdone_visitor(pipeline)) # We need to generate a new context that maps to the new pipeline object. self.proto_pipeline, self.proto_context = pipeline.to_runner_api( return_context=True, default_environment=default_environment) else: # Performing configured PTransform overrides which should not be reflected # in the proto representation of the graph. pipeline.replace_all(DataflowRunner._NON_PORTABLE_PTRANSFORM_OVERRIDES) # Add setup_options for all the BeamPlugin imports setup_options = options.view_as(SetupOptions) plugins = BeamPlugin.get_all_plugin_paths() if setup_options.beam_plugins is not None: plugins = list(set(plugins + setup_options.beam_plugins)) setup_options.beam_plugins = plugins # Elevate "min_cpu_platform" to pipeline option, but using the existing # experiment. debug_options = options.view_as(DebugOptions) worker_options = options.view_as(WorkerOptions) if worker_options.min_cpu_platform: debug_options.add_experiment( 'min_cpu_platform=' + worker_options.min_cpu_platform) # Elevate "enable_streaming_engine" to pipeline option, but using the # existing experiment. google_cloud_options = options.view_as(GoogleCloudOptions) if google_cloud_options.enable_streaming_engine: debug_options.add_experiment("enable_windmill_service") debug_options.add_experiment("enable_streaming_engine") else: if (debug_options.lookup_experiment("enable_windmill_service") or debug_options.lookup_experiment("enable_streaming_engine")): raise ValueError( """Streaming engine both disabled and enabled: enable_streaming_engine flag is not set, but enable_windmill_service and/or enable_streaming_engine experiments are present. It is recommended you only set the enable_streaming_engine flag.""") dataflow_worker_jar = getattr(worker_options, 'dataflow_worker_jar', None) if dataflow_worker_jar is not None: if not apiclient._use_fnapi(options): _LOGGER.warning( 'Typical end users should not use this worker jar feature. ' 'It can only be used when FnAPI is enabled.') else: debug_options.add_experiment('use_staged_dataflow_worker_jar') # Make Dataflow workers use FastAvro on Python 3 unless use_avro experiment # is set. Note that use_avro is only interpreted by the Dataflow runner # at job submission and is not interpreted by Dataflow service or workers, # which by default use avro library unless use_fastavro experiment is set. 
if sys.version_info[0] > 2 and ( not debug_options.lookup_experiment('use_avro')): debug_options.add_experiment('use_fastavro') self.job = apiclient.Job(options, self.proto_pipeline) # Dataflow runner requires a KV type for GBK inputs, hence we enforce that # here. pipeline.visit(self.group_by_key_input_visitor()) # Dataflow runner requires output type of the Flatten to be the same as the # inputs, hence we enforce that here. pipeline.visit(self.flatten_input_visitor()) # Trigger a traversal of all reachable nodes. self.visit_transforms(pipeline, options) test_options = options.view_as(TestOptions) # If it is a dry run, return without submitting the job. if test_options.dry_run: result = PipelineResult(PipelineState.DONE) result.wait_until_finish = lambda duration=None: None return result # Get a Dataflow API client and set its options self.dataflow_client = apiclient.DataflowApplicationClient(options) # Create the job description and send a request to the service. The result # can be None if there is no need to send a request to the service (e.g. # template creation). If a request was sent and failed then the call will # raise an exception. result = DataflowPipelineResult( self.dataflow_client.create_job(self.job), self) # TODO(BEAM-4274): Circular import runners-metrics. Requires refactoring. from apache_beam.runners.dataflow.dataflow_metrics import DataflowMetrics self._metrics = DataflowMetrics(self.dataflow_client, result, self.job) result.metric_results = self._metrics return result def _maybe_add_unified_worker_missing_options(self, options): # set default beam_fn_api and use_beam_bq_sink experiment if use unified # worker experiment flag exists, no-op otherwise. debug_options = options.view_as(DebugOptions) from apache_beam.runners.dataflow.internal import apiclient if apiclient._use_unified_worker(options): if not debug_options.lookup_experiment('beam_fn_api'): debug_options.add_experiment('beam_fn_api') if not debug_options.lookup_experiment('use_beam_bq_sink'): debug_options.add_experiment('use_beam_bq_sink') def _get_typehint_based_encoding(self, typehint, window_coder): """Returns an encoding based on a typehint object.""" return self._get_cloud_encoding( self._get_coder(typehint, window_coder=window_coder)) @staticmethod def _get_coder(typehint, window_coder): """Returns a coder based on a typehint object.""" if window_coder: return coders.WindowedValueCoder( coders.registry.get_coder(typehint), window_coder=window_coder) return coders.registry.get_coder(typehint) def _get_cloud_encoding(self, coder, unused=None): """Returns an encoding based on a coder object.""" if not isinstance(coder, coders.Coder): raise TypeError( 'Coder object must inherit from coders.Coder: %s.' % str(coder)) return coder.as_cloud_object(self.proto_context.coders) def _get_side_input_encoding(self, input_encoding): """Returns an encoding for the output of a view transform. Args: input_encoding: encoding of current transform's input. Side inputs need this because the service will check that input and output types match. Returns: An encoding that matches the output and input encoding. This is essential for the View transforms introduced to produce side inputs to a ParDo. 
""" return { '@type': 'kind:stream', 'component_encodings': [input_encoding], 'is_stream_like': { 'value': True }, } def _get_encoded_output_coder( self, transform_node, window_value=True, output_tag=None): """Returns the cloud encoding of the coder for the output of a transform.""" is_external_transform = isinstance( transform_node.transform, RunnerAPIPTransformHolder) if output_tag in transform_node.outputs: element_type = transform_node.outputs[output_tag].element_type elif len(transform_node.outputs) == 1: output_tag = DataflowRunner._only_element(transform_node.outputs.keys()) # TODO(robertwb): Handle type hints for multi-output transforms. element_type = transform_node.outputs[output_tag].element_type elif is_external_transform: raise ValueError( 'For external transforms, output_tag must be specified ' 'since we cannot fallback to a Python only coder.') else: # TODO(silviuc): Remove this branch (and assert) when typehints are # propagated everywhere. Returning an 'Any' as type hint will trigger # usage of the fallback coder (i.e., cPickler). element_type = typehints.Any if window_value: # All outputs have the same windowing. So getting the coder from an # arbitrary window is fine. output_tag = next(iter(transform_node.outputs.keys())) window_coder = ( transform_node.outputs[output_tag].windowing.windowfn. get_window_coder()) else: window_coder = None return self._get_typehint_based_encoding(element_type, window_coder) def _add_step(self, step_kind, step_label, transform_node, side_tags=()): """Creates a Step object and adds it to the cache.""" # Import here to avoid adding the dependency for local running scenarios. # pylint: disable=wrong-import-order, wrong-import-position from apache_beam.runners.dataflow.internal import apiclient step = apiclient.Step(step_kind, self._get_unique_step_name()) self.job.proto.steps.append(step.proto) step.add_property(PropertyNames.USER_NAME, step_label) # Cache the node/step association for the main output of the transform node. # Main output key of external transforms can be ambiguous, so we only tag if # there's only one tag instead of None. output_tag = ( DataflowRunner._only_element(transform_node.outputs.keys()) if len( transform_node.outputs.keys()) == 1 else None) self._cache.cache_output(transform_node, output_tag, step) # If side_tags is not () then this is a multi-output transform node and we # need to cache the (node, tag, step) for each of the tags used to access # the outputs. This is essential because the keys used to search in the # cache always contain the tag. for tag in side_tags: self._cache.cache_output(transform_node, tag, step) # Finally, we add the display data items to the pipeline step. # If the transform contains no display data then an empty list is added. step.add_property( PropertyNames.DISPLAY_DATA, [ item.get_dict() for item in DisplayData.create_from(transform_node.transform).items ]) return step def _add_singleton_step( self, label, full_label, tag, input_step, windowing_strategy, access_pattern): """Creates a CollectionToSingleton step used to handle ParDo side inputs.""" # Import here to avoid adding the dependency for local running scenarios. 
from apache_beam.runners.dataflow.internal import apiclient step = apiclient.Step(TransformNames.COLLECTION_TO_SINGLETON, label) self.job.proto.steps.append(step.proto) step.add_property(PropertyNames.USER_NAME, full_label) step.add_property( PropertyNames.PARALLEL_INPUT, { '@type': 'OutputReference', PropertyNames.STEP_NAME: input_step.proto.name, PropertyNames.OUTPUT_NAME: input_step.get_output(tag) }) step.encoding = self._get_side_input_encoding(input_step.encoding) output_info = { PropertyNames.USER_NAME: '%s.%s' % (full_label, PropertyNames.OUTPUT), PropertyNames.ENCODING: step.encoding, PropertyNames.OUTPUT_NAME: PropertyNames.OUT } if common_urns.side_inputs.MULTIMAP.urn == access_pattern: output_info[PropertyNames.USE_INDEXED_FORMAT] = True step.add_property(PropertyNames.OUTPUT_INFO, [output_info]) step.add_property( PropertyNames.WINDOWING_STRATEGY, self.serialize_windowing_strategy(windowing_strategy)) return step def run_Impulse(self, transform_node, options): standard_options = options.view_as(StandardOptions) debug_options = options.view_as(DebugOptions) use_fn_api = ( debug_options.experiments and 'beam_fn_api' in debug_options.experiments) use_streaming_engine = ( debug_options.experiments and 'enable_streaming_engine' in debug_options.experiments and 'enable_windmill_service' in debug_options.experiments) step = self._add_step( TransformNames.READ, transform_node.full_label, transform_node) if (standard_options.streaming and (not use_fn_api or not use_streaming_engine)): step.add_property(PropertyNames.FORMAT, 'pubsub') step.add_property(PropertyNames.PUBSUB_SUBSCRIPTION, '_starting_signal/') else: step.add_property(PropertyNames.FORMAT, 'impulse') encoded_impulse_element = coders.WindowedValueCoder( coders.BytesCoder(), coders.coders.GlobalWindowCoder()).get_impl().encode_nested( window.GlobalWindows.windowed_value(b'')) if use_fn_api: encoded_impulse_as_str = self.byte_array_to_json_string( encoded_impulse_element) else: encoded_impulse_as_str = base64.b64encode( encoded_impulse_element).decode('ascii') step.add_property(PropertyNames.IMPULSE_ELEMENT, encoded_impulse_as_str) step.encoding = self._get_encoded_output_coder(transform_node) step.add_property( PropertyNames.OUTPUT_INFO, [{ PropertyNames.USER_NAME: ( '%s.%s' % (transform_node.full_label, PropertyNames.OUT)), PropertyNames.ENCODING: step.encoding, PropertyNames.OUTPUT_NAME: PropertyNames.OUT }]) def run_Flatten(self, transform_node, options): step = self._add_step( TransformNames.FLATTEN, transform_node.full_label, transform_node) inputs = [] for one_input in transform_node.inputs: input_step = self._cache.get_pvalue(one_input) inputs.append({ '@type': 'OutputReference', PropertyNames.STEP_NAME: input_step.proto.name, PropertyNames.OUTPUT_NAME: input_step.get_output(one_input.tag) }) step.add_property(PropertyNames.INPUTS, inputs) step.encoding = self._get_encoded_output_coder(transform_node) step.add_property( PropertyNames.OUTPUT_INFO, [{ PropertyNames.USER_NAME: ( '%s.%s' % (transform_node.full_label, PropertyNames.OUT)), PropertyNames.ENCODING: step.encoding, PropertyNames.OUTPUT_NAME: PropertyNames.OUT }]) def apply_WriteToBigQuery(self, transform, pcoll, options): # Make sure this is the WriteToBigQuery class that we expected, and that # users did not specifically request the new BQ sink by passing experiment # flag. # TODO(BEAM-6928): Remove this function for release 2.14.0. 
experiments = options.view_as(DebugOptions).experiments or [] from apache_beam.runners.dataflow.internal import apiclient use_fnapi = apiclient._use_fnapi(options) if (not isinstance(transform, beam.io.WriteToBigQuery) or use_fnapi or 'use_beam_bq_sink' in experiments): return self.apply_PTransform(transform, pcoll, options) if transform.schema == beam.io.gcp.bigquery.SCHEMA_AUTODETECT: raise RuntimeError( 'Schema auto-detection is not supported on the native sink') standard_options = options.view_as(StandardOptions) if standard_options.streaming: if (transform.write_disposition == beam.io.BigQueryDisposition.WRITE_TRUNCATE): raise RuntimeError('Can not use write truncation mode in streaming') return self.apply_PTransform(transform, pcoll, options) else: from apache_beam.io.gcp.bigquery_tools import parse_table_schema_from_json schema = None if transform.schema: schema = parse_table_schema_from_json(json.dumps(transform.schema)) return pcoll | 'WriteToBigQuery' >> beam.io.Write( beam.io.BigQuerySink( transform.table_reference.tableId, transform.table_reference.datasetId, transform.table_reference.projectId, schema, transform.create_disposition, transform.write_disposition, kms_key=transform.kms_key)) def apply_GroupByKey(self, transform, pcoll, options): # Infer coder of parent. # # TODO(ccy): make Coder inference and checking less specialized and more # comprehensive. parent = pcoll.producer if parent: coder = parent.transform._infer_output_coder() # pylint: disable=protected-access if not coder: coder = self._get_coder(pcoll.element_type or typehints.Any, None) if not coder.is_kv_coder(): raise ValueError(( 'Coder for the GroupByKey operation "%s" is not a ' 'key-value coder: %s.') % (transform.label, coder)) # TODO(robertwb): Update the coder itself if it changed. coders.registry.verify_deterministic( coder.key_coder(), 'GroupByKey operation "%s"' % transform.label) return pvalue.PCollection.from_(pcoll) def run_GroupByKey(self, transform_node, options): input_tag = transform_node.inputs[0].tag input_step = self._cache.get_pvalue(transform_node.inputs[0]) step = self._add_step( TransformNames.GROUP, transform_node.full_label, transform_node) step.add_property( PropertyNames.PARALLEL_INPUT, { '@type': 'OutputReference', PropertyNames.STEP_NAME: input_step.proto.name, PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag) }) step.encoding = self._get_encoded_output_coder(transform_node) step.add_property( PropertyNames.OUTPUT_INFO, [{ PropertyNames.USER_NAME: ( '%s.%s' % (transform_node.full_label, PropertyNames.OUT)), PropertyNames.ENCODING: step.encoding, PropertyNames.OUTPUT_NAME: PropertyNames.OUT }]) windowing = transform_node.transform.get_windowing(transform_node.inputs) step.add_property( PropertyNames.SERIALIZED_FN, self.serialize_windowing_strategy(windowing)) def run_RunnerAPIPTransformHolder(self, transform_node, options): """Adding Dataflow runner job description for transform holder objects. These holder transform objects are generated for some of the transforms that become available after a cross-language transform expansion, usually if the corresponding transform object cannot be generated in Python SDK (for example, a python `ParDo` transform cannot be generated without a serialized Python `DoFn` object). """ urn = transform_node.transform.proto().urn assert urn # TODO(chamikara): support other transforms that requires holder objects in # Python SDk. 
if common_urns.primitives.PAR_DO.urn == urn: self.run_ParDo(transform_node, options) else: raise NotImplementedError( '%s uses unsupported URN: %s' % (transform_node.full_label, urn)) def run_ParDo(self, transform_node, options): transform = transform_node.transform input_tag = transform_node.inputs[0].tag input_step = self._cache.get_pvalue(transform_node.inputs[0]) is_external_transform = isinstance(transform, RunnerAPIPTransformHolder) # Attach side inputs. si_dict = {} all_input_labels = transform_node.input_tags_to_preserve si_labels = {} full_label_counts = defaultdict(int) lookup_label = lambda side_pval: si_labels[side_pval] named_inputs = transform_node.named_inputs() label_renames = {} for ix, side_pval in enumerate(transform_node.side_inputs): assert isinstance(side_pval, AsSideInput) step_name = 'SideInput-' + self._get_unique_step_name() si_label = ((SIDE_INPUT_PREFIX + '%d-%s') % (ix, transform_node.full_label) if side_pval.pvalue not in all_input_labels else all_input_labels[side_pval.pvalue]) old_label = (SIDE_INPUT_PREFIX + '%d') % ix if not is_external_transform: label_renames[old_label] = si_label assert old_label in named_inputs pcollection_label = '%s.%s' % ( side_pval.pvalue.producer.full_label.split('/')[-1], side_pval.pvalue.tag if side_pval.pvalue.tag else 'out') si_full_label = '%s/%s(%s.%s)' % ( transform_node.full_label, side_pval.__class__.__name__, pcollection_label, full_label_counts[pcollection_label]) # Count the number of times the same PCollection is a side input # to the same ParDo. full_label_counts[pcollection_label] += 1 self._add_singleton_step( step_name, si_full_label, side_pval.pvalue.tag, self._cache.get_pvalue(side_pval.pvalue), side_pval.pvalue.windowing, side_pval._side_input_data().access_pattern) si_dict[si_label] = { '@type': 'OutputReference', PropertyNames.STEP_NAME: step_name, PropertyNames.OUTPUT_NAME: PropertyNames.OUT } si_labels[side_pval] = si_label # Now create the step for the ParDo transform being handled. transform_name = transform_node.full_label.rsplit('/', 1)[-1] step = self._add_step( TransformNames.DO, transform_node.full_label + ('/{}'.format(transform_name) if transform_node.side_inputs else ''), transform_node, transform_node.transform.output_tags) # Import here to avoid adding the dependency for local running scenarios. # pylint: disable=wrong-import-order, wrong-import-position from apache_beam.runners.dataflow.internal import apiclient transform_proto = self.proto_context.transforms.get_proto(transform_node) transform_id = self.proto_context.transforms.get_id(transform_node) use_fnapi = apiclient._use_fnapi(options) use_unified_worker = apiclient._use_unified_worker(options) # The data transmitted in SERIALIZED_FN is different depending on whether # this is a fnapi pipeline or not. if (use_fnapi and (transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn or use_unified_worker)): # Patch side input ids to be unique across a given pipeline. if (label_renames and transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn): # Patch PTransform proto. for old, new in iteritems(label_renames): transform_proto.inputs[new] = transform_proto.inputs[old] del transform_proto.inputs[old] # Patch ParDo proto. 
proto_type, _ = beam.PTransform._known_urns[transform_proto.spec.urn] proto = proto_utils.parse_Bytes( transform_proto.spec.payload, proto_type) for old, new in iteritems(label_renames): proto.side_inputs[new].CopyFrom(proto.side_inputs[old]) del proto.side_inputs[old] transform_proto.spec.payload = proto.SerializeToString() # We need to update the pipeline proto. del self.proto_pipeline.components.transforms[transform_id] ( self.proto_pipeline.components.transforms[transform_id].CopyFrom( transform_proto)) serialized_data = transform_id else: serialized_data = pickler.dumps( self._pardo_fn_data(transform_node, lookup_label)) step.add_property(PropertyNames.SERIALIZED_FN, serialized_data) # TODO(BEAM-8882): Enable once dataflow service doesn't reject this. # step.add_property(PropertyNames.PIPELINE_PROTO_TRANSFORM_ID, transform_id) step.add_property( PropertyNames.PARALLEL_INPUT, { '@type': 'OutputReference', PropertyNames.STEP_NAME: input_step.proto.name, PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag) }) # Add side inputs if any. step.add_property(PropertyNames.NON_PARALLEL_INPUTS, si_dict) # Generate description for the outputs. The output names # will be 'None' for main output and '<tag>' for a tagged output. outputs = [] all_output_tags = transform_proto.outputs.keys() # Some external transforms require output tags to not be modified. # So we randomly select one of the output tags as the main output and # leave others as side outputs. Transform execution should not change # dependending on which output tag we choose as the main output here. # Also, some SDKs do not work correctly if output tags are modified. So for # external transforms, we leave tags unmodified. # # Python SDK uses 'None' as the tag of the main output. main_output_tag = (all_output_tags[0] if is_external_transform else 'None') step.encoding = self._get_encoded_output_coder( transform_node, output_tag=main_output_tag) side_output_tags = set(all_output_tags).difference({main_output_tag}) # Add the main output to the description. outputs.append({ PropertyNames.USER_NAME: ( '%s.%s' % (transform_node.full_label, PropertyNames.OUT)), PropertyNames.ENCODING: step.encoding, PropertyNames.OUTPUT_NAME: main_output_tag }) for side_tag in side_output_tags: # The assumption here is that all outputs will have the same typehint # and coder as the main output. This is certainly the case right now # but conceivably it could change in the future. encoding = self._get_encoded_output_coder( transform_node, output_tag=side_tag) outputs.append({ PropertyNames.USER_NAME: ( '%s.%s' % (transform_node.full_label, side_tag)), PropertyNames.ENCODING: encoding, PropertyNames.OUTPUT_NAME: side_tag }) step.add_property(PropertyNames.OUTPUT_INFO, outputs) # Add the restriction encoding if we are a splittable DoFn # and are using the Fn API on the unified worker. 
restriction_coder = transform.get_restriction_coder() if restriction_coder: step.add_property( PropertyNames.RESTRICTION_ENCODING, self._get_cloud_encoding(restriction_coder)) if options.view_as(StandardOptions).streaming: is_stateful_dofn = ( transform.is_pardo_with_stateful_dofn if is_external_transform else DoFnSignature(transform.dofn).is_stateful_dofn()) if is_stateful_dofn: step.add_property(PropertyNames.USES_KEYED_STATE, 'true') @staticmethod def _pardo_fn_data(transform_node, get_label): transform = transform_node.transform si_tags_and_types = [ # pylint: disable=protected-access (get_label(side_pval), side_pval.__class__, side_pval._view_options()) for side_pval in transform_node.side_inputs] return ( transform.fn, transform.args, transform.kwargs, si_tags_and_types, transform_node.inputs[0].windowing) def run_CombineValuesReplacement(self, transform_node, options): transform = transform_node.transform.transform input_tag = transform_node.inputs[0].tag input_step = self._cache.get_pvalue(transform_node.inputs[0]) step = self._add_step( TransformNames.COMBINE, transform_node.full_label, transform_node) transform_id = self.proto_context.transforms.get_id(transform_node.parent) # The data transmitted in SERIALIZED_FN is different depending on whether # this is a fnapi pipeline or not. from apache_beam.runners.dataflow.internal import apiclient use_fnapi = apiclient._use_fnapi(options) if use_fnapi: # Fnapi pipelines send the transform ID of the CombineValues transform's # parent composite because Dataflow expects the ID of a CombinePerKey # transform. serialized_data = transform_id else: # Combiner functions do not take deferred side-inputs (i.e. PValues) and # therefore the code to handle extra args/kwargs is simpler than for the # DoFn's of the ParDo transform. In the last, empty argument is where # side inputs information would go. serialized_data = pickler.dumps( (transform.fn, transform.args, transform.kwargs, ())) step.add_property(PropertyNames.SERIALIZED_FN, serialized_data) # TODO(BEAM-8882): Enable once dataflow service doesn't reject this. # step.add_property(PropertyNames.PIPELINE_PROTO_TRANSFORM_ID, transform_id) step.add_property( PropertyNames.PARALLEL_INPUT, { '@type': 'OutputReference', PropertyNames.STEP_NAME: input_step.proto.name, PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag) }) # Note that the accumulator must not have a WindowedValue encoding, while # the output of this step does in fact have a WindowedValue encoding. accumulator_encoding = self._get_cloud_encoding( transform.fn.get_accumulator_coder()) output_encoding = self._get_encoded_output_coder(transform_node) step.encoding = output_encoding step.add_property(PropertyNames.ENCODING, accumulator_encoding) # Generate description for main output 'out.' outputs = [] # Add the main output to the description. outputs.append({ PropertyNames.USER_NAME: ( '%s.%s' % (transform_node.full_label, PropertyNames.OUT)), PropertyNames.ENCODING: step.encoding, PropertyNames.OUTPUT_NAME: PropertyNames.OUT }) step.add_property(PropertyNames.OUTPUT_INFO, outputs) def apply_Read(self, transform, pbegin, options): if hasattr(transform.source, 'format'): # Consider native Read to be a primitive for dataflow. 
return beam.pvalue.PCollection.from_(pbegin) else: return self.apply_PTransform(transform, pbegin, options) def run_Read(self, transform_node, options): transform = transform_node.transform step = self._add_step( TransformNames.READ, transform_node.full_label, transform_node) # TODO(mairbek): refactor if-else tree to use registerable functions. # Initialize the source specific properties. standard_options = options.view_as(StandardOptions) if not hasattr(transform.source, 'format'): # If a format is not set, we assume the source to be a custom source. source_dict = {} source_dict['spec'] = { '@type': names.SOURCE_TYPE, names.SERIALIZED_SOURCE_KEY: pickler.dumps(transform.source) } try: source_dict['metadata'] = { 'estimated_size_bytes': json_value.get_typed_value_descriptor( transform.source.estimate_size()) } except error.RuntimeValueProviderError: # Size estimation is best effort, and this error is by value provider. _LOGGER.info( 'Could not estimate size of source %r due to ' + \ 'RuntimeValueProviderError', transform.source) except Exception: # pylint: disable=broad-except # Size estimation is best effort. So we log the error and continue. _LOGGER.info( 'Could not estimate size of source %r due to an exception: %s', transform.source, traceback.format_exc()) step.add_property(PropertyNames.SOURCE_STEP_INPUT, source_dict) elif transform.source.format == 'text': step.add_property(PropertyNames.FILE_PATTERN, transform.source.path) elif transform.source.format == 'bigquery': if standard_options.streaming: raise ValueError( 'BigQuery source is not currently available for use ' 'in streaming pipelines.') debug_options = options.view_as(DebugOptions) use_fn_api = ( debug_options.experiments and 'beam_fn_api' in debug_options.experiments) if use_fn_api: raise ValueError(BQ_SOURCE_UW_ERROR) step.add_property(PropertyNames.BIGQUERY_EXPORT_FORMAT, 'FORMAT_AVRO') # TODO(silviuc): Add table validation if transform.source.validate. if transform.source.table_reference is not None: step.add_property( PropertyNames.BIGQUERY_DATASET, transform.source.table_reference.datasetId) step.add_property( PropertyNames.BIGQUERY_TABLE, transform.source.table_reference.tableId) # If project owning the table was not specified then the project owning # the workflow (current project) will be used. if transform.source.table_reference.projectId is not None: step.add_property( PropertyNames.BIGQUERY_PROJECT, transform.source.table_reference.projectId) elif transform.source.query is not None: step.add_property(PropertyNames.BIGQUERY_QUERY, transform.source.query) step.add_property( PropertyNames.BIGQUERY_USE_LEGACY_SQL, transform.source.use_legacy_sql) step.add_property( PropertyNames.BIGQUERY_FLATTEN_RESULTS, transform.source.flatten_results) else: raise ValueError( 'BigQuery source %r must specify either a table or' ' a query' % transform.source) if transform.source.kms_key is not None: step.add_property( PropertyNames.BIGQUERY_KMS_KEY, transform.source.kms_key) elif transform.source.format == 'pubsub': if not standard_options.streaming: raise ValueError( 'Cloud Pub/Sub is currently available for use ' 'only in streaming pipelines.') # Only one of topic or subscription should be set. 
if transform.source.full_subscription: step.add_property( PropertyNames.PUBSUB_SUBSCRIPTION, transform.source.full_subscription) elif transform.source.full_topic: step.add_property( PropertyNames.PUBSUB_TOPIC, transform.source.full_topic) if transform.source.id_label: step.add_property( PropertyNames.PUBSUB_ID_LABEL, transform.source.id_label) if transform.source.with_attributes: # Setting this property signals Dataflow runner to return full # PubsubMessages instead of just the data part of the payload. step.add_property(PropertyNames.PUBSUB_SERIALIZED_ATTRIBUTES_FN, '') if transform.source.timestamp_attribute is not None: step.add_property( PropertyNames.PUBSUB_TIMESTAMP_ATTRIBUTE, transform.source.timestamp_attribute) else: raise ValueError( 'Source %r has unexpected format %s.' % (transform.source, transform.source.format)) if not hasattr(transform.source, 'format'): step.add_property(PropertyNames.FORMAT, names.SOURCE_FORMAT) else: step.add_property(PropertyNames.FORMAT, transform.source.format) # Wrap coder in WindowedValueCoder: this is necessary as the encoding of a # step should be the type of value outputted by each step. Read steps # automatically wrap output values in a WindowedValue wrapper, if necessary. # This is also necessary for proper encoding for size estimation. # Using a GlobalWindowCoder as a place holder instead of the default # PickleCoder because GlobalWindowCoder is known coder. # TODO(robertwb): Query the collection for the windowfn to extract the # correct coder. coder = coders.WindowedValueCoder( coders.registry.get_coder(transform_node.outputs[None].element_type), coders.coders.GlobalWindowCoder()) step.encoding = self._get_cloud_encoding(coder) step.add_property( PropertyNames.OUTPUT_INFO, [{ PropertyNames.USER_NAME: ( '%s.%s' % (transform_node.full_label, PropertyNames.OUT)), PropertyNames.ENCODING: step.encoding, PropertyNames.OUTPUT_NAME: PropertyNames.OUT }]) def run__NativeWrite(self, transform_node, options): transform = transform_node.transform input_tag = transform_node.inputs[0].tag input_step = self._cache.get_pvalue(transform_node.inputs[0]) step = self._add_step( TransformNames.WRITE, transform_node.full_label, transform_node) # TODO(mairbek): refactor if-else tree to use registerable functions. # Initialize the sink specific properties. if transform.sink.format == 'text': # Note that it is important to use typed properties (@type/value dicts) # for non-string properties and also for empty strings. For example, # in the code below the num_shards must have type and also # file_name_suffix and shard_name_template (could be empty strings). step.add_property( PropertyNames.FILE_NAME_PREFIX, transform.sink.file_name_prefix, with_type=True) step.add_property( PropertyNames.FILE_NAME_SUFFIX, transform.sink.file_name_suffix, with_type=True) step.add_property( PropertyNames.SHARD_NAME_TEMPLATE, transform.sink.shard_name_template, with_type=True) if transform.sink.num_shards > 0: step.add_property( PropertyNames.NUM_SHARDS, transform.sink.num_shards, with_type=True) # TODO(silviuc): Implement sink validation. step.add_property(PropertyNames.VALIDATE_SINK, False, with_type=True) elif transform.sink.format == 'bigquery': # TODO(silviuc): Add table validation if transform.sink.validate. 
step.add_property( PropertyNames.BIGQUERY_DATASET, transform.sink.table_reference.datasetId) step.add_property( PropertyNames.BIGQUERY_TABLE, transform.sink.table_reference.tableId) # If project owning the table was not specified then the project owning # the workflow (current project) will be used. if transform.sink.table_reference.projectId is not None: step.add_property( PropertyNames.BIGQUERY_PROJECT, transform.sink.table_reference.projectId) step.add_property( PropertyNames.BIGQUERY_CREATE_DISPOSITION, transform.sink.create_disposition) step.add_property( PropertyNames.BIGQUERY_WRITE_DISPOSITION, transform.sink.write_disposition) if transform.sink.table_schema is not None: step.add_property( PropertyNames.BIGQUERY_SCHEMA, transform.sink.schema_as_json()) if transform.sink.kms_key is not None: step.add_property( PropertyNames.BIGQUERY_KMS_KEY, transform.sink.kms_key) elif transform.sink.format == 'pubsub': standard_options = options.view_as(StandardOptions) if not standard_options.streaming: raise ValueError( 'Cloud Pub/Sub is currently available for use ' 'only in streaming pipelines.') step.add_property(PropertyNames.PUBSUB_TOPIC, transform.sink.full_topic) if transform.sink.id_label: step.add_property( PropertyNames.PUBSUB_ID_LABEL, transform.sink.id_label) if transform.sink.with_attributes: # Setting this property signals Dataflow runner that the PCollection # contains PubsubMessage objects instead of just raw data. step.add_property(PropertyNames.PUBSUB_SERIALIZED_ATTRIBUTES_FN, '') if transform.sink.timestamp_attribute is not None: step.add_property( PropertyNames.PUBSUB_TIMESTAMP_ATTRIBUTE, transform.sink.timestamp_attribute) else: raise ValueError( 'Sink %r has unexpected format %s.' % (transform.sink, transform.sink.format)) step.add_property(PropertyNames.FORMAT, transform.sink.format) # Wrap coder in WindowedValueCoder: this is necessary for proper encoding # for size estimation. Using a GlobalWindowCoder as a place holder instead # of the default PickleCoder because GlobalWindowCoder is known coder. # TODO(robertwb): Query the collection for the windowfn to extract the # correct coder. coder = coders.WindowedValueCoder( transform.sink.coder, coders.coders.GlobalWindowCoder()) step.encoding = self._get_cloud_encoding(coder) step.add_property(PropertyNames.ENCODING, step.encoding) step.add_property( PropertyNames.PARALLEL_INPUT, { '@type': 'OutputReference', PropertyNames.STEP_NAME: input_step.proto.name, PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag) }) def run_TestStream(self, transform_node, options): from apache_beam.testing.test_stream import ElementEvent from apache_beam.testing.test_stream import ProcessingTimeEvent from apache_beam.testing.test_stream import WatermarkEvent standard_options = options.view_as(StandardOptions) if not standard_options.streaming: raise ValueError( 'TestStream is currently available for use ' 'only in streaming pipelines.') transform = transform_node.transform step = self._add_step( TransformNames.READ, transform_node.full_label, transform_node) step.add_property(PropertyNames.FORMAT, 'test_stream') test_stream_payload = beam_runner_api_pb2.TestStreamPayload() # TestStream source doesn't do any decoding of elements, # so we won't set test_stream_payload.coder_id. 
output_coder = transform._infer_output_coder() # pylint: disable=protected-access for event in transform._events: new_event = test_stream_payload.events.add() if isinstance(event, ElementEvent): for tv in event.timestamped_values: element = new_event.element_event.elements.add() element.encoded_element = output_coder.encode(tv.value) element.timestamp = tv.timestamp.micros elif isinstance(event, ProcessingTimeEvent): new_event.processing_time_event.advance_duration = ( event.advance_by.micros) elif isinstance(event, WatermarkEvent): new_event.watermark_event.new_watermark = event.new_watermark.micros serialized_payload = self.byte_array_to_json_string( test_stream_payload.SerializeToString()) step.add_property(PropertyNames.SERIALIZED_TEST_STREAM, serialized_payload) step.encoding = self._get_encoded_output_coder(transform_node) step.add_property( PropertyNames.OUTPUT_INFO, [{ PropertyNames.USER_NAME: ( '%s.%s' % (transform_node.full_label, PropertyNames.OUT)), PropertyNames.ENCODING: step.encoding, PropertyNames.OUTPUT_NAME: PropertyNames.OUT }]) # We must mark this method as not a test or else its name is a matcher for # nosetest tests. run_TestStream.__test__ = False # type: ignore[attr-defined] @classmethod def serialize_windowing_strategy(cls, windowing): from apache_beam.runners import pipeline_context context = pipeline_context.PipelineContext() windowing_proto = windowing.to_runner_api(context) return cls.byte_array_to_json_string( beam_runner_api_pb2.MessageWithComponents( components=context.to_runner_api(), windowing_strategy=windowing_proto).SerializeToString()) @classmethod def deserialize_windowing_strategy(cls, serialized_data): # Imported here to avoid circular dependencies. # pylint: disable=wrong-import-order, wrong-import-position from apache_beam.runners import pipeline_context from apache_beam.transforms.core import Windowing proto = beam_runner_api_pb2.MessageWithComponents() proto.ParseFromString(cls.json_string_to_byte_array(serialized_data)) return Windowing.from_runner_api( proto.windowing_strategy, pipeline_context.PipelineContext(proto.components)) @staticmethod def byte_array_to_json_string(raw_bytes): """Implements org.apache.beam.sdk.util.StringUtils.byteArrayToJsonString.""" return quote(raw_bytes) @staticmethod def json_string_to_byte_array(encoded_string): """Implements org.apache.beam.sdk.util.StringUtils.jsonStringToByteArray.""" return unquote_to_bytes(encoded_string) def get_default_gcp_region(self): """Get a default value for Google Cloud region according to https://cloud.google.com/compute/docs/gcloud-compute/#default-properties. If no default can be found, returns None. """ environment_region = os.environ.get('CLOUDSDK_COMPUTE_REGION') if environment_region: _LOGGER.info( 'Using default GCP region %s from $CLOUDSDK_COMPUTE_REGION', environment_region) return environment_region try: cmd = ['gcloud', 'config', 'get-value', 'compute/region'] # Use subprocess.DEVNULL in Python 3.3+. 
if hasattr(subprocess, 'DEVNULL'): DEVNULL = subprocess.DEVNULL else: DEVNULL = open(os.devnull, 'ab') raw_output = processes.check_output(cmd, stderr=DEVNULL) formatted_output = raw_output.decode('utf-8').strip() if formatted_output: _LOGGER.info( 'Using default GCP region %s from `%s`', formatted_output, ' '.join(cmd)) return formatted_output except RuntimeError: pass return None class _DataflowSideInput(beam.pvalue.AsSideInput): """Wraps a side input as a dataflow-compatible side input.""" def _view_options(self): return { 'data': self._data, } def _side_input_data(self): return self._data class _DataflowIterableAsMultimapSideInput(_DataflowSideInput): """Wraps an iterable side input as dataflow-compatible side input.""" def __init__(self, side_input): # pylint: disable=protected-access side_input_data = side_input._side_input_data() assert ( side_input_data.access_pattern == common_urns.side_inputs.ITERABLE.urn) iterable_view_fn = side_input_data.view_fn self._data = beam.pvalue.SideInputData( common_urns.side_inputs.MULTIMAP.urn, side_input_data.window_mapping_fn, lambda multimap: iterable_view_fn(multimap[b''])) class _DataflowIterableSideInput(_DataflowSideInput): """Wraps an iterable side input as dataflow-compatible side input.""" def __init__(self, side_input): # pylint: disable=protected-access self.pvalue = side_input.pvalue side_input_data = side_input._side_input_data() assert ( side_input_data.access_pattern == common_urns.side_inputs.ITERABLE.urn) self._data = beam.pvalue.SideInputData( common_urns.side_inputs.ITERABLE.urn, side_input_data.window_mapping_fn, side_input_data.view_fn) class _DataflowMultimapSideInput(_DataflowSideInput): """Wraps a multimap side input as dataflow-compatible side input.""" def __init__(self, side_input): # pylint: disable=protected-access self.pvalue = side_input.pvalue side_input_data = side_input._side_input_data() assert ( side_input_data.access_pattern == common_urns.side_inputs.MULTIMAP.urn) self._data = beam.pvalue.SideInputData( common_urns.side_inputs.MULTIMAP.urn, side_input_data.window_mapping_fn, side_input_data.view_fn) class DataflowPipelineResult(PipelineResult): """Represents the state of a pipeline run on the Dataflow service.""" def __init__(self, job, runner): """Initialize a new DataflowPipelineResult instance. Args: job: Job message from the Dataflow API. Could be :data:`None` if a job request was not sent to Dataflow service (e.g. template jobs). runner: DataflowRunner instance. """ self._job = job self._runner = runner self.metric_results = None def _update_job(self): # We need the job id to be able to update job information. There is no need # to update the job if we are in a known terminal state. if self.has_job and not self.is_in_terminal_state(): self._job = self._runner.dataflow_client.get_job(self.job_id()) def job_id(self): return self._job.id def metrics(self): return self.metric_results @property def has_job(self): return self._job is not None def _get_job_state(self): values_enum = dataflow_api.Job.CurrentStateValueValuesEnum # Ordered by the enum values. Values that may be introduced in # future versions of Dataflow API are considered UNRECOGNIZED by the SDK. 
api_jobstate_map = defaultdict( lambda: PipelineState.UNRECOGNIZED, { values_enum.JOB_STATE_UNKNOWN: PipelineState.UNKNOWN, values_enum.JOB_STATE_STOPPED: PipelineState.STOPPED, values_enum.JOB_STATE_RUNNING: PipelineState.RUNNING, values_enum.JOB_STATE_DONE: PipelineState.DONE, values_enum.JOB_STATE_FAILED: PipelineState.FAILED, values_enum.JOB_STATE_CANCELLED: PipelineState.CANCELLED, values_enum.JOB_STATE_UPDATED: PipelineState.UPDATED, values_enum.JOB_STATE_DRAINING: PipelineState.DRAINING, values_enum.JOB_STATE_DRAINED: PipelineState.DRAINED, values_enum.JOB_STATE_PENDING: PipelineState.PENDING, values_enum.JOB_STATE_CANCELLING: PipelineState.CANCELLING, }) return ( api_jobstate_map[self._job.currentState] if self._job.currentState else PipelineState.UNKNOWN) @property def state(self): """Return the current state of the remote job. Returns: A PipelineState object. """ if not self.has_job: return PipelineState.UNKNOWN self._update_job() return self._get_job_state() def is_in_terminal_state(self): if not self.has_job: return True return PipelineState.is_terminal(self._get_job_state()) def wait_until_finish(self, duration=None): if not self.is_in_terminal_state(): if not self.has_job: raise IOError('Failed to get the Dataflow job id.') thread = threading.Thread( target=DataflowRunner.poll_for_job_completion, args=(self._runner, self, duration)) # Mark the thread as a daemon thread so a keyboard interrupt on the main # thread will terminate everything. This is also the reason we will not # use thread.join() to wait for the polling thread. thread.daemon = True thread.start() while thread.is_alive(): time.sleep(5.0) # TODO: Merge the termination code in poll_for_job_completion and # is_in_terminal_state. terminated = self.is_in_terminal_state() assert duration or terminated, ( 'Job did not reach to a terminal state after waiting indefinitely.') if terminated and self.state != PipelineState.DONE: # TODO(BEAM-1290): Consider converting this to an error log based on # theresolution of the issue. raise DataflowRuntimeException( 'Dataflow pipeline failed. State: %s, Error:\n%s' % (self.state, getattr(self._runner, 'last_error_msg', None)), self) return self.state def cancel(self): if not self.has_job: raise IOError('Failed to get the Dataflow job id.') self._update_job() if self.is_in_terminal_state(): _LOGGER.warning( 'Cancel failed because job %s is already terminated in state %s.', self.job_id(), self.state) else: if not self._runner.dataflow_client.modify_job_state( self.job_id(), 'JOB_STATE_CANCELLED'): cancel_failed_message = ( 'Failed to cancel job %s, please go to the Developers Console to ' 'cancel it manually.') % self.job_id() _LOGGER.error(cancel_failed_message) raise DataflowRuntimeException(cancel_failed_message, self) return self.state def __str__(self): return '<%s %s %s>' % (self.__class__.__name__, self.job_id(), self.state) def __repr__(self): return '<%s %s at %s>' % (self.__class__.__name__, self._job, hex(id(self))) class DataflowRuntimeException(Exception): """Indicates an error has occurred in running this pipeline.""" def __init__(self, msg, result): super(DataflowRuntimeException, self).__init__(msg) self.result = result
[]
[]
[ "CLOUDSDK_COMPUTE_REGION" ]
[]
["CLOUDSDK_COMPUTE_REGION"]
python
1
0
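The Python record above derives its CLOUDSDK_COMPUTE_REGION metadata from DataflowRunner.get_default_gcp_region, which prefers the environment variable and falls back to the gcloud default. A minimal, illustrative Go sketch of that lookup follows; the helper name and the main wrapper are my own and are not part of the Beam source.

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

// defaultGCPRegion mirrors the lookup order in get_default_gcp_region:
// CLOUDSDK_COMPUTE_REGION first, then `gcloud config get-value compute/region`,
// returning "" when neither yields a value.
func defaultGCPRegion() string {
	if region := os.Getenv("CLOUDSDK_COMPUTE_REGION"); region != "" {
		return region
	}
	out, err := exec.Command("gcloud", "config", "get-value", "compute/region").Output()
	if err != nil {
		return ""
	}
	return strings.TrimSpace(string(out))
}

func main() {
	fmt.Println("default region:", defaultGCPRegion())
}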
e2e/e2e_test.go
package e2e import ( "flag" "fmt" "log" "os" "path/filepath" "testing" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/config" ) func init() { log.SetOutput(GinkgoWriter) flag.IntVar(&deployTimeout, "deploy-timeout", 10, "timeout to wait for created kubernetes resources") flag.BoolVar(&deployCephFS, "deploy-cephfs", true, "deploy cephfs csi driver") flag.BoolVar(&deployRBD, "deploy-rbd", true, "deploy rbd csi driver") flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephfs csi driver") flag.BoolVar(&testRBD, "test-rbd", true, "test rbd csi driver") flag.BoolVar(&upgradeTesting, "upgrade-testing", false, "perform upgrade testing") flag.StringVar(&upgradeVersion, "upgrade-version", "v2.1.2", "target version for upgrade testing") flag.StringVar(&cephCSINamespace, "cephcsi-namespace", defaultNs, "namespace in which cephcsi deployed") flag.StringVar(&rookNamespace, "rook-namespace", "rook-ceph", "namespace in which rook is deployed") setDefaultKubeconfig() // Register framework flags, then handle flags handleFlags() framework.AfterReadingAllFlags(&framework.TestContext) fmt.Println("timeout for deploytimeout ", deployTimeout) } func setDefaultKubeconfig() { _, exists := os.LookupEnv("KUBECONFIG") if !exists { defaultKubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config") os.Setenv("KUBECONFIG", defaultKubeconfig) } } var _ = BeforeSuite(func() { }) var _ = AfterSuite(func() { }) func TestE2E(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "E2e Suite") } func handleFlags() { config.CopyFlags(config.Flags, flag.CommandLine) framework.RegisterCommonFlags(flag.CommandLine) framework.RegisterClusterFlags(flag.CommandLine) testing.Init() flag.Parse() initResouces() }
[ "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
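In the record above, HOME is read only inside setDefaultKubeconfig to build a fallback KUBECONFIG path. A self-contained, illustrative sketch of that pattern follows; the main wrapper is added for the example and is not part of the test file.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// setDefaultKubeconfig points KUBECONFIG at $HOME/.kube/config when the
// variable is not already set, mirroring the helper in e2e_test.go.
func setDefaultKubeconfig() {
	if _, exists := os.LookupEnv("KUBECONFIG"); !exists {
		defaultKubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
		os.Setenv("KUBECONFIG", defaultKubeconfig)
	}
}

func main() {
	setDefaultKubeconfig()
	fmt.Println("KUBECONFIG =", os.Getenv("KUBECONFIG"))
}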
services/preview/synapse/mgmt/2019-06-01-preview/synapse/workspacemanagedsqlserverextendedblobauditingpolicies.go
package synapse // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "context" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/validation" "github.com/Azure/go-autorest/tracing" "net/http" ) // WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient is the azure Synapse Analytics Management Client type WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient struct { BaseClient } // NewWorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient creates an instance of the // WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient client. func NewWorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient(subscriptionID string) WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient { return NewWorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID) } // NewWorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClientWithBaseURI creates an instance of the // WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient client using a custom endpoint. Use this when // interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewWorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClientWithBaseURI(baseURI string, subscriptionID string) WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient { return WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate create or Update a workspace managed sql server's extended blob auditing policy. // Parameters: // resourceGroupName - the name of the resource group. The name is case insensitive. // workspaceName - the name of the workspace // parameters - properties of extended blob auditing policy. 
func (client WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, parameters ExtendedServerBlobAuditingPolicy) (result WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesCreateOrUpdateFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient.CreateOrUpdate") defer func() { sc := -1 if result.FutureAPI != nil && result.FutureAPI.Response() != nil { sc = result.FutureAPI.Response().StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: client.SubscriptionID, Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { return result, validation.NewError("synapse.WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient", "CreateOrUpdate", err.Error()) } req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, workspaceName, parameters) if err != nil { err = autorest.NewErrorWithError(err, "synapse.WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request") return } result, err = client.CreateOrUpdateSender(req) if err != nil { err = autorest.NewErrorWithError(err, "synapse.WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient", "CreateOrUpdate", result.Response(), "Failure sending request") return } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. func (client WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, parameters ExtendedServerBlobAuditingPolicy) (*http.Request, error) { pathParameters := map[string]interface{}{ "blobAuditingPolicyName": autorest.Encode("path", "default"), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), "workspaceName": autorest.Encode("path", workspaceName), } const APIVersion = "2019-06-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/extendedAuditingSettings/{blobAuditingPolicyName}", pathParameters), autorest.WithJSON(parameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. 
func (client WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient) CreateOrUpdateSender(req *http.Request) (future WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesCreateOrUpdateFuture, err error) { var resp *http.Response future.FutureAPI = &azure.Future{} resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf future.Result = future.result return } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. func (client WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result ExtendedServerBlobAuditingPolicy, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // Get get a workspace SQL server's extended blob auditing policy. // Parameters: // resourceGroupName - the name of the resource group. The name is case insensitive. // workspaceName - the name of the workspace func (client WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient) Get(ctx context.Context, resourceGroupName string, workspaceName string) (result ExtendedServerBlobAuditingPolicy, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient.Get") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: client.SubscriptionID, Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { return result, validation.NewError("synapse.WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient", "Get", err.Error()) } req, err := client.GetPreparer(ctx, resourceGroupName, workspaceName) if err != nil { err = autorest.NewErrorWithError(err, "synapse.WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient", "Get", nil, "Failure preparing request") return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "synapse.WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient", "Get", resp, "Failure sending request") return } result, err = client.GetResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "synapse.WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient", "Get", resp, "Failure responding to request") return } return } // GetPreparer prepares the Get request. 
func (client WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, workspaceName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "blobAuditingPolicyName": autorest.Encode("path", "default"), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), "workspaceName": autorest.Encode("path", workspaceName), } const APIVersion = "2019-06-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/extendedAuditingSettings/{blobAuditingPolicyName}", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient) GetSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always // closes the http.Response Body. func (client WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient) GetResponder(resp *http.Response) (result ExtendedServerBlobAuditingPolicy, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // ListByWorkspace list workspace managed sql server's extended blob auditing policies. // Parameters: // resourceGroupName - the name of the resource group. The name is case insensitive. 
// workspaceName - the name of the workspace func (client WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient) ListByWorkspace(ctx context.Context, resourceGroupName string, workspaceName string) (result ExtendedServerBlobAuditingPolicyListResultPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient.ListByWorkspace") defer func() { sc := -1 if result.esbaplr.Response.Response != nil { sc = result.esbaplr.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: client.SubscriptionID, Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { return result, validation.NewError("synapse.WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient", "ListByWorkspace", err.Error()) } result.fn = client.listByWorkspaceNextResults req, err := client.ListByWorkspacePreparer(ctx, resourceGroupName, workspaceName) if err != nil { err = autorest.NewErrorWithError(err, "synapse.WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient", "ListByWorkspace", nil, "Failure preparing request") return } resp, err := client.ListByWorkspaceSender(req) if err != nil { result.esbaplr.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "synapse.WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient", "ListByWorkspace", resp, "Failure sending request") return } result.esbaplr, err = client.ListByWorkspaceResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "synapse.WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient", "ListByWorkspace", resp, "Failure responding to request") return } if result.esbaplr.hasNextLink() && result.esbaplr.IsEmpty() { err = result.NextWithContext(ctx) return } return } // ListByWorkspacePreparer prepares the ListByWorkspace request. func (client WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient) ListByWorkspacePreparer(ctx context.Context, resourceGroupName string, workspaceName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), "workspaceName": autorest.Encode("path", workspaceName), } const APIVersion = "2019-06-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/extendedAuditingSettings", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ListByWorkspaceSender sends the ListByWorkspace request. The method will close the // http.Response Body if it receives an error. 
func (client WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient) ListByWorkspaceSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListByWorkspaceResponder handles the response to the ListByWorkspace request. The method always // closes the http.Response Body. func (client WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient) ListByWorkspaceResponder(resp *http.Response) (result ExtendedServerBlobAuditingPolicyListResult, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // listByWorkspaceNextResults retrieves the next set of results, if any. func (client WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient) listByWorkspaceNextResults(ctx context.Context, lastResults ExtendedServerBlobAuditingPolicyListResult) (result ExtendedServerBlobAuditingPolicyListResult, err error) { req, err := lastResults.extendedServerBlobAuditingPolicyListResultPreparer(ctx) if err != nil { return result, autorest.NewErrorWithError(err, "synapse.WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient", "listByWorkspaceNextResults", nil, "Failure preparing next results request") } if req == nil { return } resp, err := client.ListByWorkspaceSender(req) if err != nil { result.Response = autorest.Response{Response: resp} return result, autorest.NewErrorWithError(err, "synapse.WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient", "listByWorkspaceNextResults", resp, "Failure sending next results request") } result, err = client.ListByWorkspaceResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "synapse.WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient", "listByWorkspaceNextResults", resp, "Failure responding to next results request") } return } // ListByWorkspaceComplete enumerates all values, automatically crossing page boundaries as required. func (client WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient) ListByWorkspaceComplete(ctx context.Context, resourceGroupName string, workspaceName string) (result ExtendedServerBlobAuditingPolicyListResultIterator, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/WorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient.ListByWorkspace") defer func() { sc := -1 if result.Response().Response.Response != nil { sc = result.page.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } result.page, err = client.ListByWorkspace(ctx, resourceGroupName, workspaceName) return }
[]
[]
[]
[]
[]
go
null
null
null
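The record above is the generated client only; a hypothetical caller sketch follows. It assumes the standard go-autorest environment-based authorizer and uses placeholder subscription, resource-group, and workspace names, none of which come from the SDK file itself.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Placeholder identifiers; substitute real values.
	subscriptionID := "00000000-0000-0000-0000-000000000000"
	resourceGroup := "my-resource-group"
	workspace := "my-workspace"

	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatalf("auth: %v", err)
	}

	client := synapse.NewWorkspaceManagedSQLServerExtendedBlobAuditingPoliciesClient(subscriptionID)
	client.Authorizer = authorizer

	// Get fetches the workspace-level extended blob auditing policy ("default").
	policy, err := client.Get(context.Background(), resourceGroup, workspace)
	if err != nil {
		log.Fatalf("get policy: %v", err)
	}
	fmt.Println("HTTP status:", policy.Response.Response.StatusCode)
}

Get is shown because it completes synchronously; CreateOrUpdate in the same client returns a future for a long-running operation, so a real caller would also wait on that future before reading the result.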
internal/hud/renderer.go
package hud import ( "fmt" "os" "sync" "time" "github.com/gdamore/tcell" "github.com/windmilleng/tilt/internal/dockercompose" "github.com/windmilleng/tilt/internal/hud/view" "github.com/windmilleng/tilt/internal/rty" "github.com/windmilleng/tilt/pkg/model" ) const defaultLogPaneHeight = 8 type Renderer struct { rty rty.RTY screen tcell.Screen mu *sync.RWMutex clock func() time.Time } func NewRenderer(clock func() time.Time) *Renderer { return &Renderer{ mu: new(sync.RWMutex), clock: clock, } } func (r *Renderer) Render(v view.View, vs view.ViewState) { r.mu.RLock() defer r.mu.RUnlock() rty := r.rty if rty != nil { layout := r.layout(v, vs) rty.Render(layout) } } var cText = tcell.Color232 var cLightText = tcell.Color243 var cGood = tcell.ColorGreen var cBad = tcell.ColorRed var cPending = tcell.Color243 func (r *Renderer) layout(v view.View, vs view.ViewState) rty.Component { l := rty.NewFlexLayout(rty.DirVert) if vs.ShowNarration { l.Add(renderNarration(vs.NarrationMessage)) l.Add(rty.NewLine()) } l.Add(r.renderResourceHeader(v)) l.Add(r.renderResources(v, vs)) l.Add(r.renderLogPane(v, vs)) l.Add(r.renderFooter(v, keyLegend(v, vs))) var ret rty.Component = l ret = r.maybeAddFullScreenLog(v, vs, ret) ret = r.maybeAddAlertModal(v, vs, ret) return ret } func (r *Renderer) maybeAddFullScreenLog(v view.View, vs view.ViewState, layout rty.Component) rty.Component { if vs.TiltLogState == view.TiltLogFullScreen { tabView := NewTabView(v, vs) l := rty.NewConcatLayout(rty.DirVert) sl := rty.NewTextScrollLayout("log") l.Add(tabView.buildTabs(true)) sl.Add(rty.TextString(tabView.log())) l.AddDynamic(sl) l.Add(r.renderFooter(v, keyLegend(v, vs))) layout = rty.NewModalLayout(layout, l, 1, true) } return layout } func (r *Renderer) maybeAddAlertModal(v view.View, vs view.ViewState, layout rty.Component) rty.Component { alertMsg := "" if v.FatalError != nil { alertMsg = fmt.Sprintf("Tilt has encountered a fatal error: %s\nOnce you fix this issue you'll need to restart Tilt. In the meantime feel free to browse through the UI.", v.FatalError.Error()) } else if vs.AlertMessage != "" { alertMsg = vs.AlertMessage } if alertMsg != "" { l := rty.NewLines() l.Add(rty.TextString("")) msg := " " + alertMsg + " " l.Add(rty.Fg(rty.TextString(msg), tcell.ColorDefault)) l.Add(rty.TextString("")) w := rty.NewWindow(l) w.SetTitle("! 
Alert !") layout = r.renderModal(rty.Fg(w, tcell.ColorRed), layout, false) } return layout } func (r *Renderer) renderLogPane(v view.View, vs view.ViewState) rty.Component { tabView := NewTabView(v, vs) var height int switch vs.TiltLogState { case view.TiltLogShort: height = defaultLogPaneHeight case view.TiltLogHalfScreen: height = rty.GROW case view.TiltLogFullScreen: height = 1 // FullScreen is handled elsewhere, since it's no longer a pane // but we have to set height to something non-0 or rty will blow up } return rty.NewFixedSize(tabView.Build(), rty.GROW, height) } func renderPaneHeader(isMax bool) rty.Component { var verb string if isMax { verb = "contract" } else { verb = "expand" } s := fmt.Sprintf("X: %s", verb) l := rty.NewLine() l.Add(rty.NewFillerString(' ')) l.Add(rty.TextString(fmt.Sprintf(" %s ", s))) return l } func (r *Renderer) renderStatusMessage(v view.View) rty.Component { errorCount := 0 for _, res := range v.Resources { if isInError(res) { errorCount++ } } sb := rty.NewStringBuilder() if errorCount == 0 && v.TiltfileErrorMessage() == "" { sb.Fg(cGood).Text("✓").Fg(cText).Text(" OK") } else { var errorCountMessage string s := "error" if errorCount > 1 { s = "errors" } if errorCount > 0 { errorCountMessage = fmt.Sprintf(" %d %s", errorCount, s) } sb.Fg(cBad).Text("✖"). Fg(cText).Textf("%s", errorCountMessage) } return sb.Build() } func (r *Renderer) renderStatusBar(v view.View) rty.Component { l := rty.NewConcatLayout(rty.DirHor) l.Add(rty.TextString(" ")) l.Add(r.renderStatusMessage(v)) l.Add(rty.TextString(" ")) l.AddDynamic(rty.NewFillerString(' ')) msg := " To explore, open web view (enter) • terminal is limited " l.Add(rty.ColoredString(msg, cText)) return rty.Bg(rty.OneLine(l), tcell.ColorWhiteSmoke) } func (r *Renderer) renderFooter(v view.View, keys string) rty.Component { footer := rty.NewConcatLayout(rty.DirVert) footer.Add(r.renderStatusBar(v)) l := rty.NewConcatLayout(rty.DirHor) sbRight := rty.NewStringBuilder() sbRight.Text(keys) l.AddDynamic(rty.NewFillerString(' ')) l.Add(sbRight.Build()) footer.Add(l) return rty.NewFixedSize(footer, rty.GROW, 2) } func keyLegend(v view.View, vs view.ViewState) string { defaultKeys := "Browse (↓ ↑), Expand (→) ┊ (enter) log ┊ (ctrl-C) quit " if vs.AlertMessage != "" { return "Tilt (l)og ┊ (esc) close alert " } return defaultKeys } func isInError(res view.Resource) bool { return combinedStatus(res).color == cBad } func isCrashing(res view.Resource) bool { return (res.IsK8s() && res.K8sInfo().PodRestarts > 0) || res.LastBuild().Reason.Has(model.BuildReasonFlagCrash) || res.CurrentBuild.Reason.Has(model.BuildReasonFlagCrash) || res.PendingBuildReason.Has(model.BuildReasonFlagCrash) || res.IsDC() && res.DockerComposeTarget().Status() == string(dockercompose.StatusCrash) } func (r *Renderer) renderModal(fg rty.Component, bg rty.Component, fixed bool) rty.Component { return rty.NewModalLayout(bg, fg, .9, fixed) } func renderNarration(msg string) rty.Component { lines := rty.NewLines() l := rty.NewLine() l.Add(rty.TextString(msg)) lines.Add(rty.NewLine()) lines.Add(l) lines.Add(rty.NewLine()) box := rty.Fg(rty.Bg(lines, tcell.ColorLightGrey), cText) return rty.NewFixedSize(box, rty.GROW, 3) } func (r *Renderer) renderResourceHeader(v view.View) rty.Component { l := rty.NewConcatLayout(rty.DirHor) l.Add(rty.ColoredString(" RESOURCE NAME ", cLightText)) l.AddDynamic(rty.NewFillerString(' ')) k8sCell := rty.ColoredString(" CONTAINER", cLightText) l.Add(k8sCell) l.Add(middotText()) buildCell := 
rty.NewMinLengthLayout(BuildDurCellMinWidth+BuildStatusCellMinWidth, rty.DirHor). SetAlign(rty.AlignEnd). Add(rty.ColoredString("UPDATE STATUS ", cLightText)) l.Add(buildCell) l.Add(middotText()) deployCell := rty.NewMinLengthLayout(DeployCellMinWidth+1, rty.DirHor). SetAlign(rty.AlignEnd). Add(rty.ColoredString("AS OF ", cLightText)) l.Add(deployCell) return rty.OneLine(l) } func (r *Renderer) renderResources(v view.View, vs view.ViewState) rty.Component { rs := v.Resources cl := rty.NewConcatLayout(rty.DirVert) childNames := make([]string, len(rs)) for i, r := range rs { childNames[i] = r.Name.String() } // the items added to `l` below must be kept in sync with `childNames` above l, selectedResource := r.rty.RegisterElementScroll(resourcesScollerName, childNames) if len(rs) > 0 { for i, res := range rs { resView := NewResourceView(v.LogReader, res, vs.Resources[i], res.TriggerMode, selectedResource == res.Name.String(), r.clock) l.Add(resView.Build()) } } cl.Add(l) return cl } func (r *Renderer) SetUp() (chan tcell.Event, error) { r.mu.Lock() defer r.mu.Unlock() screen, err := tcell.NewScreen() if err != nil { if err == tcell.ErrTermNotFound { // The statically-compiled tcell only supports the most common TERM configs. // The dynamically-compiled tcell supports more, but has distribution problems. // See: https://github.com/gdamore/tcell/issues/252 term := os.Getenv("TERM") return nil, fmt.Errorf("Tilt does not support TERM=%q. "+ "This is not a common Terminal config. "+ "If you expect that you're using a common terminal, "+ "you might have misconfigured $TERM in your .profile.", term) } return nil, err } if err = screen.Init(); err != nil { return nil, err } screenEvents := make(chan tcell.Event) go func() { for { screenEvents <- screen.PollEvent() } }() r.rty = rty.NewRTY(screen, rty.SkipErrorHandler{}) r.screen = screen return screenEvents, nil } func (r *Renderer) RTY() rty.RTY { r.mu.RLock() defer r.mu.RUnlock() return r.rty } func (r *Renderer) Reset() { r.mu.Lock() defer r.mu.Unlock() if r.screen != nil { r.screen.Fini() } r.screen = nil }
[ "\"TERM\"" ]
[]
[ "TERM" ]
[]
["TERM"]
go
1
0
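The TERM variable recorded for renderer.go is read only when tcell cannot find the terminal definition, so the error message can name the unsupported value. An illustrative standalone sketch of that check follows; it is not taken from the Tilt source.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/gdamore/tcell"
)

func main() {
	screen, err := tcell.NewScreen()
	if err != nil {
		if err == tcell.ErrTermNotFound {
			// Same diagnostic idea as Renderer.SetUp: surface the unsupported TERM value.
			log.Fatalf("unsupported TERM=%q; check the terminal configuration", os.Getenv("TERM"))
		}
		log.Fatal(err)
	}
	if err := screen.Init(); err != nil {
		log.Fatal(err)
	}
	screen.Fini()
	fmt.Println("terminal screen initialised and released")
}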
tools/run-tests.py
#!/usr/bin/env python # # Copyright 2012 the V8 project authors. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from collections import OrderedDict import itertools import multiprocessing import optparse import os from os.path import join import platform import random import shlex import subprocess import sys import time from testrunner.local import execution from testrunner.local import progress from testrunner.local import testsuite from testrunner.local.testsuite import VARIANT_FLAGS from testrunner.local import utils from testrunner.local import verbose from testrunner.network import network_execution from testrunner.objects import context ARCH_GUESS = utils.DefaultArch() DEFAULT_TESTS = [ "mjsunit", "unittests", "cctest", "message", "preparser", ] # Map of test name synonyms to lists of test suites. Should be ordered by # expected runtimes (suites with slow test cases first). These groups are # invoked in seperate steps on the bots. TEST_MAP = { "default": [ "mjsunit", "cctest", "message", "preparser", ], "optimize_for_size": [ "mjsunit", "cctest", "webkit", ], "unittests": [ "unittests", ], } TIMEOUT_DEFAULT = 60 VARIANTS = ["default", "stress", "turbofan", "nocrankshaft"] DEBUG_FLAGS = ["--nohard-abort", "--nodead-code-elimination", "--nofold-constants", "--enable-slow-asserts", "--debug-code", "--verify-heap"] RELEASE_FLAGS = ["--nohard-abort", "--nodead-code-elimination", "--nofold-constants"] MODES = { "debug": { "flags": DEBUG_FLAGS, "timeout_scalefactor": 4, "status_mode": "debug", "execution_mode": "debug", "output_folder": "debug", }, "optdebug": { "flags": DEBUG_FLAGS, "timeout_scalefactor": 4, "status_mode": "debug", "execution_mode": "debug", "output_folder": "optdebug", }, "release": { "flags": RELEASE_FLAGS, "timeout_scalefactor": 1, "status_mode": "release", "execution_mode": "release", "output_folder": "release", }, # This mode requires v8 to be compiled with dchecks and slow dchecks. 
"tryrelease": { "flags": RELEASE_FLAGS + ["--enable-slow-asserts"], "timeout_scalefactor": 2, "status_mode": "debug", "execution_mode": "release", "output_folder": "release", }, } GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction", "--concurrent-recompilation-queue-length=64", "--concurrent-recompilation-delay=500", "--concurrent-recompilation"] SUPPORTED_ARCHS = ["android_arm", "android_arm64", "android_ia32", "android_x64", "arm", "ia32", "x87", "mips", "mipsel", "mips64el", "nacl_ia32", "nacl_x64", "ppc", "ppc64", "x64", "x32", "arm64"] # Double the timeout for these: SLOW_ARCHS = ["android_arm", "android_arm64", "android_ia32", "android_x64", "arm", "mips", "mipsel", "mips64el", "nacl_ia32", "nacl_x64", "x87", "arm64"] def BuildOptions(): result = optparse.OptionParser() result.add_option("--arch", help=("The architecture to run tests for, " "'auto' or 'native' for auto-detect"), default="ia32,x64,arm") result.add_option("--arch-and-mode", help="Architecture and mode in the format 'arch.mode'", default=None) result.add_option("--asan", help="Regard test expectations for ASAN", default=False, action="store_true") result.add_option("--buildbot", help="Adapt to path structure used on buildbots", default=False, action="store_true") result.add_option("--dcheck-always-on", help="Indicates that V8 was compiled with DCHECKs enabled", default=False, action="store_true") result.add_option("--cat", help="Print the source of the tests", default=False, action="store_true") result.add_option("--flaky-tests", help="Regard tests marked as flaky (run|skip|dontcare)", default="dontcare") result.add_option("--slow-tests", help="Regard slow tests (run|skip|dontcare)", default="dontcare") result.add_option("--pass-fail-tests", help="Regard pass|fail tests (run|skip|dontcare)", default="dontcare") result.add_option("--gc-stress", help="Switch on GC stress mode", default=False, action="store_true") result.add_option("--command-prefix", help="Prepended to each shell command used to run a test", default="") result.add_option("--download-data", help="Download missing test suite data", default=False, action="store_true") result.add_option("--extra-flags", help="Additional flags to pass to each test command", default="") result.add_option("--isolates", help="Whether to test isolates", default=False, action="store_true") result.add_option("-j", help="The number of parallel tasks to run", default=0, type="int") result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)", default="release,debug") result.add_option("--no-harness", "--noharness", help="Run without test harness of a given suite", default=False, action="store_true") result.add_option("--no-i18n", "--noi18n", help="Skip internationalization tests", default=False, action="store_true") result.add_option("--no-network", "--nonetwork", help="Don't distribute tests on the network", default=(utils.GuessOS() != "linux"), dest="no_network", action="store_true") result.add_option("--no-presubmit", "--nopresubmit", help='Skip presubmit checks', default=False, dest="no_presubmit", action="store_true") result.add_option("--no-snap", "--nosnap", help='Test a build compiled without snapshot.', default=False, dest="no_snap", action="store_true") result.add_option("--no-sorting", "--nosorting", help="Don't sort tests according to duration of last run.", default=False, dest="no_sorting", action="store_true") result.add_option("--no-stress", "--nostress", help="Don't run crankshaft --always-opt --stress-op test", default=False, 
dest="no_stress", action="store_true") result.add_option("--no-variants", "--novariants", help="Don't run any testing variants", default=False, dest="no_variants", action="store_true") result.add_option("--variants", help="Comma-separated list of testing variants") result.add_option("--outdir", help="Base directory with compile output", default="out") result.add_option("--predictable", help="Compare output of several reruns of each test", default=False, action="store_true") result.add_option("-p", "--progress", help=("The style of progress indicator" " (verbose, dots, color, mono)"), choices=progress.PROGRESS_INDICATORS.keys(), default="mono") result.add_option("--quickcheck", default=False, action="store_true", help=("Quick check mode (skip slow/flaky tests)")) result.add_option("--report", help="Print a summary of the tests to be run", default=False, action="store_true") result.add_option("--json-test-results", help="Path to a file for storing json results.") result.add_option("--rerun-failures-count", help=("Number of times to rerun each failing test case. " "Very slow tests will be rerun only once."), default=0, type="int") result.add_option("--rerun-failures-max", help="Maximum number of failing test cases to rerun.", default=100, type="int") result.add_option("--shard-count", help="Split testsuites into this number of shards", default=1, type="int") result.add_option("--shard-run", help="Run this shard from the split up tests.", default=1, type="int") result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="") result.add_option("--shell-dir", help="Directory containing executables", default="") result.add_option("--dont-skip-slow-simulator-tests", help="Don't skip more slow tests when using a simulator.", default=False, action="store_true", dest="dont_skip_simulator_slow_tests") result.add_option("--stress-only", help="Only run tests with --always-opt --stress-opt", default=False, action="store_true") result.add_option("--time", help="Print timing information after running", default=False, action="store_true") result.add_option("-t", "--timeout", help="Timeout in seconds", default= -1, type="int") result.add_option("--tsan", help="Regard test expectations for TSAN", default=False, action="store_true") result.add_option("-v", "--verbose", help="Verbose output", default=False, action="store_true") result.add_option("--valgrind", help="Run tests through valgrind", default=False, action="store_true") result.add_option("--warn-unused", help="Report unused rules", default=False, action="store_true") result.add_option("--junitout", help="File name of the JUnit output") result.add_option("--junittestsuite", help="The testsuite name in the JUnit output file", default="v8tests") result.add_option("--random-seed", default=0, dest="random_seed", help="Default seed for initializing random generator") result.add_option("--msan", help="Regard test expectations for MSAN", default=False, action="store_true") return result def ProcessOptions(options): global VARIANT_FLAGS global VARIANTS # Architecture and mode related stuff. 
if options.arch_and_mode: options.arch_and_mode = [arch_and_mode.split(".") for arch_and_mode in options.arch_and_mode.split(",")] options.arch = ",".join([tokens[0] for tokens in options.arch_and_mode]) options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode]) options.mode = options.mode.split(",") for mode in options.mode: if not mode.lower() in MODES: print "Unknown mode %s" % mode return False if options.arch in ["auto", "native"]: options.arch = ARCH_GUESS options.arch = options.arch.split(",") for arch in options.arch: if not arch in SUPPORTED_ARCHS: print "Unknown architecture %s" % arch return False # Store the final configuration in arch_and_mode list. Don't overwrite # predefined arch_and_mode since it is more expressive than arch and mode. if not options.arch_and_mode: options.arch_and_mode = itertools.product(options.arch, options.mode) # Special processing of other options, sorted alphabetically. if options.buildbot: # Buildbots run presubmit tests as a separate step. options.no_presubmit = True options.no_network = True if options.command_prefix: print("Specifying --command-prefix disables network distribution, " "running tests locally.") options.no_network = True options.command_prefix = shlex.split(options.command_prefix) options.extra_flags = shlex.split(options.extra_flags) if options.gc_stress: options.extra_flags += GC_STRESS_FLAGS if options.asan: options.extra_flags.append("--invoke-weak-callbacks") if options.tsan: VARIANTS = ["default"] suppressions_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sanitizers', 'tsan_suppressions.txt') tsan_options = '%s suppressions=%s' % ( os.environ.get('TSAN_OPTIONS', ''), suppressions_file) os.environ['TSAN_OPTIONS'] = tsan_options if options.j == 0: options.j = multiprocessing.cpu_count() while options.random_seed == 0: options.random_seed = random.SystemRandom().randint(-2147483648, 2147483647) def excl(*args): """Returns true if zero or one of multiple arguments are true.""" return reduce(lambda x, y: x + y, args) <= 1 if not excl(options.no_stress, options.stress_only, options.no_variants, bool(options.variants)): print("Use only one of --no-stress, --stress-only, --no-variants, " "or --variants.") return False if options.quickcheck: VARIANTS = ["default", "stress"] options.flaky_tests = "skip" options.slow_tests = "skip" options.pass_fail_tests = "skip" if options.no_stress: VARIANTS = ["default", "nocrankshaft"] if options.no_variants: VARIANTS = ["default"] if options.stress_only: VARIANTS = ["stress"] if options.variants: VARIANTS = options.variants.split(",") if not set(VARIANTS).issubset(VARIANT_FLAGS.keys()): print "All variants must be in %s" % str(VARIANT_FLAGS.keys()) return False if options.predictable: VARIANTS = ["default"] options.extra_flags.append("--predictable") options.extra_flags.append("--verify_predictable") options.extra_flags.append("--no-inline-new") if not options.shell_dir: if options.shell: print "Warning: --shell is deprecated, use --shell-dir instead." options.shell_dir = os.path.dirname(options.shell) if options.valgrind: run_valgrind = os.path.join("tools", "run-valgrind.py") # This is OK for distributed running, so we don't need to set no_network. 
options.command_prefix = (["python", "-u", run_valgrind] + options.command_prefix) def CheckTestMode(name, option): if not option in ["run", "skip", "dontcare"]: print "Unknown %s mode %s" % (name, option) return False return True if not CheckTestMode("flaky test", options.flaky_tests): return False if not CheckTestMode("slow test", options.slow_tests): return False if not CheckTestMode("pass|fail test", options.pass_fail_tests): return False if not options.no_i18n: DEFAULT_TESTS.append("intl") return True def ShardTests(tests, shard_count, shard_run): if shard_count < 2: return tests if shard_run < 1 or shard_run > shard_count: print "shard-run not a valid number, should be in [1:shard-count]" print "defaulting back to running all tests" return tests count = 0 shard = [] for test in tests: if count % shard_count == shard_run - 1: shard.append(test) count += 1 return shard def Main(): parser = BuildOptions() (options, args) = parser.parse_args() if not ProcessOptions(options): parser.print_help() return 1 exit_code = 0 workspace = os.path.abspath(join(os.path.dirname(sys.argv[0]), "..")) if not options.no_presubmit: print ">>> running presubmit tests" exit_code = subprocess.call( [sys.executable, join(workspace, "tools", "presubmit.py")]) suite_paths = utils.GetSuitePaths(join(workspace, "test")) # Expand arguments with grouped tests. The args should reflect the list of # suites as otherwise filters would break. def ExpandTestGroups(name): if name in TEST_MAP: return [suite for suite in TEST_MAP[arg]] else: return [name] args = reduce(lambda x, y: x + y, [ExpandTestGroups(arg) for arg in args], []) if len(args) == 0: suite_paths = [ s for s in DEFAULT_TESTS if s in suite_paths ] else: args_suites = OrderedDict() # Used as set for arg in args: args_suites[arg.split(os.path.sep)[0]] = True suite_paths = [ s for s in args_suites if s in suite_paths ] suites = [] for root in suite_paths: suite = testsuite.TestSuite.LoadTestSuite( os.path.join(workspace, "test", root)) if suite: suites.append(suite) if options.download_data: for s in suites: s.DownloadData() for (arch, mode) in options.arch_and_mode: try: code = Execute(arch, mode, args, options, suites, workspace) except KeyboardInterrupt: return 2 exit_code = exit_code or code return exit_code def Execute(arch, mode, args, options, suites, workspace): print(">>> Running tests for %s.%s" % (arch, mode)) shell_dir = options.shell_dir if not shell_dir: if options.buildbot: # TODO(machenbach): Get rid of different output folder location on # buildbot. Currently this is capitalized Release and Debug. shell_dir = os.path.join(workspace, options.outdir, mode) mode = mode.lower() else: shell_dir = os.path.join( workspace, options.outdir, "%s.%s" % (arch, MODES[mode]["output_folder"]), ) shell_dir = os.path.relpath(shell_dir) # Populate context object. mode_flags = MODES[mode]["flags"] timeout = options.timeout if timeout == -1: # Simulators are slow, therefore allow a longer default timeout. if arch in SLOW_ARCHS: timeout = 2 * TIMEOUT_DEFAULT; else: timeout = TIMEOUT_DEFAULT; timeout *= MODES[mode]["timeout_scalefactor"] if options.predictable: # Predictable mode is slower. 
timeout *= 2 ctx = context.Context(arch, MODES[mode]["execution_mode"], shell_dir, mode_flags, options.verbose, timeout, options.isolates, options.command_prefix, options.extra_flags, options.no_i18n, options.random_seed, options.no_sorting, options.rerun_failures_count, options.rerun_failures_max, options.predictable, options.no_harness) # TODO(all): Combine "simulator" and "simulator_run". simulator_run = not options.dont_skip_simulator_slow_tests and \ arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64el', \ 'ppc', 'ppc64'] and \ ARCH_GUESS and arch != ARCH_GUESS # Find available test suites and read test cases from them. variables = { "arch": arch, "asan": options.asan, "deopt_fuzzer": False, "gc_stress": options.gc_stress, "isolates": options.isolates, "mode": MODES[mode]["status_mode"], "no_i18n": options.no_i18n, "no_snap": options.no_snap, "simulator_run": simulator_run, "simulator": utils.UseSimulator(arch), "system": utils.GuessOS(), "tsan": options.tsan, "msan": options.msan, "dcheck_always_on": options.dcheck_always_on, "byteorder": sys.byteorder, } all_tests = [] num_tests = 0 test_id = 0 for s in suites: s.ReadStatusFile(variables) s.ReadTestCases(ctx) if len(args) > 0: s.FilterTestCasesByArgs(args) all_tests += s.tests s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests, options.slow_tests, options.pass_fail_tests) if options.cat: verbose.PrintTestSource(s.tests) continue variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS] s.tests = [ t.CopyAddingFlags(v) for t in s.tests for v in s.VariantFlags(t, variant_flags) ] s.tests = ShardTests(s.tests, options.shard_count, options.shard_run) num_tests += len(s.tests) for t in s.tests: t.id = test_id test_id += 1 if options.cat: return 0 # We're done here. if options.report: verbose.PrintReport(all_tests) if num_tests == 0: print "No tests to run." return 0 # Run the tests, either locally or distributed on the network. start_time = time.time() progress_indicator = progress.PROGRESS_INDICATORS[options.progress]() if options.junitout: progress_indicator = progress.JUnitTestProgressIndicator( progress_indicator, options.junitout, options.junittestsuite) if options.json_test_results: progress_indicator = progress.JsonTestProgressIndicator( progress_indicator, options.json_test_results, arch, MODES[mode]["execution_mode"]) run_networked = not options.no_network if not run_networked: print("Network distribution disabled, running tests locally.") elif utils.GuessOS() != "linux": print("Network distribution is only supported on Linux, sorry!") run_networked = False peers = [] if run_networked: peers = network_execution.GetPeers() if not peers: print("No connection to distribution server; running tests locally.") run_networked = False elif len(peers) == 1: print("No other peers on the network; running tests locally.") run_networked = False elif num_tests <= 100: print("Less than 100 tests, running them locally.") run_networked = False if run_networked: runner = network_execution.NetworkedRunner(suites, progress_indicator, ctx, peers, workspace) else: runner = execution.Runner(suites, progress_indicator, ctx) exit_code = runner.Run(options.j) overall_duration = time.time() - start_time if options.time: verbose.PrintTestDurations(suites, overall_duration) return exit_code if __name__ == "__main__": sys.exit(Main())
[]
[]
[ "TSAN_OPTIONS" ]
[]
["TSAN_OPTIONS"]
python
1
0
cmd/install.go
package cmd import ( "errors" "fmt" "os" "os/exec" "path" "strconv" _ "github.com/lib/pq" // pg driver "github.com/sethvargo/go-password/password" "github.com/spf13/cobra" ) // These variables are set during compilation time var quayImage string var redisImage string var postgresImage string // imageArchivePath is the optional location of the OCI image archive containing required install images var imageArchivePath string // sshKey is the optional location of the SSH key you would like to use to connect to your host. var sshKey string // targetHostname is the hostname of the server you wish to install Quay on var targetHostname string // targetUsername is the name of the user on the target host to connect with SSH var targetUsername string // initPassword is the password of the initial user. var initPassword string // quayHostname is the value to set SERVER_HOSTNAME in the Quay config.yaml var quayHostname string // askBecomePass holds whether or not to ask for sudo password during SSH connection var askBecomePass bool // additionalArgs are arguments that you would like to append to the end of the ansible-playbook call (used mostly for development) var additionalArgs string // installCmd represents the validate command var installCmd = &cobra.Command{ Use: "install", Short: "Install Quay and its required dependencies.", Run: func(cmd *cobra.Command, args []string) { install() }, } func init() { // Add install command rootCmd.AddCommand(installCmd) installCmd.Flags().StringVarP(&targetHostname, "targetHostname", "H", os.Getenv("HOST"), "The hostname of the target you wish to install Quay to. This defaults to $HOST") installCmd.Flags().StringVarP(&targetUsername, "targetUsername", "u", os.Getenv("USER"), "The user on the target host which will be used for SSH. This defaults to $USER") installCmd.Flags().StringVarP(&sshKey, "ssh-key", "k", os.Getenv("HOME")+"/.ssh/quay_installer", "The path of your ssh identity key. This defaults to ~/.ssh/quay_installer") installCmd.Flags().StringVarP(&initPassword, "initPassword", "", "", "The password of the initial user. If not specified, this will be randomly generated.") installCmd.Flags().StringVarP(&quayHostname, "quayHostname", "", "", "The value to set SERVER_HOSTNAME in the Quay config.yaml. This defaults to <targetHostname>:8443") installCmd.Flags().StringVarP(&imageArchivePath, "image-archive", "i", "", "An archive containing images") installCmd.Flags().BoolVarP(&askBecomePass, "askBecomePass", "", false, "Whether or not to ask for sudo password during SSH connection.") installCmd.Flags().StringVarP(&additionalArgs, "additionalArgs", "", "", "Additional arguments you would like to append to the ansible-playbook call. 
Used mostly for development.") } func install() { var err error log.Printf("Install has begun") log.Debug("Quay Image: " + quayImage) log.Debug("Redis Image: " + redisImage) log.Debug("Postgres Image: " + postgresImage) // Load execution environment err = loadExecutionEnvironment() check(err) // Check that SSH key is present, and generate if not err = loadSSHKeys() check(err) // Handle Image Archive Defaulting var imageArchiveMountFlag string if imageArchivePath == "" { executableDir, err := os.Executable() check(err) defaultArchivePath := path.Join(path.Dir(executableDir), "image-archive.tar") if pathExists(defaultArchivePath) { imageArchivePath = defaultArchivePath } } else { if !pathExists(imageArchivePath) { check(errors.New("Could not find image-archive.tar at " + imageArchivePath)) } } if imageArchivePath != "" { imageArchiveMountFlag = fmt.Sprintf("-v %s:/runner/image-archive.tar", imageArchivePath) log.Info("Found image archive at " + imageArchivePath) if isLocalInstall() { log.Printf("Loading image archive from %s", imageArchivePath) cmd := exec.Command("sudo", "podman", "load", "-i", imageArchivePath) if verbose { cmd.Stderr = os.Stderr cmd.Stdout = os.Stdout } err = cmd.Run() check(err) } log.Infof("Attempting to set SELinux rules on image archive") cmd := exec.Command("chcon", "-Rt", "svirt_sandbox_file_t", imageArchivePath) if verbose { cmd.Stderr = os.Stderr cmd.Stdout = os.Stdout } if err := cmd.Run(); err != nil { log.Warn("Could not set SELinux rule. If your system does not have SELinux enabled, you may ignore this.") } } // Generate password if none provided if initPassword == "" { initPassword, err = password.Generate(32, 10, 0, false, false) check(err) } // Set quayHostname if not already set if quayHostname == "" { quayHostname = targetHostname + ":8443" } // Set askBecomePass flag if true var askBecomePassFlag string if askBecomePass { askBecomePassFlag = "-K" } // Run playbook log.Printf("Running install playbook. This may take some time. To see playbook output run the installer with -v (verbose) flag.") podmanCmd := fmt.Sprintf(`sudo podman run `+ `--rm --interactive --tty `+ `--workdir /runner/project `+ `--net host `+ imageArchiveMountFlag+ // optional image archive flag ` -v %s:/runner/env/ssh_key `+ `-e RUNNER_OMIT_EVENTS=False `+ `-e RUNNER_ONLY_FAILED_EVENTS=False `+ `-e ANSIBLE_HOST_KEY_CHECKING=False `+ `-e ANSIBLE_CONFIG=/runner/project/ansible.cfg `+ `--quiet `+ `--name ansible_runner_instance `+ `quay.io/quay/openshift-mirror-registry-ee `+ `ansible-playbook -i %s@%s, --private-key /runner/env/ssh_key -e "init_password=%s quay_image=%s redis_image=%s postgres_image=%s quay_hostname=%s local_install=%s" install_mirror_appliance.yml %s %s`, sshKey, targetUsername, targetHostname, initPassword, quayImage, redisImage, postgresImage, quayHostname, strconv.FormatBool(isLocalInstall()), askBecomePassFlag, additionalArgs) log.Debug("Running command: " + podmanCmd) cmd := exec.Command("bash", "-c", podmanCmd) cmd.Stderr = os.Stderr cmd.Stdout = os.Stdout cmd.Stdin = os.Stdin err = cmd.Run() check(err) log.Printf("Quay installed successfully") log.Printf("Quay is available at %s with credentials (init, %s)", "https://"+quayHostname, initPassword) }
[ "\"HOST\"", "\"USER\"", "\"HOME\"" ]
[]
[ "HOST", "HOME", "USER" ]
[]
["HOST", "HOME", "USER"]
go
3
0
lineup_remote/model.py
import re from typing import Any, cast, Dict, Optional, Tuple class NumberFilter: def __init__(self, dump: Dict[str, Any]): self.min = dump["min"] self.max = dump["max"] self.filter_missing = dump["filterMissing"] def to_sql(self, column: str) -> Tuple[str, Dict[str, float]]: args = {} if self.min is not None and self.max is not None: sql = "{0} between :{0}_min and :{0}_max" args[column + "_min"] = self.min args[column + "_max"] = self.max elif self.min is not None: sql = "{0} >= :{0}_min" args[column + "_min"] = self.min elif self.max is not None: sql = "{0} <= :{0}_max" args[column + "_max"] = self.max if self.filter_missing: sql = "({0} is not null AND " + sql + ")" return sql.format(column), args class MappingFunction: def __init__(self, dump: Dict[str, Any]): self.type = dump["type"] self.domain = dump["domain"] self.range = dump["range"] def to_query(self, column: str): return "map_value({column}, '{c.type}', {c.domain[0]}, {c.domain[1]}, {c.range[0]}, {c.range[1]})".format(column=column, c=self) class DateGrouper: def __init__(self, dump: Dict[str, Any]): self.granularity = dump["granularity"] self.circular = dump["circular"] class CategoricalFilter: def __init__(self, dump: Dict[str, Any]): self.filter = dump["filter"] self.filter_missing = dump["filterMissing"] def to_sql(self, column: str): args = {column: self.filter} sql = "{0} = any(:{0})" if self.filter_missing: sql = "({0} is not null AND " + sql + ")" return sql.format(column), args class StringFilter: def __init__(self, dump: str): self.filter = dump def to_sql(self, column: str): if self.filter == "__FILTER_MISSING": return '({0} is not null AND {0} != ""'.format(column), {} if self.filter.startswith("REGEX:"): return "{0} ~ {0}".format(column), {column: self.filter[6:]} return "lower({0}) = {0}".format(column), {column: self.filter.lower()} class ColumnDump: def __init__(self, dump: Dict[str, Any], column: str = "", type: str = ""): self.id = dump["id"] self.desc = dump["desc"] self.column = column self.type = type self.filter: Any = None def to_filter(self): return self.filter.to_sql(self.column) if self.filter else None class NumberColumnDump(ColumnDump): def __init__(self, dump: Dict[str, Any], column: str): super(NumberColumnDump, self).__init__(dump, column, "number") self.map = MappingFunction(dump["map"]) self.filter = NumberFilter(dump["filter"]) if dump.get("filter") else None self.group_sort_method = dump["groupSortMethod"] self.stratify_thresholds = dump["stratifyThresholds"] if dump.get("stratifyThresholds") else None assert self.column is not None self.mapped_column = self.map.to_query(self.column) class DateColumnDump(ColumnDump): def __init__(self, dump: Dict[str, Any], column: str): super(DateColumnDump, self).__init__(dump, column, "date") self.filter = NumberFilter(dump["filter"]) if dump.get("filter") else None self.grouper = DateGrouper(dump["grouper"]) if dump.get("grouper") else None class CategoricalColumnDump(ColumnDump): def __init__(self, dump: Dict[str, Any], column: str): super(CategoricalColumnDump, self).__init__(dump, column, "categorical") self.filter = CategoricalFilter(dump["filter"]) if dump.get("filter") else None class StringColumnDump(ColumnDump): def __init__(self, dump: Dict[str, Any], column: str): super(StringColumnDump, self).__init__(dump, column, "string") self.filter = StringFilter(dump["filter"]) if dump.get("filter") else None self.group_criteria = dump.get("groupCriteria") class CompositeColumnDump(ColumnDump): def __init__(self, dump: Dict[str, Any]): 
super(CompositeColumnDump, self).__init__(dump, "", dump["desc"]["type"]) self.children = [parse_column_dump(c) for c in dump.get("children", [])] class StackColumnDump(CompositeColumnDump): def __init__(self, dump: Dict[str, Any]): super(StackColumnDump, self).__init__(dump) self.total = dump["width"] class NestedColumnDump(CompositeColumnDump): def __init__(self, dump: Dict[str, Any]): super(NestedColumnDump, self).__init__(dump) def parse_column_dump(dump: Dict[str, Any]): desc = dump["desc"] if isinstance(desc, str): column_type, column = desc.split("@") if column_type == "number": return NumberColumnDump(dump, column) if column_type == "string": return StringColumnDump(dump, column) if column_type == "categorical": return CategoricalColumnDump(dump, column) if column_type == "date": return DateColumnDump(dump, column) return ColumnDump(dump, column, column_type) # object dump so a composite for example column_type = desc.get("type") if column_type == "stack": return StackColumnDump(dump) elif column_type == "nested": return NestedColumnDump(dump) return ColumnDump(dump, "", desc["type"]) class ComputeColumnDump: def __init__(self, dump: ColumnDump, type: str): self.dump = dump self.type = type def parse_compute_column_dump(dump: Dict[str, Any]): return ComputeColumnDump(parse_column_dump(dump["dump"]), dump["type"]) class SortCriteria: def __init__(self, dump: Dict[str, Any]): self.col = parse_column_dump(dump["col"]) self.asc = dump["asc"] def to_clause(self): if self.asc: return self.col.column return self.col.column + " DESC" class ServerRankingDump: def __init__(self, dump: Dict[str, Any]): self.filter = [parse_column_dump(d) for d in dump.get("filter", [])] self.sort_criteria = [SortCriteria(d) for d in dump.get("sortCriteria", [])] self.group_criteria = [parse_column_dump(d) for d in dump.get("groupCriteria", [])] self.group_sort_criteria = [SortCriteria(d) for d in dump.get("groupSortCriteria", [])] # TODO support nested columns, support boolean columns # TODO support stacked columns def to_filter(self): fs = [f.to_filter() for f in self.filter if f.filter] args = dict() for f in fs: args.update(f[1]) return " AND ".join(f[0] for f in fs), args def to_where(self, group: Optional[str] = None): filter_sql, args = self.to_filter() where = "WHERE " + filter_sql if filter_sql else "" if group: args["groupname"] = group if not where: where = "WHERE {0} = :groupname".format(self.to_group_name()) else: where += " AND {0} = :groupname".format(self.to_group_name()) return where, args def to_sort(self): clauses = [c.to_clause() for c in self.sort_criteria] if not clauses: return "" return "ORDER BY " + ", ".join(clauses) def to_group_by(self): clauses = [c.column for c in self.group_criteria] if not clauses: return "" return "GROUP BY " + ", ".join(clauses) def to_group_name(self): if not self.group_criteria: return "'Default group'" return "CONCAT({0})".format(", ".join("COALESCE({0}, 'Missing values')".format(g.column) for g in self.group_criteria)) def parse_ranking_dump(dump: Dict[str, Any]): return ServerRankingDump(dump)
[]
[]
[]
[]
[]
python
null
null
null
tests/scripts/thread-cert/node.py
#!/usr/bin/env python # # Copyright (c) 2016, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # import config import ipaddress import os import sys import pexpect import pexpect.popen_spawn import re import simulator import socket import time import unittest class Node: def __init__(self, nodeid, is_mtd=False, simulator=None): self.nodeid = nodeid self.verbose = int(float(os.getenv('VERBOSE', 0))) self.node_type = os.getenv('NODE_TYPE', 'sim') self.simulator = simulator if self.simulator: self.simulator.add_node(self) mode = os.environ.get('USE_MTD') == '1' and is_mtd and 'mtd' or 'ftd' if self.node_type == 'soc': self.__init_soc(nodeid) elif self.node_type == 'ncp-sim': # TODO use mode after ncp-mtd is available. self.__init_ncp_sim(nodeid, 'ftd') else: self.__init_sim(nodeid, mode) if self.verbose: if sys.version_info[0] == 2: self.pexpect.logfile_read = sys.stdout else: self.pexpect.logfile_read = sys.stdout.buffer self._initialized = True def __init_sim(self, nodeid, mode): """ Initialize a simulation node. """ if 'OT_CLI_PATH' in os.environ.keys(): cmd = os.environ['OT_CLI_PATH'] elif 'top_builddir' in os.environ.keys(): srcdir = os.environ['top_builddir'] cmd = '%s/examples/apps/cli/ot-cli-%s' % (srcdir, mode) else: cmd = './ot-cli-%s' % mode if 'RADIO_DEVICE' in os.environ: cmd += ' -v %s' % os.environ['RADIO_DEVICE'] os.environ['NODE_ID'] = str(nodeid) cmd += ' %d' % nodeid print("%s" % cmd) self.pexpect = pexpect.popen_spawn.PopenSpawn(cmd, timeout=4) # Add delay to ensure that the process is ready to receive commands. timeout = 0.4 while timeout > 0: self.pexpect.send('\r\n') try: self.pexpect.expect('> ', timeout=0.1) break except pexpect.TIMEOUT: timeout -= 0.1 def __init_ncp_sim(self, nodeid, mode): """ Initialize an NCP simulation node. 
""" if 'RADIO_DEVICE' in os.environ: args = ' %s' % os.environ['RADIO_DEVICE'] os.environ['NODE_ID'] = str(nodeid) else: args = '' if 'OT_NCP_PATH' in os.environ.keys(): cmd = 'spinel-cli.py -p "%s%s" -n' % ( os.environ['OT_NCP_PATH'], args, ) elif "top_builddir" in os.environ.keys(): builddir = os.environ['top_builddir'] cmd = 'spinel-cli.py -p "%s/examples/apps/ncp/ot-ncp-%s%s" -n' % ( builddir, mode, args, ) else: cmd = 'spinel-cli.py -p "./ot-ncp-%s%s" -n' % (mode, args) cmd += ' %d' % nodeid print("%s" % cmd) self.pexpect = pexpect.spawn(cmd, timeout=4) # Add delay to ensure that the process is ready to receive commands. time.sleep(0.2) self._expect('spinel-cli >') self.debug(int(os.getenv('DEBUG', '0'))) def _expect(self, pattern, timeout=-1, *args, **kwargs): """ Process simulator events until expected the pattern. """ if timeout == -1: timeout = self.pexpect.timeout assert timeout > 0 while timeout > 0: try: return self.pexpect.expect(pattern, 0.1, *args, **kwargs) except pexpect.TIMEOUT: timeout -= 0.1 self.simulator.go(0) if timeout <= 0: raise def __init_soc(self, nodeid): """ Initialize a System-on-a-chip node connected via UART. """ import fdpexpect serialPort = '/dev/ttyUSB%d' % ((nodeid - 1) * 2) self.pexpect = fdpexpect.fdspawn( os.open(serialPort, os.O_RDWR | os.O_NONBLOCK | os.O_NOCTTY) ) def __del__(self): self.destroy() def destroy(self): if not self._initialized: return if ( hasattr(self.pexpect, 'proc') and self.pexpect.proc.poll() is None or not hasattr(self.pexpect, 'proc') and self.pexpect.isalive() ): print("%d: exit" % self.nodeid) self.pexpect.send('exit\n') self.pexpect.expect(pexpect.EOF) self.pexpect.wait() self._initialized = False def read_cert_messages_in_commissioning_log(self, timeout=-1): """Get the log of the traffic after DTLS handshake. """ format_str = br"=+?\[\[THCI\].*?type=%s.*?\].*?=+?[\s\S]+?-{40,}" join_fin_req = format_str % br"JOIN_FIN\.req" join_fin_rsp = format_str % br"JOIN_FIN\.rsp" dummy_format_str = br"\[THCI\].*?type=%s.*?" join_ent_ntf = dummy_format_str % br"JOIN_ENT\.ntf" join_ent_rsp = dummy_format_str % br"JOIN_ENT\.rsp" pattern = ( b"(" + join_fin_req + b")|(" + join_fin_rsp + b")|(" + join_ent_ntf + b")|(" + join_ent_rsp + b")" ) messages = [] # There are at most 4 cert messages both for joiner and commissioner for _ in range(0, 4): try: self._expect(pattern, timeout=timeout) log = self.pexpect.match.group(0) messages.append(self._extract_cert_message(log)) except BaseException: break return messages def _extract_cert_message(self, log): res = re.search(br"direction=\w+", log) assert res direction = res.group(0).split(b'=')[1].strip() res = re.search(br"type=\S+", log) assert res type = res.group(0).split(b'=')[1].strip() payload = bytearray([]) payload_len = 0 if type in [b"JOIN_FIN.req", b"JOIN_FIN.rsp"]: res = re.search(br"len=\d+", log) assert res payload_len = int(res.group(0).split(b'=')[1].strip()) hex_pattern = br"\|(\s([0-9a-fA-F]{2}|\.\.))+?\s+?\|" while True: res = re.search(hex_pattern, log) if not res: break data = [ int(hex, 16) for hex in res.group(0)[1:-1].split(b' ') if hex and hex != b'..' 
] payload += bytearray(data) log = log[res.end() - 1:] assert len(payload) == payload_len return (direction, type, payload) def send_command(self, cmd, go=True): print("%d: %s" % (self.nodeid, cmd)) self.pexpect.send(cmd + '\n') if go: self.simulator.go(0, nodeid=self.nodeid) sys.stdout.flush() def get_commands(self): self.send_command('?') self._expect('Commands:') commands = [] while True: i = self._expect(['Done', r'(\S+)']) if i != 0: commands.append(self.pexpect.match.groups()[0]) else: break return commands def set_mode(self, mode): cmd = 'mode %s' % mode self.send_command(cmd) self._expect('Done') def debug(self, level): # `debug` command will not trigger interaction with simulator self.send_command('debug %d' % level, go=False) def start(self): self.interface_up() self.thread_start() def stop(self): self.thread_stop() self.interface_down() def interface_up(self): self.send_command('ifconfig up') self._expect('Done') def interface_down(self): self.send_command('ifconfig down') self._expect('Done') def thread_start(self): self.send_command('thread start') self._expect('Done') def thread_stop(self): self.send_command('thread stop') self._expect('Done') def commissioner_start(self): cmd = 'commissioner start' self.send_command(cmd) self._expect('Done') def commissioner_add_joiner(self, addr, psk): cmd = 'commissioner joiner add %s %s' % (addr, psk) self.send_command(cmd) self._expect('Done') def joiner_start(self, pskd='', provisioning_url=''): cmd = 'joiner start %s %s' % (pskd, provisioning_url) self.send_command(cmd) self._expect('Done') def clear_whitelist(self): cmd = 'macfilter addr clear' self.send_command(cmd) self._expect('Done') def enable_whitelist(self): cmd = 'macfilter addr whitelist' self.send_command(cmd) self._expect('Done') def disable_whitelist(self): cmd = 'macfilter addr disable' self.send_command(cmd) self._expect('Done') def add_whitelist(self, addr, rssi=None): cmd = 'macfilter addr add %s' % addr if rssi is not None: cmd += ' %s' % rssi self.send_command(cmd) self._expect('Done') def remove_whitelist(self, addr): cmd = 'macfilter addr remove %s' % addr self.send_command(cmd) self._expect('Done') def get_addr16(self): self.send_command('rloc16') i = self._expect('([0-9a-fA-F]{4})') if i == 0: addr16 = int(self.pexpect.match.groups()[0], 16) self._expect('Done') return addr16 def get_router_id(self): rloc16 = self.get_addr16() return rloc16 >> 10 def get_addr64(self): self.send_command('extaddr') i = self._expect('([0-9a-fA-F]{16})') if i == 0: addr64 = self.pexpect.match.groups()[0].decode("utf-8") self._expect('Done') return addr64 def get_eui64(self): self.send_command('eui64') i = self._expect('([0-9a-fA-F]{16})') if i == 0: addr64 = self.pexpect.match.groups()[0].decode("utf-8") self._expect('Done') return addr64 def get_joiner_id(self): self.send_command('joiner id') i = self._expect('([0-9a-fA-F]{16})') if i == 0: addr = self.pexpect.match.groups()[0].decode("utf-8") self._expect('Done') return addr def get_channel(self): self.send_command('channel') i = self._expect(r'(\d+)\r?\n') if i == 0: channel = int(self.pexpect.match.groups()[0]) self._expect('Done') return channel def set_channel(self, channel): cmd = 'channel %d' % channel self.send_command(cmd) self._expect('Done') def get_masterkey(self): self.send_command('masterkey') i = self._expect('([0-9a-fA-F]{32})') if i == 0: masterkey = self.pexpect.match.groups()[0].decode("utf-8") self._expect('Done') return masterkey def set_masterkey(self, masterkey): cmd = 'masterkey %s' % masterkey 
self.send_command(cmd) self._expect('Done') def get_key_sequence_counter(self): self.send_command('keysequence counter') i = self._expect(r'(\d+)\r?\n') if i == 0: key_sequence_counter = int(self.pexpect.match.groups()[0]) self._expect('Done') return key_sequence_counter def set_key_sequence_counter(self, key_sequence_counter): cmd = 'keysequence counter %d' % key_sequence_counter self.send_command(cmd) self._expect('Done') def set_key_switch_guardtime(self, key_switch_guardtime): cmd = 'keysequence guardtime %d' % key_switch_guardtime self.send_command(cmd) self._expect('Done') def set_network_id_timeout(self, network_id_timeout): cmd = 'networkidtimeout %d' % network_id_timeout self.send_command(cmd) self._expect('Done') def get_network_name(self): self.send_command('networkname') while True: i = self._expect(['Done', r'(\S+)']) if i != 0: network_name = self.pexpect.match.groups()[0].decode('utf-8') else: break return network_name def set_network_name(self, network_name): cmd = 'networkname %s' % network_name self.send_command(cmd) self._expect('Done') def get_panid(self): self.send_command('panid') i = self._expect('([0-9a-fA-F]{4})') if i == 0: panid = int(self.pexpect.match.groups()[0], 16) self._expect('Done') return panid def set_panid(self, panid=config.PANID): cmd = 'panid %d' % panid self.send_command(cmd) self._expect('Done') def get_partition_id(self): self.send_command('leaderpartitionid') i = self._expect(r'(\d+)\r?\n') if i == 0: weight = self.pexpect.match.groups()[0] self._expect('Done') return weight def set_partition_id(self, partition_id): cmd = 'leaderpartitionid %d' % partition_id self.send_command(cmd) self._expect('Done') def set_router_upgrade_threshold(self, threshold): cmd = 'routerupgradethreshold %d' % threshold self.send_command(cmd) self._expect('Done') def set_router_downgrade_threshold(self, threshold): cmd = 'routerdowngradethreshold %d' % threshold self.send_command(cmd) self._expect('Done') def release_router_id(self, router_id): cmd = 'releaserouterid %d' % router_id self.send_command(cmd) self._expect('Done') def get_state(self): states = [r'\ndetached', r'\nchild', r'\nrouter', r'\nleader'] self.send_command('state') match = self._expect(states) self._expect('Done') return states[match].strip(r'\n') def set_state(self, state): cmd = 'state %s' % state self.send_command(cmd) self._expect('Done') def get_timeout(self): self.send_command('childtimeout') i = self._expect(r'(\d+)\r?\n') if i == 0: timeout = self.pexpect.match.groups()[0] self._expect('Done') return timeout def set_timeout(self, timeout): cmd = 'childtimeout %d' % timeout self.send_command(cmd) self._expect('Done') def set_max_children(self, number): cmd = 'childmax %d' % number self.send_command(cmd) self._expect('Done') def get_weight(self): self.send_command('leaderweight') i = self._expect(r'(\d+)\r?\n') if i == 0: weight = self.pexpect.match.groups()[0] self._expect('Done') return weight def set_weight(self, weight): cmd = 'leaderweight %d' % weight self.send_command(cmd) self._expect('Done') def add_ipaddr(self, ipaddr): cmd = 'ipaddr add %s' % ipaddr self.send_command(cmd) self._expect('Done') def get_addrs(self): addrs = [] self.send_command('ipaddr') while True: i = self._expect([r'(\S+(:\S*)+)\r?\n', 'Done']) if i == 0: addrs.append(self.pexpect.match.groups()[0].decode("utf-8")) elif i == 1: break return addrs def get_addr(self, prefix): network = ipaddress.ip_network(u'%s' % str(prefix)) addrs = self.get_addrs() for addr in addrs: if isinstance(addr, bytearray): addr = 
bytes(addr) elif isinstance(addr, str) and sys.version_info[0] == 2: addr = addr.decode("utf-8") ipv6_address = ipaddress.ip_address(addr) if ipv6_address in network: return ipv6_address.exploded return None def get_addr_rloc(self): addrs = self.get_addrs() for addr in addrs: segs = addr.split(':') if ( segs[4] == '0' and segs[5] == 'ff' and segs[6] == 'fe00' and segs[7] != 'fc00' ): return addr return None def get_addr_leader_aloc(self): addrs = self.get_addrs() for addr in addrs: segs = addr.split(':') if ( segs[4] == '0' and segs[5] == 'ff' and segs[6] == 'fe00' and segs[7] == 'fc00' ): return addr return None def get_eidcaches(self): eidcaches = [] self.send_command('eidcache') while True: i = self._expect([r'([a-fA-F0-9\:]+) ([a-fA-F0-9]+)\r?\n', 'Done']) if i == 0: eid = self.pexpect.match.groups()[0].decode("utf-8") rloc = self.pexpect.match.groups()[1].decode("utf-8") eidcaches.append((eid, rloc)) elif i == 1: break return eidcaches def add_service(self, enterpriseNumber, serviceData, serverData): cmd = 'service add %s %s %s' % ( enterpriseNumber, serviceData, serverData, ) self.send_command(cmd) self._expect('Done') def remove_service(self, enterpriseNumber, serviceData): cmd = 'service remove %s %s' % (enterpriseNumber, serviceData) self.send_command(cmd) self._expect('Done') def __getLinkLocalAddress(self): for ip6Addr in self.get_addrs(): if re.match(config.LINK_LOCAL_REGEX_PATTERN, ip6Addr, re.I): return ip6Addr return None def __getGlobalAddress(self): global_address = [] for ip6Addr in self.get_addrs(): if ( (not re.match(config.LINK_LOCAL_REGEX_PATTERN, ip6Addr, re.I)) and ( not re.match( config.MESH_LOCAL_PREFIX_REGEX_PATTERN, ip6Addr, re.I ) ) and ( not re.match( config.ROUTING_LOCATOR_REGEX_PATTERN, ip6Addr, re.I ) ) ): global_address.append(ip6Addr) return global_address def __getRloc(self): for ip6Addr in self.get_addrs(): if ( re.match(config.MESH_LOCAL_PREFIX_REGEX_PATTERN, ip6Addr, re.I) and re.match( config.ROUTING_LOCATOR_REGEX_PATTERN, ip6Addr, re.I ) and not ( re.match(config.ALOC_FLAG_REGEX_PATTERN, ip6Addr, re.I) ) ): return ip6Addr return None def __getAloc(self): aloc = [] for ip6Addr in self.get_addrs(): if ( re.match(config.MESH_LOCAL_PREFIX_REGEX_PATTERN, ip6Addr, re.I) and re.match( config.ROUTING_LOCATOR_REGEX_PATTERN, ip6Addr, re.I ) and re.match(config.ALOC_FLAG_REGEX_PATTERN, ip6Addr, re.I) ): aloc.append(ip6Addr) return aloc def __getMleid(self): for ip6Addr in self.get_addrs(): if re.match( config.MESH_LOCAL_PREFIX_REGEX_PATTERN, ip6Addr, re.I ) and not ( re.match(config.ROUTING_LOCATOR_REGEX_PATTERN, ip6Addr, re.I) ): return ip6Addr return None def get_ip6_address(self, address_type): """Get specific type of IPv6 address configured on thread device. Args: address_type: the config.ADDRESS_TYPE type of IPv6 address. Returns: IPv6 address string. 
""" if address_type == config.ADDRESS_TYPE.LINK_LOCAL: return self.__getLinkLocalAddress() elif address_type == config.ADDRESS_TYPE.GLOBAL: return self.__getGlobalAddress() elif address_type == config.ADDRESS_TYPE.RLOC: return self.__getRloc() elif address_type == config.ADDRESS_TYPE.ALOC: return self.__getAloc() elif address_type == config.ADDRESS_TYPE.ML_EID: return self.__getMleid() else: return None return None def get_context_reuse_delay(self): self.send_command('contextreusedelay') i = self._expect(r'(\d+)\r?\n') if i == 0: timeout = self.pexpect.match.groups()[0] self._expect('Done') return timeout def set_context_reuse_delay(self, delay): cmd = 'contextreusedelay %d' % delay self.send_command(cmd) self._expect('Done') def add_prefix(self, prefix, flags, prf='med'): cmd = 'prefix add %s %s %s' % (prefix, flags, prf) self.send_command(cmd) self._expect('Done') def remove_prefix(self, prefix): cmd = 'prefix remove %s' % prefix self.send_command(cmd) self._expect('Done') def add_route(self, prefix, prf='med'): cmd = 'route add %s %s' % (prefix, prf) self.send_command(cmd) self._expect('Done') def remove_route(self, prefix): cmd = 'route remove %s' % prefix self.send_command(cmd) self._expect('Done') def register_netdata(self): self.send_command('netdataregister') self._expect('Done') def energy_scan(self, mask, count, period, scan_duration, ipaddr): cmd = 'commissioner energy %d %d %d %d %s' % ( mask, count, period, scan_duration, ipaddr, ) self.send_command(cmd) if isinstance(self.simulator, simulator.VirtualTime): self.simulator.go(8) timeout = 1 else: timeout = 8 self._expect('Energy:', timeout=timeout) def panid_query(self, panid, mask, ipaddr): cmd = 'commissioner panid %d %d %s' % (panid, mask, ipaddr) self.send_command(cmd) if isinstance(self.simulator, simulator.VirtualTime): self.simulator.go(8) timeout = 1 else: timeout = 8 self._expect('Conflict:', timeout=timeout) def scan(self): self.send_command('scan') results = [] while True: i = self._expect( [ r'\|\s(\S+)\s+\|\s(\S+)\s+\|\s([0-9a-fA-F]{4})\s\|\s([0-9a-fA-F]{16})\s\|\s(\d+)\r?\n', 'Done', ] ) if i == 0: results.append(self.pexpect.match.groups()) else: break return results def ping(self, ipaddr, num_responses=1, size=None, timeout=5): cmd = 'ping %s' % ipaddr if size is not None: cmd += ' %d' % size self.send_command(cmd) if isinstance(self.simulator, simulator.VirtualTime): self.simulator.go(timeout) result = True try: responders = {} while len(responders) < num_responses: i = self._expect([r'from (\S+):']) if i == 0: responders[self.pexpect.match.groups()[0]] = 1 self._expect('\n') except (pexpect.TIMEOUT, socket.timeout): result = False if isinstance(self.simulator, simulator.VirtualTime): self.simulator.sync_devices() return result def reset(self): self.send_command('reset') time.sleep(0.1) def set_router_selection_jitter(self, jitter): cmd = 'routerselectionjitter %d' % jitter self.send_command(cmd) self._expect('Done') def set_active_dataset( self, timestamp, panid=None, channel=None, channel_mask=None, master_key=None, ): self.send_command('dataset clear') self._expect('Done') cmd = 'dataset activetimestamp %d' % timestamp self.send_command(cmd) self._expect('Done') if panid is not None: cmd = 'dataset panid %d' % panid self.send_command(cmd) self._expect('Done') if channel is not None: cmd = 'dataset channel %d' % channel self.send_command(cmd) self._expect('Done') if channel_mask is not None: cmd = 'dataset channelmask %d' % channel_mask self.send_command(cmd) self._expect('Done') if master_key is not None: 
cmd = 'dataset masterkey %s' % master_key self.send_command(cmd) self._expect('Done') self.send_command('dataset commit active') self._expect('Done') def set_pending_dataset( self, pendingtimestamp, activetimestamp, panid=None, channel=None ): self.send_command('dataset clear') self._expect('Done') cmd = 'dataset pendingtimestamp %d' % pendingtimestamp self.send_command(cmd) self._expect('Done') cmd = 'dataset activetimestamp %d' % activetimestamp self.send_command(cmd) self._expect('Done') if panid is not None: cmd = 'dataset panid %d' % panid self.send_command(cmd) self._expect('Done') if channel is not None: cmd = 'dataset channel %d' % channel self.send_command(cmd) self._expect('Done') self.send_command('dataset commit pending') self._expect('Done') def announce_begin(self, mask, count, period, ipaddr): cmd = 'commissioner announce %d %d %d %s' % ( mask, count, period, ipaddr, ) self.send_command(cmd) self._expect('Done') def send_mgmt_active_set( self, active_timestamp=None, channel=None, channel_mask=None, extended_panid=None, panid=None, master_key=None, mesh_local=None, network_name=None, binary=None, ): cmd = 'dataset mgmtsetcommand active ' if active_timestamp is not None: cmd += 'activetimestamp %d ' % active_timestamp if channel is not None: cmd += 'channel %d ' % channel if channel_mask is not None: cmd += 'channelmask %d ' % channel_mask if extended_panid is not None: cmd += 'extpanid %s ' % extended_panid if panid is not None: cmd += 'panid %d ' % panid if master_key is not None: cmd += 'masterkey %s ' % master_key if mesh_local is not None: cmd += 'localprefix %s ' % mesh_local if network_name is not None: cmd += 'networkname %s ' % network_name if binary is not None: cmd += 'binary %s ' % binary self.send_command(cmd) self._expect('Done') def send_mgmt_pending_set( self, pending_timestamp=None, active_timestamp=None, delay_timer=None, channel=None, panid=None, master_key=None, mesh_local=None, network_name=None, ): cmd = 'dataset mgmtsetcommand pending ' if pending_timestamp is not None: cmd += 'pendingtimestamp %d ' % pending_timestamp if active_timestamp is not None: cmd += 'activetimestamp %d ' % active_timestamp if delay_timer is not None: cmd += 'delaytimer %d ' % delay_timer if channel is not None: cmd += 'channel %d ' % channel if panid is not None: cmd += 'panid %d ' % panid if master_key is not None: cmd += 'masterkey %s ' % master_key if mesh_local is not None: cmd += 'localprefix %s ' % mesh_local if network_name is not None: cmd += 'networkname %s ' % network_name self.send_command(cmd) self._expect('Done') def coaps_start_psk(self, psk, pskIdentity): cmd = 'coaps psk %s %s' % (psk, pskIdentity) self.send_command(cmd) self._expect('Done') cmd = 'coaps start' self.send_command(cmd) self._expect('Done') def coaps_start_x509(self): cmd = 'coaps x509' self.send_command(cmd) self._expect('Done') cmd = 'coaps start' self.send_command(cmd) self._expect('Done') def coaps_set_resource_path(self, path): cmd = 'coaps resource %s' % path self.send_command(cmd) self._expect('Done') def coaps_stop(self): cmd = 'coaps stop' self.send_command(cmd) if isinstance(self.simulator, simulator.VirtualTime): self.simulator.go(5) timeout = 1 else: timeout = 5 self._expect('Done', timeout=timeout) def coaps_connect(self, ipaddr): cmd = 'coaps connect %s' % ipaddr self.send_command(cmd) if isinstance(self.simulator, simulator.VirtualTime): self.simulator.go(5) timeout = 1 else: timeout = 5 self._expect('coaps connected', timeout=timeout) def coaps_disconnect(self): cmd = 'coaps 
disconnect' self.send_command(cmd) self._expect('Done') self.simulator.go(5) def coaps_get(self): cmd = 'coaps get test' self.send_command(cmd) if isinstance(self.simulator, simulator.VirtualTime): self.simulator.go(5) timeout = 1 else: timeout = 5 self._expect('coaps response', timeout=timeout) def commissioner_mgmtset(self, tlvs_binary): cmd = 'commissioner mgmtset binary %s' % tlvs_binary self.send_command(cmd) self._expect('Done') def bytes_to_hex_str(self, src): return ''.join(format(x, '02x') for x in src) def commissioner_mgmtset_with_tlvs(self, tlvs): payload = bytearray() for tlv in tlvs: payload += tlv.to_hex() self.commissioner_mgmtset(self.bytes_to_hex_str(payload)) def udp_start(self, local_ipaddr, local_port): cmd = 'udp open' self.send_command(cmd) self._expect('Done') cmd = 'udp bind %s %s' % (local_ipaddr, local_port) self.send_command(cmd) self._expect('Done') def udp_stop(self): cmd = 'udp close' self.send_command(cmd) self._expect('Done') def udp_send(self, bytes, ipaddr, port, success=True): cmd = 'udp send -s %d %s %d' % (bytes, ipaddr, port) self.send_command(cmd) if success: self._expect('Done') else: self._expect('Error') def udp_check_rx(self, bytes_should_rx): self._expect('%d bytes' % bytes_should_rx) def router_list(self): cmd = 'router list' self.send_command(cmd) self._expect([r'(\d+)((\s\d+)*)']) g = self.pexpect.match.groups() router_list = g[0] + ' ' + g[1] router_list = [int(x) for x in router_list.split()] self._expect('Done') return router_list def router_table(self): cmd = 'router table' self.send_command(cmd) self._expect(r'(.*)Done') g = self.pexpect.match.groups() output = g[0] lines = output.strip().split('\n') lines = [l.strip() for l in lines] router_table = {} for i, line in enumerate(lines): if not line.startswith('|') or not line.endswith('|'): if i not in (0, 2): # should not happen print("unexpected line %d: %s" % (i, line)) continue line = line[1:][:-1] line = [x.strip() for x in line.split('|')] if len(line) != 8: print("unexpected line %d: %s" % (i, line)) continue try: int(line[0]) except ValueError: if i != 1: print("unexpected line %d: %s" % (i, line)) continue id = int(line[0]) rloc16 = int(line[1], 16) nexthop = int(line[2]) pathcost = int(line[3]) lqin = int(line[4]) lqout = int(line[5]) age = int(line[6]) emac = str(line[7]) router_table[id] = { 'rloc16': rloc16, 'nexthop': nexthop, 'pathcost': pathcost, 'lqin': lqin, 'lqout': lqout, 'age': age, 'emac': emac, } return router_table if __name__ == '__main__': unittest.main()
[]
[]
[ "NODE_ID", "USE_MTD", "top_builddir", "RADIO_DEVICE", "OT_NCP_PATH", "OT_CLI_PATH", "VERBOSE", "DEBUG", "NODE_TYPE" ]
[]
["NODE_ID", "USE_MTD", "top_builddir", "RADIO_DEVICE", "OT_NCP_PATH", "OT_CLI_PATH", "VERBOSE", "DEBUG", "NODE_TYPE"]
python
9
0
refugio/wsgi.py
""" WSGI config for refugio project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'refugio.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
nipype/utils/config.py
# -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Created on 20 Apr 2010 logging options : INFO, DEBUG hash_method : content, timestamp @author: Chris Filo Gorgolewski ''' import os import sys import errno import atexit from warnings import warn from distutils.version import LooseVersion import configparser import numpy as np from simplejson import load, dump from .misc import str2bool from filelock import SoftFileLock CONFIG_DEPRECATIONS = { 'profile_runtime': ('monitoring.enabled', '1.0'), 'filemanip_level': ('logging.utils_level', '1.0'), } NUMPY_MMAP = LooseVersion(np.__version__) >= LooseVersion('1.12.0') DEFAULT_CONFIG_TPL = """\ [logging] workflow_level = INFO utils_level = INFO interface_level = INFO log_to_file = false log_directory = {log_dir} log_size = 16384000 log_rotate = 4 [execution] create_report = true crashdump_dir = {crashdump_dir} hash_method = timestamp job_finished_timeout = 5 keep_inputs = false local_hash_check = true matplotlib_backend = Agg plugin = Linear remove_node_directories = false remove_unnecessary_outputs = true try_hard_link_datasink = true single_thread_matlab = true crashfile_format = pklz stop_on_first_crash = false stop_on_first_rerun = false use_relative_paths = false stop_on_unknown_version = false write_provenance = false parameterize_dirs = true poll_sleep_duration = 2 xvfb_max_wait = 10 check_version = true [monitoring] enabled = false sample_frequency = 1 summary_append = true [check] interval = 1209600 """.format def mkdir_p(path): try: os.makedirs(path) except OSError as exc: if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise class NipypeConfig(object): """Base nipype config class""" def __init__(self, *args, **kwargs): self._config = configparser.ConfigParser() self._cwd = None config_dir = os.path.expanduser('~/.nipype') self.data_file = os.path.join(config_dir, 'nipype.json') self.set_default_config() self._display = None self._resource_monitor = None self._config.read( [os.path.join(config_dir, 'nipype.cfg'), 'nipype.cfg']) for option in CONFIG_DEPRECATIONS: for section in ['execution', 'logging', 'monitoring']: if self.has_option(section, option): new_section, new_option = CONFIG_DEPRECATIONS[option][ 0].split('.') if not self.has_option(new_section, new_option): # Warn implicit in get self.set(new_section, new_option, self.get(section, option)) @property def cwd(self): """Cache current working directory ASAP""" # Run getcwd only once, preventing multiproc to finish # with error having changed to the wrong path if self._cwd is None: try: self._cwd = os.getcwd() except OSError: warn('Trying to run Nipype from a nonexistent directory "{}".'. format(os.getenv('PWD', 'unknown')), RuntimeWarning) raise return self._cwd def set_default_config(self): """Read default settings template and set into config object""" default_cfg = DEFAULT_CONFIG_TPL( log_dir=os.path.expanduser( '~'), # Get $HOME in a platform-agnostic way crashdump_dir=self.cwd # Read cached cwd ) try: self._config.read_string(default_cfg) # Python >= 3.2 except AttributeError: from io import StringIO self._config.readfp(StringIO(default_cfg)) def enable_debug_mode(self): """Enables debug configuration""" from .. 
import logging self._config.set('execution', 'stop_on_first_crash', 'true') self._config.set('execution', 'remove_unnecessary_outputs', 'false') self._config.set('execution', 'keep_inputs', 'true') self._config.set('logging', 'workflow_level', 'DEBUG') self._config.set('logging', 'interface_level', 'DEBUG') self._config.set('logging', 'utils_level', 'DEBUG') logging.update_logging(self._config) def set_log_dir(self, log_dir): """Sets logging directory This should be the first thing that is done before any nipype class with logging is imported. """ self._config.set('logging', 'log_directory', log_dir) def get(self, section, option, default=None): """Get an option""" if option in CONFIG_DEPRECATIONS: msg = ('Config option "%s" has been deprecated as of nipype %s. ' 'Please use "%s" instead.') % ( option, CONFIG_DEPRECATIONS[option][1], CONFIG_DEPRECATIONS[option][0]) warn(msg) section, option = CONFIG_DEPRECATIONS[option][0].split('.') if self._config.has_option(section, option): return self._config.get(section, option) return default def set(self, section, option, value): """Set new value on option""" if isinstance(value, bool): value = str(value) if option in CONFIG_DEPRECATIONS: msg = ('Config option "%s" has been deprecated as of nipype %s. ' 'Please use "%s" instead.') % ( option, CONFIG_DEPRECATIONS[option][1], CONFIG_DEPRECATIONS[option][0]) warn(msg) section, option = CONFIG_DEPRECATIONS[option][0].split('.') return self._config.set(section, option, value) def getboolean(self, section, option): """Get a boolean option from section""" return self._config.getboolean(section, option) def has_option(self, section, option): """Check if option exists in section""" return self._config.has_option(section, option) @property def _sections(self): return self._config._sections def get_data(self, key): """Read options file""" if not os.path.exists(self.data_file): return None with SoftFileLock('%s.lock' % self.data_file): with open(self.data_file, 'rt') as file: datadict = load(file) if key in datadict: return datadict[key] return None def save_data(self, key, value): """Store config flie""" datadict = {} if os.path.exists(self.data_file): with SoftFileLock('%s.lock' % self.data_file): with open(self.data_file, 'rt') as file: datadict = load(file) else: dirname = os.path.dirname(self.data_file) if not os.path.exists(dirname): mkdir_p(dirname) with SoftFileLock('%s.lock' % self.data_file): with open(self.data_file, 'wt') as file: datadict[key] = value dump(datadict, file) def update_config(self, config_dict): """Extend internal dictionary with config_dict""" for section in ['execution', 'logging', 'check']: if section in config_dict: for key, val in list(config_dict[section].items()): if not key.startswith('__'): self._config.set(section, key, str(val)) def update_matplotlib(self): """Set backend on matplotlib from options""" import matplotlib matplotlib.use(self.get('execution', 'matplotlib_backend')) def enable_provenance(self): """Sets provenance storing on""" self._config.set('execution', 'write_provenance', 'true') self._config.set('execution', 'hash_method', 'content') @property def resource_monitor(self): """Check if resource_monitor is available""" if self._resource_monitor is not None: return self._resource_monitor # Cache config from nipype config self.resource_monitor = str2bool( self._config.get('monitoring', 'enabled')) or False return self._resource_monitor @resource_monitor.setter def resource_monitor(self, value): # Accept string true/false values if isinstance(value, (str, 
bytes)): value = str2bool(value.lower()) if value is False: self._resource_monitor = False elif value is True: if not self._resource_monitor: # Before setting self._resource_monitor check psutil # availability self._resource_monitor = False try: import psutil self._resource_monitor = LooseVersion( psutil.__version__) >= LooseVersion('5.0') except ImportError: pass finally: if not self._resource_monitor: warn('Could not enable the resource monitor: ' 'psutil>=5.0 could not be imported.') self._config.set('monitoring', 'enabled', ('%s' % self._resource_monitor).lower()) def enable_resource_monitor(self): """Sets the resource monitor on""" self.resource_monitor = True def disable_resource_monitor(self): """Sets the resource monitor off""" self.resource_monitor = False def get_display(self): """Returns the first display available""" # Check if an Xorg server is listening # import subprocess as sp # if not hasattr(sp, 'DEVNULL'): # setattr(sp, 'DEVNULL', os.devnull) # x_listening = bool(sp.call('ps au | grep -v grep | grep -i xorg', # shell=True, stdout=sp.DEVNULL)) if self._display is not None: return ':%d' % self._display.new_display sysdisplay = None if self._config.has_option('execution', 'display_variable'): sysdisplay = self._config.get('execution', 'display_variable') sysdisplay = sysdisplay or os.getenv('DISPLAY') if sysdisplay: from collections import namedtuple def _mock(): pass # Store a fake Xvfb object. Format - <host>:<display>[.<screen>] ndisp = sysdisplay.split(':')[-1].split('.')[0] Xvfb = namedtuple('Xvfb', ['new_display', 'stop']) self._display = Xvfb(int(ndisp), _mock) return self.get_display() else: if 'darwin' in sys.platform: raise RuntimeError( 'Xvfb requires root permissions to run in OSX. Please ' 'make sure that an X server is listening and set the ' 'appropriate config on either $DISPLAY or nipype\'s ' '"display_variable" config. Valid X servers include ' 'VNC, XQuartz, or manually started Xvfb.') # If $DISPLAY is empty, it confuses Xvfb so unset if sysdisplay == '': del os.environ['DISPLAY'] try: from xvfbwrapper import Xvfb except ImportError: raise RuntimeError( 'A display server was required, but $DISPLAY is not ' 'defined and Xvfb could not be imported.') self._display = Xvfb(nolisten='tcp') self._display.start() # Older versions of xvfbwrapper used vdisplay_num if not hasattr(self._display, 'new_display'): setattr(self._display, 'new_display', self._display.vdisplay_num) return self.get_display() def stop_display(self): """Closes the display if started""" if self._display is not None: from .. import logging self._display.stop() logging.getLogger('nipype.interface').debug( 'Closing display (if virtual)') @atexit.register def free_display(): """Stop virtual display (if it is up)""" from .. import config config.stop_display()
[]
[]
[ "PWD", "DISPLAY" ]
[]
["PWD", "DISPLAY"]
python
2
0