filename
stringlengths 4
198
| content
stringlengths 25
939k
| environment
list | variablearg
list | constarg
list | variableargjson
stringclasses 1
value | constargjson
stringlengths 2
3.9k
| lang
stringclasses 3
values | constargcount
float64 0
129
⌀ | variableargcount
float64 0
0
⌀ | sentence
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|
fairseq/fairseq_cli/preprocess.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data pre-processing: build vocabularies and binarize training data.
"""
import logging
import os
import shutil
import sys
from collections import Counter
from itertools import zip_longest
from multiprocessing import Pool
from fairseq import options, tasks, utils
from fairseq.binarizer import Binarizer
from fairseq.data import indexed_dataset
from fairseq.file_chunker_utils import find_offsets
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.preprocess")
def main(args):
utils.import_user_module(args)
os.makedirs(args.destdir, exist_ok=True)
logger.addHandler(
logging.FileHandler(
filename=os.path.join(args.destdir, "preprocess.log"),
)
)
logger.info(args)
task = tasks.get_task(args.task)
def train_path(lang):
return "{}{}".format(args.trainpref, ("." + lang) if lang else "")
def file_name(prefix, lang):
fname = prefix
if lang is not None:
fname += ".{lang}".format(lang=lang)
return fname
def dest_path(prefix, lang):
return os.path.join(args.destdir, file_name(prefix, lang))
def dict_path(lang):
return dest_path("dict", lang) + ".txt"
def build_dictionary(filenames, src=False, tgt=False):
assert src ^ tgt
return task.build_dictionary(
filenames,
workers=args.workers,
threshold=args.thresholdsrc if src else args.thresholdtgt,
nwords=args.nwordssrc if src else args.nwordstgt,
padding_factor=args.padding_factor,
)
target = not args.only_source
if not args.srcdict and os.path.exists(dict_path(args.source_lang)):
raise FileExistsError(dict_path(args.source_lang))
if target and not args.tgtdict and os.path.exists(dict_path(args.target_lang)):
raise FileExistsError(dict_path(args.target_lang))
if args.joined_dictionary:
assert (
not args.srcdict or not args.tgtdict
), "cannot use both --srcdict and --tgtdict with --joined-dictionary"
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
elif args.tgtdict:
src_dict = task.load_dictionary(args.tgtdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary(
{train_path(lang) for lang in [args.source_lang, args.target_lang]},
src=True,
)
tgt_dict = src_dict
else:
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary([train_path(args.source_lang)], src=True)
if target:
if args.tgtdict:
tgt_dict = task.load_dictionary(args.tgtdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --tgtdict is not specified"
tgt_dict = build_dictionary([train_path(args.target_lang)], tgt=True)
else:
tgt_dict = None
src_dict.save(dict_path(args.source_lang))
if target and tgt_dict is not None:
tgt_dict.save(dict_path(args.target_lang))
if args.dict_only:
return
def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers):
logger.info("[{}] Dictionary: {} types".format(lang, len(vocab)))
n_seq_tok = [0, 0]
replaced = Counter()
def merge_result(worker_result):
replaced.update(worker_result["replaced"])
n_seq_tok[0] += worker_result["nseq"]
n_seq_tok[1] += worker_result["ntok"]
input_file = "{}{}".format(
input_prefix, ("." + lang) if lang is not None else ""
)
offsets = find_offsets(input_file, num_workers)
(first_chunk, *more_chunks) = zip(offsets, offsets[1:])
pool = None
if num_workers > 1:
pool = Pool(processes=num_workers - 1)
for worker_id, (start_offset, end_offset) in enumerate(
more_chunks, start=1
):
prefix = "{}{}".format(output_prefix, worker_id)
pool.apply_async(
binarize,
(
args,
input_file,
vocab,
prefix,
lang,
start_offset,
end_offset,
),
callback=merge_result,
)
pool.close()
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, lang, "bin"),
impl=args.dataset_impl,
vocab_size=len(vocab),
)
merge_result(
Binarizer.binarize(
input_file,
vocab,
lambda t: ds.add_item(t),
offset=first_chunk[0],
end=first_chunk[1],
)
)
if num_workers > 1:
pool.join()
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, lang)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
logger.info(
"[{}] {}: {} sents, {} tokens, {:.3}% replaced by {}".format(
lang,
input_file,
n_seq_tok[0],
n_seq_tok[1],
100 * sum(replaced.values()) / n_seq_tok[1],
vocab.unk_word,
)
)
def make_binary_alignment_dataset(input_prefix, output_prefix, num_workers):
nseq = [0]
def merge_result(worker_result):
nseq[0] += worker_result["nseq"]
input_file = input_prefix
offsets = find_offsets(input_file, num_workers)
(first_chunk, *more_chunks) = zip(offsets, offsets[1:])
pool = None
if num_workers > 1:
pool = Pool(processes=num_workers - 1)
for worker_id, (start_offset, end_offset) in enumerate(
more_chunks, start=1
):
prefix = "{}{}".format(output_prefix, worker_id)
pool.apply_async(
binarize_alignments,
(
args,
input_file,
utils.parse_alignment,
prefix,
start_offset,
end_offset,
),
callback=merge_result,
)
pool.close()
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, None, "bin"), impl=args.dataset_impl
)
merge_result(
Binarizer.binarize_alignments(
input_file,
utils.parse_alignment,
lambda t: ds.add_item(t),
offset=first_chunk[0],
end=first_chunk[1],
)
)
if num_workers > 1:
pool.join()
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, None)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, None, "idx"))
logger.info("[alignments] {}: parsed {} alignments".format(input_file, nseq[0]))
def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1):
if args.dataset_impl == "raw":
# Copy original text file to destination folder
output_text_file = dest_path(
output_prefix + ".{}-{}".format(args.source_lang, args.target_lang),
lang,
)
shutil.copyfile(file_name(input_prefix, lang), output_text_file)
else:
make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers)
def make_all(lang, vocab):
if args.trainpref:
make_dataset(vocab, args.trainpref, "train", lang, num_workers=args.workers)
if args.validpref:
for k, validpref in enumerate(args.validpref.split(",")):
outprefix = "valid{}".format(k) if k > 0 else "valid"
make_dataset(
vocab, validpref, outprefix, lang, num_workers=args.workers
)
if args.testpref:
for k, testpref in enumerate(args.testpref.split(",")):
outprefix = "test{}".format(k) if k > 0 else "test"
make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers)
def make_all_alignments():
if args.trainpref and os.path.exists(args.trainpref + "." + args.align_suffix):
make_binary_alignment_dataset(
args.trainpref + "." + args.align_suffix,
"train.align",
num_workers=args.workers,
)
if args.validpref and os.path.exists(args.validpref + "." + args.align_suffix):
make_binary_alignment_dataset(
args.validpref + "." + args.align_suffix,
"valid.align",
num_workers=args.workers,
)
if args.testpref and os.path.exists(args.testpref + "." + args.align_suffix):
make_binary_alignment_dataset(
args.testpref + "." + args.align_suffix,
"test.align",
num_workers=args.workers,
)
make_all(args.source_lang, src_dict)
if target:
make_all(args.target_lang, tgt_dict)
if args.align_suffix:
make_all_alignments()
logger.info("Wrote preprocessed data to {}".format(args.destdir))
if args.alignfile:
assert args.trainpref, "--trainpref must be set if --alignfile is specified"
src_file_name = train_path(args.source_lang)
tgt_file_name = train_path(args.target_lang)
freq_map = {}
with open(args.alignfile, "r", encoding="utf-8") as align_file:
with open(src_file_name, "r", encoding="utf-8") as src_file:
with open(tgt_file_name, "r", encoding="utf-8") as tgt_file:
for a, s, t in zip_longest(align_file, src_file, tgt_file):
si = src_dict.encode_line(s, add_if_not_exist=False)
ti = tgt_dict.encode_line(t, add_if_not_exist=False)
ai = list(map(lambda x: tuple(x.split("-")), a.split()))
for sai, tai in ai:
srcidx = si[int(sai)]
tgtidx = ti[int(tai)]
if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():
assert srcidx != src_dict.pad()
assert srcidx != src_dict.eos()
assert tgtidx != tgt_dict.pad()
assert tgtidx != tgt_dict.eos()
if srcidx not in freq_map:
freq_map[srcidx] = {}
if tgtidx not in freq_map[srcidx]:
freq_map[srcidx][tgtidx] = 1
else:
freq_map[srcidx][tgtidx] += 1
align_dict = {}
for srcidx in freq_map.keys():
align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
with open(
os.path.join(
args.destdir,
"alignment.{}-{}.txt".format(args.source_lang, args.target_lang),
),
"w",
encoding="utf-8",
) as f:
for k, v in align_dict.items():
print("{} {}".format(src_dict[k], tgt_dict[v]), file=f)
def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=True):
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, lang, "bin"),
impl=args.dataset_impl,
vocab_size=len(vocab),
)
def consumer(tensor):
ds.add_item(tensor)
res = Binarizer.binarize(
filename, vocab, consumer, append_eos=append_eos, offset=offset, end=end
)
ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
return res
def binarize_alignments(args, filename, parse_alignment, output_prefix, offset, end):
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, None, "bin"),
impl=args.dataset_impl,
vocab_size=None,
)
def consumer(tensor):
ds.add_item(tensor)
res = Binarizer.binarize_alignments(
filename, parse_alignment, consumer, offset=offset, end=end
)
ds.finalize(dataset_dest_file(args, output_prefix, None, "idx"))
return res
def dataset_dest_prefix(args, output_prefix, lang):
base = "{}/{}".format(args.destdir, output_prefix)
if lang is not None:
lang_part = ".{}-{}.{}".format(args.source_lang, args.target_lang, lang)
elif args.only_source:
lang_part = ""
else:
lang_part = ".{}-{}".format(args.source_lang, args.target_lang)
return "{}{}".format(base, lang_part)
def dataset_dest_file(args, output_prefix, lang, extension):
base = dataset_dest_prefix(args, output_prefix, lang)
return "{}.{}".format(base, extension)
def cli_main():
parser = options.get_preprocessing_parser()
args = parser.parse_args()
main(args)
if __name__ == "__main__":
cli_main()
|
[] |
[] |
[
"LOGLEVEL"
] |
[]
|
["LOGLEVEL"]
|
python
| 1 | 0 | |
Lib/test/test_userdict.py
|
# Check every path through every method of UserDict
from test import test_support, mapping_tests
import UserDict
d0 = {}
d1 = {"one": 1}
d2 = {"one": 1, "two": 2}
d3 = {"one": 1, "two": 3, "three": 5}
d4 = {"one": None, "two": None}
d5 = {"one": 1, "two": 1}
class UserDictTest(mapping_tests.TestHashMappingProtocol):
type2test = UserDict.IterableUserDict
def test_all(self):
# Test constructors
u = UserDict.UserDict()
u0 = UserDict.UserDict(d0)
u1 = UserDict.UserDict(d1)
u2 = UserDict.IterableUserDict(d2)
uu = UserDict.UserDict(u)
uu0 = UserDict.UserDict(u0)
uu1 = UserDict.UserDict(u1)
uu2 = UserDict.UserDict(u2)
# keyword arg constructor
self.assertEqual(UserDict.UserDict(one=1, two=2), d2)
# item sequence constructor
self.assertEqual(UserDict.UserDict([('one',1), ('two',2)]), d2)
self.assertEqual(UserDict.UserDict(dict=[('one',1), ('two',2)]), d2)
# both together
self.assertEqual(UserDict.UserDict([('one',1), ('two',2)], two=3, three=5), d3)
# alternate constructor
self.assertEqual(UserDict.UserDict.fromkeys('one two'.split()), d4)
self.assertEqual(UserDict.UserDict().fromkeys('one two'.split()), d4)
self.assertEqual(UserDict.UserDict.fromkeys('one two'.split(), 1), d5)
self.assertEqual(UserDict.UserDict().fromkeys('one two'.split(), 1), d5)
self.assert_(u1.fromkeys('one two'.split()) is not u1)
self.assert_(isinstance(u1.fromkeys('one two'.split()), UserDict.UserDict))
self.assert_(isinstance(u2.fromkeys('one two'.split()), UserDict.IterableUserDict))
# Test __repr__
self.assertEqual(str(u0), str(d0))
self.assertEqual(repr(u1), repr(d1))
self.assertEqual(`u2`, `d2`)
# Test __cmp__ and __len__
all = [d0, d1, d2, u, u0, u1, u2, uu, uu0, uu1, uu2]
for a in all:
for b in all:
self.assertEqual(cmp(a, b), cmp(len(a), len(b)))
# Test __getitem__
self.assertEqual(u2["one"], 1)
self.assertRaises(KeyError, u1.__getitem__, "two")
# Test __setitem__
u3 = UserDict.UserDict(u2)
u3["two"] = 2
u3["three"] = 3
# Test __delitem__
del u3["three"]
self.assertRaises(KeyError, u3.__delitem__, "three")
# Test clear
u3.clear()
self.assertEqual(u3, {})
# Test copy()
u2a = u2.copy()
self.assertEqual(u2a, u2)
u2b = UserDict.UserDict(x=42, y=23)
u2c = u2b.copy() # making a copy of a UserDict is special cased
self.assertEqual(u2b, u2c)
class MyUserDict(UserDict.UserDict):
def display(self): print self
m2 = MyUserDict(u2)
m2a = m2.copy()
self.assertEqual(m2a, m2)
# SF bug #476616 -- copy() of UserDict subclass shared data
m2['foo'] = 'bar'
self.assertNotEqual(m2a, m2)
# Test keys, items, values
self.assertEqual(u2.keys(), d2.keys())
self.assertEqual(u2.items(), d2.items())
self.assertEqual(u2.values(), d2.values())
# Test has_key and "in".
for i in u2.keys():
self.assert_(u2.has_key(i))
self.assert_(i in u2)
self.assertEqual(u1.has_key(i), d1.has_key(i))
self.assertEqual(i in u1, i in d1)
self.assertEqual(u0.has_key(i), d0.has_key(i))
self.assertEqual(i in u0, i in d0)
# Test update
t = UserDict.UserDict()
t.update(u2)
self.assertEqual(t, u2)
class Items:
def items(self):
return (("x", 42), ("y", 23))
t = UserDict.UserDict()
t.update(Items())
self.assertEqual(t, {"x": 42, "y": 23})
# Test get
for i in u2.keys():
self.assertEqual(u2.get(i), u2[i])
self.assertEqual(u1.get(i), d1.get(i))
self.assertEqual(u0.get(i), d0.get(i))
# Test "in" iteration.
for i in xrange(20):
u2[i] = str(i)
ikeys = []
for k in u2:
ikeys.append(k)
keys = u2.keys()
self.assertEqual(set(ikeys), set(keys))
# Test setdefault
t = UserDict.UserDict()
self.assertEqual(t.setdefault("x", 42), 42)
self.assert_(t.has_key("x"))
self.assertEqual(t.setdefault("x", 23), 42)
# Test pop
t = UserDict.UserDict(x=42)
self.assertEqual(t.pop("x"), 42)
self.assertRaises(KeyError, t.pop, "x")
self.assertEqual(t.pop("x", 1), 1)
t["x"] = 42
self.assertEqual(t.pop("x", 1), 42)
# Test popitem
t = UserDict.UserDict(x=42)
self.assertEqual(t.popitem(), ("x", 42))
self.assertRaises(KeyError, t.popitem)
def test_missing(self):
# Make sure UserDict doesn't have a __missing__ method
self.assertEqual(hasattr(UserDict, "__missing__"), False)
# Test several cases:
# (D) subclass defines __missing__ method returning a value
# (E) subclass defines __missing__ method raising RuntimeError
# (F) subclass sets __missing__ instance variable (no effect)
# (G) subclass doesn't define __missing__ at a all
class D(UserDict.UserDict):
def __missing__(self, key):
return 42
d = D({1: 2, 3: 4})
self.assertEqual(d[1], 2)
self.assertEqual(d[3], 4)
self.assert_(2 not in d)
self.assert_(2 not in d.keys())
self.assertEqual(d[2], 42)
class E(UserDict.UserDict):
def __missing__(self, key):
raise RuntimeError(key)
e = E()
try:
e[42]
except RuntimeError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("e[42] didn't raise RuntimeError")
class F(UserDict.UserDict):
def __init__(self):
# An instance variable __missing__ should have no effect
self.__missing__ = lambda key: None
UserDict.UserDict.__init__(self)
f = F()
try:
f[42]
except KeyError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("f[42] didn't raise KeyError")
class G(UserDict.UserDict):
pass
g = G()
try:
g[42]
except KeyError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("g[42] didn't raise KeyError")
##########################
# Test Dict Mixin
class SeqDict(UserDict.DictMixin):
"""Dictionary lookalike implemented with lists.
Used to test and demonstrate DictMixin
"""
def __init__(self, other=None, **kwargs):
self.keylist = []
self.valuelist = []
if other is not None:
for (key, value) in other:
self[key] = value
for (key, value) in kwargs.iteritems():
self[key] = value
def __getitem__(self, key):
try:
i = self.keylist.index(key)
except ValueError:
raise KeyError
return self.valuelist[i]
def __setitem__(self, key, value):
try:
i = self.keylist.index(key)
self.valuelist[i] = value
except ValueError:
self.keylist.append(key)
self.valuelist.append(value)
def __delitem__(self, key):
try:
i = self.keylist.index(key)
except ValueError:
raise KeyError
self.keylist.pop(i)
self.valuelist.pop(i)
def keys(self):
return list(self.keylist)
def copy(self):
d = self.__class__()
for key, value in self.iteritems():
d[key] = value
return d
@classmethod
def fromkeys(cls, keys, value=None):
d = cls()
for key in keys:
d[key] = value
return d
class UserDictMixinTest(mapping_tests.TestMappingProtocol):
type2test = SeqDict
def test_all(self):
## Setup test and verify working of the test class
# check init
s = SeqDict()
# exercise setitem
s[10] = 'ten'
s[20] = 'twenty'
s[30] = 'thirty'
# exercise delitem
del s[20]
# check getitem and setitem
self.assertEqual(s[10], 'ten')
# check keys() and delitem
self.assertEqual(s.keys(), [10, 30])
## Now, test the DictMixin methods one by one
# has_key
self.assert_(s.has_key(10))
self.assert_(not s.has_key(20))
# __contains__
self.assert_(10 in s)
self.assert_(20 not in s)
# __iter__
self.assertEqual([k for k in s], [10, 30])
# __len__
self.assertEqual(len(s), 2)
# iteritems
self.assertEqual(list(s.iteritems()), [(10,'ten'), (30, 'thirty')])
# iterkeys
self.assertEqual(list(s.iterkeys()), [10, 30])
# itervalues
self.assertEqual(list(s.itervalues()), ['ten', 'thirty'])
# values
self.assertEqual(s.values(), ['ten', 'thirty'])
# items
self.assertEqual(s.items(), [(10,'ten'), (30, 'thirty')])
# get
self.assertEqual(s.get(10), 'ten')
self.assertEqual(s.get(15,'fifteen'), 'fifteen')
self.assertEqual(s.get(15), None)
# setdefault
self.assertEqual(s.setdefault(40, 'forty'), 'forty')
self.assertEqual(s.setdefault(10, 'null'), 'ten')
del s[40]
# pop
self.assertEqual(s.pop(10), 'ten')
self.assert_(10 not in s)
s[10] = 'ten'
self.assertEqual(s.pop("x", 1), 1)
s["x"] = 42
self.assertEqual(s.pop("x", 1), 42)
# popitem
k, v = s.popitem()
self.assert_(k not in s)
s[k] = v
# clear
s.clear()
self.assertEqual(len(s), 0)
# empty popitem
self.assertRaises(KeyError, s.popitem)
# update
s.update({10: 'ten', 20:'twenty'})
self.assertEqual(s[10], 'ten')
self.assertEqual(s[20], 'twenty')
# cmp
self.assertEqual(s, {10: 'ten', 20:'twenty'})
t = SeqDict()
t[20] = 'twenty'
t[10] = 'ten'
self.assertEqual(s, t)
def test_main():
test_support.run_unittest(
UserDictTest,
UserDictMixinTest
)
if __name__ == "__main__":
test_main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
jwtmiddleware/helpers_test.go
|
package jwtmiddleware
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"testing"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
gjwt "golang.org/x/oauth2/jwt"
"google.golang.org/appengine"
"google.golang.org/appengine/aetest"
gcpjwt "github.com/someone1/gcp-jwt-go/v2"
goauth2 "github.com/someone1/gcp-jwt-go/v2/oauth2"
)
var jwtConfig *gjwt.Config
func init() {
credPath := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
if credPath == "" {
panic("GOOGLE_APPLICATION_CREDENTIALS environmental variable required for this test to run!")
}
b, err := ioutil.ReadFile(credPath)
if err != nil {
panic(err)
}
jwtConfig, err = google.JWTConfigFromJSON(b)
if err != nil {
panic(err)
}
}
func newTestReq(method, urlStr string, body io.Reader) (*http.Request, error) {
return httptest.NewRequest(method, urlStr, body), nil
}
func TestHelpers(t *testing.T) {
ctx := context.Background()
newReqFunc := newTestReq
// AppEngine test setup
isAppEngine := os.Getenv("APPENGINE_TEST") == "true"
if isAppEngine {
inst, err := aetest.NewInstance(nil)
if err != nil {
t.Fatal(err)
}
defer inst.Close()
req, err := inst.NewRequest("GET", "/", nil)
if err != nil {
t.Fatal(err)
}
ctx = appengine.NewContext(req)
newReqFunc = inst.NewRequest
}
config := &gcpjwt.IAMConfig{
ServiceAccount: jwtConfig.Email,
}
config.EnableCache = true
audience := "https://test.com"
testSources := make(map[int]oauth2.TokenSource)
t.Run("TokenSource", func(t *testing.T) {
var tests = []struct {
name string
config *gcpjwt.IAMConfig
out bool
}{
{
"NoType",
&gcpjwt.IAMConfig{
ServiceAccount: jwtConfig.Email,
},
true,
},
{
"BlobType",
&gcpjwt.IAMConfig{
ServiceAccount: jwtConfig.Email,
IAMType: gcpjwt.IAMBlobType,
},
false,
},
{
"JWTType",
&gcpjwt.IAMConfig{
ServiceAccount: jwtConfig.Email,
IAMType: gcpjwt.IAMJwtType,
},
false,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
source, err := goauth2.JWTAccessTokenSource(ctx, test.config, audience)
if (err != nil) != test.out {
t.Errorf("unexpected error `%v`", err)
}
if err == nil {
testSources[int(test.config.IAMType)] = source
}
})
}
})
t.Run("JWTMiddleware", func(t *testing.T) {
invalidAudienceSource, err := goauth2.JWTAccessTokenSource(ctx, &gcpjwt.IAMConfig{
ServiceAccount: jwtConfig.Email,
IAMType: gcpjwt.IAMJwtType,
}, "https://invalid")
if err != nil {
t.Errorf("Could not make invalid audience token source: %v", err)
return
}
okHandler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
_, _ = w.Write([]byte("ok"))
})
handler := NewHandler(ctx, config, "")(okHandler)
var tests = []struct {
name string
url string
source oauth2.TokenSource
want int
}{
{
"MissingToken",
audience,
nil,
http.StatusUnauthorized,
},
{
"InvalidAudienceToken",
audience,
invalidAudienceSource,
http.StatusForbidden,
},
{
"InvalidHost",
"http://invalid.com",
testSources[int(gcpjwt.IAMBlobType)],
http.StatusForbidden,
},
{
"BlobToken",
audience,
testSources[int(gcpjwt.IAMBlobType)],
http.StatusOK,
},
{
"JwtToken",
audience,
testSources[int(gcpjwt.IAMJwtType)],
http.StatusOK,
},
}
// Required for this to work
gcpjwt.SigningMethodIAMJWT.Override()
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
w := httptest.NewRecorder()
w.Body = bytes.NewBuffer(nil)
r, err := newReqFunc(http.MethodGet, test.url, nil)
if err != nil {
t.Errorf("could not create request: %v", err)
return
}
if test.source != nil {
token, err := test.source.Token()
if err != nil {
t.Errorf("error getting token: %v", err)
}
r.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.AccessToken))
}
handler.ServeHTTP(w, r)
if got := w.Result().StatusCode; got != test.want {
t.Errorf("expected response code `%d`, got `%d`", test.want, got)
t.Errorf("Body: %s", w.Body.String())
}
})
}
})
}
|
[
"\"GOOGLE_APPLICATION_CREDENTIALS\"",
"\"APPENGINE_TEST\""
] |
[] |
[
"APPENGINE_TEST",
"GOOGLE_APPLICATION_CREDENTIALS"
] |
[]
|
["APPENGINE_TEST", "GOOGLE_APPLICATION_CREDENTIALS"]
|
go
| 2 | 0 | |
contrib/iex/iex.go
|
package main
import (
"encoding/json"
"fmt"
"os"
"runtime"
"strings"
"sync"
"time"
"github.com/alpacahq/marketstore/contrib/iex/api"
"github.com/alpacahq/marketstore/executor"
"github.com/alpacahq/marketstore/plugins/bgworker"
"github.com/alpacahq/marketstore/utils/io"
"github.com/alpacahq/marketstore/utils/log"
)
const (
minute = "1Min"
daily = "1D"
fiveYear = "5y"
oneDay = "1d"
monthly = "1m"
)
type IEXFetcher struct {
config FetcherConfig
backfillM *sync.Map
queue chan []string
lastM *sync.Map
refreshSymbols bool
lastDailyRunDate int
}
type FetcherConfig struct {
// determines whether or not daily (1D) bars are queried
Daily bool
// determines whether or not intraday (1Min) bars are queried
Intraday bool
// list of symbols to poll - queries all if empty
Symbols []string
// API Token
Token string
// True for sandbox
Sandbox bool
}
func NewBgWorker(conf map[string]interface{}) (bgworker.BgWorker, error) {
data, _ := json.Marshal(conf)
config := FetcherConfig{}
json.Unmarshal(data, &config)
if config.Token == "" {
return nil, fmt.Errorf("IEXCloud Token is not set")
}
api.SetToken(config.Token)
api.SetSandbox(config.Sandbox)
if config.Sandbox {
log.Info("starting for IEX sandbox")
} else {
log.Info("starting for IEX production")
}
return &IEXFetcher{
backfillM: &sync.Map{},
config: config,
queue: make(chan []string, int(len(config.Symbols)/api.BatchSize)+1),
lastM: &sync.Map{},
refreshSymbols: len(config.Symbols) == 0,
lastDailyRunDate: 0,
}, nil
}
func (f *IEXFetcher) UpdateSymbolList() {
// update the symbol list if there was no static list in config
if f.refreshSymbols {
log.Info("refreshing symbols list from IEX")
resp, err := api.ListSymbols()
if err != nil {
return
}
f.config.Symbols = make([]string, len(*resp))
log.Info("Loaded list of %d symbols from IEX", len(f.config.Symbols))
for i, s := range *resp {
if s.IsEnabled {
f.config.Symbols[i] = s.Symbol
}
}
}
}
func (f *IEXFetcher) Run() {
// batchify the symbols & queue the batches
f.UpdateSymbolList()
f.queue = make(chan []string, int(len(f.config.Symbols)/api.BatchSize)+1)
log.Info("Launching backfill")
go f.workBackfill()
go func() {
for { // loop forever adding batches of symbols to fetch
symbols := f.config.Symbols
for i := 0; i < len(symbols); i += api.BatchSize {
end := i + api.BatchSize
if end > len(symbols) {
end = len(symbols)
}
f.queue <- symbols[i:end]
}
// Put a marker in the queue so the loop can pause til the next minute
f.queue <- []string{"__EOL__"}
}
}()
runDaily := onceDaily(&f.lastDailyRunDate, 5, 10)
start := time.Now()
iWorkers := make(chan bool, (runtime.NumCPU()))
var iWg sync.WaitGroup
for batch := range f.queue {
if batch[0] == "__EOL__" {
log.Debug("End of Symbol list.. waiting for workers")
iWg.Wait()
end := time.Now()
log.Info("Minute bar fetch for %d symbols completed (elapsed %s)", len(f.config.Symbols), end.Sub(start).String())
runDaily = onceDaily(&f.lastDailyRunDate, 5, 10)
if runDaily {
log.Info("time for daily task(s)")
go f.UpdateSymbolList()
}
delay := time.Minute - end.Sub(start)
log.Debug("Sleep for %s", delay.String())
<-time.After(delay)
start = time.Now()
} else {
iWorkers <- true
go func(wg *sync.WaitGroup) {
wg.Add(1)
defer wg.Done()
defer func() { <-iWorkers }()
f.pollIntraday(batch)
if runDaily {
f.pollDaily(batch)
}
}(&iWg)
<-time.After(limiter())
}
}
}
func (f *IEXFetcher) pollIntraday(symbols []string) {
if !f.config.Intraday {
return
}
limit := 10
start := time.Now()
resp, err := api.GetBars(symbols, oneDay, &limit, 5)
if err != nil {
log.Error("failed to query intraday bar batch (%v)", err)
return
}
fetched := time.Now()
if err = f.writeBars(resp, true, false); err != nil {
log.Error("failed to write intraday bar batch (%v)", err)
return
}
done := time.Now()
log.Debug("Done Batch (fetched: %s, wrote: %s)", done.Sub(fetched).String(), fetched.Sub(start).String())
}
func (f *IEXFetcher) pollDaily(symbols []string) {
if !f.config.Daily {
return
}
limit := 1
log.Info("running daily bars poll from IEX")
resp, err := api.GetBars(symbols, monthly, &limit, 5)
if err != nil {
log.Error("failed to query daily bar batch (%v)", err)
}
if err = f.writeBars(resp, false, false); err != nil {
log.Error("failed to write daily bar batch (%v)", err)
}
}
func (f *IEXFetcher) writeBars(resp *api.GetBarsResponse, intraday, backfill bool) error {
if resp == nil {
return nil
}
csm := io.NewColumnSeriesMap()
for symbol, bars := range *resp {
if len(bars.Chart) == 0 {
continue
}
if backfill {
log.Info("backfill: Writing %d bars for %s", len(bars.Chart), symbol)
}
var (
tbk *io.TimeBucketKey
epoch []int64
open []float32
high []float32
low []float32
close []float32
volume []int32
)
if intraday {
tbk = io.NewTimeBucketKeyFromString(fmt.Sprintf("%s/%s/OHLCV", symbol, minute))
} else {
tbk = io.NewTimeBucketKeyFromString(fmt.Sprintf("%s/%s/OHLCV", symbol, daily))
}
var (
ts time.Time
err error
)
for _, bar := range bars.Chart {
if bar.Volume == 0 {
continue
}
ts, err = bar.GetTimestamp()
if err != nil {
return err
}
if ts.IsZero() {
continue
}
epoch = append(epoch, ts.Unix())
open = append(open, bar.Open)
high = append(high, bar.High)
low = append(low, bar.Low)
close = append(close, bar.Close)
volume = append(volume, bar.Volume)
}
if len(epoch) == 0 {
continue
}
// determine whether we skip the bar so we don't
// re-stream bars that have already been written
if !backfill {
v, ok := f.lastM.Load(*tbk)
if ok && v.(int64) >= epoch[len(epoch)-1] {
continue
}
}
f.backfillM.LoadOrStore(strings.Replace(tbk.GetItemKey(), "/OHLCV", "", 1), &ts)
cs := io.NewColumnSeries()
cs.AddColumn("Epoch", epoch)
cs.AddColumn("Open", open)
cs.AddColumn("High", high)
cs.AddColumn("Low", low)
cs.AddColumn("Close", close)
cs.AddColumn("Volume", volume)
csm.AddColumnSeries(*tbk, cs)
}
if err := executor.WriteCSM(csm, false); err != nil {
return err
}
f.updateLastWritten(&csm)
return nil
}
func (f *IEXFetcher) updateLastWritten(csm *io.ColumnSeriesMap) {
if csm == nil {
return
}
for tbk, cs := range *csm {
epoch := cs.GetEpoch()
if len(epoch) == 0 {
continue
}
f.lastM.Store(tbk, epoch[len(epoch)-1])
}
}
func (f *IEXFetcher) backfill(symbol, timeframe string, ts *time.Time) (err error) {
var (
resp *api.GetBarsResponse
intraday = strings.EqualFold(timeframe, minute)
)
if intraday {
resp, err = api.GetBars([]string{symbol}, oneDay, nil, 5)
} else {
resp, err = api.GetBars([]string{symbol}, fiveYear, nil, 5)
}
if err != nil {
log.Error("failed to backfill %v/%v (%v)", symbol, timeframe, err)
return
}
// c := (*resp)[symbol].Chart
// if len(c) > 0 {
// log.Info(
// "backfilling %v/%v (%v bars | start: %v-%v | end: %v-%v)",
// symbol, timeframe,
// len(c), c[0].Date,
// c[0].Minute, c[len(c)-1].Date,
// c[len(c)-1].Minute)
// }
if err = f.writeBars(resp, intraday, true); err != nil {
log.Error("failed to write bars from backfill for %v/%v (%v)", symbol, timeframe, err)
}
return
}
func (f *IEXFetcher) workBackfill() {
ticker := time.NewTicker(30 * time.Second)
for range ticker.C {
wg := sync.WaitGroup{}
count := 0
// range over symbols that need backfilling, and
// backfill them from the last written record
f.backfillM.Range(func(key, value interface{}) bool {
parts := strings.Split(key.(string), "/")
symbol := parts[0]
timeframe := parts[1]
// make sure epoch value isn't nil (i.e. hasn't
// been backfilled already)
if value != nil {
log.Info("backfilling [%v|%v]", symbol, timeframe)
go func() {
count++
wg.Add(1)
defer wg.Done()
// backfill the symbol/timeframe pair in parallel
if f.backfill(symbol, timeframe, value.(*time.Time)) == nil {
f.backfillM.Store(key, nil)
}
}()
} else {
log.Debug("skipping backfill [%v|%v]", symbol, timeframe)
}
// limit 10 goroutines per CPU core
if count >= runtime.NumCPU()*10 {
return false
}
return true
})
wg.Wait()
}
}
func limiter() time.Duration {
return time.Second / 50
}
func onceDaily(lastDailyRunDate *int, runHour int, runMinute int) bool {
now := time.Now()
if *lastDailyRunDate == 0 || (*lastDailyRunDate != now.Day() && runHour == now.Hour() && runMinute <= now.Minute()) {
*lastDailyRunDate = now.Day()
return true
} else {
return false
}
}
// printBars dumps every bar of every symbol in resp, one line per bar.
// Extracted to remove the duplicated nested loop in main.
func printBars(resp *api.GetBarsResponse) {
	for symbol, chart := range *resp {
		for _, bar := range chart.Chart {
			fmt.Printf("symbol: %v bar: %v\n", symbol, bar)
		}
	}
}

// main smoke-tests the IEX bars API against two symbol batches using the
// token from the IEXTOKEN environment variable.
func main() {
	api.SetToken(os.Getenv("IEXTOKEN"))
	resp, err := api.GetBars([]string{"AAPL", "AMD", "X", "NVDA", "AMPY", "IBM", "GOOG"}, oneDay, nil, 5)
	if err != nil {
		panic(err)
	}
	printBars(resp)
	fmt.Printf("-------------------\n\n")
	resp, err = api.GetBars([]string{"AMPY", "MSFT", "DVCR"}, oneDay, nil, 5)
	if err != nil {
		panic(err)
	}
	printBars(resp)
}
|
[
"\"IEXTOKEN\""
] |
[] |
[
"IEXTOKEN"
] |
[]
|
["IEXTOKEN"]
|
go
| 1 | 0 | |
superpoint/experiment.py
|
import logging
import yaml
import os
import argparse
import numpy as np
from contextlib import contextmanager
from json import dumps as pprint
from datasets import get_dataset
from models import get_model
from utils.stdout_capturing import capture_outputs
from settings import EXPER_PATH
logging.basicConfig(format='[%(asctime)s %(levelname)s] %(message)s',
datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
import tensorflow as tf # noqa: E402
def train(config, n_iter, output_dir, checkpoint_name='model.ckpt'):
    """Train the model for ``n_iter`` iterations, checkpointing into ``output_dir``.

    On Ctrl-C the current weights are saved before returning.
    """
    checkpoint_path = os.path.join(output_dir, checkpoint_name)
    with _init_graph(config) as net:
        try:
            net.train(n_iter,
                      output_dir=output_dir,
                      validation_interval=config.get('validation_interval', 100),
                      save_interval=config.get('save_interval', None),
                      checkpoint_path=checkpoint_path,
                      keep_checkpoints=config.get('keep_checkpoints', 1))
        except KeyboardInterrupt:
            logging.info('Got Keyboard Interrupt, saving model and closing.')
            # Same path as checkpoint_path above; reuse instead of re-joining.
            net.save(checkpoint_path)
def evaluate(config, output_dir, n_iter=None):
    """Load trained weights from ``output_dir`` and run evaluation.

    Evaluates on ``config['eval_set']`` (default 'test'); returns the
    metrics produced by ``net.evaluate``.
    """
    eval_set = config.get('eval_set', 'test')
    with _init_graph(config) as net:
        net.load(output_dir)
        return net.evaluate(eval_set, max_iterations=n_iter)
def predict(config, output_dir, n_iter):
    """Run inference on ``n_iter`` test samples.

    Returns ``(predictions, samples)`` — per-sample model outputs (all
    prediction keys) and the corresponding input samples.
    """
    predictions, samples = [], []
    with _init_graph(config, with_dataset=True) as (net, dataset):
        if net.trainable:
            net.load(output_dir)
        test_set = dataset.get_test_set()
        for _ in range(n_iter):
            sample = next(test_set)
            samples.append(sample)
            predictions.append(net.predict(sample, keys='*'))
    return predictions, samples
def set_seed(seed):
    """Seed both the TensorFlow and NumPy RNGs for reproducible runs."""
    tf.set_random_seed(seed)
    np.random.seed(seed)
def get_num_gpus():
    """Return the number of GPUs listed in ``CUDA_VISIBLE_DEVICES``.

    Returns 0 when the variable is unset or empty. The original raised
    ``KeyError`` when unset and reported 1 for an empty string (because
    ``''.split(',')`` is ``['']``).
    """
    devices = os.environ.get('CUDA_VISIBLE_DEVICES', '')
    return len([d for d in devices.split(',') if d.strip()])
@contextmanager
def _init_graph(config, with_dataset=False):
    """Build the dataset and model for this experiment and yield the model
    (or ``(model, dataset)`` when ``with_dataset`` is True).

    Fix over original: cleanup (``model.__exit__`` and graph reset) now runs
    in a ``finally`` block, so it also happens when the ``with`` body raises;
    previously an exception skipped both and leaked the session/graph.
    """
    set_seed(config.get('seed', int.from_bytes(os.urandom(4), byteorder='big')))
    n_gpus = get_num_gpus()
    logging.info('Number of GPUs detected: {}'.format(n_gpus))
    dataset = get_dataset(config['data']['name'])(**config['data'])
    model = get_model(config['model']['name'])(
        data={} if with_dataset else dataset.get_tf_datasets(),
        n_gpus=n_gpus, **config['model'])
    model.__enter__()
    try:
        if with_dataset:
            yield model, dataset
        else:
            yield model
    finally:
        # model.__exit__() takes no exc-info args here — custom protocol of
        # the project's model class, kept as-is.
        model.__exit__()
        tf.reset_default_graph()
def _cli_train(config, output_dir, args):
    """CLI handler for 'train': snapshot the config, train, optionally eval."""
    assert 'train_iter' in config
    config_path = os.path.join(output_dir, 'config.yml')
    # Persist the exact config used, so evaluation can reload it later.
    with open(config_path, 'w') as f:
        yaml.dump(config, f, default_flow_style=False)
    train(config, config['train_iter'], output_dir)
    if args.eval:
        _cli_eval(config, output_dir, args)
def _cli_eval(config, output_dir, args):
    """CLI handler for 'evaluate': run evaluation of a stored experiment.

    The model section of the training config saved in ``output_dir`` is
    reused, overridden by any 'model' entries in the current config; results
    are logged and appended to ``eval.txt``.
    """
    # Load model config from previous experiment
    with open(os.path.join(output_dir, 'config.yml'), 'r') as f:
        # safe_load: configs are plain data; yaml.load without an explicit
        # Loader is deprecated and can construct arbitrary Python objects.
        model_config = yaml.safe_load(f)['model']
    model_config.update(config.get('model', {}))
    config['model'] = model_config
    results = evaluate(config, output_dir, n_iter=config.get('eval_iter'))
    # Print and export results
    logging.info('Evaluation results: \n{}'.format(
        pprint(results, indent=2, default=str)))
    with open(os.path.join(output_dir, 'eval.txt'), 'a') as f:
        f.write('Evaluation for {} dataset:\n'.format(config['data']['name']))
        for r, v in results.items():
            f.write('\t{}:\n\t\t{}\n'.format(r, v))
        f.write('\n')
# TODO
def _cli_pred(config, args):
raise NotImplementedError
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='command')
    # Training command
    p_train = subparsers.add_parser('train')
    p_train.add_argument('config', type=str)
    p_train.add_argument('exper_name', type=str)
    p_train.add_argument('--eval', action='store_true')
    p_train.set_defaults(func=_cli_train)
    # Evaluation command (original reused the p_train name for every parser;
    # distinct names avoid accidental cross-wiring)
    p_eval = subparsers.add_parser('evaluate')
    p_eval.add_argument('config', type=str)
    p_eval.add_argument('exper_name', type=str)
    p_eval.set_defaults(func=_cli_eval)
    # Inference command
    p_pred = subparsers.add_parser('predict')
    p_pred.add_argument('config', type=str)
    p_pred.add_argument('exper_name', type=str)
    p_pred.set_defaults(func=_cli_pred)
    args = parser.parse_args()
    with open(args.config, 'r') as f:
        # safe_load: yaml.load without a Loader is deprecated/unsafe.
        config = yaml.safe_load(f)
    output_dir = os.path.join(EXPER_PATH, args.exper_name)
    # makedirs(exist_ok=True) creates missing parents and avoids the
    # exists()/mkdir() race of the original.
    os.makedirs(output_dir, exist_ok=True)
    with capture_outputs(os.path.join(output_dir, 'log')):
        logging.info('Running command {}'.format(args.command.upper()))
        args.func(config, output_dir, args)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
hadoop-src/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import javax.security.auth.login.AppConfigurationEntry;
import org.apache.curator.ensemble.fixed.FixedEnsembleProvider;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.CuratorFrameworkFactory.Builder;
import org.apache.curator.framework.api.ACLProvider;
import org.apache.curator.framework.imps.DefaultACLProvider;
import org.apache.curator.framework.recipes.cache.ChildData;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCache.StartMode;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;
import org.apache.curator.framework.recipes.shared.SharedCount;
import org.apache.curator.framework.recipes.shared.VersionedValue;
import org.apache.curator.retry.RetryNTimes;
import org.apache.curator.utils.EnsurePath;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenManager;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs.Perms;
import org.apache.zookeeper.client.ZooKeeperSaslClient;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* An implementation of {@link AbstractDelegationTokenSecretManager} that
* persists TokenIdentifiers and DelegationKeys in Zookeeper. This class can
* be used by HA (Highly available) services that consists of multiple nodes.
* This class ensures that Identifiers and Keys are replicated to all nodes of
* the service.
*/
@InterfaceAudience.Private
public abstract class ZKDelegationTokenSecretManager<TokenIdent extends AbstractDelegationTokenIdentifier>
extends AbstractDelegationTokenSecretManager<TokenIdent> {
private static final String ZK_CONF_PREFIX = "zk-dt-secret-manager.";
public static final String ZK_DTSM_ZK_NUM_RETRIES = ZK_CONF_PREFIX
+ "zkNumRetries";
public static final String ZK_DTSM_ZK_SESSION_TIMEOUT = ZK_CONF_PREFIX
+ "zkSessionTimeout";
public static final String ZK_DTSM_ZK_CONNECTION_TIMEOUT = ZK_CONF_PREFIX
+ "zkConnectionTimeout";
public static final String ZK_DTSM_ZK_SHUTDOWN_TIMEOUT = ZK_CONF_PREFIX
+ "zkShutdownTimeout";
public static final String ZK_DTSM_ZNODE_WORKING_PATH = ZK_CONF_PREFIX
+ "znodeWorkingPath";
public static final String ZK_DTSM_ZK_AUTH_TYPE = ZK_CONF_PREFIX
+ "zkAuthType";
public static final String ZK_DTSM_ZK_CONNECTION_STRING = ZK_CONF_PREFIX
+ "zkConnectionString";
public static final String ZK_DTSM_ZK_KERBEROS_KEYTAB = ZK_CONF_PREFIX
+ "kerberos.keytab";
public static final String ZK_DTSM_ZK_KERBEROS_PRINCIPAL = ZK_CONF_PREFIX
+ "kerberos.principal";
public static final int ZK_DTSM_ZK_NUM_RETRIES_DEFAULT = 3;
public static final int ZK_DTSM_ZK_SESSION_TIMEOUT_DEFAULT = 10000;
public static final int ZK_DTSM_ZK_CONNECTION_TIMEOUT_DEFAULT = 10000;
public static final int ZK_DTSM_ZK_SHUTDOWN_TIMEOUT_DEFAULT = 10000;
public static final String ZK_DTSM_ZNODE_WORKING_PATH_DEAFULT = "zkdtsm";
private static Logger LOG = LoggerFactory
.getLogger(ZKDelegationTokenSecretManager.class);
private static final String JAAS_LOGIN_ENTRY_NAME =
"ZKDelegationTokenSecretManagerClient";
private static final String ZK_DTSM_NAMESPACE = "ZKDTSMRoot";
private static final String ZK_DTSM_SEQNUM_ROOT = "/ZKDTSMSeqNumRoot";
private static final String ZK_DTSM_KEYID_ROOT = "/ZKDTSMKeyIdRoot";
private static final String ZK_DTSM_TOKENS_ROOT = "/ZKDTSMTokensRoot";
private static final String ZK_DTSM_MASTER_KEY_ROOT = "/ZKDTSMMasterKeyRoot";
private static final String DELEGATION_KEY_PREFIX = "DK_";
private static final String DELEGATION_TOKEN_PREFIX = "DT_";
private static final ThreadLocal<CuratorFramework> CURATOR_TL =
new ThreadLocal<CuratorFramework>();
/**
 * Injects an externally-managed Curator client for the current thread;
 * when set, the constructor namespaces and reuses it instead of building
 * (and later owning/closing) its own client.
 */
public static void setCurator(CuratorFramework curator) {
  CURATOR_TL.set(curator);
}
private final boolean isExternalClient;
private final CuratorFramework zkClient;
private SharedCount delTokSeqCounter;
private SharedCount keyIdSeqCounter;
private PathChildrenCache keyCache;
private PathChildrenCache tokenCache;
private ExecutorService listenerThreadPool;
private final long shutdownTimeout;
/**
 * Builds the secret manager from configuration. If an external Curator
 * client was injected via {@link #setCurator}, it is namespaced and reused;
 * otherwise a new client is configured (but not started until
 * {@link #startThreads()}) from the ZK_DTSM_* settings, using either
 * 'sasl' (Kerberos) or 'none' authentication.
 */
public ZKDelegationTokenSecretManager(Configuration conf) {
  // Superclass intervals are configured in seconds; convert to millis.
  // Fix over original: the RENEW_INTERVAL "* 1000" was inside getLong(),
  // scaling only the default — a configured value stayed in seconds,
  // inconsistent with the three sibling intervals.
  super(conf.getLong(DelegationTokenManager.UPDATE_INTERVAL,
      DelegationTokenManager.UPDATE_INTERVAL_DEFAULT) * 1000,
      conf.getLong(DelegationTokenManager.MAX_LIFETIME,
          DelegationTokenManager.MAX_LIFETIME_DEFAULT) * 1000,
      conf.getLong(DelegationTokenManager.RENEW_INTERVAL,
          DelegationTokenManager.RENEW_INTERVAL_DEFAULT) * 1000,
      conf.getLong(DelegationTokenManager.REMOVAL_SCAN_INTERVAL,
          DelegationTokenManager.REMOVAL_SCAN_INTERVAL_DEFAULT) * 1000);
  shutdownTimeout = conf.getLong(ZK_DTSM_ZK_SHUTDOWN_TIMEOUT,
      ZK_DTSM_ZK_SHUTDOWN_TIMEOUT_DEFAULT);
  if (CURATOR_TL.get() != null) {
    zkClient =
        CURATOR_TL.get().usingNamespace(
            conf.get(ZK_DTSM_ZNODE_WORKING_PATH,
                ZK_DTSM_ZNODE_WORKING_PATH_DEAFULT)
            + "/" + ZK_DTSM_NAMESPACE);
    isExternalClient = true;
  } else {
    String connString = conf.get(ZK_DTSM_ZK_CONNECTION_STRING);
    Preconditions.checkNotNull(connString,
        "Zookeeper connection string cannot be null");
    String authType = conf.get(ZK_DTSM_ZK_AUTH_TYPE);
    // AuthType has to be explicitly set to 'none' or 'sasl'
    Preconditions.checkNotNull(authType, "Zookeeper authType cannot be null !!");
    Preconditions.checkArgument(
        authType.equals("sasl") || authType.equals("none"),
        "Zookeeper authType must be one of [none, sasl]");
    Builder builder = null;
    try {
      ACLProvider aclProvider = null;
      if (authType.equals("sasl")) {
        LOG.info("Connecting to ZooKeeper with SASL/Kerberos"
            + "and using 'sasl' ACLs");
        String principal = setJaasConfiguration(conf);
        System.setProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY,
            JAAS_LOGIN_ENTRY_NAME);
        System.setProperty("zookeeper.authProvider.1",
            "org.apache.zookeeper.server.auth.SASLAuthenticationProvider");
        aclProvider = new SASLOwnerACLProvider(principal);
      } else { // "none"
        LOG.info("Connecting to ZooKeeper without authentication");
        aclProvider = new DefaultACLProvider(); // open to everyone
      }
      int sessionT =
          conf.getInt(ZK_DTSM_ZK_SESSION_TIMEOUT,
              ZK_DTSM_ZK_SESSION_TIMEOUT_DEFAULT);
      int numRetries =
          conf.getInt(ZK_DTSM_ZK_NUM_RETRIES, ZK_DTSM_ZK_NUM_RETRIES_DEFAULT);
      builder =
          CuratorFrameworkFactory
              .builder()
              .aclProvider(aclProvider)
              .namespace(
                  conf.get(ZK_DTSM_ZNODE_WORKING_PATH,
                      ZK_DTSM_ZNODE_WORKING_PATH_DEAFULT)
                  + "/"
                  + ZK_DTSM_NAMESPACE
              )
              .sessionTimeoutMs(sessionT)
              .connectionTimeoutMs(
                  conf.getInt(ZK_DTSM_ZK_CONNECTION_TIMEOUT,
                      ZK_DTSM_ZK_CONNECTION_TIMEOUT_DEFAULT)
              )
              // Spread the retries evenly across one session timeout.
              .retryPolicy(
                  new RetryNTimes(numRetries, sessionT / numRetries));
    } catch (Exception ex) {
      throw new RuntimeException("Could not Load ZK acls or auth");
    }
    zkClient = builder.ensembleProvider(new FixedEnsembleProvider(connString))
        .build();
    isExternalClient = false;
  }
}
/**
 * Installs a programmatic JAAS configuration for the ZooKeeper client
 * login and returns the short name (first component) of the Kerberos
 * principal.
 *
 * @throws IllegalArgumentException if keytab or principal is unset/blank
 */
private String setJaasConfiguration(Configuration config) throws Exception {
  String keytabFile =
      config.get(ZK_DTSM_ZK_KERBEROS_KEYTAB, "").trim();
  // get(key, "") never returns null, so only emptiness needs checking;
  // the original's null tests were dead code.
  if (keytabFile.isEmpty()) {
    throw new IllegalArgumentException(ZK_DTSM_ZK_KERBEROS_KEYTAB
        + " must be specified");
  }
  String principal =
      config.get(ZK_DTSM_ZK_KERBEROS_PRINCIPAL, "").trim();
  if (principal.isEmpty()) {
    throw new IllegalArgumentException(ZK_DTSM_ZK_KERBEROS_PRINCIPAL
        + " must be specified");
  }
  JaasConfiguration jConf =
      new JaasConfiguration(JAAS_LOGIN_ENTRY_NAME, principal, keytabFile);
  javax.security.auth.login.Configuration.setConfiguration(jConf);
  // "user/host@REALM" -> "user"
  return principal.split("[/@]")[0];
}
/**
 * Creates a programmatic version of a jaas.conf file. This can be used
 * instead of writing a jaas.conf file and setting the system property,
 * "java.security.auth.login.config", to point to that file. It is meant to be
 * used for connecting to ZooKeeper.
 */
@InterfaceAudience.Private
public static class JaasConfiguration extends
    javax.security.auth.login.Configuration {

  // Fix over original: 'entry' was static, so a second JaasConfiguration
  // instance clobbered the entry of the first; it is now per-instance.
  private final AppConfigurationEntry[] entry;
  private final String entryName;

  /**
   * Add an entry to the jaas configuration with the passed in name,
   * principal, and keytab. The other necessary options will be set for you.
   *
   * @param entryName
   *          The name of the entry (e.g. "Client")
   * @param principal
   *          The principal of the user
   * @param keytab
   *          The location of the keytab
   */
  public JaasConfiguration(String entryName, String principal, String keytab) {
    this.entryName = entryName;
    Map<String, String> options = new HashMap<String, String>();
    options.put("keyTab", keytab);
    options.put("principal", principal);
    options.put("useKeyTab", "true");
    options.put("storeKey", "true");
    options.put("useTicketCache", "false");
    options.put("refreshKrb5Config", "true");
    // HADOOP_JAAS_DEBUG=true turns on verbose Krb5LoginModule logging.
    String jaasEnvVar = System.getenv("HADOOP_JAAS_DEBUG");
    if (jaasEnvVar != null && "true".equalsIgnoreCase(jaasEnvVar)) {
      options.put("debug", "true");
    }
    entry = new AppConfigurationEntry[] {
        new AppConfigurationEntry(getKrb5LoginModuleName(),
            AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
            options) };
  }

  @Override
  public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
    return (entryName.equals(name)) ? entry : null;
  }

  private String getKrb5LoginModuleName() {
    // IBM JDKs ship Krb5LoginModule under a different package.
    String krb5LoginModuleName;
    if (System.getProperty("java.vendor").contains("IBM")) {
      krb5LoginModuleName = "com.ibm.security.auth.module.Krb5LoginModule";
    } else {
      krb5LoginModuleName = "com.sun.security.auth.module.Krb5LoginModule";
    }
    return krb5LoginModuleName;
  }
}
/**
 * Starts the ZK client (unless externally managed), the shared sequence
 * counters, and the key/token PathChildrenCaches that mirror remote state
 * into the local maps, then starts the superclass expiry threads.
 *
 * @throws IOException if any ZooKeeper resource fails to start
 */
@Override
public void startThreads() throws IOException {
  if (!isExternalClient) {
    try {
      zkClient.start();
    } catch (Exception e) {
      throw new IOException("Could not start Curator Framework", e);
    }
  } else {
    // If namespace parents are implicitly created, they won't have ACLs.
    // So, let's explicitly create them.
    CuratorFramework nullNsFw = zkClient.usingNamespace(null);
    EnsurePath ensureNs =
        nullNsFw.newNamespaceAwareEnsurePath("/" + zkClient.getNamespace());
    try {
      ensureNs.ensure(nullNsFw.getZookeeperClient());
    } catch (Exception e) {
      throw new IOException("Could not create namespace", e);
    }
  }
  // A single-thread pool keeps cache callbacks serialized/ordered.
  listenerThreadPool = Executors.newSingleThreadExecutor();
  try {
    delTokSeqCounter = new SharedCount(zkClient, ZK_DTSM_SEQNUM_ROOT, 0);
    if (delTokSeqCounter != null) {
      delTokSeqCounter.start();
    }
  } catch (Exception e) {
    throw new IOException("Could not start Sequence Counter", e);
  }
  try {
    keyIdSeqCounter = new SharedCount(zkClient, ZK_DTSM_KEYID_ROOT, 0);
    if (keyIdSeqCounter != null) {
      keyIdSeqCounter.start();
    }
  } catch (Exception e) {
    throw new IOException("Could not start KeyId Counter", e);
  }
  try {
    createPersistentNode(ZK_DTSM_MASTER_KEY_ROOT);
    createPersistentNode(ZK_DTSM_TOKENS_ROOT);
  } catch (Exception e) {
    // NOTE(review): drops the cause; consider new RuntimeException(msg, e).
    throw new RuntimeException("Could not create ZK paths");
  }
  try {
    keyCache = new PathChildrenCache(zkClient, ZK_DTSM_MASTER_KEY_ROOT, true);
    if (keyCache != null) {
      keyCache.start(StartMode.BUILD_INITIAL_CACHE);
      // Mirror remote master-key changes into the local allKeys map.
      keyCache.getListenable().addListener(new PathChildrenCacheListener() {
        @Override
        public void childEvent(CuratorFramework client,
            PathChildrenCacheEvent event)
            throws Exception {
          switch (event.getType()) {
          case CHILD_ADDED:
            processKeyAddOrUpdate(event.getData().getData());
            break;
          case CHILD_UPDATED:
            processKeyAddOrUpdate(event.getData().getData());
            break;
          case CHILD_REMOVED:
            processKeyRemoved(event.getData().getPath());
            break;
          default:
            break;
          }
        }
      }, listenerThreadPool);
    }
  } catch (Exception e) {
    throw new IOException("Could not start PathChildrenCache for keys", e);
  }
  try {
    tokenCache = new PathChildrenCache(zkClient, ZK_DTSM_TOKENS_ROOT, true);
    if (tokenCache != null) {
      tokenCache.start(StartMode.BUILD_INITIAL_CACHE);
      // Mirror remote token changes into the local currentTokens map.
      tokenCache.getListenable().addListener(new PathChildrenCacheListener() {
        @Override
        public void childEvent(CuratorFramework client,
            PathChildrenCacheEvent event) throws Exception {
          switch (event.getType()) {
          case CHILD_ADDED:
            processTokenAddOrUpdate(event.getData());
            break;
          case CHILD_UPDATED:
            processTokenAddOrUpdate(event.getData());
            break;
          case CHILD_REMOVED:
            processTokenRemoved(event.getData());
            break;
          default:
            break;
          }
        }
      }, listenerThreadPool);
    }
  } catch (Exception e) {
    throw new IOException("Could not start PathChildrenCache for tokens", e);
  }
  super.startThreads();
}
/**
 * Deserializes a DelegationKey from a key-cache event payload and installs
 * it in the local {@code allKeys} map.
 */
private void processKeyAddOrUpdate(byte[] data) throws IOException {
  ByteArrayInputStream bin = new ByteArrayInputStream(data);
  DataInputStream din = new DataInputStream(bin);
  DelegationKey key = new DelegationKey();
  key.readFields(din);
  synchronized (this) {
    allKeys.put(key.getKeyId(), key);
  }
}
/**
 * Removes the master key identified by a znode path of the form
 * {@code .../DK_<keyId>} from the local {@code allKeys} map. Paths that do
 * not match the expected shape are ignored.
 */
private void processKeyRemoved(String path) {
  int slash = path.lastIndexOf('/');
  if (slash <= 0) {
    return;
  }
  String nodeName = path.substring(slash + 1);
  int underscore = nodeName.indexOf('_');
  if (underscore <= 0) {
    return;
  }
  int keyId = Integer.parseInt(nodeName.substring(underscore + 1));
  synchronized (this) {
    allKeys.remove(keyId);
  }
}
/**
 * Deserializes (identifier, renewDate, password) from a token-cache event
 * payload and installs it in {@code currentTokens}, waking any waiting
 * cancel task.
 */
private void processTokenAddOrUpdate(ChildData data) throws IOException {
  ByteArrayInputStream bin = new ByteArrayInputStream(data.getData());
  DataInputStream din = new DataInputStream(bin);
  TokenIdent ident = createIdentifier();
  ident.readFields(din);
  long renewDate = din.readLong();
  int pwdLen = din.readInt();
  byte[] password = new byte[pwdLen];
  // Fix over original: read() may legally return fewer bytes than asked,
  // silently leaving the password tail zeroed; readFully either fills the
  // buffer or throws EOFException (an IOException, already declared).
  din.readFully(password, 0, pwdLen);
  DelegationTokenInformation tokenInfo =
      new DelegationTokenInformation(renewDate, password);
  synchronized (this) {
    currentTokens.put(ident, tokenInfo);
    // The cancel task might be waiting
    notifyAll();
  }
}
/**
 * Deserializes the token identifier from a removal event payload and drops
 * it from {@code currentTokens}, waking any waiting cancel task.
 */
private void processTokenRemoved(ChildData data) throws IOException {
  ByteArrayInputStream bin = new ByteArrayInputStream(data.getData());
  DataInputStream din = new DataInputStream(bin);
  TokenIdent ident = createIdentifier();
  ident.readFields(din);
  synchronized (this) {
    currentTokens.remove(ident);
    // The cancel task might be waiting
    notifyAll();
  }
}
/**
 * Stops the expiry threads, then closes the caches, counters and the
 * (internally owned) ZK client, and finally drains the listener pool,
 * forcing shutdown after {@code shutdownTimeout} ms. Each close failure is
 * only logged so the remaining resources are still released.
 */
@Override
public void stopThreads() {
  super.stopThreads();
  try {
    if (tokenCache != null) {
      tokenCache.close();
    }
  } catch (Exception e) {
    LOG.error("Could not stop Delegation Token Cache", e);
  }
  try {
    if (delTokSeqCounter != null) {
      delTokSeqCounter.close();
    }
  } catch (Exception e) {
    LOG.error("Could not stop Delegation Token Counter", e);
  }
  try {
    if (keyIdSeqCounter != null) {
      keyIdSeqCounter.close();
    }
  } catch (Exception e) {
    LOG.error("Could not stop Key Id Counter", e);
  }
  try {
    if (keyCache != null) {
      keyCache.close();
    }
  } catch (Exception e) {
    LOG.error("Could not stop KeyCache", e);
  }
  try {
    // Only close a client we created ourselves; an external client is
    // owned by whoever injected it via setCurator().
    if (!isExternalClient && (zkClient != null)) {
      zkClient.close();
    }
  } catch (Exception e) {
    LOG.error("Could not stop Curator Framework", e);
  }
  if (listenerThreadPool != null) {
    listenerThreadPool.shutdown();
    try {
      // wait for existing tasks to terminate
      if (!listenerThreadPool.awaitTermination(shutdownTimeout,
          TimeUnit.MILLISECONDS)) {
        LOG.error("Forcing Listener threadPool to shutdown !!");
        listenerThreadPool.shutdownNow();
      }
    } catch (InterruptedException ie) {
      listenerThreadPool.shutdownNow();
      Thread.currentThread().interrupt();
    }
  }
}
/**
 * Creates a persistent znode at {@code nodePath}, treating "already
 * exists" as success. Other failures are wrapped in an IOException
 * (the broad {@code throws Exception} is kept for caller compatibility).
 */
private void createPersistentNode(String nodePath) throws Exception {
  try {
    zkClient.create().withMode(CreateMode.PERSISTENT).forPath(nodePath);
  } catch (KeeperException.NodeExistsException ne) {
    LOG.debug(nodePath + " znode already exists !!");
  } catch (Exception e) {
    throw new IOException(nodePath + " znode could not be created !!", e);
  }
}
/** Current delegation-token sequence number as shared across all nodes. */
@Override
protected int getDelegationTokenSeqNum() {
  return delTokSeqCounter.getCount();
}
/**
 * Atomically increments a Curator SharedCount via optimistic versioned
 * compare-and-set, retrying until the update wins over concurrent writers.
 */
private void incrSharedCount(SharedCount sharedCount) throws Exception {
  VersionedValue<Integer> current;
  do {
    current = sharedCount.getVersionedValue();
  } while (!sharedCount.trySetCount(current, current.getValue() + 1));
}
/**
 * Atomically bumps the shared token sequence counter and returns the new
 * locally cached value. Interruption is treated as benign shutdown of the
 * expiration thread.
 */
@Override
protected int incrementDelegationTokenSeqNum() {
  try {
    incrSharedCount(delTokSeqCounter);
  } catch (InterruptedException e) {
    // The ExpirationThread is just finishing.. so dont do anything..
    LOG.debug("Thread interrupted while performing token counter increment", e);
    Thread.currentThread().interrupt();
  } catch (Exception e) {
    throw new RuntimeException("Could not increment shared counter !!", e);
  }
  return delTokSeqCounter.getCount();
}
/** Forces the shared token sequence counter to {@code seqNum}. */
@Override
protected void setDelegationTokenSeqNum(int seqNum) {
  try {
    delTokSeqCounter.setCount(seqNum);
  } catch (Exception e) {
    throw new RuntimeException("Could not set shared counter !!", e);
  }
}
/** Current master-key id as shared across all nodes. */
@Override
protected int getCurrentKeyId() {
  return keyIdSeqCounter.getCount();
}
/**
 * Atomically bumps the shared key-id counter and returns the new locally
 * cached value. Interruption is treated as benign shutdown.
 */
@Override
protected int incrementCurrentKeyId() {
  try {
    incrSharedCount(keyIdSeqCounter);
  } catch (InterruptedException e) {
    // The ExpirationThread is just finishing.. so dont do anything..
    LOG.debug("Thread interrupted while performing keyId increment", e);
    Thread.currentThread().interrupt();
  } catch (Exception e) {
    throw new RuntimeException("Could not increment shared keyId counter !!", e);
  }
  return keyIdSeqCounter.getCount();
}
/**
 * Looks up a master key, consulting the local cache first and falling back
 * to ZooKeeper (caching any hit). Returns null when the key is unknown or
 * the ZK lookup fails (the failure is logged).
 */
@Override
protected DelegationKey getDelegationKey(int keyId) {
  DelegationKey key = allKeys.get(keyId);
  if (key != null) {
    return key;
  }
  try {
    key = getKeyFromZK(keyId);
    if (key != null) {
      allKeys.put(keyId, key);
    }
  } catch (IOException e) {
    LOG.error("Error retrieving key [" + keyId + "] from ZK", e);
  }
  return key;
}
/**
 * Reads a master key directly from its znode. Returns null when the node
 * is missing or holds no data.
 */
private DelegationKey getKeyFromZK(int keyId) throws IOException {
  String nodePath =
      getNodePath(ZK_DTSM_MASTER_KEY_ROOT, DELEGATION_KEY_PREFIX + keyId);
  try {
    byte[] data = zkClient.getData().forPath(nodePath);
    if ((data == null) || (data.length == 0)) {
      return null;
    }
    ByteArrayInputStream bin = new ByteArrayInputStream(data);
    DataInputStream din = new DataInputStream(bin);
    DelegationKey key = new DelegationKey();
    key.readFields(din);
    return key;
  } catch (KeeperException.NoNodeException e) {
    LOG.error("No node in path [" + nodePath + "]");
  } catch (Exception ex) {
    throw new IOException(ex);
  }
  return null;
}
/**
 * Looks up token information, consulting the local cache first and falling
 * back to ZooKeeper (caching any hit). Returns null when unknown or when
 * the ZK lookup fails (the failure is logged).
 */
@Override
protected DelegationTokenInformation getTokenInfo(TokenIdent ident) {
  DelegationTokenInformation tokenInfo = currentTokens.get(ident);
  if (tokenInfo != null) {
    return tokenInfo;
  }
  try {
    tokenInfo = getTokenInfoFromZK(ident);
    if (tokenInfo != null) {
      currentTokens.put(ident, tokenInfo);
    }
  } catch (IOException e) {
    LOG.error("Error retrieving tokenInfo [" + ident.getSequenceNumber()
        + "] from ZK", e);
  }
  return tokenInfo;
}
/**
 * Convenience overload of
 * {@link #getTokenInfoFromZK(AbstractDelegationTokenIdentifier, boolean)}
 * that logs missing nodes as errors.
 */
private DelegationTokenInformation getTokenInfoFromZK(TokenIdent ident)
    throws IOException {
  return getTokenInfoFromZK(ident, false);
}
/**
 * Reads token information directly from the token's znode. Returns null
 * when the node is missing or empty; with {@code quiet} a missing node is
 * not logged as an error.
 */
private DelegationTokenInformation getTokenInfoFromZK(TokenIdent ident,
    boolean quiet) throws IOException {
  String nodePath =
      getNodePath(ZK_DTSM_TOKENS_ROOT,
          DELEGATION_TOKEN_PREFIX + ident.getSequenceNumber());
  try {
    byte[] data = zkClient.getData().forPath(nodePath);
    if ((data == null) || (data.length == 0)) {
      return null;
    }
    ByteArrayInputStream bin = new ByteArrayInputStream(data);
    DataInputStream din = new DataInputStream(bin);
    // Skip over the serialized identifier to reach renewDate/password.
    createIdentifier().readFields(din);
    long renewDate = din.readLong();
    int pwdLen = din.readInt();
    byte[] password = new byte[pwdLen];
    // Fix over original: read() may return short, leaving the password
    // tail zeroed; readFully fills the buffer or fails loudly.
    din.readFully(password, 0, pwdLen);
    return new DelegationTokenInformation(renewDate, password);
  } catch (KeeperException.NoNodeException e) {
    if (!quiet) {
      LOG.error("No node in path [" + nodePath + "]");
    }
  } catch (Exception ex) {
    throw new IOException(ex);
  }
  return null;
}
/** Persists a new master key to ZooKeeper. */
@Override
protected void storeDelegationKey(DelegationKey key) throws IOException {
  addOrUpdateDelegationKey(key, false);
}
/** Updates an existing master key in ZooKeeper. */
@Override
protected void updateDelegationKey(DelegationKey key) throws IOException {
  addOrUpdateDelegationKey(key, true);
}
/**
 * Serializes a master key and writes it to its znode, updating in place
 * when it already exists and creating it otherwise (regardless of the
 * {@code isUpdate} hint, which only affects debug logging).
 */
private void addOrUpdateDelegationKey(DelegationKey key, boolean isUpdate)
    throws IOException {
  String nodeCreatePath =
      getNodePath(ZK_DTSM_MASTER_KEY_ROOT,
          DELEGATION_KEY_PREFIX + key.getKeyId());
  ByteArrayOutputStream os = new ByteArrayOutputStream();
  DataOutputStream fsOut = new DataOutputStream(os);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Storing ZKDTSMDelegationKey_" + key.getKeyId());
  }
  key.write(fsOut);
  try {
    if (zkClient.checkExists().forPath(nodeCreatePath) != null) {
      // setVersion(-1) means "any version" — last writer wins.
      zkClient.setData().forPath(nodeCreatePath, os.toByteArray())
          .setVersion(-1);
      if (!isUpdate) {
        LOG.debug("Key with path [" + nodeCreatePath
            + "] already exists.. Updating !!");
      }
    } else {
      zkClient.create().withMode(CreateMode.PERSISTENT)
          .forPath(nodeCreatePath, os.toByteArray());
      if (isUpdate) {
        LOG.debug("Updating non existent Key path [" + nodeCreatePath
            + "].. Adding new !!");
      }
    }
  } catch (KeeperException.NodeExistsException ne) {
    LOG.debug(nodeCreatePath + " znode already exists !!");
  } catch (Exception ex) {
    throw new IOException(ex);
  } finally {
    os.close();
  }
}
/**
 * Deletes the znode for a master key, retrying until it is gone. Deletion
 * failures are only logged (best-effort cleanup).
 */
@Override
protected void removeStoredMasterKey(DelegationKey key) {
  String nodeRemovePath =
      getNodePath(ZK_DTSM_MASTER_KEY_ROOT,
          DELEGATION_KEY_PREFIX + key.getKeyId());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Removing ZKDTSMDelegationKey_" + key.getKeyId());
  }
  try {
    if (zkClient.checkExists().forPath(nodeRemovePath) != null) {
      // Loop until the delete is observed; guaranteed() retries in the
      // background on connection problems.
      while(zkClient.checkExists().forPath(nodeRemovePath) != null){
        zkClient.delete().guaranteed().forPath(nodeRemovePath);
      }
    } else {
      LOG.debug("Attempted to delete a non-existing znode " + nodeRemovePath);
    }
  } catch (Exception e) {
    LOG.debug(nodeRemovePath + " znode could not be removed!!");
  }
}
/**
 * Persists a new token to ZooKeeper; any failure is rethrown as an
 * unchecked exception.
 */
@Override
protected void storeToken(TokenIdent ident,
    DelegationTokenInformation tokenInfo) throws IOException {
  try {
    addOrUpdateToken(ident, tokenInfo, false);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
/**
 * Updates a token's znode in place; if the znode has disappeared, falls
 * back to creating it afresh. Failures are rethrown unchecked.
 */
@Override
protected void updateToken(TokenIdent ident,
    DelegationTokenInformation tokenInfo) throws IOException {
  String nodePath =
      getNodePath(ZK_DTSM_TOKENS_ROOT, DELEGATION_TOKEN_PREFIX
          + ident.getSequenceNumber());
  try {
    if (zkClient.checkExists().forPath(nodePath) == null) {
      addOrUpdateToken(ident, tokenInfo, false);
      LOG.debug("Attempted to update a non-existing znode " + nodePath);
    } else {
      addOrUpdateToken(ident, tokenInfo, true);
    }
  } catch (Exception e) {
    throw new RuntimeException("Could not update Stored Token ZKDTSMDelegationToken_"
        + ident.getSequenceNumber(), e);
  }
}
/**
 * Deletes the znode for a token, retrying until it is gone. Unlike master
 * key removal, a failure here is rethrown as an unchecked exception.
 */
@Override
protected void removeStoredToken(TokenIdent ident)
    throws IOException {
  String nodeRemovePath =
      getNodePath(ZK_DTSM_TOKENS_ROOT, DELEGATION_TOKEN_PREFIX
          + ident.getSequenceNumber());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Removing ZKDTSMDelegationToken_"
        + ident.getSequenceNumber());
  }
  try {
    if (zkClient.checkExists().forPath(nodeRemovePath) != null) {
      // Loop until the delete is observed; guaranteed() retries in the
      // background on connection problems.
      while(zkClient.checkExists().forPath(nodeRemovePath) != null){
        zkClient.delete().guaranteed().forPath(nodeRemovePath);
      }
    } else {
      LOG.debug("Attempted to remove a non-existing znode " + nodeRemovePath);
    }
  } catch (Exception e) {
    throw new RuntimeException(
        "Could not remove Stored Token ZKDTSMDelegationToken_"
        + ident.getSequenceNumber(), e);
  }
}
/**
 * Cancels a token. If the token is not in the local cache it is first
 * fetched from ZooKeeper (via getTokenInfo) so the superclass can see it.
 * NOTE(review): on any lookup/cancel failure the identifier is returned
 * without cancelling — the error is only logged.
 */
@Override
public synchronized TokenIdent cancelToken(Token<TokenIdent> token,
    String canceller) throws IOException {
  ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
  DataInputStream in = new DataInputStream(buf);
  TokenIdent id = createIdentifier();
  id.readFields(in);
  try {
    if (!currentTokens.containsKey(id)) {
      // See if token can be retrieved and placed in currentTokens
      getTokenInfo(id);
    }
    return super.cancelToken(token, canceller);
  } catch (Exception e) {
    LOG.error("Exception while checking if token exist !!", e);
    return id;
  }
}
/**
 * Serializes (identifier, renewDate, password) and writes it to the token
 * znode, creating it or overwriting existing data per {@code isUpdate}.
 *
 * Fix over original: the unused {@code seqOs} ByteArrayOutputStream (only
 * ever created and closed, never written) has been removed.
 */
private void addOrUpdateToken(TokenIdent ident,
    DelegationTokenInformation info, boolean isUpdate) throws Exception {
  String nodeCreatePath =
      getNodePath(ZK_DTSM_TOKENS_ROOT, DELEGATION_TOKEN_PREFIX
          + ident.getSequenceNumber());
  ByteArrayOutputStream tokenOs = new ByteArrayOutputStream();
  DataOutputStream tokenOut = new DataOutputStream(tokenOs);
  ident.write(tokenOut);
  tokenOut.writeLong(info.getRenewDate());
  tokenOut.writeInt(info.getPassword().length);
  tokenOut.write(info.getPassword());
  if (LOG.isDebugEnabled()) {
    LOG.debug((isUpdate ? "Updating " : "Storing ")
        + "ZKDTSMDelegationToken_" +
        ident.getSequenceNumber());
  }
  if (isUpdate) {
    // setVersion(-1) means "any version" — last writer wins.
    zkClient.setData().forPath(nodeCreatePath, tokenOs.toByteArray())
        .setVersion(-1);
  } else {
    zkClient.create().withMode(CreateMode.PERSISTENT)
        .forPath(nodeCreatePath, tokenOs.toByteArray());
  }
}
/**
 * Simple implementation of an {@link ACLProvider} that simply returns an ACL
 * that gives all permissions only to a single principal.
 */
private static class SASLOwnerACLProvider implements ACLProvider {
  // Immutable single-entry ACL: ALL perms for the sasl-authenticated owner.
  private final List<ACL> saslACL;

  private SASLOwnerACLProvider(String principal) {
    this.saslACL = Collections.singletonList(
        new ACL(Perms.ALL, new Id("sasl", principal)));
  }

  @Override
  public List<ACL> getDefaultAcl() {
    return saslACL;
  }

  @Override
  public List<ACL> getAclForPath(String path) {
    // Same owner-only ACL regardless of path.
    return saslACL;
  }
}
/** Joins a root path and a node name with a single '/'. */
@VisibleForTesting
@Private
@Unstable
static String getNodePath(String root, String nodeName) {
  return new StringBuilder(root).append('/').append(nodeName).toString();
}
/** Exposes the cache-listener executor for tests. */
@VisibleForTesting
public ExecutorService getListenerThreadPool() {
  return listenerThreadPool;
}
}
|
[
"\"HADOOP_JAAS_DEBUG\""
] |
[] |
[
"HADOOP_JAAS_DEBUG"
] |
[]
|
["HADOOP_JAAS_DEBUG"]
|
java
| 1 | 0 | |
netbox/models/tag.go
|
// Code generated by go-swagger; DO NOT EDIT.
// Copyright 2020 The go-netbox Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Tag tag
//
// swagger:model Tag
type Tag struct {
// Color
// Max Length: 6
// Min Length: 1
// Pattern: ^[0-9a-f]{6}$
Color string `json:"color,omitempty"`
// Description
// Max Length: 200
Description string `json:"description"`
// Display
// Read Only: true
Display string `json:"display,omitempty"`
// Id
// Read Only: true
ID int64 `json:"id,omitempty"`
// Name
// Required: true
// Max Length: 100
// Min Length: 1
Name *string `json:"name"`
// Slug
// Required: true
// Max Length: 100
// Min Length: 1
// Pattern: ^[-a-zA-Z0-9_]+$
Slug *string `json:"slug"`
// Tagged items
// Read Only: true
TaggedItems int64 `json:"tagged_items,omitempty"`
// Url
// Read Only: true
// Format: uri
URL strfmt.URI `json:"url,omitempty"`
}
// Validate validates this tag
func (m *Tag) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateColor(formats); err != nil {
res = append(res, err)
}
if err := m.validateDescription(formats); err != nil {
res = append(res, err)
}
if err := m.validateName(formats); err != nil {
res = append(res, err)
}
if err := m.validateSlug(formats); err != nil {
res = append(res, err)
}
if err := m.validateURL(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Tag) validateColor(formats strfmt.Registry) error {
if swag.IsZero(m.Color) { // not required
return nil
}
if err := validate.MinLength("color", "body", m.Color, 1); err != nil {
return err
}
if err := validate.MaxLength("color", "body", m.Color, 6); err != nil {
return err
}
if err := validate.Pattern("color", "body", m.Color, `^[0-9a-f]{6}$`); err != nil {
return err
}
return nil
}
func (m *Tag) validateDescription(formats strfmt.Registry) error {
if swag.IsZero(m.Description) { // not required
return nil
}
if err := validate.MaxLength("description", "body", m.Description, 200); err != nil {
return err
}
return nil
}
func (m *Tag) validateName(formats strfmt.Registry) error {
if err := validate.Required("name", "body", m.Name); err != nil {
return err
}
if err := validate.MinLength("name", "body", *m.Name, 1); err != nil {
return err
}
if err := validate.MaxLength("name", "body", *m.Name, 100); err != nil {
return err
}
return nil
}
func (m *Tag) validateSlug(formats strfmt.Registry) error {
if err := validate.Required("slug", "body", m.Slug); err != nil {
return err
}
if err := validate.MinLength("slug", "body", *m.Slug, 1); err != nil {
return err
}
if err := validate.MaxLength("slug", "body", *m.Slug, 100); err != nil {
return err
}
if err := validate.Pattern("slug", "body", *m.Slug, `^[-a-zA-Z0-9_]+$`); err != nil {
return err
}
return nil
}
func (m *Tag) validateURL(formats strfmt.Registry) error {
if swag.IsZero(m.URL) { // not required
return nil
}
if err := validate.FormatOf("url", "body", "uri", m.URL.String(), formats); err != nil {
return err
}
return nil
}
// ContextValidate validate this tag based on the context it is used
func (m *Tag) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateDisplay(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateID(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateTaggedItems(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateURL(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Tag) contextValidateDisplay(ctx context.Context, formats strfmt.Registry) error {
if err := validate.ReadOnly(ctx, "display", "body", string(m.Display)); err != nil {
return err
}
return nil
}
func (m *Tag) contextValidateID(ctx context.Context, formats strfmt.Registry) error {
if err := validate.ReadOnly(ctx, "id", "body", int64(m.ID)); err != nil {
return err
}
return nil
}
func (m *Tag) contextValidateTaggedItems(ctx context.Context, formats strfmt.Registry) error {
if err := validate.ReadOnly(ctx, "tagged_items", "body", int64(m.TaggedItems)); err != nil {
return err
}
return nil
}
func (m *Tag) contextValidateURL(ctx context.Context, formats strfmt.Registry) error {
if err := validate.ReadOnly(ctx, "url", "body", strfmt.URI(m.URL)); err != nil {
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *Tag) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Tag) UnmarshalBinary(b []byte) error {
var res Tag
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
lang/go/gbe/env.go
|
package main
import (
"fmt"
"os"
"strings"
)
func main(){
os.Setenv("FOO", "1")
fmt.Println("FOO:", os.Getenv("FOO"))
fmt.Println("BAR:", os.Getenv("BAR"))
for _, e := range os.Environ() {
pair := strings.Split(e, "=")
fmt.Println(pair[0])
}
}
|
[
"\"FOO\"",
"\"BAR\""
] |
[] |
[
"BAR",
"FOO"
] |
[]
|
["BAR", "FOO"]
|
go
| 2 | 0 | |
log/log.go
|
// Package log.
// There are 5 different loggers.
// 4 loggers are disabled by default - Debug, Info, Warning and Error.
// When enabled - they are writing to a file, otherwise they are writing to the void.
// Regular log mode enables Info, Warning and Error, while debug mode enables all 4 of them.
// Separately there is a logger log.Normal which is for main communication with the user.
// The tool never prints to the StdOut reserving that channel exclusively
// to the container in case it's being pipe'd for output processing.
// Thus - all log.Normal messages being print to StdErr.
// Quiet mode will redirect log.Normal into the log.Info logger,
// which is discarded by default.
// All log.Normal messages can be filtered out with regexp `^runtainer\:\s`.
package log
import (
"io"
"io/ioutil"
"log"
"os"
"strings"
"sync"
"github.com/spf13/viper"
)
var (
logFileOnce sync.Once
logFile *os.File
logWriter io.Writer
Debug *log.Logger
Info *log.Logger
Warning *log.Logger
Error *log.Logger
Normal *log.Logger
)
// SetupLog initializes the loggers that are exported by this module.
// Returns a callback function that when called will close any open resources by the loggers, such as files.
// It can be called multiple times, when the logging level settings changes.
// Every instance of a callback function returned can be used and they are equivalent.
func SetupLog() func() {
// initially, before we read cobra and viper, all logs will remain disabled with no possibility to enable it
// if we need to debug anything related to cobra/viper routines, at least we can use these env variables to configure loggers from the get go
var quiet, debug, info bool
if viper.IsSet("debug") {
debug = viper.GetBool("debug")
} else {
debug = strings.ToLower(os.Getenv("RT_DEBUG")) == "true"
}
if viper.IsSet("quiet") {
quiet = viper.GetBool("quiet")
} else {
quiet = strings.ToLower(os.Getenv("RT_QUIET")) == "true"
}
if viper.IsSet("log") {
info = viper.GetBool("log")
} else {
info = strings.ToLower(os.Getenv("RT_LOG")) == "true"
}
if debug || info {
logFileOnce.Do(func() {
var err error
logFile, err = os.OpenFile("runtainer.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
log.Panic(err)
}
})
logWriter = logFile
} else {
logWriter = ioutil.Discard
if logFile != nil {
logFile.Close()
}
}
var debugWriter io.Writer
if debug {
debugWriter = logWriter
} else {
debugWriter = ioutil.Discard
}
logFlags := log.Ldate | log.Ltime | log.Lshortfile
Debug = log.New(debugWriter, "[DEBUG] ", logFlags)
Info = log.New(logWriter, "[INFO] ", logFlags)
Warning = log.New(logWriter, "[WARNING] ", logFlags)
Error = log.New(logWriter, "[ERROR] ", logFlags)
stderrWriter := io.MultiWriter(os.Stderr, Error.Writer())
if quiet {
Normal = Info
} else {
Normal = log.New(stderrWriter, "runtainer: ", 0)
}
Debug.Print("Logger initialized")
return func() {
if logFile != nil {
Debug.Print("Closing log file")
logFile.Close()
}
}
}
|
[
"\"RT_DEBUG\"",
"\"RT_QUIET\"",
"\"RT_LOG\""
] |
[] |
[
"RT_DEBUG",
"RT_QUIET",
"RT_LOG"
] |
[]
|
["RT_DEBUG", "RT_QUIET", "RT_LOG"]
|
go
| 3 | 0 | |
app/celerydemo/asgi.py
|
"""
ASGI config for celerydemo project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'celerydemo.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/test/java/es/linkeddata/librairy/loader/tasks/ReTrainAndExportModel.java
|
package es.linkeddata.librairy.loader.tasks;
import com.google.common.base.Strings;
import es.linkeddata.librairy.loader.PropertiesConfig;
import es.linkeddata.librairy.loader.service.ModelService;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @author Badenes Olmedo, Carlos <[email protected]>
*/
public class ReTrainAndExportModel {
private static final Logger LOG = LoggerFactory.getLogger(ReTrainAndExportModel.class);
@Test
public void execute(){
String propertiesPath = Strings.isNullOrEmpty(System.getenv("LOADER_PROPERTIES"))? "application.properties" : System.getenv("LOADER_PROPERTIES");
ModelService modelService = new ModelService();
try {
PropertiesConfig config = new PropertiesConfig(propertiesPath);
modelService.retrain(config, true);
modelService.export(config);
} catch (Exception e) {
LOG.error("Unexpected error", e);
}
}
}
|
[
"\"LOADER_PROPERTIES\"",
"\"LOADER_PROPERTIES\""
] |
[] |
[
"LOADER_PROPERTIES"
] |
[]
|
["LOADER_PROPERTIES"]
|
java
| 1 | 0 | |
geotrek/celery.py
|
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'geotrek.settings.default')
app = Celery('geotrek')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/test/java/com/google/devtools/build/lib/sandbox/RealSandboxfsProcessTest.java
|
// Copyright 2018 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.sandbox;
import static junit.framework.TestCase.fail;
import com.google.devtools.build.lib.vfs.FileSystem;
import com.google.devtools.build.lib.vfs.JavaIoFileSystem;
import com.google.devtools.build.lib.vfs.Path;
import java.io.IOException;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/** Tests for {@link RealSandboxfsProcess}. */
@RunWith(JUnit4.class)
public class RealSandboxfsProcessTest extends BaseSandboxfsProcessTest {
@Override
Path newTmpDir() {
String rawTmpDir = System.getenv("TEST_TMPDIR");
if (rawTmpDir == null) {
fail("Test requires TEST_TMPDIR to be defined in the environment");
}
FileSystem fileSystem = new JavaIoFileSystem();
Path tmpDir = fileSystem.getPath(rawTmpDir);
if (!tmpDir.isDirectory()) {
fail("TEST_TMPDIR must point to a directory");
}
return tmpDir;
}
@Override
SandboxfsProcess mount(Path mountPoint) throws IOException {
String rawSandboxfs = System.getenv("SANDBOXFS");
if (rawSandboxfs == null) {
fail("Test requires SANDBOXFS to be defined in the environment");
}
FileSystem fileSystem = new JavaIoFileSystem();
Path sandboxfs = fileSystem.getPath(rawSandboxfs);
if (!sandboxfs.isExecutable()) {
fail("SANDBOXFS must point to an executable binary");
}
return RealSandboxfsProcess.mount(sandboxfs, mountPoint, fileSystem.getPath("/dev/stderr"));
}
}
|
[
"\"TEST_TMPDIR\"",
"\"SANDBOXFS\""
] |
[] |
[
"SANDBOXFS",
"TEST_TMPDIR"
] |
[]
|
["SANDBOXFS", "TEST_TMPDIR"]
|
java
| 2 | 0 | |
task4/generate/generagefeature/tools/test_sg_net.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
# Set up custom environment before nearly anything else is imported
# NOTE: this should be the first import (no not reorder)
import sys
import os
sys.path.append(os.getcwd())
from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip
import argparse
import os
import json
import torch
from maskrcnn_benchmark.config import cfg
from scene_graph_benchmark.config import sg_cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.data.datasets.utils.load_files import config_dataset_file
from maskrcnn_benchmark.engine.inference import inference
from scene_graph_benchmark.scene_parser import SceneParser
from scene_graph_benchmark.AttrRCNN import AttrRCNN
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
def run_test(cfg, model, distributed, model_name):
#print("model ===================",model)
if distributed and hasattr(model, 'module'):
model = model.module
torch.cuda.empty_cache() # TODO check if it helps
iou_types = ("bbox",)
if cfg.MODEL.MASK_ON:
iou_types = iou_types + ("segm",)
if cfg.MODEL.KEYPOINT_ON:
iou_types = iou_types + ("keypoints",)
output_folders = [None] * len(cfg.DATASETS.TEST)
dataset_names = cfg.DATASETS.TEST
if cfg.OUTPUT_DIR:
if len(dataset_names) == 1:
output_folder = os.path.join(
cfg.OUTPUT_DIR, "inference",
os.path.splitext(model_name)[0]
)
mkdir(output_folder)
output_folders = [output_folder]
else:
for idx, dataset_name in enumerate(dataset_names):
dataset_name1 = dataset_name.replace('/', '_')
output_folder = os.path.join(
cfg.OUTPUT_DIR, "inference",
dataset_name1,
os.path.splitext(model_name)[0]
)
mkdir(output_folder)
output_folders[idx] = output_folder
data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
labelmap_file = config_dataset_file(cfg.DATA_DIR, cfg.DATASETS.LABELMAP_FILE)
for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):
results = inference(
model,
cfg,
data_loader_val,
dataset_name=dataset_name,
iou_types=iou_types,
box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,
bbox_aug=cfg.TEST.BBOX_AUG.ENABLED,
device=cfg.MODEL.DEVICE,
expected_results=cfg.TEST.EXPECTED_RESULTS,
expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
output_folder=output_folder,
skip_performance_eval=cfg.TEST.SKIP_PERFORMANCE_EVAL,
labelmap_file=labelmap_file,
save_predictions=cfg.TEST.SAVE_PREDICTIONS,
)
print("++++++++++++++++++++++++++++++++")
print("result",results)
print("++++++++++++++++++++++++++++++++")
# renaming box_proposals metric to rpn_proposals if RPN_ONLY is True
if results and 'box_proposal' in results and cfg.MODEL.RPN_ONLY:
results['rpn_proposal'] = results.pop('box_proposal')
if results and output_folder:
results_path = os.path.join(output_folder, "results.json")
# checking if this file already exists and only updating tasks
# that are already present. This is useful for including
# e.g. RPN_ONLY metrics
if os.path.isfile(results_path):
with open(results_path, 'rt') as fin:
old_results = json.load(fin)
old_results.update(results)
results = old_results
with open(results_path, 'wt') as fout:
json.dump(results, fout)
synchronize()
# evaluate attribute detection
if not cfg.MODEL.RPN_ONLY and cfg.MODEL.ATTRIBUTE_ON and (not cfg.TEST.SKIP_PERFORMANCE_EVAL):
data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
for output_folder, dataset_name, data_loader_val in zip(
output_folders, dataset_names, data_loaders_val
):
results_attr = inference(
model,
cfg,
data_loader_val,
dataset_name=dataset_name,
iou_types=iou_types,
box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,
device=cfg.MODEL.DEVICE,
expected_results=cfg.TEST.EXPECTED_RESULTS,
expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
output_folder=output_folder,
skip_performance_eval=cfg.TEST.SKIP_PERFORMANCE_EVAL,
labelmap_file=labelmap_file,
save_predictions=cfg.TEST.SAVE_PREDICTIONS,
eval_attributes=True,
)
print("++++++++++++++++++++++++++++++++")
print("result_attr",results_attr)
print("++++++++++++++++++++++++++++++++")
if results_attr and output_folder:
results_path = os.path.join(output_folder, "results.json")
# checking if this file already exists and only updating tasks
# that are already present. This is useful for including
# e.g. RPN_ONLY metrics
if os.path.isfile(results_path):
with open(results_path, 'rt') as fin:
old_results = json.load(fin)
old_results.update(results_attr)
results_attr = old_results
with open(results_path, 'wt') as fout:
json.dump(results_attr, fout)
synchronize()
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Inference")
parser.add_argument(
"--config-file",
default="/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument(
"--ckpt",
help="The path to the checkpoint for test, default is the latest checkpoint.",
default=None,
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
args.distributed = num_gpus > 1
cfg.set_new_allowed(True)
cfg.merge_from_other_cfg(sg_cfg)
cfg.set_new_allowed(False)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(
backend=cfg.DISTRIBUTED_BACKEND, init_method="env://"
)
synchronize()
save_dir = ""
logger = setup_logger("maskrcnn_benchmark", save_dir, get_rank())
logger.info("Using {} GPUs".format(num_gpus))
logger.info(cfg)
logger.info("Collecting env info (might take some time)")
logger.info("\n" + collect_env_info())
if cfg.MODEL.META_ARCHITECTURE == "SceneParser":
model = SceneParser(cfg)
elif cfg.MODEL.META_ARCHITECTURE == "AttrRCNN":
model = AttrRCNN(cfg)
model.to(cfg.MODEL.DEVICE)
output_dir = cfg.OUTPUT_DIR
checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)
ckpt = cfg.MODEL.WEIGHT if args.ckpt is None else args.ckpt
_ = checkpointer.load(ckpt, use_latest=args.ckpt is None)
model_name = os.path.basename(ckpt)
print("==================================")
# print("cfg",cfg)
# print("model_name",model_name)
# print("args.distributed",args.distributed)
print("==================================")
run_test(cfg, model, args.distributed, model_name)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"WORLD_SIZE"
] |
[]
|
["WORLD_SIZE"]
|
python
| 1 | 0 | |
molecule/ec2/dynamic_inventory.py
|
"""
Generate inventory from Molecule scenario
"""
import os
import yaml
with open(os.environ['MOLECULE_EPHEMERAL_DIRECTORY'] + "/instance_config.yml", encoding='utf-8') as yamlfile:
parsed_yaml_file = yaml.load(yamlfile, Loader=yaml.FullLoader)
with open(os.path.dirname(os.path.abspath(__file__)) + "/../../inventory_ssh.yml", "r", encoding='utf-8') as old_inventory:
parsed_old_inventory = yaml.load(old_inventory, Loader=yaml.FullLoader)
with open(os.path.dirname(os.path.abspath(__file__)) + "/../../inventory_ssh.yml", "w", encoding='utf-8') as new_inventory:
inventoryfile = {'all': {'children':{}}}
def add_if_key_not_exist(dict_obj, key, value):
""" Add new key-value pair to dictionary only if
key does not exist in dictionary. """
if key not in dict_obj:
dict_obj.update({key: value})
address = parsed_yaml_file[0]['address']
identity_file = parsed_yaml_file[0]['identity_file']
for item in parsed_old_inventory['all']['children']:
print(item)
groupname = item + "_1"
add_if_key_not_exist(inventoryfile['all']['children'], item, {})
add_if_key_not_exist(inventoryfile['all']['children'][item], 'hosts', {})
add_if_key_not_exist(inventoryfile['all']['children'][item]['hosts'], groupname, {})
add_if_key_not_exist(inventoryfile['all']['children'][item]['hosts'][groupname], 'ansible_host', address)
add_if_key_not_exist(inventoryfile['all']['children'][item]['hosts'][groupname], 'ansible_private_key_file', identity_file)
add_if_key_not_exist(inventoryfile['all']['children'][item]['hosts'][groupname], 'ansible_ssh_common_args', "-o UserKnownHostsFile=/dev/null -o ControlMaster=auto -o ControlPersist=60s -o ForwardX11=no -o LogLevel=ERROR -o IdentitiesOnly=yes -o StrictHostKeyChecking=no")
add_if_key_not_exist(inventoryfile['all']['children'][item]['hosts'][groupname], 'ansible_user', 'centos')
add_if_key_not_exist(inventoryfile['all']['children'][item]['hosts'][groupname], 'connection', 'ssh')
yaml.dump(inventoryfile, new_inventory)
|
[] |
[] |
[
"MOLECULE_EPHEMERAL_DIRECTORY"
] |
[]
|
["MOLECULE_EPHEMERAL_DIRECTORY"]
|
python
| 1 | 0 | |
model.py
|
# -*- coding: utf-8 -*-
import os
os.environ['TF_KERAS'] = "1"
import tensorflow as tf
import config
from bert4keras.models import build_transformer_model
from module import label_smoothing, noam_scheme
import logging
import constant
import numpy as np
import law_accu_term_constraint
logging.basicConfig(level=logging.INFO)
class Transformer:
def __init__(self, hp):
self.hp = hp
self.transformer = build_transformer_model(
config_path='tf_xsbert/config.json',
model='bert',
)
with tf.variable_scope('crf_layer', reuse=tf.AUTO_REUSE):
self.trans = tf.get_variable(
"transitions",
shape=[constant.len_sub, constant.len_sub],
# shape=[2, 2],
initializer=tf.contrib.layers.xavier_initializer())
# ------
# matching
with tf.variable_scope('law_contents', reuse=tf.AUTO_REUSE):
self.law_contents_embeddings = tf.get_variable(name='law_content_embedding', initializer=np.load('data/law_contents.npy'), trainable=True)
# law_accu, law_term constraint
self.law_accu = tf.constant(law_accu_term_constraint.law_accu, dtype=tf.float32)
self.law_term = tf.constant(law_accu_term_constraint.law_term, dtype=tf.float32)
self.trigger_role = tf.constant(constant.TRIGGER_ROLE_MATRIX, dtype=tf.float32)
with tf.variable_scope('sub_embeddings', reuse=tf.AUTO_REUSE):
self.sub_embeddings = tf.get_variable(
"sub_embeddings",
shape=[constant.len_sub, 768],
initializer=tf.contrib.layers.xavier_initializer())
with tf.variable_scope('law_content_attention', reuse=tf.AUTO_REUSE):
self.law_content_attention = tf.get_variable(
"law_content_attention_matrix",
shape=[768, 768],
initializer=tf.contrib.layers.xavier_initializer())
# self.SUP_SUB_MATRIX = tf.constant(constant.SUP_SUB_MATRIX, dtype=tf.float32)
def encoder(self, token_ids, segment_ids, token_len):
memory = self.transformer([token_ids, segment_ids])
return memory
def role_module(self, memory):
with tf.variable_scope('role', reuse=tf.AUTO_REUSE):
role_sup_weights = tf.layers.dense(memory, constant.len_sup) # [N, T, len_sup]
role_sup_weights = tf.nn.softmax(role_sup_weights, dim=-2)
contexts = tf.matmul(tf.transpose(memory, [0, 2, 1]), role_sup_weights) # [N, dim, len_sup]
orientended = tf.reduce_mean(contexts, axis=-1) # [N, dim]
orientended_expand = tf.tile(tf.expand_dims(orientended, axis=1), [1, tf.shape(memory)[1], 1]) # [N, T, dim]
concat = (memory + orientended_expand) / 2
logits = tf.nn.softmax(tf.matmul(concat, tf.transpose(self.sub_embeddings)), axis=-1)
return logits
def legal_predict(self, memory, logits_role, flag, token_len):
# -----------------------
if self.hp.train_event != 'None':
# role_indices = tf.argmax(logits_role, axis=-1) # [N, T]
role_indices, _ = tf.contrib.crf.crf_decode(logits_role, self.trans, token_len)
target_indices = role_indices
target_indices_mask = tf.cast(tf.not_equal(target_indices, 0), dtype=tf.float32)
target_words_embeddings = tf.multiply(tf.expand_dims(target_indices_mask, axis=-1), memory)
target_words_sub_embeddings = tf.nn.embedding_lookup(self.sub_embeddings, target_indices)
# target_words_embeddings = tf.concat([target_words_embeddings, target_words_sub_embeddings], axis=-1)
target_words_embeddings = (target_words_embeddings + target_words_sub_embeddings) / 2
target_words_embeddings = tf.reduce_max(target_words_embeddings, axis=1)
else:
target_words_embeddings = memory
target_words_embeddings = tf.reduce_max(target_words_embeddings, axis=1)
x_weight = tf.nn.softmax(tf.matmul(tf.matmul(target_words_embeddings, self.law_content_attention), tf.transpose(self.law_contents_embeddings, [1, 0])),
axis=-1) # N, 101
law_context = tf.matmul(x_weight, self.law_contents_embeddings)
total_context = tf.concat([law_context, target_words_embeddings], axis=-1)
with tf.variable_scope('classification_law', reuse=tf.AUTO_REUSE):
logtis_law = tf.layers.dense(total_context, constant.len_law)
with tf.variable_scope('classification_accu', reuse=tf.AUTO_REUSE):
logtis_accu = tf.layers.dense(total_context, constant.len_accu)
with tf.variable_scope('classification_term', reuse=tf.AUTO_REUSE):
logtis_term = tf.layers.dense(total_context, constant.len_term)
return logtis_law, logtis_accu, logtis_term
def train(self, token_ids, segment_ids, role_labels, law, accu, term, flag, token_len):
memory = self.encoder(token_ids, segment_ids, token_len)
logits_role = self.role_module(memory)
logits_law, logits_accu, logits_term = self.legal_predict(memory, logits_role, flag, token_len)
# true_law = label_smoothing(tf.one_hot(law, depth=constant.len_law))
# true_accu = label_smoothing(tf.one_hot(accu, depth=constant.len_accu))
# true_term = label_smoothing(tf.one_hot(term, depth=constant.len_term))
true_law = tf.one_hot(law, depth=constant.len_law)
true_accu = tf.one_hot(accu, depth=constant.len_accu)
true_term = tf.one_hot(term, depth=constant.len_term)
#-----------
law_indexs = tf.one_hot(tf.argmax(logits_law, axis=-1), depth=101, axis=-1)
accu_dis = tf.matmul(law_indexs, self.law_accu) # N, 117
term_dis = tf.matmul(law_indexs, self.law_term) # N, 11
logits_accu_argmax = tf.argmax(logits_accu, axis=-1)
logits_term_argmax = tf.argmax(logits_term, axis=-1)
accu_softmax = tf.nn.softmax(logits_accu, axis=-1)
accu_softmax_mask = tf.where(tf.cast(accu_dis, dtype=tf.bool), accu_softmax, tf.ones_like(accu_softmax))
loss_accu_mask = tf.reduce_sum(true_accu * tf.log(accu_softmax_mask), axis=-1)
loss_accu_original = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits_accu, labels=true_accu)
term_softmax = tf.nn.softmax(logits_term, axis=-1)
term_softmax_mask = tf.where(tf.cast(term_dis, dtype=tf.bool), term_softmax, tf.ones_like(term_softmax))
loss_term_mask = tf.reduce_sum(true_term * tf.log(term_softmax_mask), axis=-1)
loss_term_original = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits_term, labels=true_term)
loss_accu = tf.reduce_mean(
tf.where(tf.equal(tf.cast(logits_accu_argmax, tf.int32), accu), loss_accu_mask, loss_accu_original))
loss_term = tf.reduce_mean(
tf.where(tf.equal(tf.cast(logits_term_argmax, tf.int32), term), loss_term_mask, loss_term_original))
loss_law = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits_law, labels=true_law))
loss_legal = loss_law * self.hp.law_weight \
+ loss_accu * self.hp.accu_weight \
+ loss_term * self.hp.term_weight
if self.hp.train_event != 'None':
log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
inputs=logits_role,
tag_indices=role_labels,
transition_params=self.trans,
sequence_lengths=token_len)
loss_role = tf.multiply(-log_likelihood, tf.cast(flag, tf.float32))
loss_role = tf.reduce_sum(loss_role) / (tf.cast(tf.reduce_sum(flag), dtype=tf.float32) + constant.INF)
# ------------
criminal_prob = tf.squeeze(logits_role[:, :, 14:15], axis=-1) # [N, T]
criminal_prob = tf.reduce_max(criminal_prob, axis=-1) # N
absolute_loss_1 = tf.multiply(1 - criminal_prob, tf.cast(flag, tf.float32))
absolute_loss_1 = tf.reduce_sum(absolute_loss_1) / (
tf.cast(tf.reduce_sum(flag), dtype=tf.float32) + constant.INF)
trigger_prob = logits_role[:, :, 1:14] # [N, T, 13]
sum_all = tf.reduce_sum(tf.reduce_sum(trigger_prob, axis=-1), axis=-1) # N
max_all = tf.reduce_max(tf.reduce_max(trigger_prob, axis=-1), axis=-1) # N
absolute_loss_2 = sum_all - max_all + (1 - max_all)
absolute_loss_2 = tf.multiply(absolute_loss_2, tf.cast(flag, tf.float32))
absolute_loss_2 = tf.reduce_sum(absolute_loss_2) / (
tf.cast(tf.reduce_sum(flag), dtype=tf.float32) + constant.INF)
# --------
trigger_type = 1 + tf.argmax(tf.reduce_max(trigger_prob, axis=1), axis=-1) # N
selected_role = tf.matmul(tf.one_hot(trigger_type, depth=constant.len_sub), self.trigger_role) # N, len_sub
pos = tf.tile(tf.expand_dims(selected_role, axis=1), [1, tf.shape(trigger_prob)[1], 1]) # N, T, len_sub
neg = 1 - pos
pos = tf.multiply(pos, logits_role)
neg = tf.multiply(neg, logits_role)
sum_all_2 = tf.reduce_sum(tf.reduce_sum(neg, axis=-1), axis=-1) # N
max_all_2 = tf.reduce_max(tf.reduce_max(pos, axis=-1), axis=-1) # N
loss_3 = (1 - max_all_2) + sum_all_2
loss_3 = tf.multiply(loss_3, tf.cast(flag, tf.float32))
loss_3 = tf.reduce_sum(loss_3) / (
tf.cast(tf.reduce_sum(flag), dtype=tf.float32) + constant.INF)
total_loss = loss_legal \
+ loss_role * self.hp.role_weight \
+ (absolute_loss_1 + absolute_loss_2 + loss_3) / 3 * self.hp.CSTR_weight
else:
total_loss = loss_legal
global_step = tf.train.get_or_create_global_step()
lr = noam_scheme(self.hp.lr, global_step, self.hp.warmup_steps)
optimizer = tf.train.AdamOptimizer(lr)
train_op = optimizer.minimize(total_loss, global_step=global_step)
tf.summary.scalar('lr', lr)
tf.summary.scalar("loss", total_loss)
tf.summary.scalar("loss_law", loss_law)
tf.summary.scalar("loss_accu", loss_accu)
tf.summary.scalar("loss_term", loss_term)
# tf.summary.scalar("loss_role", loss_role)
# tf.summary.scalar("loss_absolute1", absolute_loss_1)
# tf.summary.scalar("loss_absolute2", absolute_loss_2)
# tf.summary.scalar("loss_trigger_role", loss_3)
summaries = tf.summary.merge_all()
return total_loss, train_op, global_step, summaries
def test(self, token_ids, segment_ids, role_labels, law, accu, term, flag, token_len):
memory = self.encoder(token_ids, segment_ids, token_len)
logits_role = self.role_module(memory)
if self.hp.train_event != 'None':
predict_role, _ = tf.contrib.crf.crf_decode(logits_role, self.trans, token_len)
else:
predict_role = tf.argmax(logits_role, axis=-1)
logits_law, logits_accu, logits_term = self.legal_predict(memory, logits_role, flag, token_len)
predict_law = tf.argmax(logits_law, axis=-1)
predict_law_one_hot = tf.one_hot(predict_law, depth=101, axis=-1)
predict_accu = tf.argmax(
tf.multiply(tf.matmul(predict_law_one_hot, self.law_accu), tf.nn.softmax(logits_accu, axis=-1)), axis=-1)
predict_term = tf.argmax(
tf.multiply(tf.matmul(predict_law_one_hot, self.law_term), tf.nn.softmax(logits_term, axis=-1)), axis=-1)
return predict_law, predict_accu, predict_term, predict_role
def generate_logits(self, token_ids, segment_ids, role_labels, law, accu, term, flag, token_len):
memory = self.encoder(token_ids, segment_ids, token_len)
logits_role = self.role_module(memory)
logits_law, logits_accu, logits_term = self.legal_predict(memory, logits_role, flag)
return logits_law, logits_accu, logits_term
|
[] |
[] |
[
"TF_KERAS"
] |
[]
|
["TF_KERAS"]
|
python
| 1 | 0 | |
vendor/go.etcd.io/etcd/clientv3/client.go
|
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
"errors"
"fmt"
"net"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/google/uuid"
"go.etcd.io/etcd/clientv3/balancer"
"go.etcd.io/etcd/clientv3/balancer/picker"
"go.etcd.io/etcd/clientv3/balancer/resolver/endpoint"
"go.etcd.io/etcd/clientv3/credentials"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
"go.etcd.io/etcd/pkg/logutil"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
grpccredentials "google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
var (
ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
ErrOldCluster = errors.New("etcdclient: old cluster version")
roundRobinBalancerName = fmt.Sprintf("etcd-%s", picker.RoundrobinBalanced.String())
)
func init() {
lg := zap.NewNop()
if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
lcfg := logutil.DefaultZapLoggerConfig
lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
var err error
lg, err = lcfg.Build() // info level logging
if err != nil {
panic(err)
}
}
// TODO: support custom balancer
balancer.RegisterBuilder(balancer.Config{
Policy: picker.RoundrobinBalanced,
Name: roundRobinBalancerName,
Logger: lg,
})
}
// Client provides and manages an etcd v3 client session.
type Client struct {
Cluster
KV
Lease
Watcher
Auth
Maintenance
conn *grpc.ClientConn
cfg Config
creds grpccredentials.TransportCredentials
resolverGroup *endpoint.ResolverGroup
mu *sync.RWMutex
ctx context.Context
cancel context.CancelFunc
// Username is a user name for authentication.
Username string
// Password is a password for authentication.
Password string
authTokenBundle credentials.Bundle
callOpts []grpc.CallOption
lg *zap.Logger
}
// New creates a new etcdv3 client from a given configuration.
func New(cfg Config) (*Client, error) {
if len(cfg.Endpoints) == 0 {
return nil, ErrNoAvailableEndpoints
}
return newClient(&cfg)
}
// NewCtxClient creates a client with a context but no underlying grpc
// connection. This is useful for embedded cases that override the
// service interface implementations and do not need connection management.
func NewCtxClient(ctx context.Context) *Client {
cctx, cancel := context.WithCancel(ctx)
return &Client{ctx: cctx, cancel: cancel}
}
// NewFromURL creates a new etcdv3 client from a URL.
func NewFromURL(url string) (*Client, error) {
return New(Config{Endpoints: []string{url}})
}
// NewFromURLs creates a new etcdv3 client from URLs.
func NewFromURLs(urls []string) (*Client, error) {
return New(Config{Endpoints: urls})
}
// Close shuts down the client's etcd connections.
func (c *Client) Close() error {
c.cancel()
if c.Watcher != nil {
c.Watcher.Close()
}
if c.Lease != nil {
c.Lease.Close()
}
if c.resolverGroup != nil {
c.resolverGroup.Close()
}
if c.conn != nil {
return toErr(c.ctx, c.conn.Close())
}
return c.ctx.Err()
}
// Ctx is a context for "out of band" messages (e.g., for sending
// "clean up" message when another context is canceled). It is
// canceled on client Close().
func (c *Client) Ctx() context.Context { return c.ctx }
// Endpoints lists the registered endpoints for the client.
func (c *Client) Endpoints() []string {
// copy the slice; protect original endpoints from being changed
c.mu.RLock()
defer c.mu.RUnlock()
eps := make([]string, len(c.cfg.Endpoints))
copy(eps, c.cfg.Endpoints)
return eps
}
// SetEndpoints updates client's endpoints.
func (c *Client) SetEndpoints(eps ...string) {
c.mu.Lock()
defer c.mu.Unlock()
c.cfg.Endpoints = eps
c.resolverGroup.SetEndpoints(eps)
}
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
func (c *Client) Sync(ctx context.Context) error {
mresp, err := c.MemberList(ctx)
if err != nil {
return err
}
var eps []string
for _, m := range mresp.Members {
eps = append(eps, m.ClientURLs...)
}
c.SetEndpoints(eps...)
return nil
}
func (c *Client) autoSync() {
if c.cfg.AutoSyncInterval == time.Duration(0) {
return
}
for {
select {
case <-c.ctx.Done():
return
case <-time.After(c.cfg.AutoSyncInterval):
ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
err := c.Sync(ctx)
cancel()
if err != nil && err != c.ctx.Err() {
lg.Lvl(4).Infof("Auto sync endpoints failed: %v", err)
}
}
}
}
func (c *Client) processCreds(scheme string) (creds grpccredentials.TransportCredentials) {
creds = c.creds
switch scheme {
case "unix":
case "http":
creds = nil
case "https", "unixs":
if creds != nil {
break
}
creds = credentials.NewBundle(credentials.Config{}).TransportCredentials()
default:
creds = nil
}
return creds
}
// dialSetupOpts gives the dial opts prior to any authentication.
func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
if c.cfg.DialKeepAliveTime > 0 {
params := keepalive.ClientParameters{
Time: c.cfg.DialKeepAliveTime,
Timeout: c.cfg.DialKeepAliveTimeout,
PermitWithoutStream: c.cfg.PermitWithoutStream,
}
opts = append(opts, grpc.WithKeepaliveParams(params))
}
opts = append(opts, dopts...)
dialer := endpoint.Dialer
if creds != nil {
opts = append(opts, grpc.WithTransportCredentials(creds))
// gRPC load balancer workaround. See credentials.transportCredential for details.
if credsDialer, ok := creds.(TransportCredentialsWithDialer); ok {
dialer = credsDialer.Dialer
}
} else {
opts = append(opts, grpc.WithInsecure())
}
opts = append(opts, grpc.WithContextDialer(dialer))
// Interceptor retry and backoff.
// TODO: Replace all of clientv3/retry.go with interceptor based retry, or with
// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#retry-policy
// once it is available.
rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction))
opts = append(opts,
// Disable stream retry by default since go-grpc-middleware/retry does not support client streams.
// Streams that are safe to retry are enabled individually.
grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(0), rrBackoff)),
grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), rrBackoff)),
)
return opts, nil
}
// Dial connects to a single endpoint using the client's config.
func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
creds, err := c.directDialCreds(ep)
if err != nil {
return nil, err
}
// Use the grpc passthrough resolver to directly dial a single endpoint.
// This resolver passes through the 'unix' and 'unixs' endpoints schemes used
// by etcd without modification, allowing us to directly dial endpoints and
// using the same dial functions that we use for load balancer dialing.
return c.dial(fmt.Sprintf("passthrough:///%s", ep), creds)
}
func (c *Client) getToken(ctx context.Context) error {
var err error // return last error in a case of fail
var auth *authenticator
eps := c.Endpoints()
for _, ep := range eps {
// use dial options without dopts to avoid reusing the client balancer
var dOpts []grpc.DialOption
_, host, _ := endpoint.ParseEndpoint(ep)
target := c.resolverGroup.Target(host)
creds := c.dialWithBalancerCreds(ep)
dOpts, err = c.dialSetupOpts(creds, c.cfg.DialOptions...)
if err != nil {
err = fmt.Errorf("failed to configure auth dialer: %v", err)
continue
}
dOpts = append(dOpts, grpc.WithBalancerName(roundRobinBalancerName))
auth, err = newAuthenticator(ctx, target, dOpts, c)
if err != nil {
continue
}
defer auth.close()
var resp *AuthenticateResponse
resp, err = auth.authenticate(ctx, c.Username, c.Password)
if err != nil {
// return err without retrying other endpoints
if err == rpctypes.ErrAuthNotEnabled {
return err
}
continue
}
c.authTokenBundle.UpdateAuthToken(resp.Token)
return nil
}
return err
}
// dialWithBalancer dials the client's current load balanced resolver group. The scheme of the host
// of the provided endpoint determines the scheme used for all endpoints of the client connection.
func (c *Client) dialWithBalancer(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
_, host, _ := endpoint.ParseEndpoint(ep)
target := c.resolverGroup.Target(host)
creds := c.dialWithBalancerCreds(ep)
return c.dial(target, creds, dopts...)
}
// dial configures and dials any grpc balancer target.
func (c *Client) dial(target string, creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
opts, err := c.dialSetupOpts(creds, dopts...)
if err != nil {
return nil, fmt.Errorf("failed to configure dialer: %v", err)
}
if c.Username != "" && c.Password != "" {
c.authTokenBundle = credentials.NewBundle(credentials.Config{})
ctx, cancel := c.ctx, func() {}
if c.cfg.DialTimeout > 0 {
ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
}
err = c.getToken(ctx)
if err != nil {
if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
err = context.DeadlineExceeded
}
cancel()
return nil, err
}
} else {
opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials()))
}
cancel()
}
opts = append(opts, c.cfg.DialOptions...)
dctx := c.ctx
if c.cfg.DialTimeout > 0 {
var cancel context.CancelFunc
dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
}
conn, err := grpc.DialContext(dctx, target, opts...)
if err != nil {
return nil, err
}
return conn, nil
}
func (c *Client) directDialCreds(ep string) (grpccredentials.TransportCredentials, error) {
_, host, scheme := endpoint.ParseEndpoint(ep)
creds := c.creds
if len(scheme) != 0 {
creds = c.processCreds(scheme)
if creds != nil {
clone := creds.Clone()
// Set the server name must to the endpoint hostname without port since grpc
// otherwise attempts to check if x509 cert is valid for the full endpoint
// including the scheme and port, which fails.
overrideServerName, _, err := net.SplitHostPort(host)
if err != nil {
// Either the host didn't have a port or the host could not be parsed. Either way, continue with the
// original host string.
overrideServerName = host
}
clone.OverrideServerName(overrideServerName)
creds = clone
}
}
return creds, nil
}
func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCredentials {
_, _, scheme := endpoint.ParseEndpoint(ep)
creds := c.creds
if len(scheme) != 0 {
creds = c.processCreds(scheme)
}
return creds
}
// WithRequireLeader requires client requests to only succeed
// when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context {
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewOutgoingContext(ctx, md)
}
func newClient(cfg *Config) (*Client, error) {
if cfg == nil {
cfg = &Config{}
}
var creds grpccredentials.TransportCredentials
if cfg.TLS != nil {
creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials()
}
// use a temporary skeleton client to bootstrap first connection
baseCtx := context.TODO()
if cfg.Context != nil {
baseCtx = cfg.Context
}
ctx, cancel := context.WithCancel(baseCtx)
client := &Client{
conn: nil,
cfg: *cfg,
creds: creds,
ctx: ctx,
cancel: cancel,
mu: new(sync.RWMutex),
callOpts: defaultCallOpts,
}
lcfg := logutil.DefaultZapLoggerConfig
if cfg.LogConfig != nil {
lcfg = *cfg.LogConfig
}
var err error
client.lg, err = lcfg.Build()
if err != nil {
return nil, err
}
if cfg.Username != "" && cfg.Password != "" {
client.Username = cfg.Username
client.Password = cfg.Password
}
if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
}
callOpts := []grpc.CallOption{
defaultFailFast,
defaultMaxCallSendMsgSize,
defaultMaxCallRecvMsgSize,
}
if cfg.MaxCallSendMsgSize > 0 {
callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
}
if cfg.MaxCallRecvMsgSize > 0 {
callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
}
client.callOpts = callOpts
}
// Prepare a 'endpoint://<unique-client-id>/' resolver for the client and create a endpoint target to pass
// to dial so the client knows to use this resolver.
client.resolverGroup, err = endpoint.NewResolverGroup(fmt.Sprintf("client-%s", uuid.New().String()))
if err != nil {
client.cancel()
return nil, err
}
client.resolverGroup.SetEndpoints(cfg.Endpoints)
if len(cfg.Endpoints) < 1 {
return nil, fmt.Errorf("at least one Endpoint must is required in client config")
}
dialEndpoint := cfg.Endpoints[0]
// Use a provided endpoint target so that for https:// without any tls config given, then
// grpc will assume the certificate server name is the endpoint host.
conn, err := client.dialWithBalancer(dialEndpoint, grpc.WithBalancerName(roundRobinBalancerName))
if err != nil {
client.cancel()
client.resolverGroup.Close()
return nil, err
}
// TODO: With the old grpc balancer interface, we waited until the dial timeout
// for the balancer to be ready. Is there an equivalent wait we should do with the new grpc balancer interface?
client.conn = conn
client.Cluster = NewCluster(client)
client.KV = NewKV(client)
client.Lease = NewLease(client)
client.Watcher = NewWatcher(client)
client.Auth = NewAuth(client)
client.Maintenance = NewMaintenance(client)
if cfg.RejectOldCluster {
if err := client.checkVersion(); err != nil {
client.Close()
return nil, err
}
}
go client.autoSync()
return client, nil
}
// roundRobinQuorumBackoff retries against quorum between each backoff.
// This is intended for use with a round robin load balancer.
func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc {
return func(attempt uint) time.Duration {
// after each round robin across quorum, backoff for our wait between duration
n := uint(len(c.Endpoints()))
quorum := (n/2 + 1)
if attempt%quorum == 0 {
c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction))
return jitterUp(waitBetween, jitterFraction)
}
c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum))
return 0
}
}
func (c *Client) checkVersion() (err error) {
var wg sync.WaitGroup
eps := c.Endpoints()
errc := make(chan error, len(eps))
ctx, cancel := context.WithCancel(c.ctx)
if c.cfg.DialTimeout > 0 {
cancel()
ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
}
wg.Add(len(eps))
for _, ep := range eps {
// if cluster is current, any endpoint gives a recent version
go func(e string) {
defer wg.Done()
resp, rerr := c.Status(ctx, e)
if rerr != nil {
errc <- rerr
return
}
vs := strings.Split(resp.Version, ".")
maj, min := 0, 0
if len(vs) >= 2 {
var serr error
if maj, serr = strconv.Atoi(vs[0]); serr != nil {
errc <- serr
return
}
if min, serr = strconv.Atoi(vs[1]); serr != nil {
errc <- serr
return
}
}
if maj < 3 || (maj == 3 && min < 2) {
rerr = ErrOldCluster
}
errc <- rerr
}(ep)
}
// wait for success
for range eps {
if err = <-errc; err == nil {
break
}
}
cancel()
wg.Wait()
return err
}
// ActiveConnection returns the current in-use connection
func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
// isHaltErr returns true if the given error and context indicate no forward
// progress can be made, even after reconnecting.
func isHaltErr(ctx context.Context, err error) bool {
if ctx != nil && ctx.Err() != nil {
return true
}
if err == nil {
return false
}
ev, _ := status.FromError(err)
// Unavailable codes mean the system will be right back.
// (e.g., can't connect, lost leader)
// Treat Internal codes as if something failed, leaving the
// system in an inconsistent state, but retrying could make progress.
// (e.g., failed in middle of send, corrupted frame)
// TODO: are permanent Internal errors possible from grpc?
return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
}
// isUnavailableErr returns true if the given error is an unavailable error
func isUnavailableErr(ctx context.Context, err error) bool {
if ctx != nil && ctx.Err() != nil {
return false
}
if err == nil {
return false
}
ev, ok := status.FromError(err)
if ok {
// Unavailable codes mean the system will be right back.
// (e.g., can't connect, lost leader)
return ev.Code() == codes.Unavailable
}
return false
}
func toErr(ctx context.Context, err error) error {
if err == nil {
return nil
}
err = rpctypes.Error(err)
if _, ok := err.(rpctypes.EtcdError); ok {
return err
}
if ev, ok := status.FromError(err); ok {
code := ev.Code()
switch code {
case codes.DeadlineExceeded:
fallthrough
case codes.Canceled:
if ctx.Err() != nil {
err = ctx.Err()
}
}
}
return err
}
func canceledByCaller(stopCtx context.Context, err error) bool {
if stopCtx.Err() == nil || err == nil {
return false
}
return err == context.Canceled || err == context.DeadlineExceeded
}
// IsConnCanceled returns true, if error is from a closed gRPC connection.
// ref. https://github.com/grpc/grpc-go/pull/1854
func IsConnCanceled(err error) bool {
if err == nil {
return false
}
// >= gRPC v1.23.x
s, ok := status.FromError(err)
if ok {
// connection is canceled or server has already closed the connection
return s.Code() == codes.Canceled || s.Message() == "transport is closing"
}
// >= gRPC v1.10.x
if err == context.Canceled {
return true
}
// <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
return strings.Contains(err.Error(), "grpc: the client connection is closing")
}
// TransportCredentialsWithDialer is for a gRPC load balancer workaround. See credentials.transportCredential for details.
type TransportCredentialsWithDialer interface {
grpccredentials.TransportCredentials
Dialer(ctx context.Context, dialEp string) (net.Conn, error)
}
|
[
"\"ETCD_CLIENT_DEBUG\""
] |
[] |
[
"ETCD_CLIENT_DEBUG"
] |
[]
|
["ETCD_CLIENT_DEBUG"]
|
go
| 1 | 0 | |
tests/unit_tests/routes_test.py
|
import os
from pathlib import Path
import unittest
import json
from starlette.testclient import TestClient
from ml_base.utilities import ModelManager
os.chdir(Path(__file__).resolve().parent.parent.parent)
os.environ["REST_CONFIG"] = "examples/rest_config.yaml"
from rest_model_service.main import app, create_app
from rest_model_service.configuration import Model
class RoutesTests(unittest.TestCase):
def test_root(self):
# arrange
client = TestClient(app)
# act
response = client.get("/")
# assert
self.assertTrue(response.status_code == 200)
# cleanup
model_manager = ModelManager()
model_manager.clear_instance()
def test_get_models(self):
# arrange
client = TestClient(app)
# act
response = client.get("/api/models")
# assert
self.assertTrue(response.status_code == 200)
self.assertTrue(response.json() == {
"models":
[
{
"display_name": "Iris Model",
"qualified_name": "iris_model",
"description": "Model for predicting the species of a flower based on its measurements.",
"version": "1.0.0"
}
]
})
# cleanup
model_manager = ModelManager()
model_manager.clear_instance()
def test_prediction(self):
# arrange
client = TestClient(app)
# act
response = client.post("/api/models/iris_model/prediction", data=json.dumps({
"sepal_length": 6.0,
"sepal_width": 5.0,
"petal_length": 3.0,
"petal_width": 2.0
}))
# assert
self.assertTrue(response.status_code == 200)
self.assertTrue(response.json() == {
"species": "Iris setosa"
})
# cleanup
model_manager = ModelManager()
model_manager.clear_instance()
def test_prediction_with_bad_data(self):
# arrange
app = create_app("REST Model Service", [Model(qualified_name="iris_model",
class_path="tests.mocks.IrisModel",
create_endpoint=True)])
client = TestClient(app)
# act
response = client.post("/api/models/iris_model/prediction", data=json.dumps({
"sepal_length": 16.0,
"sepal_width": 5.0,
"petal_length": 3.0,
"petal_width": 2.0
}))
# assert
self.assertTrue(response.status_code == 422)
# cleanup
model_manager = ModelManager()
model_manager.clear_instance()
def test_prediction_with_bad_configuration(self):
# arrange, act, assert
with self.assertRaises(ValueError) as e:
app = create_app("REST Model Service", [Model(qualified_name="asdf",
class_path="tests.mocks.IrisModel",
create_endpoint=True)])
# cleanup
model_manager = ModelManager()
model_manager.clear_instance()
def test_prediction_with_no_endpoint(self):
# arrange
app = create_app("REST Model Service", [Model(qualified_name="iris_model",
class_path="tests.mocks.IrisModel",
create_endpoint=False)])
client = TestClient(app)
# act
response = client.post("/api/models/iris_model/prediction", data=json.dumps({
"sepal_length": 16.0,
"sepal_width": 5.0,
"petal_length": 3.0,
"petal_width": 2.0
}))
# assert
self.assertTrue(response.status_code == 404)
# cleanup
model_manager = ModelManager()
model_manager.clear_instance()
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"REST_CONFIG"
] |
[]
|
["REST_CONFIG"]
|
python
| 1 | 0 | |
p2p/simulations/adapters/exec.go
|
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package adapters
import (
"bufio"
"context"
"crypto/ecdsa"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"os"
"os/exec"
"os/signal"
"path/filepath"
"regexp"
"strings"
"sync"
"syscall"
"time"
"github.com/docker/docker/pkg/reexec"
"github.com/turingcontract/go-turingcontract/log"
"github.com/turingcontract/go-turingcontract/node"
"github.com/turingcontract/go-turingcontract/p2p"
"github.com/turingcontract/go-turingcontract/p2p/discover"
"github.com/turingcontract/go-turingcontract/rpc"
"golang.org/x/net/websocket"
)
// ExecAdapter is a NodeAdapter which runs simulation nodes by executing the
// current binary as a child process.
//
// An init hook is used so that the child process executes the node services
// (rather than whataver the main() function would normally do), see the
// execP2PNode function for more information.
type ExecAdapter struct {
// BaseDir is the directory under which the data directories for each
// simulation node are created.
BaseDir string
nodes map[discover.NodeID]*ExecNode
}
// NewExecAdapter returns an ExecAdapter which stores node data in
// subdirectories of the given base directory
func NewExecAdapter(baseDir string) *ExecAdapter {
return &ExecAdapter{
BaseDir: baseDir,
nodes: make(map[discover.NodeID]*ExecNode),
}
}
// Name returns the name of the adapter for logging purposes
func (e *ExecAdapter) Name() string {
return "exec-adapter"
}
// NewNode returns a new ExecNode using the given config
func (e *ExecAdapter) NewNode(config *NodeConfig) (Node, error) {
if len(config.Services) == 0 {
return nil, errors.New("node must have at least one service")
}
for _, service := range config.Services {
if _, exists := serviceFuncs[service]; !exists {
return nil, fmt.Errorf("unknown node service %q", service)
}
}
// create the node directory using the first 12 characters of the ID
// as Unix socket paths cannot be longer than 256 characters
dir := filepath.Join(e.BaseDir, config.ID.String()[:12])
if err := os.Mkdir(dir, 0755); err != nil {
return nil, fmt.Errorf("error creating node directory: %s", err)
}
// generate the config
conf := &execNodeConfig{
Stack: node.DefaultConfig,
Node: config,
}
conf.Stack.DataDir = filepath.Join(dir, "data")
conf.Stack.WSHost = "127.0.0.1"
conf.Stack.WSPort = 0
conf.Stack.WSOrigins = []string{"*"}
conf.Stack.WSExposeAll = true
conf.Stack.P2P.EnableMsgEvents = false
conf.Stack.P2P.NoDiscovery = true
conf.Stack.P2P.NAT = nil
conf.Stack.NoUSB = true
// listen on a random localhost port (we'll get the actual port after
// starting the node through the RPC admin.nodeInfo method)
conf.Stack.P2P.ListenAddr = "127.0.0.1:0"
node := &ExecNode{
ID: config.ID,
Dir: dir,
Config: conf,
adapter: e,
}
node.newCmd = node.execCommand
e.nodes[node.ID] = node
return node, nil
}
// ExecNode starts a simulation node by exec'ing the current binary and
// running the configured services
type ExecNode struct {
ID discover.NodeID
Dir string
Config *execNodeConfig
Cmd *exec.Cmd
Info *p2p.NodeInfo
adapter *ExecAdapter
client *rpc.Client
wsAddr string
newCmd func() *exec.Cmd
key *ecdsa.PrivateKey
}
// Addr returns the node's enode URL
func (n *ExecNode) Addr() []byte {
if n.Info == nil {
return nil
}
return []byte(n.Info.Enode)
}
// Client returns an rpc.Client which can be used to communicate with the
// underlying services (it is set once the node has started)
func (n *ExecNode) Client() (*rpc.Client, error) {
return n.client, nil
}
// wsAddrPattern is a regex used to read the WebSocket address from the node's
// log
var wsAddrPattern = regexp.MustCompile(`ws://[\d.:]+`)
// Start exec's the node passing the ID and service as command line arguments
// and the node config encoded as JSON in the _P2P_NODE_CONFIG environment
// variable
func (n *ExecNode) Start(snapshots map[string][]byte) (err error) {
if n.Cmd != nil {
return errors.New("already started")
}
defer func() {
if err != nil {
log.Error("node failed to start", "err", err)
n.Stop()
}
}()
// encode a copy of the config containing the snapshot
confCopy := *n.Config
confCopy.Snapshots = snapshots
confCopy.PeerAddrs = make(map[string]string)
for id, node := range n.adapter.nodes {
confCopy.PeerAddrs[id.String()] = node.wsAddr
}
confData, err := json.Marshal(confCopy)
if err != nil {
return fmt.Errorf("error generating node config: %s", err)
}
// use a pipe for stderr so we can both copy the node's stderr to
// os.Stderr and read the WebSocket address from the logs
stderrR, stderrW := io.Pipe()
stderr := io.MultiWriter(os.Stderr, stderrW)
// start the node
cmd := n.newCmd()
cmd.Stdout = os.Stdout
cmd.Stderr = stderr
cmd.Env = append(os.Environ(), fmt.Sprintf("_P2P_NODE_CONFIG=%s", confData))
if err := cmd.Start(); err != nil {
return fmt.Errorf("error starting node: %s", err)
}
n.Cmd = cmd
// read the WebSocket address from the stderr logs
var wsAddr string
wsAddrC := make(chan string)
go func() {
s := bufio.NewScanner(stderrR)
for s.Scan() {
if strings.Contains(s.Text(), "WebSocket endpoint opened:") {
wsAddrC <- wsAddrPattern.FindString(s.Text())
}
}
}()
select {
case wsAddr = <-wsAddrC:
if wsAddr == "" {
return errors.New("failed to read WebSocket address from stderr")
}
case <-time.After(10 * time.Second):
return errors.New("timed out waiting for WebSocket address on stderr")
}
// create the RPC client and load the node info
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
client, err := rpc.DialWebsocket(ctx, wsAddr, "")
if err != nil {
return fmt.Errorf("error dialing rpc websocket: %s", err)
}
var info p2p.NodeInfo
if err := client.CallContext(ctx, &info, "admin_nodeInfo"); err != nil {
return fmt.Errorf("error getting node info: %s", err)
}
n.client = client
n.wsAddr = wsAddr
n.Info = &info
return nil
}
// execCommand returns a command which runs the node locally by exec'ing
// the current binary but setting argv[0] to "p2p-node" so that the child
// runs execP2PNode
func (n *ExecNode) execCommand() *exec.Cmd {
return &exec.Cmd{
Path: reexec.Self(),
Args: []string{"p2p-node", strings.Join(n.Config.Node.Services, ","), n.ID.String()},
}
}
// Stop stops the node by first sending SIGTERM and then SIGKILL if the node
// doesn't stop within 5s
func (n *ExecNode) Stop() error {
if n.Cmd == nil {
return nil
}
defer func() {
n.Cmd = nil
}()
if n.client != nil {
n.client.Close()
n.client = nil
n.wsAddr = ""
n.Info = nil
}
if err := n.Cmd.Process.Signal(syscall.SIGTERM); err != nil {
return n.Cmd.Process.Kill()
}
waitErr := make(chan error)
go func() {
waitErr <- n.Cmd.Wait()
}()
select {
case err := <-waitErr:
return err
case <-time.After(5 * time.Second):
return n.Cmd.Process.Kill()
}
}
// NodeInfo returns information about the node
func (n *ExecNode) NodeInfo() *p2p.NodeInfo {
info := &p2p.NodeInfo{
ID: n.ID.String(),
}
if n.client != nil {
n.client.Call(&info, "admin_nodeInfo")
}
return info
}
// ServeRPC serves RPC requests over the given connection by dialling the
// node's WebSocket address and joining the two connections
func (n *ExecNode) ServeRPC(clientConn net.Conn) error {
conn, err := websocket.Dial(n.wsAddr, "", "http://localhost")
if err != nil {
return err
}
var wg sync.WaitGroup
wg.Add(2)
join := func(src, dst net.Conn) {
defer wg.Done()
io.Copy(dst, src)
// close the write end of the destination connection
if cw, ok := dst.(interface {
CloseWrite() error
}); ok {
cw.CloseWrite()
} else {
dst.Close()
}
}
go join(conn, clientConn)
go join(clientConn, conn)
wg.Wait()
return nil
}
// Snapshots creates snapshots of the services by calling the
// simulation_snapshot RPC method
func (n *ExecNode) Snapshots() (map[string][]byte, error) {
if n.client == nil {
return nil, errors.New("RPC not started")
}
var snapshots map[string][]byte
return snapshots, n.client.Call(&snapshots, "simulation_snapshot")
}
func init() {
// register a reexec function to start a devp2p node when the current
// binary is executed as "p2p-node"
reexec.Register("p2p-node", execP2PNode)
}
// execNodeConfig is used to serialize the node configuration so it can be
// passed to the child process as a JSON encoded environment variable
type execNodeConfig struct {
Stack node.Config `json:"stack"`
Node *NodeConfig `json:"node"`
Snapshots map[string][]byte `json:"snapshots,omitempty"`
PeerAddrs map[string]string `json:"peer_addrs,omitempty"`
}
// execP2PNode starts a devp2p node when the current binary is executed with
// argv[0] being "p2p-node", reading the service names from argv[1] (comma
// separated) and the node config from the _P2P_NODE_CONFIG environment
// variable. Any unrecoverable error terminates the process via log.Crit.
func execP2PNode() {
	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
	glogger.Verbosity(log.LvlInfo)
	log.Root().SetHandler(glogger)

	// read the services from argv
	serviceNames := strings.Split(os.Args[1], ",")

	// decode the config
	confEnv := os.Getenv("_P2P_NODE_CONFIG")
	if confEnv == "" {
		log.Crit("missing _P2P_NODE_CONFIG")
	}
	var conf execNodeConfig
	if err := json.Unmarshal([]byte(confEnv), &conf); err != nil {
		log.Crit("error decoding _P2P_NODE_CONFIG", "err", err)
	}
	conf.Stack.P2P.PrivateKey = conf.Node.PrivateKey
	conf.Stack.Logger = log.New("node.id", conf.Node.ID.String())

	// use explicit IP address in ListenAddr so that Enode URL is usable
	externalIP := func() string {
		addrs, err := net.InterfaceAddrs()
		if err != nil {
			log.Crit("error getting IP address", "err", err)
		}
		for _, addr := range addrs {
			if ip, ok := addr.(*net.IPNet); ok && !ip.IP.IsLoopback() {
				return ip.IP.String()
			}
		}
		log.Crit("unable to determine explicit IP address")
		return ""
	}
	if strings.HasPrefix(conf.Stack.P2P.ListenAddr, ":") {
		conf.Stack.P2P.ListenAddr = externalIP() + conf.Stack.P2P.ListenAddr
	}
	if conf.Stack.WSHost == "0.0.0.0" {
		conf.Stack.WSHost = externalIP()
	}

	// initialize the devp2p stack
	stack, err := node.New(&conf.Stack)
	if err != nil {
		log.Crit("error creating node stack", "err", err)
	}

	// register the services, collecting them into a map so we can wrap
	// them in a snapshot service
	services := make(map[string]node.Service, len(serviceNames))
	for _, name := range serviceNames {
		// FIX: rebind the loop variable; the constructor closure below runs
		// later (when the stack starts), so without this every constructor
		// would observe the final value of `name` (pre-Go 1.22 semantics).
		name := name
		serviceFunc, exists := serviceFuncs[name]
		if !exists {
			log.Crit("unknown node service", "name", name)
		}
		constructor := func(nodeCtx *node.ServiceContext) (node.Service, error) {
			ctx := &ServiceContext{
				RPCDialer:   &wsRPCDialer{addrs: conf.PeerAddrs},
				NodeContext: nodeCtx,
				Config:      conf.Node,
			}
			if conf.Snapshots != nil {
				ctx.Snapshot = conf.Snapshots[name]
			}
			service, err := serviceFunc(ctx)
			if err != nil {
				return nil, err
			}
			services[name] = service
			return service, nil
		}
		if err := stack.Register(constructor); err != nil {
			log.Crit("error starting service", "name", name, "err", err)
		}
	}

	// register the snapshot service
	if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
		return &snapshotService{services}, nil
	}); err != nil {
		log.Crit("error starting snapshot service", "err", err)
	}

	// start the stack (message typo "stating" fixed)
	if err := stack.Start(); err != nil {
		log.Crit("error starting node stack", "err", err)
	}

	// stop the stack if we get a SIGTERM signal
	go func() {
		sigc := make(chan os.Signal, 1)
		signal.Notify(sigc, syscall.SIGTERM)
		defer signal.Stop(sigc)
		<-sigc
		log.Info("Received SIGTERM, shutting down...")
		stack.Stop()
	}()

	// wait for the stack to exit
	stack.Wait()
}
// snapshotService is a node.Service which wraps a list of services and
// exposes an API to generate a snapshot of those services
type snapshotService struct {
	// services maps service name to the running service instance.
	services map[string]node.Service
}
// APIs exposes the snapshot functionality under the "simulation" namespace.
func (s *snapshotService) APIs() []rpc.API {
	api := rpc.API{
		Namespace: "simulation",
		Version:   "1.0",
		Service:   SnapshotAPI{s.services},
	}
	return []rpc.API{api}
}
// Protocols implements node.Service; the snapshot service speaks no
// devp2p protocols of its own.
func (s *snapshotService) Protocols() []p2p.Protocol {
	return nil
}
// Start implements node.Service; nothing to start.
func (s *snapshotService) Start(*p2p.Server) error {
	return nil
}
// Stop implements node.Service; nothing to stop.
func (s *snapshotService) Stop() error {
	return nil
}
// SnapshotAPI provides an RPC method to create snapshots of services
type SnapshotAPI struct {
	// services maps service name to the running service instance.
	services map[string]node.Service
}
// Snapshot collects a snapshot from every wrapped service that implements
// an optional Snapshot() method; services without one are skipped.
func (api SnapshotAPI) Snapshot() (map[string][]byte, error) {
	type snapshotter interface {
		Snapshot() ([]byte, error)
	}
	result := make(map[string][]byte)
	for name, service := range api.services {
		s, ok := service.(snapshotter)
		if !ok {
			continue
		}
		data, err := s.Snapshot()
		if err != nil {
			return nil, err
		}
		result[name] = data
	}
	return result, nil
}
// wsRPCDialer dials peer RPC endpoints over WebSocket using a static
// node-ID -> address map.
type wsRPCDialer struct {
	addrs map[string]string
}
// DialRPC implements the RPCDialer interface by creating a WebSocket RPC
// client of the given node
func (w *wsRPCDialer) DialRPC(id discover.NodeID) (*rpc.Client, error) {
	endpoint, found := w.addrs[id.String()]
	if !found {
		return nil, fmt.Errorf("unknown node: %s", id)
	}
	return rpc.DialWebsocket(context.Background(), endpoint, "http://localhost")
}
|
[
"\"_P2P_NODE_CONFIG\""
] |
[] |
[
"_P2P_NODE_CONFIG"
] |
[]
|
["_P2P_NODE_CONFIG"]
|
go
| 1 | 0 | |
core/container/util/writer.go
|
/*
Copyright IBM Corp. 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"archive/tar"
"bufio"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"github.com/op/go-logging"
"github.com/spf13/viper"
)
// vmLogger logs packaging activity under the "container" module.
var vmLogger = logging.MustGetLogger("container")

// fileTypes lists the file extensions included when packaging Go chaincode.
var fileTypes = map[string]bool{
	".c":    true,
	".h":    true,
	".go":   true,
	".yaml": true,
	".json": true,
}

// javaFileTypes lists the file extensions included when packaging Java chaincode.
var javaFileTypes = map[string]bool{
	".java":       true,
	".properties": true,
	".gradle":     true,
}
// WriteFolderToTarPackage writes regular files under srcPath into the tar
// stream tw, mapping each file to "src<path relative to srcPath>". Entries
// are skipped when their path contains ".git", when they live under
// excludeDir (relative to srcPath), or when their extension is not a key of
// includeFileTypeMap.
func WriteFolderToTarPackage(tw *tar.Writer, srcPath string, excludeDir string, includeFileTypeMap map[string]bool) error {
	rootDirectory := srcPath
	vmLogger.Infof("rootDirectory = %s", rootDirectory)

	//append "/" if necessary so the prefix comparison below matches whole
	//path segments only
	if excludeDir != "" && strings.LastIndex(excludeDir, "/") < len(excludeDir)-1 {
		excludeDir = excludeDir + "/"
	}

	rootDirLen := len(rootDirectory)
	walkFn := func(path string, info os.FileInfo, err error) error {
		// FIX: propagate walk errors. The original ignored err, and when
		// filepath.Walk reports an unreadable entry it passes info == nil,
		// so info.Mode() below would panic.
		if err != nil {
			return err
		}
		// If path includes .git, ignore
		if strings.Contains(path, ".git") {
			return nil
		}
		if info.Mode().IsDir() {
			return nil
		}
		//exclude any files with excludeDir prefix. They should already be
		//in the tar (+1 accounts for the "/" separator)
		if excludeDir != "" && strings.Index(path, excludeDir) == rootDirLen+1 {
			return nil
		}
		// skip the root directory entry itself
		if len(path[rootDirLen:]) == 0 {
			return nil
		}
		// we only want source files whose extension is listed in the map
		// (presence check, deliberately not the stored bool value, to keep
		// the original semantics)
		if _, ok := includeFileTypeMap[filepath.Ext(path)]; !ok {
			return nil
		}
		newPath := fmt.Sprintf("src%s", path[rootDirLen:])
		if err := WriteFileToPackage(path, newPath, tw); err != nil {
			return fmt.Errorf("Error writing file to package: %s", err)
		}
		return nil
	}

	if err := filepath.Walk(rootDirectory, walkFn); err != nil {
		vmLogger.Infof("Error walking rootDirectory: %s", err)
		return err
	}
	return nil
}
//WriteGopathSrc tars up files under gopath src, adds the TLS certificate
//when enabled, and finalizes the archive by closing tw.
func WriteGopathSrc(tw *tar.Writer, excludeDir string) error {
	gopath := os.Getenv("GOPATH")
	// Only take the first element of GOPATH. Guard against an unset or
	// empty GOPATH: SplitList("") returns an empty slice, so the original
	// unconditional [0] index would panic.
	gopathList := filepath.SplitList(gopath)
	if len(gopathList) == 0 {
		return fmt.Errorf("GOPATH is not set")
	}
	gopath = gopathList[0]

	rootDirectory := filepath.Join(gopath, "src")
	vmLogger.Infof("rootDirectory = %s", rootDirectory)

	if err := WriteFolderToTarPackage(tw, rootDirectory, excludeDir, fileTypes); err != nil {
		vmLogger.Errorf("Error writing folder to tar package %s", err)
		return err
	}

	// Add the certificates to tar
	if viper.GetBool("peer.tls.enabled") {
		err := WriteFileToPackage(viper.GetString("peer.tls.cert.file"), "src/certs/cert.pem", tw)
		if err != nil {
			return fmt.Errorf("Error writing cert file to package: %s", err)
		}
	}

	// Write the tar file out
	if err := tw.Close(); err != nil {
		return err
	}
	//ioutil.WriteFile("/tmp/chaincode_deployment.tar", inputbuf.Bytes(), 0644)
	return nil
}
// WriteJavaProjectToPackage adds a Java chaincode project's source files
// (java/properties/gradle) under srcPath to the tar stream, then finalizes
// the archive by closing tw.
func WriteJavaProjectToPackage(tw *tar.Writer, srcPath string) error {
	if err := WriteFolderToTarPackage(tw, srcPath, "", javaFileTypes); err != nil {
		vmLogger.Errorf("Error writing folder to tar package %s", err)
		return err
	}
	// Write the tar file out
	return tw.Close()
}
//WriteFileToPackage writes a file to the tarball
func WriteFileToPackage(localpath string, packagepath string, tw *tar.Writer) error {
	file, err := os.Open(localpath)
	if err != nil {
		return fmt.Errorf("%s: %s", localpath, err)
	}
	defer file.Close()

	reader := bufio.NewReader(file)
	return WriteStreamToPackage(reader, localpath, packagepath, tw)
}
//WriteStreamToPackage writes bytes (from a file reader) to the tarball.
//The tar header (size, mode) is taken from stat'ing localpath, while the
//entry is stored under packagepath.
func WriteStreamToPackage(is io.Reader, localpath string, packagepath string, tw *tar.Writer) error {
	info, err := os.Stat(localpath)
	if err != nil {
		return fmt.Errorf("%s: %s", localpath, err)
	}
	header, err := tar.FileInfoHeader(info, localpath)
	if err != nil {
		return fmt.Errorf("Error getting FileInfoHeader: %s", err)
	}

	//Let's take the variance out of the tar, make headers identical by using zero time
	oldname := header.Name
	var zeroTime time.Time
	header.AccessTime = zeroTime
	header.ModTime = zeroTime
	header.ChangeTime = zeroTime
	header.Name = packagepath

	if err = tw.WriteHeader(header); err != nil {
		return fmt.Errorf("Error write header for (path: %s, oldname:%s,newname:%s,sz:%d) : %s", localpath, oldname, packagepath, header.Size, err)
	}
	if _, err := io.Copy(tw, is); err != nil {
		return fmt.Errorf("Error copy (path: %s, oldname:%s,newname:%s,sz:%d) : %s", localpath, oldname, packagepath, header.Size, err)
	}
	return nil
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
aiven/resource_kafka_connect_test.go
|
// Copyright (c) 2017 jelmersnoeck
// Copyright (c) 2018-2021 Aiven, Helsinki, Finland. https://aiven.io/
package aiven
import (
"fmt"
"os"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)
// Kafka Connect service tests
//
// TestAccAiven_kafkaconnect creates a Kafka Connect service and verifies the
// resource and data-source attributes. Duplicated checks from the original
// (the attributes helper and the "state" attribute were each asserted twice
// with identical arguments) have been removed; they added no coverage.
func TestAccAiven_kafkaconnect(t *testing.T) {
	resourceName := "aiven_kafka_connect.bar"
	rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:          func() { testAccPreCheck(t) },
		ProviderFactories: testAccProviderFactories,
		CheckDestroy:      testAccCheckAivenServiceResourceDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccKafkaConnectResource(rName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAivenServiceKafkaConnectAttributes("data.aiven_kafka_connect.service"),
					resource.TestCheckResourceAttr(resourceName, "service_name", fmt.Sprintf("test-acc-sr-%s", rName)),
					resource.TestCheckResourceAttr(resourceName, "state", "RUNNING"),
					resource.TestCheckResourceAttr(resourceName, "project", os.Getenv("AIVEN_PROJECT_NAME")),
					resource.TestCheckResourceAttr(resourceName, "service_type", "kafka_connect"),
					resource.TestCheckResourceAttr(resourceName, "cloud_name", "google-europe-west1"),
					resource.TestCheckResourceAttr(resourceName, "maintenance_window_dow", "monday"),
					resource.TestCheckResourceAttr(resourceName, "maintenance_window_time", "10:00:00"),
					resource.TestCheckResourceAttr(resourceName, "termination_protection", "false"),
				),
			},
		},
	})
}
// testAccKafkaConnectResource renders the Terraform configuration for the
// acceptance test: a project data source, a Kafka Connect service named
// "test-acc-sr-<name>", and a data source reading that service back. The
// project comes from the AIVEN_PROJECT_NAME environment variable.
func testAccKafkaConnectResource(name string) string {
	return fmt.Sprintf(`
data "aiven_project" "foo" {
  project = "%s"
}

resource "aiven_kafka_connect" "bar" {
  project = data.aiven_project.foo.project
  cloud_name = "google-europe-west1"
  plan = "startup-4"
  service_name = "test-acc-sr-%s"
  maintenance_window_dow = "monday"
  maintenance_window_time = "10:00:00"

  kafka_connect_user_config {
    kafka_connect {
      consumer_isolation_level = "read_committed"
    }

    public_access {
      kafka_connect = true
    }
  }
}

data "aiven_kafka_connect" "service" {
  service_name = aiven_kafka_connect.bar.service_name
  project = aiven_kafka_connect.bar.project

  depends_on = [aiven_kafka_connect.bar]
}
`, os.Getenv("AIVEN_PROJECT_NAME"), name)
}
|
[
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\""
] |
[] |
[
"AIVEN_PROJECT_NAME"
] |
[]
|
["AIVEN_PROJECT_NAME"]
|
go
| 1 | 0 | |
nodemonitor/master_ui.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on 2017-12-22
@author: foxty
UI for master node
"""
import sys, os
import logging
import socket
from datetime import datetime, timedelta
from flask import Flask, request, render_template
from common import YAMLConfig, dump_json, set_logging
from model import TSDAgg, Agent, NSystemReport, NCPUReport, NMemoryReport, NDiskReport, \
SInfo, SInfoHistory, SPidstatReport, SJstatGCReport, init_db
from master_cli import NodeConnector
# Global configuration dict; populated by ui_main() before the app serves.
_CONFIG = None
# Flask app serving both the REST API and the pre-built SPA assets.
_APP = Flask(__name__,
             static_folder='../web/dist/',
             static_url_path='',
             template_folder='../web/dist/')
def calc_daterange(req):
    """Extract the (start, end) UTC datetime range from a request's
    'start_at'/'end_at' query parameters (epoch milliseconds).
    """
    start_ms = int(req.args.get('start_at'))
    end_ms = int(req.args.get('end_at'))
    to_utc = datetime.utcfromtimestamp
    return to_utc(start_ms / 1000), to_utc(end_ms / 1000)
# Candidate downsample intervals, in seconds, from 2 minutes up to 1 day.
_SAMPLE_INTERVAL = [120, 300, 600, 900, 1800, 3600, 2*3600, 6*3600, 12*3600, 24*3600]


def calc_downsample(start, end, agg, samples=60):
    """
    Calculate downsample for ts series.

    :param start: in seconds
    :param end: in seconds
    :param agg: aggregation algo for down sample (object exposing ``.value``)
    :param samples: desired number of samples across the range
    :return: a spec such as ``'300s-avg'``, or None when the range is too
             small for even the finest candidate interval
    """
    expect_interval = (end - start) / samples
    # FIX: wrap filter() in list() — on Python 3 filter() is a lazy iterator,
    # so the len()/[-1] below would raise TypeError. No-op on Python 2.
    candidate_intervals = list(filter(lambda x: x < expect_interval, _SAMPLE_INTERVAL))
    # pick the coarsest interval that still yields >= `samples` points
    interval = candidate_intervals[-1] if len(candidate_intervals) > 0 else None
    return '%ds-%s' % (interval, agg.value) if interval is not None else None
@_APP.errorhandler(Exception)
def exception_handler(error):
    """Catch-all handler: log the traceback and return a JSON 400 response.

    NOTE(review): relies on ``error.message``, which exists on Python 2
    exceptions only — confirm before porting to Python 3 (use str(error)).
    """
    logging.exception('unexpected error occurs')
    return dump_json({'code': 400, 'message': error.message}), 400, {'Content-Type': 'application/json'}
@_APP.route("/")
def index():
    """Serve the single-page app entry point."""
    return render_template('index.html')
@_APP.route('/api/dashboard/summary')
def dashboard_summary():
    """Return top-level counts shown on the dashboard landing page.

    alarm_count and sample_count are hard-coded to 0, as in the original.
    """
    return dump_json({
        'agent_count': Agent.count(),
        'service_count': SInfo.count(),
        'alarm_count': 0,
        'sample_count': 0,
    })
@_APP.route('/api/agents/by_load1')
def get_agents_byload1():
    """Return agents ordered by their 1-minute load average."""
    agents = Agent.query_by_load1()
    return dump_json(agents)
@_APP.route('/api/agents', methods=['GET'])
def get_agents():
    """List all agents (most recently heard-from first) plus the master address.

    An agent is 'active' when its last message arrived within 5 minutes.
    """
    master_host = os.getenv('MASTER_HOST', None) or socket.gethostname()
    master_port = _CONFIG['master']['server']['port']
    master_addr = '%s:%s' % (master_host, master_port)
    agents = Agent.query(orderby='last_msg_at DESC')
    active_since = datetime.utcnow() - timedelta(minutes=5)
    for agent in agents:
        if agent.last_msg_at and agent.last_msg_at >= active_since:
            agent.status = 'active'
        else:
            agent.status = 'inactive'
    return dump_json({'agents': agents, 'master_addr': master_addr})
@_APP.route('/api/agents', methods=['POST'])
def add_agent():
    """Install the monitoring agent on a remote host over SSH.

    Expects a JSON body with ``host``, ``master_addr``, ``username`` and
    ``password``. The unused ``connectType`` local from the original has
    been removed; ``connect_type`` was read but never used.
    """
    data = request.get_json()
    logging.info('request add agent: %s', data)
    nhost = data.get('host')
    mhost = data.get('master_addr')
    u = data.get('username')
    p = data.get('password')
    logging.info('install agent on %s@%s with master=%s', u, nhost, mhost)
    # install from this deployment's base directory
    basepath = os.path.dirname(sys.path[0])
    with NodeConnector(nhost, u, p) as nc:
        nc.install_agent(basepath, mhost)
    logging.info('agent installed on %s@%s finished.', u, nhost)
    return 'ok'
@_APP.route('/api/agents/<string:aid>', methods=['GET'])
def get_agent(aid):
    """Return a single agent record by its id."""
    agent = Agent.get_by_id(aid)
    return dump_json(agent)
@_APP.route('/api/agents/<string:aid>', methods=['DELETE'])
def del_agent(aid):
    """Uninstall the agent from its host over SSH and delete its record.

    Credentials come from the ``username``/``password`` query parameters.
    The unused ``connectType`` local from the original has been removed.
    """
    u = request.args.get('username')
    p = request.args.get('password')
    agent = Agent.get_by_id(aid)
    logging.info('remove agent on %s@%s', u, agent)
    with NodeConnector(agent.host, u, p) as nc:
        nc.remove_agent()
    agent.remove()
    logging.info('agent removed on %s@%s finished.', u, agent)
    return dump_json(agent)
@_APP.route('/api/agents/<aid>/report/system', methods=['GET'])
def get_agent_sysreports(aid):
    """Return system reports for an agent within the requested date range."""
    reports = NSystemReport.query_by_rtime(aid, *calc_daterange(request))
    return dump_json(reports)
@_APP.route('/api/agents/<aid>/report/cpu', methods=['GET'])
def get_agent_cpureports(aid):
    """Return CPU reports for an agent within the requested date range."""
    reports = NCPUReport.query_by_rtime(aid, *calc_daterange(request))
    return dump_json(reports)
@_APP.route('/api/agents/<aid>/report/memory', methods=['GET'])
def get_agent_memreports(aid):
    """Return memory reports for an agent within the requested date range."""
    reports = NMemoryReport.query_by_rtime(aid, *calc_daterange(request))
    return dump_json(reports)
@_APP.route('/api/agents/<aid>/report/disk', methods=['GET'])
def get_agent_diskreports(aid):
    """Return disk reports for an agent within the requested date range."""
    reports = NDiskReport.query_by_rtime(aid, *calc_daterange(request))
    return dump_json(reports)
@_APP.route('/api/agents/<string:aid>/services')
def get_agent_services(aid):
    """Return the agent's services plus their latest pidstat report, keyed by service id."""
    services = SInfo.query_by_aid(aid)
    status_map = {report.service_id: report for report in SPidstatReport.lst_report_by_aid(aid, len(services))}
    return dump_json({'services': services, 'services_status_map': status_map})
@_APP.route('/api/agents/<string:aid>/services/<string:service_id>')
def get_service_info(aid, service_id):
    """Return a service record plus its history within the requested date range."""
    service = SInfo.byid(service_id)
    start, end = calc_daterange(request)
    service_history = SInfoHistory.query_by_rtime(service_id, start, end)
    return dump_json({'service': service, 'service_history': service_history})
@_APP.route('/api/agents/<aid>/services/<service_id>/report/pidstat',
            methods=['GET'])
def get_service_pidstats(aid, service_id):
    """Return pidstat reports for a service within the requested date range."""
    reports = SPidstatReport.query_by_rtime(service_id, *calc_daterange(request))
    return dump_json(reports)
@_APP.route('/api/agents/<aid>/services/<service_id>/report/jstatgc',
            methods=['GET'])
def get_service_jstatgc(aid, service_id):
    """Return jstat GC reports plus aggregated GC stats for a service.

    Counters reset when the JVM restarts, so a report whose ts is smaller
    than its predecessor's marks a restart boundary; the report just before
    each boundary is an "end report" for that JVM incarnation.

    NOTE(review): uses the builtin ``reduce`` — Python 2 only; Python 3
    needs functools.reduce.
    """
    start, end = calc_daterange(request)
    reports = SJstatGCReport.query_by_rtime(service_id, start, end)
    # shistory = SInfoHistory.query_by_rtime(service_id, start, end)
    # calculate gc stats and memory stats
    gcstat_recent, gcstat_range = None, None
    if reports:
        end_reps = []
        for i, rep in enumerate(reports):
            # ts went backwards => JVM restarted; previous report closes an incarnation
            if i > 1 and rep.ts < reports[i-1].ts:
                end_reps.append(reports[i-1])
        end_reps.append(reports[-1])
        # 1st end report - start report to remove data beyond the range
        end_reps[0] = end_reps[0] - reports[0]
        # sum the per-incarnation deltas into one report covering the range
        range_rep = reduce(lambda acc, r: acc + r, end_reps)
        final_rep = reports[-1]
        gcstat_range = range_rep.to_gcstat('range')
        gcstat_recent = final_rep.to_gcstat('recent')
    return dump_json({'reports': reports, 'gcstats': [gcstat_range, gcstat_recent]})
def ui_main(config, debug=False):
    """Initialize logging, the database and the Flask app, then serve the UI.

    :param config: YAMLConfig with 'master' (database) and 'ui' (server) sections
    :param debug: run Flask in debug mode
    """
    set_logging('ui.log')
    logging.info('starting manger ui...')
    dbcfg = config['master']['database']
    init_db(dbcfg)
    global _CONFIG
    _CONFIG = config
    # use {- -} delimiters so Jinja does not clash with the SPA's {{ }} syntax
    _APP.jinja_env.variable_start_string = '{-'
    _APP.jinja_env.variable_end_string = '-}'
    _APP.jinja_env.auto_reload = True
    _APP.config['TEMPLATES_AUTO_RELOAD'] = True
    servercfg = config['ui']['server']
    _APP.run(host=servercfg['host'], port=servercfg['port'], debug=debug)
if __name__ == '__main__':
    # Resolve the deployment base directory and boot the UI with master.yaml.
    basepath = os.path.dirname(sys.path[0])
    ui_main(YAMLConfig(os.path.join(basepath, 'conf', 'master.yaml')), debug=True)
|
[] |
[] |
[
"MASTER_HOST"
] |
[]
|
["MASTER_HOST"]
|
python
| 1 | 0 | |
internal/options/options.go
|
package options
import (
"os"
"path/filepath"
"github.com/spf13/pflag"
)
// khmo holds the process-wide option set, populated from command-line
// flags at package init time.
var khmo = &K8sHttpMultiplexerOptions{}

func init() {
	// parse flags once at startup so GetK8sHttpMultiplexerOptions always
	// returns a fully populated struct
	khmo.addFlags(pflag.CommandLine)
	pflag.Parse()
}
// GetK8sHttpMultiplexerOptions returns a pointer to the process-wide
// K8sHttpMultiplexerOptions parsed from command-line flags at init time.
func GetK8sHttpMultiplexerOptions() *K8sHttpMultiplexerOptions {
	return khmo
}
// K8sHttpMultiplexerOptions contains frequent command line and application options.
type K8sHttpMultiplexerOptions struct {
	// KubeConfigPath is the path of the kubeconfig file to access the cluster
	KubeConfigPath string
	// ConfigFilePath is the path of the application to properly run
	ConfigFilePath string
	// InCluster is the boolean variable if k8s-http-multiplexer is running inside k8s cluster or not
	InCluster bool
}
// addFlags registers the command-line flags that back each option field.
func (khmo *K8sHttpMultiplexerOptions) addFlags(fs *pflag.FlagSet) {
	// NOTE(review): os.Getenv("HOME") is empty on Windows — confirm whether
	// non-POSIX platforms must be supported before relying on this default.
	fs.StringVar(&khmo.KubeConfigPath, "kubeConfigPath", filepath.Join(os.Getenv("HOME"), ".kube", "config"),
		"absolute path of the kubeconfig file, required when non inCluster environment")
	fs.StringVar(&khmo.ConfigFilePath, "configFilePath", "../../config/sample.yaml",
		"path of the configuration file")
	fs.BoolVar(&khmo.InCluster, "inCluster", true,
		"boolean variable if k8s-http-multiplexer is running inside k8s cluster or not, required for debugging "+
			"purpose")
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
test/com/facebook/buck/parser/ParserTest.java
|
/*
* Copyright 2015-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.parser;
import static com.facebook.buck.parser.ParserConfig.DEFAULT_BUILD_FILE_NAME;
import static com.facebook.buck.testutil.WatchEventsForTests.createPathEvent;
import static com.google.common.base.Charsets.UTF_8;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasItems;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import com.facebook.buck.cli.BuckConfig;
import com.facebook.buck.cli.FakeBuckConfig;
import com.facebook.buck.event.BuckEventBus;
import com.facebook.buck.event.BuckEventBusFactory;
import com.facebook.buck.event.FakeBuckEventListener;
import com.facebook.buck.event.listener.BroadcastEventListener;
import com.facebook.buck.io.MorePaths;
import com.facebook.buck.io.ProjectFilesystem;
import com.facebook.buck.json.BuildFileParseException;
import com.facebook.buck.json.ParseBuckFileEvent;
import com.facebook.buck.jvm.java.JavaLibrary;
import com.facebook.buck.model.BuildTarget;
import com.facebook.buck.model.BuildTargetException;
import com.facebook.buck.model.BuildTargetFactory;
import com.facebook.buck.model.HasBuildTarget;
import com.facebook.buck.model.ImmutableFlavor;
import com.facebook.buck.model.UnflavoredBuildTarget;
import com.facebook.buck.rules.ActionGraphCache;
import com.facebook.buck.rules.BuildRule;
import com.facebook.buck.rules.BuildRuleResolver;
import com.facebook.buck.rules.Cell;
import com.facebook.buck.rules.ConstructorArgMarshaller;
import com.facebook.buck.rules.TargetGraph;
import com.facebook.buck.rules.TargetNode;
import com.facebook.buck.rules.TestCellBuilder;
import com.facebook.buck.rules.coercer.DefaultTypeCoercerFactory;
import com.facebook.buck.shell.GenruleDescription;
import com.facebook.buck.testutil.WatchEventsForTests;
import com.facebook.buck.testutil.integration.TemporaryPaths;
import com.facebook.buck.testutil.integration.TestDataHelper;
import com.facebook.buck.util.HumanReadableException;
import com.facebook.buck.util.MoreCollectors;
import com.facebook.buck.util.ObjectMappers;
import com.facebook.buck.util.environment.Platform;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.collect.Iterables;
import com.google.common.eventbus.Subscribe;
import com.google.common.hash.HashCode;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import org.hamcrest.Matchers;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardWatchEventKinds;
import java.nio.file.WatchEvent;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.Optional;
import java.util.SortedMap;
import java.util.concurrent.Executors;
@RunWith(Parameterized.class)
public class ParserTest {

  @Rule
  public TemporaryPaths tempDir = new TemporaryPaths();
  @Rule
  public ExpectedException thrown = ExpectedException.none();

  // Parameterized inputs: parser thread count and whether parallel parsing is enabled.
  private final int threads;
  private final boolean parallelParsing;

  // Fixture files created under tempDir in setUp().
  private Path defaultIncludeFile;
  private Path includedByIncludeFile;
  private Path includedByBuildFile;
  private Path testBuildFile;

  // Objects under test, rebuilt for every test by setUp().
  private Parser parser;
  private ProjectFilesystem filesystem;
  private Path cellRoot;
  private BuckEventBus eventBus;
  private Cell cell;
  private ParseEventStartedCounter counter;
  private ListeningExecutorService executorService;
  /** Stores the parameterized thread count / parallel-parsing flag for this run. */
  public ParserTest(int threads, boolean parallelParsing) {
    this.threads = threads;
    this.parallelParsing = parallelParsing;
  }
@Parameterized.Parameters
public static Collection<Object[]> generateData() {
return Arrays.asList(new Object[][] {
{ 1, false, },
{ 1, true, },
{ 2, true, },
});
}
  /**
   * Helper to construct a PerBuildState and use it to get nodes.
   * The state is closed before returning so each call is self-contained.
   */
  @VisibleForTesting
  private static ImmutableSet<Map<String, Object>> getRawTargetNodes(
      Parser parser,
      BuckEventBus eventBus,
      Cell cell,
      boolean enableProfiling,
      ListeningExecutorService executor,
      Path buildFile) throws InterruptedException, BuildFileParseException {
    try (
        PerBuildState state =
            new PerBuildState(
                parser,
                eventBus,
                executor,
                cell,
                enableProfiling,
                SpeculativeParsing.of(false),
                /* ignoreBuckAutodepsFiles */ false)) {
      return Parser.getRawTargetNodes(state, cell, buildFile);
    }
  }
  /**
   * Builds the per-test fixture: a temp project tree with include files and a
   * BUCK file defining foo/bar/baz, plus a configured Cell, Parser, event bus
   * and executor sized by the parameterized thread count.
   */
  @Before
  public void setUp() throws IOException, InterruptedException {
    tempDir.newFolder("java", "com", "facebook");

    defaultIncludeFile = tempDir.newFile(
        "java/com/facebook/defaultIncludeFile").toRealPath();
    Files.write(defaultIncludeFile, "\n".getBytes(UTF_8));

    includedByIncludeFile = tempDir.newFile(
        "java/com/facebook/includedByIncludeFile").toRealPath();
    Files.write(includedByIncludeFile, "\n".getBytes(UTF_8));

    includedByBuildFile = tempDir.newFile(
        "java/com/facebook/includedByBuildFile").toRealPath();
    Files.write(
        includedByBuildFile,
        "include_defs('//java/com/facebook/includedByIncludeFile')\n".getBytes(UTF_8));

    testBuildFile = tempDir.newFile("java/com/facebook/BUCK").toRealPath();
    Files.write(
        testBuildFile,
        ("include_defs('//java/com/facebook/includedByBuildFile')\n" +
            "java_library(name = 'foo')\n" +
            "java_library(name = 'bar')\n" +
            "genrule(name = 'baz', out = '')\n").getBytes(UTF_8));

    tempDir.newFile("bar.py");

    // Create a temp directory with some build files.
    Path root = tempDir.getRoot().toRealPath();
    filesystem = new ProjectFilesystem(root);
    cellRoot = filesystem.getRootPath();
    eventBus = BuckEventBusFactory.newInstance();

    ImmutableMap.Builder<String, ImmutableMap<String, String>> configSectionsBuilder =
        ImmutableMap.builder();
    configSectionsBuilder
        .put("buildfile", ImmutableMap.of("includes", "//java/com/facebook/defaultIncludeFile"));
    if (parallelParsing) {
      configSectionsBuilder.put(
          "project",
          ImmutableMap.of(
              "temp_files", ".*\\.swp$",
              "parallel_parsing", "true",
              "parsing_threads", Integer.toString(threads)));
    } else {
      configSectionsBuilder.put("project", ImmutableMap.of("temp_files", ".*\\.swp$"));
    }
    configSectionsBuilder.put("unknown_flavors_messages",
        ImmutableMap.of("macosx*", "This is an error message read by the .buckconfig"));
    BuckConfig config = FakeBuckConfig.builder()
        .setFilesystem(filesystem)
        .setSections(configSectionsBuilder.build())
        .build();
    cell = new TestCellBuilder()
        .setFilesystem(filesystem)
        .setBuckConfig(config)
        .build();
    DefaultTypeCoercerFactory typeCoercerFactory = new DefaultTypeCoercerFactory(
        ObjectMappers.newDefaultInstance());
    BroadcastEventListener broadcastEventListener = new BroadcastEventListener();
    broadcastEventListener.addEventBus(eventBus);
    parser = new Parser(
        broadcastEventListener,
        cell.getBuckConfig().getView(ParserConfig.class),
        typeCoercerFactory,
        new ConstructorArgMarshaller(typeCoercerFactory));
    counter = new ParseEventStartedCounter();
    eventBus.register(counter);

    executorService = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(threads));
  }
  /** Shuts down the per-test executor created in setUp(). */
  @After
  public void tearDown() {
    executorService.shutdown();
  }
  /**
   * Verifies that two targets from the same build file resolve to rules while
   * the build file is parsed only once (a single ParseEvent pair is emitted).
   */
  @Test
  @SuppressWarnings("unchecked")
  public void testParseBuildFilesForTargetsWithOverlappingTargets() throws Exception {
    // Execute buildTargetGraphForBuildTargets() with multiple targets that require parsing the same
    // build file.
    BuildTarget fooTarget = BuildTarget.builder(cellRoot, "//java/com/facebook", "foo").build();
    BuildTarget barTarget = BuildTarget.builder(cellRoot, "//java/com/facebook", "bar").build();
    Iterable<BuildTarget> buildTargets = ImmutableList.of(fooTarget, barTarget);

    // The EventBus should be updated with events indicating how parsing ran.
    FakeBuckEventListener listener = new FakeBuckEventListener();
    eventBus.register(listener);

    TargetGraph targetGraph = parser.buildTargetGraph(
        eventBus,
        cell,
        false,
        executorService,
        buildTargets);
    BuildRuleResolver resolver = buildActionGraph(eventBus, targetGraph);
    BuildRule fooRule = resolver.requireRule(fooTarget);
    assertNotNull(fooRule);
    BuildRule barRule = resolver.requireRule(barTarget);
    assertNotNull(barRule);

    Iterable<ParseEvent> events = Iterables.filter(listener.getEvents(), ParseEvent.class);
    assertThat(events, Matchers.contains(
        Matchers.hasProperty("buildTargets", equalTo(buildTargets)),
        Matchers.allOf(
            Matchers.hasProperty("buildTargets", equalTo(buildTargets)),
            Matchers.hasProperty("graph", equalTo(Optional.of(targetGraph)))
        )));
  }
  /** A target missing from an otherwise valid build file must raise a descriptive error. */
  @Test
  public void testMissingBuildRuleInValidFile()
      throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
    // Execute buildTargetGraphForBuildTargets() with a target in a valid file but a bad rule name.
    BuildTarget fooTarget = BuildTarget.builder(cellRoot, "//java/com/facebook", "foo").build();
    BuildTarget razTarget = BuildTarget.builder(cellRoot, "//java/com/facebook", "raz").build();
    Iterable<BuildTarget> buildTargets = ImmutableList.of(fooTarget, razTarget);

    thrown.expectMessage(
        "No rule found when resolving target //java/com/facebook:raz in build file " +
            "//java/com/facebook/BUCK");
    thrown.expectMessage(
        "Defined in file: " +
            filesystem.resolve(razTarget.getBasePath()).resolve(DEFAULT_BUILD_FILE_NAME));

    parser.buildTargetGraph(
        eventBus,
        cell,
        false,
        executorService,
        buildTargets);
  }
  /** A target whose build file does not exist must raise MissingBuildFileException. */
  @Test
  public void testMissingBuildFile()
      throws InterruptedException, BuildFileParseException, IOException, BuildTargetException {
    BuildTarget target = BuildTarget.builder(cellRoot, "//path/to/nowhere", "nowhere").build();
    Iterable<BuildTarget> buildTargets = ImmutableList.of(target);

    thrown.expect(Cell.MissingBuildFileException.class);
    thrown.expectMessage(
        String.format(
            "No build file at %s when resolving target //path/to/nowhere:nowhere",
            Paths.get("path", "to", "nowhere", "BUCK").toString()));

    parser.buildTargetGraph(
        eventBus,
        cell,
        false,
        executorService,
        buildTargets);
  }
  /** A malformed target reference inside a rule argument must surface as a readable error. */
  @Test
  public void shouldThrowAnExceptionIfConstructorArgMashallingFails()
      throws IOException, BuildFileParseException, InterruptedException {
    thrown.expect(HumanReadableException.class);
    thrown.expectMessage("found ////cake:walk");

    Path buckFile = cellRoot.resolve("BUCK");
    Files.write(
        buckFile,
        "genrule(name = 'cake', out = 'file.txt', cmd = '$(exe ////cake:walk) > $OUT')"
            .getBytes(UTF_8));

    parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile);
  }
  /** A dependency living in an unparseable build file must report a parse error for that file. */
  @Test
  public void shouldThrowAnExceptionIfADepIsInAFileThatCannotBeParsed()
      throws IOException, InterruptedException, BuildTargetException, BuildFileParseException {
    thrown.expectMessage("Parse error for build file");
    thrown.expectMessage(Paths.get("foo/BUCK").toString());

    Path buckFile = cellRoot.resolve("BUCK");
    Files.write(
        buckFile,
        "genrule(name = 'cake', out = 'foo.txt', cmd = '$(exe //foo:bar) > $OUT')".getBytes(UTF_8));

    buckFile = cellRoot.resolve("foo/BUCK");
    Files.createDirectories(buckFile.getParent());
    Files.write(
        buckFile,
        "I do not parse as python".getBytes(UTF_8));

    parser.buildTargetGraph(
        eventBus,
        cell,
        false,
        executorService,
        Collections.singleton(BuildTargetFactory.newInstance(cell.getFilesystem(), "//:cake")));
  }
  /** Two rules with the same name in one build file must be rejected as duplicates. */
  @Test
  public void shouldThrowAnExceptionIfMultipleTargetsAreDefinedWithTheSameName()
      throws IOException, BuildFileParseException, InterruptedException {
    thrown.expect(BuildFileParseException.class);
    thrown.expectMessage("Duplicate rule definition found.");

    Path buckFile = cellRoot.resolve("BUCK");
    Files.write(
        buckFile,
        ("export_file(name = 'cake', src = 'hello.txt')\n" +
            "genrule(name = 'cake', out = 'file.txt', cmd = 'touch $OUT')\n").getBytes(UTF_8));

    parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile);
  }
// A rule whose 'name' is the Python value None (not a string) must fail to parse with a
// message describing the type error.
@Test
public void shouldThrowAnExceptionIfNameIsNone()
    throws IOException, BuildFileParseException, InterruptedException {
  thrown.expect(BuildFileParseException.class);
  thrown.expectMessage("rules 'name' field must be a string. Found None.");
  String ruleWithNoneName = "genrule(name = None, out = 'file.txt', cmd = 'touch $OUT')\n";
  Path rootBuckFile = cellRoot.resolve("BUCK");
  Files.write(rootBuckFile, ruleWithNoneName.getBytes(UTF_8));
  parser.getAllTargetNodes(eventBus, cell, false, executorService, rootBuckFile);
}
// Asking for a target with a flavor that no rule understands must fail with a
// HumanReadableException naming the flavored target and the build file being parsed.
@Test
public void shouldThrowAnExceptionWhenAnUnknownFlavorIsSeen()
throws BuildFileParseException, BuildTargetException, InterruptedException, IOException {
BuildTarget flavored = BuildTarget.builder(cellRoot, "//java/com/facebook", "foo")
.addFlavors(ImmutableFlavor.of("doesNotExist"))
.build();
thrown.expect(HumanReadableException.class);
thrown.expectMessage(
"Unrecognized flavor in target //java/com/facebook:foo#doesNotExist while parsing " +
"//java/com/facebook/BUCK");
parser.buildTargetGraph(
eventBus,
cell,
false,
executorService,
ImmutableSortedSet.of(flavored));
}
// An unknown flavor matching a known prefix ("android-*") must additionally surface the
// built-in suggestion text pointing the user at Android SDK/NDK setup.
@Test
public void shouldThrowAnExceptionWhenAnUnknownFlavorIsSeenAndShowSuggestionsDefault()
throws BuildFileParseException, BuildTargetException, InterruptedException, IOException {
BuildTarget flavored = BuildTarget.builder(cellRoot, "//java/com/facebook", "foo")
.addFlavors(ImmutableFlavor.of("android-unknown"))
.build();
thrown.expect(HumanReadableException.class);
thrown.expectMessage(
"Unrecognized flavor in target //java/com/facebook:foo#android-unknown while parsing " +
"//java/com/facebook/BUCK\nHere are some things you can try to get the following " +
"flavors to work::\nandroid-unknown : Make sure you have the Android SDK/NDK " +
"installed and set up. " +
"See https://buckbuild.com/setup/install.html#locate-android-sdk\n");
parser.buildTargetGraph(
eventBus,
cell,
false,
executorService,
ImmutableSortedSet.of(flavored));
}
// Like the default-suggestions case, but the suggestion text for "macosx109sdk" is expected
// to come from the .buckconfig used by this test fixture rather than a built-in message.
@Test
public void shouldThrowAnExceptionWhenAnUnknownFlavorIsSeenAndShowSuggestionsFromConfig()
throws BuildFileParseException, BuildTargetException, InterruptedException, IOException {
BuildTarget flavored = BuildTarget.builder(cellRoot, "//java/com/facebook", "foo")
.addFlavors(ImmutableFlavor.of("macosx109sdk"))
.build();
thrown.expect(HumanReadableException.class);
thrown.expectMessage(
"Unrecognized flavor in target //java/com/facebook:foo#macosx109sdk while parsing " +
"//java/com/facebook/BUCK\nHere are some things you can try to get the following " +
"flavors to work::\nmacosx109sdk : This is an error message read by the .buckconfig");
parser.buildTargetGraph(
eventBus,
cell,
false,
executorService,
ImmutableSortedSet.of(flavored));
}
// Applying a flavor (src) to a rule type that is not flavorable (genrule) must produce a
// HumanReadableException naming the target, its type, and the rejected flavors.
@Test
public void shouldThrowAnExceptionWhenAFlavorIsAskedOfATargetThatDoesntSupportFlavors()
throws BuildFileParseException, BuildTargetException, InterruptedException, IOException {
BuildTarget flavored = BuildTarget.builder(cellRoot, "//java/com/facebook", "baz")
.addFlavors(JavaLibrary.SRC_JAR)
.build();
thrown.expect(HumanReadableException.class);
thrown.expectMessage(
"Target //java/com/facebook:baz (type genrule) does not currently support flavors " +
"(tried [src])");
parser.buildTargetGraph(
eventBus,
cell,
false,
executorService,
ImmutableSortedSet.of(flavored));
}
// A dep whose build file exists but does not define the referenced rule must fail with a
// message naming both the missing dependency and the target that asked for it.
@Test
public void testInvalidDepFromValidFile()
throws IOException, BuildFileParseException, BuildTargetException, InterruptedException {
// Ensure an exception with a specific message is thrown.
thrown.expect(HumanReadableException.class);
thrown.expectMessage(
"Couldn't get dependency '//java/com/facebook/invalid/lib:missing_rule' of target " +
"'//java/com/facebook/invalid:foo'");
// Execute buildTargetGraphForBuildTargets() with a target in a valid file but a bad rule name.
tempDir.newFolder("java", "com", "facebook", "invalid");
Path testInvalidBuildFile = tempDir.newFile("java/com/facebook/invalid/BUCK");
Files.write(
testInvalidBuildFile,
("java_library(name = 'foo', deps = ['//java/com/facebook/invalid/lib:missing_rule'])\n" +
"java_library(name = 'bar')\n").getBytes(UTF_8));
// The lib package exists with an empty build file, so the rule (not the file) is missing.
tempDir.newFolder("java", "com", "facebook", "invalid", "lib")
tempDir.newFile("java/com/facebook/invalid/lib/BUCK");
BuildTarget fooTarget =
BuildTarget.builder(cellRoot, "//java/com/facebook/invalid", "foo").build();
Iterable<BuildTarget> buildTargets = ImmutableList.of(fooTarget);
parser.buildTargetGraph(
eventBus,
cell,
false,
executorService,
buildTargets);
}
// A pass-everything filter over the whole project must return every rule the fixture
// defines under //java/com/facebook.
@Test
public void whenAllRulesRequestedWithTrueFilterThenMultipleRulesReturned()
    throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
  ImmutableSet<BuildTarget> expectedTargets = ImmutableSet.of(
      BuildTarget.builder(cellRoot, "//java/com/facebook", "foo").build(),
      BuildTarget.builder(cellRoot, "//java/com/facebook", "bar").build(),
      BuildTarget.builder(cellRoot, "//java/com/facebook", "baz").build());
  ImmutableSet<BuildTarget> actualTargets =
      filterAllTargetsInProject(
          parser, cell, x -> true, BuckEventBusFactory.newInstance(), executorService);
  assertEquals("Should have returned all rules.", expectedTargets, actualTargets);
}
// Parsing the whole project twice in a row must hit the parser cache on the second pass,
// so the instrumented counter records exactly one real parse.
@Test
public void whenAllRulesAreRequestedMultipleTimesThenRulesAreOnlyParsedOnce()
    throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
  for (int pass = 0; pass < 2; pass++) {
    filterAllTargetsInProject(parser, cell, x -> true, eventBus, executorService);
  }
  assertEquals("Should have cached build rules.", 1, counter.calls);
}
// A watch OVERFLOW event carries no path information, so the parser must conservatively
// drop its entire cache and reparse on the next request.
@Test
public void whenNotifiedOfNonPathEventThenCacheRulesAreInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call filterAllTargetsInProject to populate the cache.
filterAllTargetsInProject(
parser,
cell,
x -> true,
eventBus,
executorService);
// Process event.
WatchEvent<Object> event = WatchEventsForTests.createOverflowEvent();
parser.onFileSystemChange(event);
// Call filterAllTargetsInProject to request cached rules.
filterAllTargetsInProject(
parser,
cell,
x -> true,
eventBus,
executorService);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// After an OVERFLOW wiped the cache once, ordinary per-path invalidation must still work:
// a subsequent file-creation event invalidates again (three parses total).
@Test
public void pathInvalidationWorksAfterOverflow() throws Exception {
// Call filterAllTargetsInProject to populate the cache.
filterAllTargetsInProject(
parser,
cell,
x -> true,
eventBus,
executorService);
// Send overflow event.
parser.onFileSystemChange(WatchEventsForTests.createOverflowEvent());
// Call filterAllTargetsInProject to request cached rules.
filterAllTargetsInProject(
parser,
cell,
x -> true,
eventBus,
executorService);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
// Send a "file added" event.
parser.onFileSystemChange(
createPathEvent(
Paths.get("java/com/facebook/Something.java"),
StandardWatchEventKinds.ENTRY_CREATE));
// Call filterAllTargetsInProject to request cached rules.
filterAllTargetsInProject(
parser,
cell,
x -> true,
eventBus,
executorService);
// Test that the third parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 3, counter.calls);
}
// Changing an environment variable visible to build files ("Some Key") between runs must
// invalidate the cache; PATH is passed through unchanged so only the test key differs.
@Test
public void whenEnvironmentChangesThenCacheRulesAreInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
BuckConfig config = FakeBuckConfig.builder()
.setFilesystem(filesystem)
.setEnvironment(ImmutableMap.of("Some Key", "Some Value", "PATH", System.getenv("PATH")))
.build();
Cell cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build();
// Call filterAllTargetsInProject to populate the cache.
filterAllTargetsInProject(
parser,
cell,
x -> true,
eventBus,
executorService);
// Call filterAllTargetsInProject to request cached rules.
config = FakeBuckConfig.builder()
.setFilesystem(filesystem)
.setEnvironment(
ImmutableMap.of("Some Key", "Some Other Value", "PATH", System.getenv("PATH")))
.build();
cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build();
filterAllTargetsInProject(
parser,
cell,
x -> true,
eventBus,
executorService);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// Counterpart to the environment-change test: an identical environment across two runs
// must keep the cache warm (exactly one parse).
@Test
public void whenEnvironmentNotChangedThenCacheRulesAreNotInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
BuckConfig config = FakeBuckConfig.builder()
.setFilesystem(filesystem)
.setEnvironment(ImmutableMap.of("Some Key", "Some Value", "PATH", System.getenv("PATH")))
.build();
Cell cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build();
// Call filterAllTargetsInProject to populate the cache.
filterAllTargetsInProject(
parser,
cell,
x -> true,
eventBus,
executorService);
// Call filterAllTargetsInProject to request cached rules with identical environment.
filterAllTargetsInProject(
parser,
cell,
x -> true,
eventBus,
executorService);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should not have invalidated cache.", 1, counter.calls);
}
// ENTRY_CREATE on the build file itself must invalidate its cached parse result.
@Test
public void whenNotifiedOfBuildFileAddThenCacheRulesAreInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(
MorePaths.relativize(tempDir.getRoot().toRealPath(), testBuildFile),
StandardWatchEventKinds.ENTRY_CREATE);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// ENTRY_MODIFY on the build file itself must invalidate its cached parse result.
@Test
public void whenNotifiedOfBuildFileChangeThenCacheRulesAreInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(
MorePaths.relativize(tempDir.getRoot().toRealPath(), testBuildFile),
StandardWatchEventKinds.ENTRY_MODIFY);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// ENTRY_DELETE on the build file itself must invalidate its cached parse result.
@Test
public void whenNotifiedOfBuildFileDeleteThenCacheRulesAreInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(
MorePaths.relativize(tempDir.getRoot().toRealPath(), testBuildFile),
StandardWatchEventKinds.ENTRY_DELETE);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// ENTRY_CREATE on a file the build file include()s must invalidate the including
// build file's cached parse result.
@Test
public void whenNotifiedOfIncludeFileAddThenCacheRulesAreInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(
MorePaths.relativize(tempDir.getRoot().toRealPath(), includedByBuildFile),
StandardWatchEventKinds.ENTRY_CREATE);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// ENTRY_MODIFY on an included file must invalidate the including build file's cache.
@Test
public void whenNotifiedOfIncludeFileChangeThenCacheRulesAreInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
assertEquals("Should have parsed at all.", 1, counter.calls);
// Process event.
WatchEvent<Path> event = createPathEvent(
MorePaths.relativize(tempDir.getRoot().toRealPath(), includedByBuildFile),
StandardWatchEventKinds.ENTRY_MODIFY);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// ENTRY_DELETE on an included file must invalidate the including build file's cache.
@Test
public void whenNotifiedOfIncludeFileDeleteThenCacheRulesAreInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(
MorePaths.relativize(tempDir.getRoot().toRealPath(), includedByBuildFile),
StandardWatchEventKinds.ENTRY_DELETE);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// Invalidation must be transitive: ENTRY_CREATE on a file included by an include file
// (two levels removed from the build file) still invalidates the build file's cache.
@Test
public void whenNotifiedOf2ndOrderIncludeFileAddThenCacheRulesAreInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(
MorePaths.relativize(tempDir.getRoot().toRealPath(), includedByIncludeFile),
StandardWatchEventKinds.ENTRY_CREATE);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// Transitive invalidation, ENTRY_MODIFY variant: modifying a second-order include
// invalidates the build file's cache.
@Test
public void whenNotifiedOf2ndOrderIncludeFileChangeThenCacheRulesAreInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(
MorePaths.relativize(tempDir.getRoot().toRealPath(), includedByIncludeFile),
StandardWatchEventKinds.ENTRY_MODIFY);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// Transitive invalidation, ENTRY_DELETE variant: deleting a second-order include
// invalidates the build file's cache.
@Test
public void whenNotifiedOf2ndOrderIncludeFileDeleteThenCacheRulesAreInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(
MorePaths.relativize(tempDir.getRoot().toRealPath(), includedByIncludeFile),
StandardWatchEventKinds.ENTRY_DELETE);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// ENTRY_CREATE on a buckconfig-level default include must invalidate cached build files,
// since default includes are implicitly in scope for every build file.
@Test
public void whenNotifiedOfDefaultIncludeFileAddThenCacheRulesAreInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(
MorePaths.relativize(tempDir.getRoot().toRealPath(), defaultIncludeFile),
StandardWatchEventKinds.ENTRY_CREATE);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// Default-include invalidation, ENTRY_MODIFY variant.
@Test
public void whenNotifiedOfDefaultIncludeFileChangeThenCacheRulesAreInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(
MorePaths.relativize(tempDir.getRoot().toRealPath(), defaultIncludeFile),
StandardWatchEventKinds.ENTRY_MODIFY);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// Default-include invalidation, ENTRY_DELETE variant.
@Test
public void whenNotifiedOfDefaultIncludeFileDeleteThenCacheRulesAreInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(
MorePaths.relativize(tempDir.getRoot().toRealPath(), defaultIncludeFile),
StandardWatchEventKinds.ENTRY_DELETE);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// Adding any file inside a package directory currently invalidates that package's cache,
// because the new file could change glob() results. The TODO below tracks narrowing this.
@Test
// TODO(shs96c): avoid invalidation when arbitrary contained (possibly backup) files are added.
public void whenNotifiedOfContainedFileAddThenCacheRulesAreInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(
Paths.get("java/com/facebook/SomeClass.java"),
StandardWatchEventKinds.ENTRY_CREATE);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// With package-boundary checking disabled in .buckconfig, a file added deep under an
// ancestor package (java/) must invalidate that ancestor build file's cache too.
@Test
public void whenNotifiedOfContainedFileAddCachedAncestorsAreInvalidatedWithoutBoundaryChecks()
throws Exception {
BuckConfig config = FakeBuckConfig.builder()
.setFilesystem(filesystem)
.setSections(
"[buildfile]",
"includes = //java/com/facebook/defaultIncludeFile",
"[project]",
"check_package_boundary = false",
"temp_files = ''")
.build();
Cell cell = new TestCellBuilder()
.setFilesystem(filesystem)
.setBuckConfig(config)
.build();
// Ancestor package: java/BUCK sits above java/com/facebook.
Path testAncestorBuildFile = tempDir.newFile("java/BUCK").toRealPath();
Files.write(testAncestorBuildFile, "java_library(name = 'root')\n".getBytes(UTF_8));
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testAncestorBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(Paths.get("java/com/facebook/SomeClass.java"),
StandardWatchEventKinds.ENTRY_CREATE);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testAncestorBuildFile);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// Modifying an existing source file cannot change glob() results, so the package's
// cached parse result must survive (exactly one parse).
@Test
public void whenNotifiedOfContainedFileChangeThenCacheRulesAreNotInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(Paths.get("java/com/facebook/SomeClass.java"),
StandardWatchEventKinds.ENTRY_MODIFY);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call did not repopulate the cache.
assertEquals("Should have not invalidated cache.", 1, counter.calls);
}
// Deleting a contained file may change glob() results, so the package's cache is dropped.
// The TODO below tracks avoiding this for files that cannot affect the build file.
@Test
// TODO(shs96c): avoid invalidation when arbitrary contained (possibly backup) files are deleted.
public void whenNotifiedOfContainedFileDeleteThenCacheRulesAreInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(Paths.get("java/com/facebook/SomeClass.java"),
StandardWatchEventKinds.ENTRY_DELETE);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call repopulated the cache.
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// Editor temp files (*.swp, matched by the fixture's temp_files config) are ignored:
// creating one must not invalidate the cache.
@Test
public void whenNotifiedOfContainedTempFileAddThenCachedRulesAreNotInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(Paths.get("java/com/facebook/MumbleSwp.Java.swp"),
StandardWatchEventKinds.ENTRY_CREATE);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call did not repopulate the cache.
assertEquals("Should not have invalidated cache.", 1, counter.calls);
}
// Temp-file filtering, ENTRY_MODIFY variant: modifying a *.swp file must not invalidate.
@Test
public void whenNotifiedOfContainedTempFileChangeThenCachedRulesAreNotInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(Paths.get("java/com/facebook/MumbleSwp.Java.swp"),
StandardWatchEventKinds.ENTRY_MODIFY);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call did not repopulate the cache.
assertEquals("Should not have invalidated cache.", 1, counter.calls);
}
// Temp-file filtering, ENTRY_DELETE variant: deleting a *.swp file must not invalidate.
@Test
public void whenNotifiedOfContainedTempFileDeleteThenCachedRulesAreNotInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(Paths.get("java/com/facebook/MumbleSwp.Java.swp"),
StandardWatchEventKinds.ENTRY_DELETE);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call did not repopulate the cache.
assertEquals("Should not have invalidated cache.", 1, counter.calls);
}
// A file outside any parsed package ("SomeClass.java__backup" at the cell root) must not
// invalidate cached rules when created.
@Test
public void whenNotifiedOfUnrelatedFileAddThenCacheRulesAreNotInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(Paths.get("SomeClass.java__backup"),
StandardWatchEventKinds.ENTRY_CREATE);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call did not repopulate the cache.
assertEquals("Should have not invalidated cache.", 1, counter.calls);
}
// Unrelated-file filtering, ENTRY_MODIFY variant: no invalidation expected.
@Test
public void whenNotifiedOfUnrelatedFileChangeThenCacheRulesAreNotInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(Paths.get("SomeClass.java__backup"),
StandardWatchEventKinds.ENTRY_MODIFY);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call did not repopulate the cache.
assertEquals("Should have not invalidated cache.", 1, counter.calls);
}
// Unrelated-file filtering, ENTRY_DELETE variant: no invalidation expected.
@Test
public void whenNotifiedOfUnrelatedFileDeleteThenCacheRulesAreNotInvalidated()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
// Call parseBuildFile to populate the cache.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Process event.
WatchEvent<Path> event = createPathEvent(
Paths.get("SomeClass.java__backup"),
StandardWatchEventKinds.ENTRY_DELETE);
parser.onFileSystemChange(event);
// Call parseBuildFile to request cached rules.
getRawTargetNodes(
parser,
eventBus,
cell,
false,
executorService,
testBuildFile);
// Test that the second parseBuildFile call did not repopulate the cache.
assertEquals("Should have not invalidated cache.", 1, counter.calls);
}
// Changing the [buildfile] includes setting between runs changes parse inputs, so the
// second project-wide parse must not be served from cache.
@Test
public void whenAllRulesAreRequestedWithDifferingIncludesThenRulesAreParsedTwice()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
filterAllTargetsInProject(
parser,
cell,
x -> true,
eventBus,
executorService);
// Second run uses a config whose includes point at //bar.py.
BuckConfig config = FakeBuckConfig.builder()
.setFilesystem(filesystem)
.setSections(
ImmutableMap.of(
ParserConfig.BUILDFILE_SECTION_NAME,
ImmutableMap.of(ParserConfig.INCLUDES_PROPERTY_NAME, "//bar.py")))
.build();
Cell cell = new TestCellBuilder()
.setFilesystem(filesystem)
.setBuckConfig(config)
.build();
filterAllTargetsInProject(
parser,
cell,
x -> true,
eventBus,
executorService);
assertEquals("Should have invalidated cache.", 2, counter.calls);
}
// A second request against a *different* cell (separate temp dir and filesystem) must not
// invalidate the first cell's cache — cache state is per cell.
@Test
public void whenAllRulesAreRequestedWithDifferingCellsThenRulesAreParsedOnce()
throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
filterAllTargetsInProject(
parser,
cell,
x -> true,
eventBus,
executorService);
assertEquals("Should have parsed once.", 1, counter.calls);
// Build an unrelated cell rooted in a fresh temp directory with its own include file.
Path newTempDir = Files.createTempDirectory("junit-temp-path").toRealPath();
Files.createFile(newTempDir.resolve("bar.py"))
ProjectFilesystem newFilesystem = new ProjectFilesystem(newTempDir);
BuckConfig config = FakeBuckConfig.builder()
.setFilesystem(newFilesystem)
.setSections(
ImmutableMap.of(
ParserConfig.BUILDFILE_SECTION_NAME,
ImmutableMap.of(ParserConfig.INCLUDES_PROPERTY_NAME, "//bar.py")))
.build();
Cell cell = new TestCellBuilder()
.setFilesystem(newFilesystem)
.setBuckConfig(config)
.build();
filterAllTargetsInProject(
parser,
cell,
x -> true,
eventBus,
executorService);
assertEquals("Should not have invalidated cache.", 1, counter.calls);
}
// A project-wide parse warms the cache, so a follow-up request for one target from the
// same package must not trigger a second parse.
@Test
public void whenAllRulesThenSingleTargetRequestedThenRulesAreParsedOnce()
    throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
  filterAllTargetsInProject(parser, cell, x -> true, eventBus, executorService);
  BuildTarget fooTarget = BuildTarget.builder(cellRoot, "//java/com/facebook", "foo").build();
  parser.buildTargetGraph(
      eventBus, cell, false, executorService, ImmutableList.of(fooTarget));
  assertEquals("Should have cached build rules.", 1, counter.calls);
}
// The reverse order: parsing one target first also warms the cache for a subsequent
// project-wide request, so only one parse happens in total.
@Test
public void whenSingleTargetThenAllRulesRequestedThenRulesAreParsedOnce()
    throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
  BuildTarget fooTarget = BuildTarget.builder(cellRoot, "//java/com/facebook", "foo").build();
  parser.buildTargetGraph(
      eventBus, cell, false, executorService, ImmutableList.of(fooTarget));
  filterAllTargetsInProject(parser, cell, x -> true, eventBus, executorService);
  assertEquals("Should have replaced build rules", 1, counter.calls);
}
// A flavored node (//bar:bar#src) cached against a dep in another package must be
// invalidated when that dep's build file is deleted; the rewritten //bar:bar (without the
// dep) must then build successfully.
@Test
public void whenBuildFilePathChangedThenFlavorsOfTargetsInPathAreInvalidated() throws Exception {
tempDir.newFolder("foo");
tempDir.newFolder("bar");
Path testFooBuckFile = tempDir.newFile("foo/BUCK");
Files.write(
testFooBuckFile,
"java_library(name = 'foo', visibility=['PUBLIC'])\n".getBytes(UTF_8));
Path testBarBuckFile = tempDir.newFile("bar/BUCK");
Files.write(
testBarBuckFile,
("java_library(name = 'bar',\n" +
" deps = ['//foo:foo'])\n").getBytes(UTF_8));
// Fetch //bar:bar#src to put it in cache.
BuildTarget barTarget = BuildTarget
.builder(cellRoot, "//bar", "bar")
.addFlavors(ImmutableFlavor.of("src"))
.build();
Iterable<BuildTarget> buildTargets = ImmutableList.of(barTarget);
parser.buildTargetGraph(
eventBus,
cell,
false,
executorService,
buildTargets);
// Rewrite //bar:bar so it doesn't depend on //foo:foo any more.
// Delete foo/BUCK and invalidate the cache, which should invalidate
// the cache entry for //bar:bar#src.
Files.delete(testFooBuckFile);
Files.write(testBarBuckFile, "java_library(name = 'bar')\n".getBytes(UTF_8));
WatchEvent<Path> deleteEvent = createPathEvent(
Paths.get("foo").resolve("BUCK"),
StandardWatchEventKinds.ENTRY_DELETE);
parser.onFileSystemChange(deleteEvent);
WatchEvent<Path> modifyEvent = createPathEvent(
Paths.get("bar").resolve("BUCK"),
StandardWatchEventKinds.ENTRY_MODIFY);
parser.onFileSystemChange(modifyEvent);
// Must succeed: a stale cached //bar:bar#src referencing deleted //foo:foo would throw here.
parser.buildTargetGraph(
eventBus,
cell,
false,
executorService,
buildTargets);
}
// Adding a source file matched by the rule's glob() must change the target's hash.
// A fresh Parser is constructed for the second pass so the change is observed via
// re-parsing rather than watch events.
@Test
public void targetWithSourceFileChangesHash() throws Exception {
tempDir.newFolder("foo");
Path testFooBuckFile = tempDir.newFile("foo/BUCK");
Files.write(
testFooBuckFile,
"java_library(name = 'lib', srcs=glob(['*.java']), visibility=['PUBLIC'])\n"
.getBytes(UTF_8));
BuildTarget fooLibTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build();
HashCode original = buildTargetGraphAndGetHashCodes(parser, fooLibTarget).get(fooLibTarget);
DefaultTypeCoercerFactory typeCoercerFactory = new DefaultTypeCoercerFactory(
ObjectMappers.newDefaultInstance());
parser = new Parser(
new BroadcastEventListener(),
cell.getBuckConfig().getView(ParserConfig.class),
typeCoercerFactory,
new ConstructorArgMarshaller(typeCoercerFactory));
Path testFooJavaFile = tempDir.newFile("foo/Foo.java");
Files.write(testFooJavaFile, "// Ceci n'est pas une Javafile\n".getBytes(UTF_8));
HashCode updated = buildTargetGraphAndGetHashCodes(parser, fooLibTarget).get(fooLibTarget);
assertNotEquals(original, updated);
}
  // Deleting a glob()-matched source file and delivering the corresponding
  // watch event must change the target's raw-inputs hash.
  @Test
  public void deletingSourceFileChangesHash() throws Exception {
    tempDir.newFolder("foo");
    Path testFooBuckFile = tempDir.newFile("foo/BUCK");
    Files.write(
        testFooBuckFile,
        "java_library(name = 'lib', srcs=glob(['*.java']), visibility=['PUBLIC'])\n"
            .getBytes(UTF_8));
    Path testFooJavaFile = tempDir.newFile("foo/Foo.java");
    Files.write(testFooJavaFile, "// Ceci n'est pas une Javafile\n".getBytes(UTF_8));
    Path testBarJavaFile = tempDir.newFile("foo/Bar.java");
    Files.write(testBarJavaFile, "// Seriously, no Java here\n".getBytes(UTF_8));
    BuildTarget fooLibTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build();
    HashCode originalHash = buildTargetGraphAndGetHashCodes(parser, fooLibTarget).get(fooLibTarget);
    Files.delete(testBarJavaFile);
    // Tell the parser about the deletion so it invalidates its cache entry.
    WatchEvent<Path> deleteEvent = createPathEvent(
        Paths.get("foo/Bar.java"),
        StandardWatchEventKinds.ENTRY_DELETE);
    parser.onFileSystemChange(deleteEvent);
    HashCode updatedHash = buildTargetGraphAndGetHashCodes(parser, fooLibTarget).get(fooLibTarget);
    assertNotEquals(originalHash, updatedHash);
  }
  // Renaming a source file (delivered as a delete + create event pair) must
  // change the target's raw-inputs hash.
  @Test
  public void renamingSourceFileChangesHash() throws Exception {
    tempDir.newFolder("foo");
    Path testFooBuckFile = tempDir.newFile("foo/BUCK");
    Files.write(
        testFooBuckFile,
        "java_library(name = 'lib', srcs=glob(['*.java']), visibility=['PUBLIC'])\n"
            .getBytes(UTF_8));
    Path testFooJavaFile = tempDir.newFile("foo/Foo.java");
    Files.write(testFooJavaFile, "// Ceci n'est pas une Javafile\n".getBytes(UTF_8));
    BuildTarget fooLibTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build();
    HashCode originalHash = buildTargetGraphAndGetHashCodes(parser, fooLibTarget).get(fooLibTarget);
    Files.move(testFooJavaFile, testFooJavaFile.resolveSibling("Bar.java"));
    // A rename surfaces as two separate watch events: the old path deleted,
    // the new path created.
    WatchEvent<Path> deleteEvent = createPathEvent(
        Paths.get("foo/Foo.java"),
        StandardWatchEventKinds.ENTRY_DELETE);
    WatchEvent<Path> createEvent = createPathEvent(
        Paths.get("foo/Bar.java"),
        StandardWatchEventKinds.ENTRY_CREATE);
    parser.onFileSystemChange(deleteEvent);
    parser.onFileSystemChange(createEvent);
    HashCode updatedHash = buildTargetGraphAndGetHashCodes(parser, fooLibTarget).get(fooLibTarget);
    assertNotEquals(originalHash, updatedHash);
  }
@Test
public void twoBuildTargetHashCodesPopulatesCorrectly() throws Exception {
tempDir.newFolder("foo");
Path testFooBuckFile = tempDir.newFile("foo/BUCK");
Files.write(
testFooBuckFile,
("java_library(name = 'lib', visibility=['PUBLIC'])\n" +
"java_library(name = 'lib2', visibility=['PUBLIC'])\n").getBytes(UTF_8));
BuildTarget fooLibTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build();
BuildTarget fooLib2Target = BuildTarget.builder(cellRoot, "//foo", "lib2").build();
ImmutableMap<BuildTarget, HashCode> hashes = buildTargetGraphAndGetHashCodes(
parser,
fooLibTarget,
fooLib2Target);
assertNotNull(hashes.get(fooLibTarget));
assertNotNull(hashes.get(fooLib2Target));
assertNotEquals(hashes.get(fooLibTarget), hashes.get(fooLib2Target));
}
  // Adding a dep from lib2 to lib must change lib2's hash while leaving
  // lib's hash untouched.
  @Test
  public void addingDepToTargetChangesHashOfDependingTargetOnly() throws Exception {
    tempDir.newFolder("foo");
    Path testFooBuckFile = tempDir.newFile("foo/BUCK");
    Files.write(
        testFooBuckFile,
        ("java_library(name = 'lib', deps = [], visibility=['PUBLIC'])\n" +
            "java_library(name = 'lib2', deps = [], visibility=['PUBLIC'])\n")
            .getBytes(UTF_8));
    BuildTarget fooLibTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build();
    BuildTarget fooLib2Target = BuildTarget.builder(cellRoot, "//foo", "lib2").build();
    ImmutableMap<BuildTarget, HashCode> hashes = buildTargetGraphAndGetHashCodes(
        parser,
        fooLibTarget,
        fooLib2Target);
    HashCode libKey = hashes.get(fooLibTarget);
    HashCode lib2Key = hashes.get(fooLib2Target);
    // Recreate the parser so the second parse is not served from the old cache.
    DefaultTypeCoercerFactory typeCoercerFactory = new DefaultTypeCoercerFactory(
        ObjectMappers.newDefaultInstance());
    parser = new Parser(
        new BroadcastEventListener(),
        cell.getBuckConfig().getView(ParserConfig.class),
        typeCoercerFactory,
        new ConstructorArgMarshaller(typeCoercerFactory));
    Files.write(
        testFooBuckFile,
        ("java_library(name = 'lib', deps = [], visibility=['PUBLIC'])\n" +
            "java_library(name = 'lib2', deps = [':lib'], visibility=['PUBLIC'])\n").getBytes(UTF_8));
    hashes = buildTargetGraphAndGetHashCodes(
        parser,
        fooLibTarget,
        fooLib2Target);
    assertEquals(libKey, hashes.get(fooLibTarget));
    assertNotEquals(lib2Key, hashes.get(fooLib2Target));
  }
  // Requesting all target nodes of a build file must return every target it
  // declares, even when only one of them was previously parsed and cached.
  @Test
  public void loadedBuildFileWithoutLoadedTargetNodesLoadsAdditionalTargetNodes()
      throws IOException, InterruptedException, BuildFileParseException, BuildTargetException {
    tempDir.newFolder("foo")
    Path testFooBuckFile = tempDir.newFile("foo/BUCK").toRealPath();
    Files.write(
        testFooBuckFile,
        "java_library(name = 'lib1')\njava_library(name = 'lib2')\n".getBytes(UTF_8));
    BuildTarget fooLib1Target = BuildTarget.builder(cellRoot, "//foo", "lib1").build();
    BuildTarget fooLib2Target = BuildTarget.builder(cellRoot, "//foo", "lib2").build();
    // First, only load one target from the build file so the file is parsed, but only one of the
    // TargetNodes will be cached.
    TargetNode<?> targetNode = parser.getTargetNode(
        eventBus,
        cell,
        false,
        executorService,
        fooLib1Target);
    assertThat(targetNode.getBuildTarget(), equalTo(fooLib1Target));
    // Now, try to load the entire build file and get all TargetNodes.
    ImmutableSet<TargetNode<?>> targetNodes = parser.getAllTargetNodes(
        eventBus,
        cell,
        false,
        executorService,
        testFooBuckFile);
    assertThat(targetNodes.size(), equalTo(2));
    assertThat(
        targetNodes.stream()
            .map(TargetNode::getBuildTarget)
            .collect(MoreCollectors.toImmutableList()),
        hasItems(fooLib1Target, fooLib2Target));
  }
  // The raw (unprocessed) attribute map of a parsed target node must expose
  // the declared rule attributes, e.g. its name.
  @Test
  public void getOrLoadTargetNodeRules()
      throws IOException, InterruptedException, BuildFileParseException, BuildTargetException {
    tempDir.newFolder("foo");
    Path testFooBuckFile = tempDir.newFile("foo/BUCK");
    Files.write(
        testFooBuckFile,
        "java_library(name = 'lib')\n".getBytes(UTF_8));
    BuildTarget fooLibTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build();
    TargetNode<?> targetNode = parser.getTargetNode(
        eventBus,
        cell,
        false,
        executorService,
        fooLibTarget);
    assertThat(targetNode.getBuildTarget(), equalTo(fooLibTarget));
    SortedMap<String, Object> rules = parser.getRawTargetNode(
        eventBus,
        cell,
        false,
        executorService,
        targetNode);
    assertThat(rules, Matchers.hasKey("name"));
    assertThat(
        (String) rules.get("name"),
        equalTo(targetNode.getBuildTarget().getShortName()));
  }
  // Sources reached through a symlinked directory: a newly created file only
  // shows up in the rule's srcs after the parser's cache has been invalidated
  // by the corresponding watch event.
  @Test
  public void whenBuildFileContainsSourcesUnderSymLinkNewSourcesNotAddedUntilCacheCleaned()
      throws Exception {
    // This test depends on creating symbolic links which we cannot do on Windows.
    assumeTrue(Platform.detect() != Platform.WINDOWS);
    tempDir.newFolder("bar");
    tempDir.newFile("bar/Bar.java");
    tempDir.newFolder("foo");
    Path rootPath = tempDir.getRoot().toRealPath();
    Files.createSymbolicLink(rootPath.resolve("foo/bar"), rootPath.resolve("bar"));
    Path testBuckFile = rootPath.resolve("foo").resolve("BUCK");
    Files.write(
        testBuckFile,
        "java_library(name = 'lib', srcs=glob(['bar/*.java']))\n".getBytes(UTF_8));
    // Fetch //:lib to put it in cache.
    BuildTarget libTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build();
    Iterable<BuildTarget> buildTargets = ImmutableList.of(libTarget);
    {
      TargetGraph targetGraph = parser.buildTargetGraph(
          eventBus,
          cell,
          false,
          executorService,
          buildTargets);
      BuildRuleResolver resolver = buildActionGraph(eventBus, targetGraph);
      JavaLibrary libRule = (JavaLibrary) resolver.requireRule(libTarget);
      assertEquals(ImmutableSet.of(Paths.get("foo/bar/Bar.java")), libRule.getJavaSrcs());
    }
    tempDir.newFile("bar/Baz.java");
    WatchEvent<Path> createEvent = createPathEvent(
        Paths.get("bar/Baz.java"),
        StandardWatchEventKinds.ENTRY_CREATE);
    parser.onFileSystemChange(createEvent);
    {
      // After invalidation the new file must be picked up by the glob.
      TargetGraph targetGraph = parser.buildTargetGraph(
          eventBus,
          cell,
          false,
          executorService,
          buildTargets);
      BuildRuleResolver resolver = buildActionGraph(eventBus, targetGraph);
      JavaLibrary libRule = (JavaLibrary) resolver.requireRule(libTarget);
      assertEquals(
          ImmutableSet.of(Paths.get("foo/bar/Bar.java"), Paths.get("foo/bar/Baz.java")),
          libRule.getJavaSrcs());
    }
  }
  // Counterpart to the test above: a deleted source behind a symlink only
  // disappears from the rule's srcs after the cache is invalidated.
  @Test
  public void whenBuildFileContainsSourcesUnderSymLinkDeletedSourcesNotRemovedUntilCacheCleaned()
      throws Exception {
    // This test depends on creating symbolic links which we cannot do on Windows.
    assumeTrue(Platform.detect() != Platform.WINDOWS);
    tempDir.newFolder("bar");
    tempDir.newFile("bar/Bar.java");
    tempDir.newFolder("foo");
    Path bazSourceFile = tempDir.newFile("bar/Baz.java");
    Path rootPath = tempDir.getRoot().toRealPath();
    Files.createSymbolicLink(rootPath.resolve("foo/bar"), rootPath.resolve("bar"));
    Path testBuckFile = rootPath.resolve("foo").resolve("BUCK");
    Files.write(
        testBuckFile,
        "java_library(name = 'lib', srcs=glob(['bar/*.java']))\n".getBytes(UTF_8));
    // Fetch //:lib to put it in cache.
    BuildTarget libTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build();
    Iterable<BuildTarget> buildTargets = ImmutableList.of(libTarget);
    {
      TargetGraph targetGraph = parser.buildTargetGraph(
          eventBus,
          cell,
          false,
          executorService,
          buildTargets);
      BuildRuleResolver resolver = buildActionGraph(eventBus, targetGraph);
      JavaLibrary libRule = (JavaLibrary) resolver.requireRule(libTarget);
      assertEquals(
          ImmutableSortedSet.of(Paths.get("foo/bar/Bar.java"), Paths.get("foo/bar/Baz.java")),
          libRule.getJavaSrcs());
    }
    Files.delete(bazSourceFile);
    WatchEvent<Path> deleteEvent = createPathEvent(
        Paths.get("bar/Baz.java"),
        StandardWatchEventKinds.ENTRY_DELETE);
    parser.onFileSystemChange(deleteEvent);
    {
      // After invalidation the deleted file must be gone from the glob result.
      TargetGraph targetGraph = parser.buildTargetGraph(
          eventBus,
          cell,
          false,
          executorService,
          buildTargets);
      BuildRuleResolver resolver = buildActionGraph(eventBus, targetGraph);
      JavaLibrary libRule = (JavaLibrary) resolver.requireRule(libTarget);
      assertEquals(
          ImmutableSet.of(Paths.get("foo/bar/Bar.java")),
          libRule.getJavaSrcs());
    }
  }
  // With [project] allow_symlinks = forbid, parsing a target whose inputs
  // resolve through a symlink must fail with a HumanReadableException.
  @Test
  public void whenSymlinksForbiddenThenParseFailsOnSymlinkInSources()
      throws Exception {
    // This test depends on creating symbolic links which we cannot do on Windows.
    assumeTrue(Platform.detect() != Platform.WINDOWS);
    thrown.expect(HumanReadableException.class);
    thrown.expectMessage(
        "Target //foo:lib contains input files under a path which contains a symbolic link (" +
        "{foo/bar=bar}). To resolve this, use separate rules and declare dependencies instead of " +
        "using symbolic links.");
    BuckConfig config = FakeBuckConfig.builder()
        .setFilesystem(filesystem)
        .setSections(
            "[project]",
            "allow_symlinks = forbid")
        .build();
    cell = new TestCellBuilder().setBuckConfig(config).setFilesystem(filesystem).build();
    tempDir.newFolder("bar");
    tempDir.newFile("bar/Bar.java");
    tempDir.newFolder("foo");
    Path rootPath = tempDir.getRoot().toRealPath();
    Files.createSymbolicLink(rootPath.resolve("foo/bar"), rootPath.resolve("bar"));
    Path testBuckFile = rootPath.resolve("foo").resolve("BUCK");
    Files.write(
        testBuckFile,
        "java_library(name = 'lib', srcs=glob(['bar/*.java']))\n".getBytes(UTF_8));
    BuildTarget libTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build();
    Iterable<BuildTarget> buildTargets = ImmutableList.of(libTarget);
    // Expected to throw while building the target graph.
    parser.buildTargetGraph(
        eventBus,
        cell,
        false,
        executorService,
        buildTargets);
  }
  // Symlinks under a configured read_only_path must not disable node caching:
  // the parsed node should still be present in the permanent parser state.
  @Test
  public void whenSymlinksAreInReadOnlyPathsCachingIsNotDisabled()
      throws Exception {
    // This test depends on creating symbolic links which we cannot do on Windows.
    assumeTrue(Platform.detect() != Platform.WINDOWS);
    Path rootPath = tempDir.getRoot().toRealPath();
    BuckConfig config = FakeBuckConfig.builder()
        .setFilesystem(filesystem)
        .setSections(
            "[project]",
            "read_only_paths = " + rootPath.resolve("foo"))
        .build();
    cell = new TestCellBuilder().setBuckConfig(config).setFilesystem(filesystem).build();
    tempDir.newFolder("bar");
    tempDir.newFile("bar/Bar.java");
    tempDir.newFolder("foo");
    Files.createSymbolicLink(rootPath.resolve("foo/bar"), rootPath.resolve("bar"));
    Path testBuckFile = rootPath.resolve("foo").resolve("BUCK");
    Files.write(
        testBuckFile,
        "java_library(name = 'lib', srcs=glob(['bar/*.java']))\n".getBytes(UTF_8));
    BuildTarget libTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build();
    Iterable<BuildTarget> buildTargets = ImmutableList.of(libTarget);
    parser.buildTargetGraph(
        eventBus,
        cell,
        false,
        executorService,
        buildTargets);
    // Every requested target must have landed in the node cache.
    DaemonicParserState permState = parser.getPermState();
    for (BuildTarget target : buildTargets) {
      assertTrue(permState
          .getOrCreateNodeCache(TargetNode.class)
          .lookupComputedNode(cell, target)
          .isPresent());
    }
  }
@Test
public void buildTargetHashCodePopulatesCorrectly() throws Exception {
tempDir.newFolder("foo");
Path testFooBuckFile = tempDir.newFile("foo/BUCK");
Files.write(
testFooBuckFile,
"java_library(name = 'lib', visibility=['PUBLIC'])\n".getBytes(UTF_8));
BuildTarget fooLibTarget = BuildTarget.builder(cellRoot, "//foo", "lib").build();
// We can't precalculate the hash, since it depends on the buck version. Check for the presence
// of a hash for the right key.
HashCode hashCode = buildTargetGraphAndGetHashCodes(parser, fooLibTarget).get(fooLibTarget);
assertNotNull(hashCode);
}
  // read_config() in a build file must reflect the current .buckconfig value
  // (or the supplied default) on each parse with a fresh cell.
  @Test
  public void readConfigReadsConfig() throws Exception {
    Path buckFile = cellRoot.resolve("BUCK");
    BuildTarget buildTarget = BuildTarget.of(
        UnflavoredBuildTarget.of(
            filesystem.getRootPath(),
            Optional.empty(),
            "//",
            "cake"));
    Files.write(
        buckFile,
        Joiner.on("").join(
            ImmutableList.of(
                "genrule(\n" +
                "name = 'cake',\n" +
                "out = read_config('foo', 'bar', 'default') + '.txt',\n" +
                "cmd = 'touch $OUT'\n" +
                ")\n"))
            .getBytes(UTF_8));
    // No [foo] section configured: the default value is used.
    BuckConfig config =
        FakeBuckConfig.builder()
            .setFilesystem(filesystem)
            .build();
    Cell cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build();
    TargetNode<GenruleDescription.Arg> node = parser
        .getTargetNode(eventBus, cell, false, executorService, buildTarget)
        .castArg(GenruleDescription.Arg.class)
        .get();
    assertThat(node.getConstructorArg().out, is(equalTo("default.txt")));
    // foo.bar set: the configured value is used.
    config =
        FakeBuckConfig.builder()
            .setSections(ImmutableMap.of("foo", ImmutableMap.of("bar", "value")))
            .setFilesystem(filesystem)
            .build();
    cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build();
    node = parser
        .getTargetNode(eventBus, cell, false, executorService, buildTarget)
        .castArg(GenruleDescription.Arg.class)
        .get();
    assertThat(node.getConstructorArg().out, is(equalTo("value.txt")));
    // foo.bar changed again: the new value must win over any cached parse.
    config =
        FakeBuckConfig.builder()
            .setFilesystem(filesystem)
            .setSections(ImmutableMap.of("foo", ImmutableMap.of("bar", "other value")))
            .build();
    cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build();
    node = parser
        .getTargetNode(eventBus, cell, false, executorService, buildTarget)
        .castArg(GenruleDescription.Arg.class)
        .get();
    assertThat(node.getConstructorArg().out, is(equalTo("other value.txt")));
  }
  // Changing a config entry that the build file reads via read_config() must
  // invalidate the cached rules (two parse events expected).
  @Test
  public void whenBuckConfigEntryChangesThenCachedRulesAreInvalidated() throws Exception {
    Path buckFile = cellRoot.resolve("BUCK");
    Files.write(
        buckFile,
        Joiner.on("").join(
            ImmutableList.of(
                "read_config('foo', 'bar')\n",
                "genrule(name = 'cake', out = 'file.txt', cmd = 'touch $OUT')\n"))
            .getBytes(UTF_8));
    BuckConfig config =
        FakeBuckConfig.builder()
            .setSections(ImmutableMap.of("foo", ImmutableMap.of("bar", "value")))
            .setFilesystem(filesystem)
            .build();
    Cell cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build();
    parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile);
    // Call filterAllTargetsInProject to request cached rules.
    config =
        FakeBuckConfig.builder()
            .setFilesystem(filesystem)
            .setSections(ImmutableMap.of("foo", ImmutableMap.of("bar", "other value")))
            .build();
    cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build();
    parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile)
    // Test that the second parseBuildFile call repopulated the cache.
    assertEquals("Should have invalidated.", 2, counter.calls);
  }
  // Adding a config entry that the build file reads (previously absent) must
  // invalidate the cached rules (two parse events expected).
  @Test
  public void whenBuckConfigAddedThenCachedRulesAreInvalidated() throws Exception {
    Path buckFile = cellRoot.resolve("BUCK");
    Files.write(
        buckFile,
        Joiner.on("").join(
            ImmutableList.of(
                "read_config('foo', 'bar')\n",
                "genrule(name = 'cake', out = 'file.txt', cmd = 'touch $OUT')\n"))
            .getBytes(UTF_8));
    BuckConfig config =
        FakeBuckConfig.builder()
            .setFilesystem(filesystem)
            .build();
    Cell cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build();
    parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile);
    // Call filterAllTargetsInProject to request cached rules.
    config =
        FakeBuckConfig.builder()
            .setFilesystem(filesystem)
            .setSections(ImmutableMap.of("foo", ImmutableMap.of("bar", "other value")))
            .build();
    cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build();
    parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile);
    // Test that the second parseBuildFile call repopulated the cache.
    assertEquals("Should have invalidated.", 2, counter.calls);
  }
  // Removing a config entry that the build file reads must invalidate the
  // cached rules (two parse events expected).
  @Test
  public void whenBuckConfigEntryRemovedThenCachedRulesAreInvalidated() throws Exception {
    Path buckFile = cellRoot.resolve("BUCK");
    Files.write(
        buckFile,
        Joiner.on("").join(
            ImmutableList.of(
                "read_config('foo', 'bar')\n",
                "genrule(name = 'cake', out = 'file.txt', cmd = 'touch $OUT')\n"))
            .getBytes(UTF_8));
    BuckConfig config =
        FakeBuckConfig.builder()
            .setSections(ImmutableMap.of("foo", ImmutableMap.of("bar", "value")))
            .setFilesystem(filesystem)
            .build();
    Cell cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build();
    parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile);
    // Call filterAllTargetsInProject to request cached rules.
    config =
        FakeBuckConfig.builder()
            .setFilesystem(filesystem)
            .build();
    cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build();
    parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile);
    // Test that the second parseBuildFile call repopulated the cache.
    assertEquals("Should have invalidated.", 2, counter.calls);
  }
  // Changing a config entry the build file does NOT read must leave the cache
  // intact (only one parse event expected).
  @Test
  public void whenUnrelatedBuckConfigEntryChangesThenCachedRulesAreNotInvalidated()
      throws Exception {
    Path buckFile = cellRoot.resolve("BUCK");
    Files.write(
        buckFile,
        Joiner.on("").join(
            ImmutableList.of(
                "read_config('foo', 'bar')\n",
                "genrule(name = 'cake', out = 'file.txt', cmd = 'touch $OUT')\n"))
            .getBytes(UTF_8));
    BuckConfig config =
        FakeBuckConfig.builder()
            .setSections(
                ImmutableMap.of(
                    "foo",
                    ImmutableMap.of(
                        "bar", "value",
                        "dead", "beef")))
            .setFilesystem(filesystem)
            .build();
    Cell cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build();
    parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile);
    // Call filterAllTargetsInProject to request cached rules.
    config =
        FakeBuckConfig.builder()
            .setSections(
                ImmutableMap.of(
                    "foo",
                    ImmutableMap.of(
                        "bar", "value",
                        "dead", "beef different")))
            .setFilesystem(filesystem)
            .build();
    cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build();
    parser.getAllTargetNodes(eventBus, cell, false, executorService, buckFile);
    // Test that the second parseBuildFile call repopulated the cache.
    assertEquals("Should not have invalidated.", 1, counter.calls);
  }
  // A parse error in one spec must propagate as BuildFileParseException and
  // not deadlock spec resolution (hence the test timeout).
  @Test(timeout = 20000)
  public void resolveTargetSpecsDoesNotHangOnException() throws Exception {
    Path buckFile = cellRoot.resolve("foo/BUCK");
    Files.createDirectories(buckFile.getParent());
    Files.write(buckFile, "# empty".getBytes(UTF_8));
    buckFile = cellRoot.resolve("bar/BUCK");
    Files.createDirectories(buckFile.getParent());
    // Deliberately invalid build file contents.
    Files.write(
        buckFile,
        "I do not parse as python".getBytes(UTF_8));
    thrown.expect(BuildFileParseException.class);
    thrown.expectMessage("Parse error for build file");
    thrown.expectMessage(Paths.get("bar/BUCK").toString());
    parser.resolveTargetSpecs(
        eventBus,
        cell,
        false,
        executorService,
        ImmutableList.of(
            TargetNodePredicateSpec.of(
                x -> true,
                BuildFileSpec.fromRecursivePath(
                    Paths.get("bar"),
                    cell.getRoot())),
            TargetNodePredicateSpec.of(
                x -> true,
                BuildFileSpec.fromRecursivePath(
                    Paths.get("foo"),
                    cell.getRoot()))),
        SpeculativeParsing.of(true),
        ParserConfig.ApplyDefaultFlavorsMode.ENABLED);
  }
  // The resolved target sets must come back in the same order as the specs
  // were supplied, regardless of which spec is listed first.
  @Test
  public void resolveTargetSpecsPreservesOrder() throws Exception {
    BuildTarget foo = BuildTargetFactory.newInstance(filesystem, "//foo:foo");
    Path buckFile = cellRoot.resolve("foo/BUCK");
    Files.createDirectories(buckFile.getParent());
    Files.write(
        buckFile,
        "genrule(name='foo', out='foo', cmd='foo')".getBytes(UTF_8));
    BuildTarget bar = BuildTargetFactory.newInstance(filesystem, "//bar:bar");
    buckFile = cellRoot.resolve("bar/BUCK");
    Files.createDirectories(buckFile.getParent());
    Files.write(
        buckFile,
        "genrule(name='bar', out='bar', cmd='bar')".getBytes(UTF_8));
    // bar first, foo second.
    ImmutableList<ImmutableSet<BuildTarget>> targets =
        parser.resolveTargetSpecs(
            eventBus,
            cell,
            false,
            executorService,
            ImmutableList.of(
                TargetNodePredicateSpec.of(
                    x -> true,
                    BuildFileSpec.fromRecursivePath(
                        Paths.get("bar"),
                        cell.getRoot())),
                TargetNodePredicateSpec.of(
                    x -> true,
                    BuildFileSpec.fromRecursivePath(
                        Paths.get("foo"),
                        cell.getRoot()))),
            SpeculativeParsing.of(true),
            ParserConfig.ApplyDefaultFlavorsMode.ENABLED);
    assertThat(
        targets,
        equalTo(ImmutableList.of(ImmutableSet.of(bar), ImmutableSet.of(foo))));
    // Same specs, reversed order: result order must flip accordingly.
    targets =
        parser.resolveTargetSpecs(
            eventBus,
            cell,
            false,
            executorService,
            ImmutableList.of(
                TargetNodePredicateSpec.of(
                    x -> true,
                    BuildFileSpec.fromRecursivePath(
                        Paths.get("foo"),
                        cell.getRoot())),
                TargetNodePredicateSpec.of(
                    x -> true,
                    BuildFileSpec.fromRecursivePath(
                        Paths.get("bar"),
                        cell.getRoot()))),
            SpeculativeParsing.of(true),
            ParserConfig.ApplyDefaultFlavorsMode.ENABLED);
    assertThat(
        targets,
        equalTo(ImmutableList.of(ImmutableSet.of(foo), ImmutableSet.of(bar))));
  }
  // A rule's own defaults={...} attribute must be applied as flavors when a
  // target is requested without explicit flavors.
  @Test
  public void defaultFlavorsInRuleArgsAppliedToTarget() throws Exception {
    // We depend on Xcode platforms for this test.
    assumeTrue(Platform.detect() == Platform.MACOS);
    Path buckFile = cellRoot.resolve("lib/BUCK");
    Files.createDirectories(buckFile.getParent());
    Files.write(
        buckFile,
        ("cxx_library(" +
            "  name = 'lib', " +
            "  srcs=glob(['*.c']), " +
            "  defaults={'platform':'iphonesimulator-x86_64'}" +
            ")").getBytes(UTF_8));
    ImmutableSet<BuildTarget> result =
        parser.buildTargetGraphForTargetNodeSpecs(
            eventBus,
            cell,
            false,
            executorService,
            ImmutableList.of(
                AbstractBuildTargetSpec.from(
                    BuildTarget.builder(cellRoot, "//lib", "lib").build())),
            /* ignoreBuckAutodepsFiles */ false,
            ParserConfig.ApplyDefaultFlavorsMode.ENABLED).getBuildTargets();
    assertThat(
        result,
        hasItems(
            BuildTarget.builder(cellRoot, "//lib", "lib")
                .addFlavors(
                    ImmutableFlavor.of("iphonesimulator-x86_64"),
                    ImmutableFlavor.of("static"))
                .build()));
  }
  // Default flavors declared in .buckconfig ([defaults.cxx_library]) must be
  // applied when the rule declares none of its own.
  @Test
  public void defaultFlavorsInConfigAppliedToTarget() throws Exception {
    // We depend on Xcode platforms for this test.
    assumeTrue(Platform.detect() == Platform.MACOS);
    Path buckFile = cellRoot.resolve("lib/BUCK");
    Files.createDirectories(buckFile.getParent());
    Files.write(
        buckFile,
        ("cxx_library(" +
            "  name = 'lib', " +
            "  srcs=glob(['*.c']) " +
            ")").getBytes(UTF_8));
    BuckConfig config = FakeBuckConfig.builder()
        .setFilesystem(filesystem)
        .setSections(
            ImmutableMap.of(
                "defaults.cxx_library",
                ImmutableMap.of(
                    "platform",
                    "iphoneos-arm64",
                    "type",
                    "shared")))
        .build();
    cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build();
    ImmutableSet<BuildTarget> result =
        parser.buildTargetGraphForTargetNodeSpecs(
            eventBus,
            cell,
            false,
            executorService,
            ImmutableList.of(
                AbstractBuildTargetSpec.from(
                    BuildTarget.builder(cellRoot, "//lib", "lib").build())),
            /* ignoreBuckAutodepsFiles */ false,
            ParserConfig.ApplyDefaultFlavorsMode.ENABLED).getBuildTargets();
    assertThat(
        result,
        hasItems(
            BuildTarget.builder(cellRoot, "//lib", "lib")
                .addFlavors(
                    ImmutableFlavor.of("iphoneos-arm64"),
                    ImmutableFlavor.of("shared"))
                .build()));
  }
  // When both the rule and .buckconfig declare default flavors, the rule's own
  // defaults win for the keys it sets (platform), config fills the rest (type).
  @Test
  public void defaultFlavorsInArgsOverrideDefaultsFromConfig() throws Exception {
    // We depend on Xcode platforms for this test.
    assumeTrue(Platform.detect() == Platform.MACOS);
    Path buckFile = cellRoot.resolve("lib/BUCK");
    Files.createDirectories(buckFile.getParent());
    Files.write(
        buckFile,
        ("cxx_library(" +
            "  name = 'lib', " +
            "  srcs=glob(['*.c']), " +
            "  defaults={'platform':'macosx-x86_64'}" +
            ")").getBytes(UTF_8));
    BuckConfig config = FakeBuckConfig.builder()
        .setFilesystem(filesystem)
        .setSections(
            ImmutableMap.of(
                "defaults.cxx_library",
                ImmutableMap.of(
                    "platform",
                    "iphoneos-arm64",
                    "type",
                    "shared")))
        .build();
    cell = new TestCellBuilder().setFilesystem(filesystem).setBuckConfig(config).build();
    ImmutableSet<BuildTarget> result =
        parser.buildTargetGraphForTargetNodeSpecs(
            eventBus,
            cell,
            false,
            executorService,
            ImmutableList.of(
                AbstractBuildTargetSpec.from(
                    BuildTarget.builder(cellRoot, "//lib", "lib").build())),
            /* ignoreBuckAutodepsFiles */ false,
            ParserConfig.ApplyDefaultFlavorsMode.ENABLED).getBuildTargets();
    assertThat(
        result,
        hasItems(
            BuildTarget.builder(cellRoot, "//lib", "lib")
                .addFlavors(
                    ImmutableFlavor.of("macosx-x86_64"),
                    ImmutableFlavor.of("shared"))
                .build()));
  }
@Test
public void testGetCacheReturnsSame() throws Exception {
assertEquals(
parser.getPermState().getOrCreateNodeCache(TargetNode.class),
parser.getPermState().getOrCreateNodeCache(TargetNode.class));
assertNotEquals(
parser.getPermState().getOrCreateNodeCache(TargetNode.class),
parser.getPermState().getOrCreateNodeCache(Map.class));
}
  // Group membership from the fixture build file must affect visibility
  // asymmetrically: foo is visible to bar, but not vice versa.
  @Test
  public void groupsAreExpanded() throws Exception {
    Path buckFile = cellRoot.resolve("BUCK");
    Files.createDirectories(buckFile.getParent());
    Path groupsData = TestDataHelper.getTestDataScenario(this, "groups");
    Files.copy(groupsData.resolve("BUCK.fixture"), buckFile);
    BuildTarget fooTarget = BuildTargetFactory.newInstance(cellRoot, "//:foo");
    BuildTarget barTarget = BuildTargetFactory.newInstance(cellRoot, "//:bar");
    TargetGraph targetGraph = parser.buildTargetGraph(
        eventBus,
        cell,
        false,
        executorService,
        ImmutableSet.of(barTarget));
    assertThat(targetGraph.getGroupsContainingTarget(fooTarget).size(), is(2));
    assertThat(
        targetGraph.get(fooTarget).isVisibleTo(targetGraph, targetGraph.get(barTarget)),
        is(true));
    assertThat(
        targetGraph.get(barTarget).isVisibleTo(targetGraph, targetGraph.get(fooTarget)),
        is(false));
  }
  // Visibility rules from the fixture files must be enforced: allowed targets
  // parse, the disallowed one fails with a HumanReadableException.
  @Test
  public void testVisibilityGetsChecked() throws Exception {
    Path visibilityData = TestDataHelper.getTestDataScenario(this, "visibility");
    Path visibilityBuckFile = cellRoot.resolve("BUCK");
    Path visibilitySubBuckFile = cellRoot.resolve("sub/BUCK");
    Files.createDirectories(visibilityBuckFile.getParent());
    Files.createDirectories(visibilitySubBuckFile.getParent());
    Files.copy(visibilityData.resolve("BUCK.fixture"), visibilityBuckFile);
    Files.copy(visibilityData.resolve("sub/BUCK.fixture"), visibilitySubBuckFile);
    parser.buildTargetGraph(
        eventBus,
        cell,
        false,
        executorService,
        ImmutableSet.of(BuildTargetFactory.newInstance(cellRoot, "//:should_pass")));
    parser.buildTargetGraph(
        eventBus,
        cell,
        false,
        executorService,
        ImmutableSet.of(BuildTargetFactory.newInstance(cellRoot, "//:should_pass2")));
    try {
      parser.buildTargetGraph(
          eventBus,
          cell,
          false,
          executorService,
          ImmutableSet.of(BuildTargetFactory.newInstance(cellRoot, "//:should_fail")));
      Assert.fail("did not expect to succeed parsing");
    } catch (Exception e) {
      assertThat(e, instanceOf(HumanReadableException.class));
      assertThat(
          e.getMessage(),
          containsString("//:should_fail depends on //sub:sub, which is not visible"));
    }
  }
  /** Builds a fresh action graph for {@code targetGraph} and returns its rule resolver. */
  private BuildRuleResolver buildActionGraph(BuckEventBus eventBus, TargetGraph targetGraph) {
    return Preconditions.checkNotNull(
        ActionGraphCache.getFreshActionGraph(eventBus, targetGraph)).getResolver();
  }
  /**
   * Populates the collection of known build targets that this Parser will use to construct an
   * action graph using all build files inside the given project root and returns an optionally
   * filtered set of build targets.
   *
   * @param filter if specified, applied to each rule in rules. All matching rules will be included
   *     in the List returned by this method. If filter is null, then this method returns null.
   * @return The build targets in the project filtered by the given filter.
   */
  public static synchronized ImmutableSet<BuildTarget> filterAllTargetsInProject(
      Parser parser,
      Cell cell,
      Predicate<TargetNode<?>> filter,
      BuckEventBus buckEventBus,
      ListeningExecutorService executor)
      throws BuildFileParseException, BuildTargetException, IOException, InterruptedException {
    // A recursive spec rooted at "" matches every build file in the cell; the
    // filter is applied twice: once while parsing specs, once over the nodes.
    return FluentIterable
        .from(
            parser.buildTargetGraphForTargetNodeSpecs(
                buckEventBus,
                cell,
                false,
                executor,
                ImmutableList.of(
                    TargetNodePredicateSpec.of(
                        filter,
                        BuildFileSpec.fromRecursivePath(
                            Paths.get(""),
                            cell.getRoot()))),
                /* ignoreBuckAutodepsFiles */ false)
            .getTargetGraph().getNodes())
        .filter(filter)
        .transform(HasBuildTarget::getBuildTarget)
        .toSet();
  }
private ImmutableMap<BuildTarget, HashCode> buildTargetGraphAndGetHashCodes(
Parser parser,
BuildTarget... buildTargets) throws Exception {
// Build the target graph so we can access the hash code cache.
ImmutableList<BuildTarget> buildTargetsList = ImmutableList.copyOf(buildTargets);
TargetGraph targetGraph = parser.buildTargetGraph(
eventBus,
cell,
false,
executorService,
buildTargetsList);
ImmutableMap.Builder<BuildTarget, HashCode> toReturn = ImmutableMap.builder();
for (TargetNode<?> node : targetGraph.getNodes()) {
toReturn.put(node.getBuildTarget(), node.getRawInputsHashCode());
}
return toReturn.build();
}
  /** Counts {@code ParseBuckFileEvent.Started} events posted on the event bus. */
  private static class ParseEventStartedCounter {
    // Number of Started events observed so far.
    int calls = 0;
    // We know that the ProjectBuildFileParser emits a Started event when it parses a build file.
    @Subscribe
    @SuppressWarnings("unused")
    public void call(ParseBuckFileEvent.Started parseEvent) {
      calls++;
    }
  }
}
|
[
"\"PATH\"",
"\"PATH\"",
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
java
| 1 | 0 | |
tests/src/test/scala/common/WhiskProperties.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package common;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.util.Properties;
import static org.junit.Assert.assertTrue;
/**
* Properties that describe a whisk installation
*/
public class WhiskProperties {
  /**
   * The name of the properties file.
   */
  protected static final String WHISK_PROPS_FILE = "whisk.properties";
  /**
   * Default concurrency level if otherwise unspecified
   */
  private static final int DEFAULT_CONCURRENCY = 20;
  /**
   * If true, then tests will direct to the router rather than the edge
   * components.
   */
  public static final boolean testRouter = System.getProperty("test.router", "false").equals("true");
  /**
   * The number of tests to run concurrently.
   */
  public static final int concurrentTestCount = getConcurrentTestCount(System.getProperty("testthreads", null));
  /**
   * The root of the whisk installation, used to retrieve files relative to
   * home. Initialized by the static block below.
   */
  private static final String whiskHome;
  /**
   * The properties read from the WHISK_PROPS_FILE. Initialized by the static
   * block below.
   */
  private static final Properties whiskProperties;
  static {
    /**
     * Finds the whisk home directory. This is resolved to either (in
     * order):
     *
     * 1. a system property openwhisk.dir
     *
     * 2. OPENWHISK_HOME from the environment
     *
     * 3. a path in the directory tree containing WHISK_PROPS_FILE.
     *
     * @return the path to whisk home as a string
     * @throws assertion
     *             failure if whisk home cannot be determined
     */
    String wskdir = System.getProperty("openwhisk.home", System.getenv("OPENWHISK_HOME"));
    if (wskdir == null) {
      // Fall back to searching upward from the current working directory.
      String dir = System.getProperty("user.dir");
      if (dir != null) {
        File propfile = findFileRecursively(dir, WHISK_PROPS_FILE);
        if (propfile != null) {
          wskdir = propfile.getParent();
        }
      }
    }
    assertTrue("could not determine openwhisk home", wskdir != null);
    File wskpropsFile = new File(wskdir, WHISK_PROPS_FILE);
    assertTrue(String.format("'%s' does not exists but required", wskpropsFile), wskpropsFile.exists());
    // loads properties from file
    whiskProperties = loadProperties(wskpropsFile);
    // set whisk home from read properties
    whiskHome = whiskProperties.getProperty("openwhisk.home");
    System.out.format("test router? %s\n", testRouter);
  }
/**
* The path to the CLI directory.
*/
public static String getCLIDir() {
return whiskHome + "/bin";
}
/**
* The path to the Go CLI executable.
*/
public static String getCLIPath() {
return getCLIDir() + "/wsk";
}
public static File getFileRelativeToWhiskHome(String name) {
return new File(whiskHome, name);
}
public static String getProperty(String string) {
return whiskProperties.getProperty(string);
}
public static String getKafkaHosts() {
return whiskProperties.getProperty("kafka.hosts");
}
public static int getKafkaMonitorPort() {
return Integer.parseInt(whiskProperties.getProperty("kafkaras.host.port"));
}
public static String getZookeeperHost() {
return whiskProperties.getProperty("zookeeper.hosts");
}
public static String getMainDockerEndpoint() {
return whiskProperties.getProperty("main.docker.endpoint");
}
public static boolean useCLIDownload() {
return whiskProperties.getProperty("use.cli.download").equals("true");
}
public static String[] getInvokerHosts() {
// split of empty string is non-empty array
String hosts = whiskProperties.getProperty("invoker.hosts");
return (hosts == null || hosts.equals("")) ? new String[0] : hosts.split(",");
}
public static String[] getAdditionalHosts() {
// split of empty string is non-empty array
String hosts = whiskProperties.getProperty("additional.hosts");
return (hosts == null || hosts.equals("")) ? new String[0] : hosts.split(",");
}
public static int numberOfInvokers() {
return getInvokerHosts().length;
}
public static String getSslCertificateChallenge() {
return whiskProperties.getProperty("whisk.ssl.challenge");
}
/**
* Note that when testRouter == true, we pretend the router host is edge
* host.
*/
public static String getEdgeHost() {
return testRouter ? getRouterHost() : whiskProperties.getProperty("edge.host");
}
public static String getRealEdgeHost() {
return whiskProperties.getProperty("edge.host");
}
public static String getAuthForTesting() {
return whiskProperties.getProperty("testing.auth");
}
public static String getRouterHost() {
return whiskProperties.getProperty("router.host");
}
public static String getApiProto() {
return whiskProperties.getProperty("whisk.api.host.proto");
}
public static String getApiHost() {
return whiskProperties.getProperty("whisk.api.host.name");
}
public static String getApiPort() {
return whiskProperties.getProperty("whisk.api.host.port");
}
public static String getApiHostForAction() {
return getApiProto() + "://" + getApiHost() + ":" + getApiPort();
}
public static String getApiHostForClient(String subdomain, boolean includeProtocol) {
String proto = whiskProperties.getProperty("whisk.api.host.proto");
String port = whiskProperties.getProperty("whisk.api.host.port");
String host = whiskProperties.getProperty("whisk.api.localhost.name");
if (includeProtocol) {
return proto + "://" + subdomain + "." + host + ":" + port;
} else {
return subdomain + "." + host + ":" + port;
}
}
public static int getPartsInVanitySubdomain() {
return Integer.parseInt(whiskProperties.getProperty("whisk.api.vanity.subdomain.parts"));
}
public static int getEdgeHostApiPort() {
return Integer.parseInt(whiskProperties.getProperty("edge.host.apiport"));
}
public static String getControllerHosts() {
return whiskProperties.getProperty("controller.hosts");
}
public static String getDBHosts() {
return whiskProperties.getProperty("db.hostsList");
}
public static int getControllerBasePort() {
return Integer.parseInt(whiskProperties.getProperty("controller.host.basePort"));
}
public static String getBaseControllerHost() {
return getControllerHosts().split(",")[0];
}
public static String getBaseDBHost() {
return getDBHosts().split(",")[0];
}
public static String getBaseControllerAddress() {
return getBaseControllerHost() + ":" + getControllerBasePort();
}
public static int getMaxActionInvokesPerMinute() {
String valStr = whiskProperties.getProperty("limits.actions.invokes.perMinute");
return Integer.parseInt(valStr);
}
/**
* read the contents of auth key file and return as a Pair
* <username,password>
*/
public static Pair getBasicAuth() {
File f = getAuthFileForTesting();
String contents = readAuthKey(f);
String[] parts = contents.split(":");
assert parts.length == 2;
return new Pair(parts[0], parts[1]);
}
/**
* @return the path to a file holding the auth key used during junit testing
*/
public static File getAuthFileForTesting() {
String testAuth = getAuthForTesting();
if (testAuth.startsWith(File.separator)) {
return new File(testAuth);
} else {
return WhiskProperties.getFileRelativeToWhiskHome(testAuth);
}
}
/**
* read the contents of a file which holds an auth key.
*/
public static String readAuthKey(File filename) {
// the following funny relative path works both from Eclipse and when
// running in bin/ directory from ant
try {
byte[] encoded = Files.readAllBytes(filename.toPath());
String authKey = new String(encoded, "UTF-8").trim();
return authKey;
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
/**
* @return the path to a file holding the VCAP_SERVICES used during junit
* testing
*/
public static File getVCAPServicesFile() {
String vcapServices = whiskProperties.getProperty("vcap.services.file");
if (vcapServices.startsWith(File.separator)) {
return new File(vcapServices);
} else {
return WhiskProperties.getFileRelativeToWhiskHome(vcapServices);
}
}
/**
* are we running on Mac OS X?
*/
public static boolean onMacOSX() {
String osname = System.getProperty("os.name");
return osname.toLowerCase().contains("mac");
}
/**
* are we running on Linux?
*/
public static boolean onLinux() {
String osname = System.getProperty("os.name");
return osname.equalsIgnoreCase("linux");
}
/**
* where is python 2.7?
*/
public static final String python = findPython();
protected static File findFileRecursively(String dir, String needle) {
if (dir != null) {
File base = new File(dir);
File file = new File(base, needle);
if (file.exists()) {
return file;
} else {
return findFileRecursively(base.getParent(), needle);
}
} else {
return null;
}
}
/**
* Load properties from whisk.properties
*/
protected static Properties loadProperties(File propsFile) {
Properties props = new Properties();
InputStream input = null;
try {
input = new FileInputStream(propsFile);
// load a properties file
props.load(input);
} catch (IOException ex) {
ex.printStackTrace();
} finally {
if (input != null) {
try {
input.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
return props;
}
private static String findPython() {
File p27 = new File("/usr/local/bin/python2.7");
if (p27.exists()) {
return "/usr/local/bin/python2.7";
} else {
return "python";
}
}
private static int getConcurrentTestCount(String count) {
if (count != null && count.trim().isEmpty() == false) {
try {
int threads = Integer.parseInt(count);
if (threads > 0) {
return threads;
}
} catch (NumberFormatException e) {
}
}
return DEFAULT_CONCURRENCY;
}
}
|
[
"\"OPENWHISK_HOME\""
] |
[] |
[
"OPENWHISK_HOME"
] |
[]
|
["OPENWHISK_HOME"]
|
java
| 1 | 0 | |
pkg/agent/main.go
|
package main
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
"os"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/rancher/rancher/pkg/agent/cluster"
"github.com/rancher/rancher/pkg/agent/node"
"github.com/rancher/rancher/pkg/remotedialer"
"github.com/rancher/rancher/pkg/rkenodeconfigclient"
"github.com/sirupsen/logrus"
)
const (
Token = "X-API-Tunnel-Token"
Params = "X-API-Tunnel-Params"
)
func main() {
if os.Getenv("CATTLE_DEBUG") == "true" || os.Getenv("RANCHER_DEBUG") == "true" {
logrus.SetLevel(logrus.DebugLevel)
}
if err := run(); err != nil {
log.Fatal(err)
}
}
func isCluster() bool {
return os.Getenv("CATTLE_CLUSTER") == "true"
}
func getParams() (map[string]interface{}, error) {
if isCluster() {
return cluster.Params()
}
return node.Params(), nil
}
func getTokenAndURL() (string, string, error) {
token, url, err := node.TokenAndURL()
if err != nil {
return "", "", err
}
if token == "" {
return cluster.TokenAndURL()
}
return token, url, nil
}
func isConnect() bool {
if os.Getenv("CATTLE_AGENT_CONNECT") == "true" {
return true
}
_, err := os.Stat("connected")
return err == nil
}
func connected() {
f, err := os.Create("connected")
if err != nil {
f.Close()
}
}
func cleanup(ctx context.Context) error {
if os.Getenv("CATTLE_K8S_MANAGED") != "true" {
return nil
}
c, err := client.NewEnvClient()
if err != nil {
return err
}
args := filters.NewArgs()
args.Add("label", "io.cattle.agent=true")
containers, err := c.ContainerList(ctx, types.ContainerListOptions{
All: true,
Filters: args,
})
if err != nil {
return err
}
for _, container := range containers {
if _, ok := container.Labels["io.kubernetes.pod.namespace"]; ok {
continue
}
container := container
go func() {
time.Sleep(15 * time.Second)
logrus.Infof("Removing unmanaged agent %s(%s)", container.Names[0], container.ID)
c.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{
Force: true,
})
}()
}
return nil
}
func run() error {
params, err := getParams()
if err != nil {
return err
}
bytes, err := json.Marshal(params)
if err != nil {
return err
}
token, server, err := getTokenAndURL()
if err != nil {
return err
}
headers := map[string][]string{
Token: {token},
Params: {base64.StdEncoding.EncodeToString(bytes)},
}
serverURL, err := url.Parse(server)
if err != nil {
return err
}
onConnect := func(ctx context.Context) error {
connected()
connectConfig := fmt.Sprintf("https://%s/v3/connect/config", serverURL.Host)
if err := rkenodeconfigclient.ConfigClient(ctx, connectConfig, headers); err != nil {
return err
}
if isCluster() {
return nil
}
if err := cleanup(context.Background()); err != nil {
return err
}
go func() {
logrus.Infof("Starting plan monitor")
for {
select {
case <-time.After(2 * time.Minute):
err := rkenodeconfigclient.ConfigClient(ctx, connectConfig, headers)
if err != nil {
logrus.Errorf("failed to check plan: %v", err)
}
case <-ctx.Done():
return
}
}
}()
return nil
}
for {
wsURL := fmt.Sprintf("wss://%s/v3/connect", serverURL.Host)
if !isConnect() {
wsURL += "/register"
}
logrus.Infof("Connecting to %s with token %s", wsURL, token)
remotedialer.ClientConnect(wsURL, http.Header(headers), nil, func(proto, address string) bool {
switch proto {
case "tcp":
return true
case "unix":
return address == "/var/run/docker.sock"
}
return false
}, onConnect)
time.Sleep(5 * time.Second)
}
}
|
[
"\"CATTLE_DEBUG\"",
"\"RANCHER_DEBUG\"",
"\"CATTLE_CLUSTER\"",
"\"CATTLE_AGENT_CONNECT\"",
"\"CATTLE_K8S_MANAGED\""
] |
[] |
[
"RANCHER_DEBUG",
"CATTLE_DEBUG",
"CATTLE_CLUSTER",
"CATTLE_K8S_MANAGED",
"CATTLE_AGENT_CONNECT"
] |
[]
|
["RANCHER_DEBUG", "CATTLE_DEBUG", "CATTLE_CLUSTER", "CATTLE_K8S_MANAGED", "CATTLE_AGENT_CONNECT"]
|
go
| 5 | 0 | |
main.go
|
package main
import (
"fmt"
"github.com/danielkov/gin-helmet"
"github.com/gin-contrib/cors"
"github.com/gin-contrib/static"
"github.com/gin-gonic/gin"
"github.com/joho/godotenv"
log "github.com/sirupsen/logrus"
"gitlab.127-0-0-1.fr/vx3r/wg-gen-web/api"
"gitlab.127-0-0-1.fr/vx3r/wg-gen-web/core"
"gitlab.127-0-0-1.fr/vx3r/wg-gen-web/util"
"os"
"path/filepath"
)
func init() {
log.SetFormatter(&log.TextFormatter{})
log.SetOutput(os.Stderr)
log.SetLevel(log.DebugLevel)
}
func main() {
log.Infof("Starting Wg Gen Web version: %s", util.Version)
// load .env environment variables
err := godotenv.Load()
if err != nil {
log.WithFields(log.Fields{
"err": err,
}).Fatal("failed to load .env file")
}
// check directories or create it
if !util.DirectoryExists(filepath.Join(os.Getenv("WG_CONF_DIR"))) {
err = os.Mkdir(filepath.Join(os.Getenv("WG_CONF_DIR")), 0755)
if err != nil {
log.WithFields(log.Fields{
"err": err,
"dir": filepath.Join(os.Getenv("WG_CONF_DIR")),
}).Fatal("failed to create directory")
}
}
// check if server.json exists otherwise create it with default values
if !util.FileExists(filepath.Join(os.Getenv("WG_CONF_DIR"), "server.json")) {
_, err = core.ReadServer()
if err != nil {
log.WithFields(log.Fields{
"err": err,
}).Fatal("server.json doesnt not exists and can not read it")
}
}
if os.Getenv("GIN_MODE") == "debug" {
// set gin release debug
gin.SetMode(gin.DebugMode)
} else {
// set gin release mode
gin.SetMode(gin.ReleaseMode)
// disable console color
gin.DisableConsoleColor()
// log level info
log.SetLevel(log.InfoLevel)
}
// migrate
err = core.MigrateInitialStructChange()
if err != nil {
log.WithFields(log.Fields{
"err": err,
}).Fatal("failed to migrate initial struct changes")
}
err = core.MigratePresharedKey()
if err != nil {
log.WithFields(log.Fields{
"err": err,
}).Fatal("failed to migrate preshared key struct changes")
}
// dump wg config file
err = core.UpdateServerConfigWg()
if err != nil {
log.WithFields(log.Fields{
"err": err,
}).Fatal("failed to dump wg config file")
}
// creates a gin router with default middleware: logger and recovery (crash-free) middleware
app := gin.Default()
// cors middleware
config := cors.DefaultConfig()
config.AllowAllOrigins = true
app.Use(cors.New(config))
// protection middleware
app.Use(helmet.Default())
// no route redirect to frontend app
app.NoRoute(func(c *gin.Context) {
c.Redirect(301, "/index.html")
})
// serve static files
app.Use(static.Serve("/", static.LocalFile("./ui/dist", false)))
// apply api router
api.ApplyRoutes(app)
err = app.Run(fmt.Sprintf("%s:%s", os.Getenv("SERVER"), os.Getenv("PORT")))
if err != nil {
log.WithFields(log.Fields{
"err": err,
}).Fatal("failed to start server")
}
}
|
[
"\"WG_CONF_DIR\"",
"\"WG_CONF_DIR\"",
"\"WG_CONF_DIR\"",
"\"WG_CONF_DIR\"",
"\"GIN_MODE\"",
"\"SERVER\"",
"\"PORT\""
] |
[] |
[
"PORT",
"WG_CONF_DIR",
"SERVER",
"GIN_MODE"
] |
[]
|
["PORT", "WG_CONF_DIR", "SERVER", "GIN_MODE"]
|
go
| 4 | 0 | |
cmd/gate.go
|
package main
import (
"context"
"encoding/hex"
"encoding/json"
"flag"
"fmt"
"github.com/MinterTeam/explorer-gate/v2/api"
"github.com/MinterTeam/explorer-gate/v2/core"
"github.com/MinterTeam/minter-go-sdk/v2/api/grpc_client"
"github.com/joho/godotenv"
"github.com/sirupsen/logrus"
"github.com/tendermint/tendermint/libs/pubsub"
"os"
"time"
)
var Version string // Version
var GitCommit string // Git commit
var BuildDate string // Build date
var AppName string // Application name
var version = flag.Bool(`v`, false, `Prints current version`)
func main() {
flag.Parse()
if *version {
fmt.Printf(`%s v%s Commit %s builded %s`, AppName, Version, GitCommit, BuildDate)
os.Exit(0)
}
path, err := os.Getwd()
if fileExists(path + "/.env") {
fmt.Printf(`loading .env file: %s`, path+".env")
err := godotenv.Load()
if err != nil {
panic("Error loading .env file")
}
}
//Init Logger
logger := logrus.New()
logger.SetFormatter(&logrus.JSONFormatter{})
logger.SetOutput(os.Stdout)
logger.SetReportCaller(true)
if os.Getenv("GATE_DEBUG") != "1" && os.Getenv("GATE_DEBUG") != "true" {
logger.SetFormatter(&logrus.TextFormatter{
DisableColors: false,
FullTimestamp: true,
})
} else {
logger.SetFormatter(&logrus.JSONFormatter{})
logger.SetLevel(logrus.WarnLevel)
}
contextLogger := logger.WithFields(logrus.Fields{
"version": Version,
"app": "Minter Gate",
})
pubsubServer := pubsub.NewServer()
err = pubsubServer.Start()
if err != nil {
contextLogger.Error(err)
}
nodeApi, err := grpc_client.New(os.Getenv("NODE_API"))
if err != nil {
logrus.Fatal(err)
}
status, err := nodeApi.Status()
if err != nil {
panic(err)
}
latestBlock := status.LatestBlockHeight
logger.Info(fmt.Sprintf("Starting with block %d", status.LatestBlockHeight))
gateService := core.New(nodeApi, pubsubServer, contextLogger)
go func() {
for {
block, err := nodeApi.BlockExtended(latestBlock, true)
if err != nil {
time.Sleep(time.Second)
continue
}
for _, tx := range block.Transactions {
if tx.Code != 0 {
err := pubsubServer.PublishWithTags(context.TODO(), "FailTx", map[string]string{
"error": fmt.Sprintf("%X", tx.Log),
})
if err != nil {
logger.Error(err)
}
continue
}
b, err := hex.DecodeString(tx.RawTx)
if err != nil {
logger.Error(err)
continue
}
txJson, err := json.Marshal(tx)
if err != nil {
logger.Error(err)
continue
}
err = pubsubServer.PublishWithTags(context.TODO(), "NewTx", map[string]string{
"tx": fmt.Sprintf("%X", b),
"txData": string(txJson),
"height": fmt.Sprintf("%d", block.Height),
})
if err != nil {
logger.Error(err)
}
}
latestBlock++
time.Sleep(1 * time.Second)
}
}()
api.Run(gateService, pubsubServer)
}
func fileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
return !info.IsDir()
}
|
[
"\"GATE_DEBUG\"",
"\"GATE_DEBUG\"",
"\"NODE_API\""
] |
[] |
[
"NODE_API",
"GATE_DEBUG"
] |
[]
|
["NODE_API", "GATE_DEBUG"]
|
go
| 2 | 0 | |
Testing/PythonTests/coverage.py
|
import os
import sys
from shapeworks import *
def coverageTest():
femur = Mesh(os.environ["DATA"] + "/femur.vtk")
pelvis = Mesh(os.environ["DATA"] + "/pelvis.vtk")
pelvis.coverage(femur)
compareMesh = Mesh(os.environ["DATA"] + "/fm_coverage.vtk")
return pelvis == compareMesh
val = coverageTest()
if val is False:
sys.exit(1)
|
[] |
[] |
[
"DATA"
] |
[]
|
["DATA"]
|
python
| 1 | 0 | |
features/fixtures/negroni/main.go
|
package main
import (
"context"
"fmt"
"log"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/bugsnag/bugsnag-go/v2"
"github.com/bugsnag/bugsnag-go-negroni"
"github.com/urfave/negroni"
)
func main() {
config := bugsnag.Configuration{
APIKey: os.Getenv("API_KEY"),
Endpoints: bugsnag.Endpoints{
Notify: os.Getenv("BUGSNAG_ENDPOINT"),
Sessions: os.Getenv("BUGSNAG_ENDPOINT"),
},
AppVersion: os.Getenv("APP_VERSION"),
AppType: os.Getenv("APP_TYPE"),
Hostname: os.Getenv("HOSTNAME"),
}
if notifyReleaseStages := os.Getenv("NOTIFY_RELEASE_STAGES"); notifyReleaseStages != "" {
config.NotifyReleaseStages = strings.Split(notifyReleaseStages, ",")
}
if releaseStage := os.Getenv("RELEASE_STAGE"); releaseStage != "" {
config.ReleaseStage = releaseStage
}
if filters := os.Getenv("PARAMS_FILTERS"); filters != "" {
config.ParamsFilters = []string{filters}
}
acs, err := strconv.ParseBool(os.Getenv("AUTO_CAPTURE_SESSIONS"))
if err == nil {
config.AutoCaptureSessions = acs
}
bugsnag.Configure(config)
// Increase publish rate for testing
bugsnag.DefaultSessionPublishInterval = time.Millisecond * 300
mux := http.NewServeMux()
mux.HandleFunc("/autonotify-then-recover", unhandledCrash)
mux.HandleFunc("/handled", handledError)
mux.HandleFunc("/session", session)
mux.HandleFunc("/autonotify", autonotify)
mux.HandleFunc("/onbeforenotify", onBeforeNotify)
mux.HandleFunc("/recover", dontDie)
mux.HandleFunc("/user", user)
n := negroni.New()
n.Use(negroni.NewRecovery())
// Add bugsnag handler after negroni.NewRecovery() to ensure panics get picked up
n.Use(bugsnagnegroni.AutoNotify())
n.UseHandler(mux)
http.ListenAndServe(":"+os.Getenv("SERVER_PORT"), n)
}
func unhandledCrash(w http.ResponseWriter, r *http.Request) {
// Invalid type assertion, will panic
func(a interface{}) string {
return a.(string)
}(struct{}{})
}
func handledError(w http.ResponseWriter, r *http.Request) {
if _, err := os.Open("nonexistent_file.txt"); err != nil {
if errClass := os.Getenv("ERROR_CLASS"); errClass != "" {
bugsnag.Notify(err, r.Context(), bugsnag.ErrorClass{Name: errClass})
} else {
bugsnag.Notify(err, r.Context())
}
}
}
func session(w http.ResponseWriter, r *http.Request) {
log.Println("single session")
}
func dontDie(w http.ResponseWriter, r *http.Request) {
defer bugsnag.Recover(r.Context())
panic("Request killed but recovered")
}
func user(w http.ResponseWriter, r *http.Request) {
bugsnag.Notify(fmt.Errorf("oops"), bugsnag.User{
Id: "test-user-id",
Name: "test-user-name",
Email: "test-user-email",
})
}
func onBeforeNotify(w http.ResponseWriter, r *http.Request) {
bugsnag.OnBeforeNotify(
func(event *bugsnag.Event, config *bugsnag.Configuration) error {
if event.Message == "Ignore this error" {
return fmt.Errorf("not sending errors to ignore")
}
// continue notifying as normal
if event.Message == "Change error message" {
event.Message = "Error message was changed"
}
return nil
})
bugsnag.Notify(fmt.Errorf("Ignore this error"))
time.Sleep(100 * time.Millisecond)
bugsnag.Notify(fmt.Errorf("Don't ignore this error"))
time.Sleep(100 * time.Millisecond)
bugsnag.Notify(fmt.Errorf("Change error message"))
time.Sleep(100 * time.Millisecond)
}
func autonotify(w http.ResponseWriter, r *http.Request) {
go func(ctx context.Context) {
defer func() { recover() }()
defer bugsnag.AutoNotify(ctx)
panic("Go routine killed with auto notify")
}(r.Context())
}
|
[
"\"API_KEY\"",
"\"BUGSNAG_ENDPOINT\"",
"\"BUGSNAG_ENDPOINT\"",
"\"APP_VERSION\"",
"\"APP_TYPE\"",
"\"HOSTNAME\"",
"\"NOTIFY_RELEASE_STAGES\"",
"\"RELEASE_STAGE\"",
"\"PARAMS_FILTERS\"",
"\"AUTO_CAPTURE_SESSIONS\"",
"\"SERVER_PORT\"",
"\"ERROR_CLASS\""
] |
[] |
[
"SERVER_PORT",
"NOTIFY_RELEASE_STAGES",
"APP_TYPE",
"API_KEY",
"RELEASE_STAGE",
"BUGSNAG_ENDPOINT",
"ERROR_CLASS",
"HOSTNAME",
"PARAMS_FILTERS",
"APP_VERSION",
"AUTO_CAPTURE_SESSIONS"
] |
[]
|
["SERVER_PORT", "NOTIFY_RELEASE_STAGES", "APP_TYPE", "API_KEY", "RELEASE_STAGE", "BUGSNAG_ENDPOINT", "ERROR_CLASS", "HOSTNAME", "PARAMS_FILTERS", "APP_VERSION", "AUTO_CAPTURE_SESSIONS"]
|
go
| 11 | 0 | |
scripts/smgr/base.py
|
import test_v1
import fixtures
import sys
import os
from common.contrail_test_init import ContrailTestInit
from smgr_common import SmgrFixture
class ServerManagerTest(test_v1.BaseTestCase_v1):
@classmethod
def setUpClass(self):
super(ServerManagerTest, self).setUpClass()
if 'PARAMS_FILE' in os.environ:
self.ini_file = os.environ.get('PARAMS_FILE')
else:
self.ini_file = 'sanity_params.ini'
if 'TESTBED_FILE' in os.environ:
self.testbed_py = os.environ.get('TESTBED_FILE')
else:
self.testbed_py = 'testbed.py'
if 'SMGR_FILE' in os.environ:
self.smgr_file = os.environ.get('SMGR_FILE')
else:
self.smgr_file = 'smgr_input.ini'
self.inputs = '1'
self.logger.info("Configuring setup for smgr tests.")
self.smgr_fixture = SmgrFixture(self.inputs, \
testbed_py=self.testbed_py, \
smgr_config_ini=self.smgr_file, \
test_local=False,logger = self.logger)
self.logger.info("Adding Server to smgr DB")
self.smgr_fixture.svrmgr_add_all()
print ".................................................completed init..............."
# end setUpClass
@classmethod
def tearDownClass(self):
super(ServerManagerTest, self).tearDownClass()
#end tearDownClass
def verify(self):
"""verfiy common resources."""
self.logger.debug("Verify the common resources")
pass
def remove_from_cleanups(self, fix):
for cleanup in self._cleanups:
if fix.cleanUp in cleanup:
self._cleanups.remove(cleanup)
break
#end remove_from_cleanups
|
[] |
[] |
[
"SMGR_FILE",
"TESTBED_FILE",
"PARAMS_FILE"
] |
[]
|
["SMGR_FILE", "TESTBED_FILE", "PARAMS_FILE"]
|
python
| 3 | 0 | |
classroom_scraping.py
|
import django
import os
import sys
from django.core.exceptions import ObjectDoesNotExist
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.options import Options
from os.path import dirname
PROJECT_PATH = os.path.join(dirname(__file__), "../")
TIME_INTERVAL = 5
def scrape_classrooms(building, table):
counter = 1
while True:
try:
classroom_name = table.find_element_by_xpath(
f"((//tr)[{counter}]//td)[3]"
).text
if len(Classroom.objects.filter(name=classroom_name)) == 0:
print(f"Creating classroom {classroom_name} ({building.name})")
Classroom.objects.create(
name=classroom_name,
building=building,
)
counter += 1
except NoSuchElementException:
break
def scrape(driver):
for building_name in Building.objects.values_list('name'):
driver.get("https://www.unimore.it/covid19/trovaaula.html")
print(f"----------------- {building_name[0]} -----------------")
building_name = building_name[0]
apostrophe_adjusted = building_name.split("'")[0]
x = driver.find_element_by_xpath(
f"//li[contains(text(), '{apostrophe_adjusted}')]"
"//a[contains(text(), 'Elenco Aule con link per registrazione presenza')]"
)
driver.execute_script("arguments[0].click();", x)
try:
table = driver.find_element_by_xpath("//table[@class='tabella-responsiva']/tbody")
building = Building.objects.get(name=building_name)
scrape_classrooms(building, table)
except (NoSuchElementException, ObjectDoesNotExist):
pass
if __name__ == "__main__":
sys.path.append(os.path.join(os.path.dirname(__file__), PROJECT_PATH))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "reservation_tool_base_folder.settings")
django.setup()
from reservation_management.models import Building, Classroom
options = Options()
options.headless = True
driver = webdriver.Firefox(options=options)
# Selenium configuration:
driver.implicitly_wait(TIME_INTERVAL)
scrape(driver)
driver.delete_all_cookies()
driver.close()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
azure-vote/azure-vote/main.py
|
from flask import Flask, request, render_template
import os
import random
import redis
import socket
import sys
app = Flask(__name__)
# Load configurations from environment or config file
app.config.from_pyfile('config_file.cfg')
if ("VOTE1VALUE" in os.environ and os.environ['VOTE1VALUE']):
button1 = os.environ['VOTE1VALUE']
else:
button1 = app.config['VOTE1VALUE']
if ("VOTE2VALUE" in os.environ and os.environ['VOTE2VALUE']):
button2 = os.environ['VOTE2VALUE']
else:
button2 = app.config['VOTE2VALUE']
if ("VOTE3VALUE" in os.environ and os.environ['VOTE3VALUE']):
button3 = os.environ['VOTE3VALUE']
else:
button3 = app.config['VOTE3VALUE']
if ("TITLE" in os.environ and os.environ['TITLE']):
title = os.environ['TITLE']
else:
title = app.config['TITLE']
# Redis configurations
redis_server = os.environ['REDIS']
# Redis Connection
try:
if "REDIS_PWD" in os.environ:
r = redis.StrictRedis(host=redis_server,
port=6379,
password=os.environ['REDIS_PWD'])
else:
r = redis.Redis(redis_server)
r.ping()
except redis.ConnectionError:
exit('Failed to connect to Redis, terminating.')
# Change title to host name to demo NLB
if app.config['SHOWHOST'] == "true":
title = socket.gethostname()
# Init Redis
if not r.get(button1): r.set(button1,0)
if not r.get(button2): r.set(button2,0)
if not r.get(button3): r.set(button3,0)
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'GET':
# Get current values
vote1 = r.get(button1).decode('utf-8')
vote2 = r.get(button2).decode('utf-8')
vote3 = r.get(button3).decode('utf-8')
# Return index with values
return render_template("index.html", value1=int(vote1), value2=int(vote2), value3=int(vote3), button1=button1, button2=button2, button3=button3, title=title)
elif request.method == 'POST':
if request.form['vote'] == 'reset':
# Empty table and return results
r.set(button1,0)
r.set(button2,0)
r.set(button3,0)
vote1 = r.get(button1).decode('utf-8')
vote2 = r.get(button2).decode('utf-8')
vote3 = r.get(button3).decode('utf-8')
return render_template("index.html", value1=int(vote1), value2=int(vote2), value3=int(vote3), button1=button1, button2=button2, button3=button3, title=title)
else:
# Insert vote result into DB
vote = request.form['vote']
r.incr(vote,1)
# Get current values
vote1 = r.get(button1).decode('utf-8')
vote2 = r.get(button2).decode('utf-8')
vote3 = r.get(button3).decode('utf-8')
# Return results
return render_template("index.html", value1=int(vote1), value2=int(vote2), value3=int(vote3), button1=button1, button2=button2, button3=button3, title=title)
if __name__ == "__main__":
app.run()
|
[] |
[] |
[
"VOTE3VALUE",
"REDIS",
"TITLE",
"VOTE1VALUE",
"VOTE2VALUE",
"REDIS_PWD"
] |
[]
|
["VOTE3VALUE", "REDIS", "TITLE", "VOTE1VALUE", "VOTE2VALUE", "REDIS_PWD"]
|
python
| 6 | 0 | |
maven-plugins/maven-php-phpdoc/src/main/java/org/phpmaven/phpdoc/impl/PhpdocBatchSupport.java
|
/**
* Copyright 2010-2012 by PHP-maven.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.phpmaven.phpdoc.impl;
import java.io.File;
import java.io.IOException;
import org.apache.maven.execution.MavenSession;
import org.apache.maven.plugin.logging.Log;
import org.codehaus.plexus.component.annotations.Component;
import org.codehaus.plexus.component.annotations.Configuration;
import org.codehaus.plexus.component.annotations.Requirement;
import org.codehaus.plexus.component.repository.exception.ComponentLookupException;
import org.codehaus.plexus.configuration.PlexusConfigurationException;
import org.codehaus.plexus.util.xml.Xpp3Dom;
import org.phpmaven.core.BuildPluginConfiguration;
import org.phpmaven.core.ConfigurationParameter;
import org.phpmaven.core.IComponentFactory;
import org.phpmaven.exec.IPhpExecutableConfiguration;
import org.phpmaven.phpdoc.IPhpdocRequest;
import org.phpmaven.phpdoc.IPhpdocSupport;
import org.phpmaven.phpexec.library.IPhpExecutable;
import org.phpmaven.phpexec.library.PhpCoreException;
import org.phpmaven.phpexec.library.PhpErrorException;
import org.phpmaven.phpexec.library.PhpException;
import org.phpmaven.phpexec.library.PhpWarningException;
/**
* Implementation of phpdoc support invoking the phpdoc batch.
*
* @author <a href="mailto:[email protected]">Martin Eisengardt</a>
* @since 2.0.0
*/
@Component(role = IPhpdocSupport.class, instantiationStrategy = "per-lookup", hint = "PHP_EXE")
@BuildPluginConfiguration(groupId = "de.slothsoft.phpmaven", artifactId = "maven-php-phpdoc", filter = {
"phpdocService", "installPhpdoc", "installFolder"
})
public class PhpdocBatchSupport extends AbstractPhpdocSupport {
/**
* Path to phpDoc. If nothing is configured phpdoc is expected in the path.
*/
@Configuration(name = "phpDocFilePath", value = "phpdoc")
private String phpDocFilePath;
/**
* The phpdoc configuraton file. The default is ${project.basedir}/src/site/phpdoc/phpdoc.config
*/
@ConfigurationParameter(name = "phpDocConfigFile", expression = "${project.basedir}/src/site/phpdoc/phpdoc.config")
private File phpDocConfigFile;
/**
* The generated phpDoc file.
*/
@ConfigurationParameter(
name = "generatedPhpDocConfigFile",
expression = "${project.build.directory}/temp/phpdoc/phpdoc.ini")
private File generatedPhpDocConfigFile;
/**
* The executable config.
*/
@Configuration(name = "executableConfig", value = "")
private Xpp3Dom executableConfig;
/**
* The component factory.
*/
@Requirement
private IComponentFactory factory;
/**
* The maven session.
*/
@ConfigurationParameter(name = "session", expression = "${session}")
private MavenSession session;
/**
* The phpdoc version to be used.
*/
@Configuration(name = "phpdocVersion", value = "1.4.2")
private String phpdocVersion;
/**
* The additional arguments passed to phpdoc.
*/
@Configuration(name = "arguments", value = "")
private String arguments;
/**
* {@inheritDoc}
*/
    @Override
    public void generateReport(Log log, IPhpdocRequest request) throws PhpException {
        try {
            // Resolve the configured PHP executable through the component factory.
            final IPhpExecutable exec = this.factory.lookup(
                IPhpExecutableConfiguration.class,
                this.executableConfig,
                this.session).getPhpExecutable();
            // phpDocumentor 1.x consumes an ini config; newer versions expect xml.
            if (this.phpdocVersion.startsWith("1.")) {
                writeIni(log, request, phpDocConfigFile, generatedPhpDocConfigFile);
            } else {
                writeXml(log, request, phpDocConfigFile, generatedPhpDocConfigFile);
            }
            // Search PATH (plus java.library.path) for the phpdoc launcher when no
            // explicit path was configured.
            final String path = System.getProperty("java.library.path") + File.pathSeparator + System.getenv("PATH");
            log.debug("PATH: " + path);
            final String[] paths = path.split(File.pathSeparator);
            File phpDocFile = null;
            if ("phpdoc".equals(phpDocFilePath)) {
                for (int i = 0; i < paths.length; i++) {
                    final File file = new File(paths[i], "phpdoc");
                    if (file.isFile()) {
                        phpDocFile = file;
                        break;
                    }
                }
            } else {
                phpDocFile = new File(phpDocFilePath);
            }
            if (phpDocFile == null || !phpDocFile.isFile()) {
                throw new PhpCoreException("phpdoc not found in path");
            }
            // Quote both the launcher and the generated config path; extra
            // user-supplied arguments are appended verbatim.
            String command = "\"" + phpDocFile + "\" -c \"" + generatedPhpDocConfigFile.getAbsolutePath() + "\"";
            if (arguments != null && arguments.length() > 0) {
                command += " " + arguments;
            }
            log.debug("Executing PHPDocumentor: " + command);
            // XXX: commandLine.setWorkingDirectory(phpDocFile.getParent());
            String result;
            try {
                result = exec.execute(command, phpDocFile);
            } catch (PhpWarningException ex) {
                result = ex.getAppendedOutput();
                // silently ignore; only errors are important
            }
            // Scan phpdoc's output: any line flagged ERROR: aborts the report.
            for (final String line : result.split("\n")) {
                if (line.startsWith("ERROR:")) {
                    // this is a error of phpdocumentor.
                    log.error("Got error from php-documentor. " +
                        "Enable debug (-X) to fetch the php output.\n" +
                        line);
                    throw new PhpErrorException(phpDocFile, line);
                }
            }
        } catch (PlexusConfigurationException ex) {
            throw new PhpCoreException("Errors invoking phpdoc", ex);
        } catch (IOException ex) {
            throw new PhpCoreException("Errors invoking phpdoc", ex);
        } catch (ComponentLookupException ex) {
            throw new PhpCoreException("Errors invoking phpdoc", ex);
        }
    }
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
java
| 1 | 0 | |
pymystem3/mystem.py
|
# -*- coding: utf-8 -*-
"""
A Python wrapper of the Yandex Mystem 3.1 morphological analyzer.
"""
from __future__ import print_function
from itertools import ifilter, imap
import os
import platform
import select
import subprocess
import sys
import socket
if sys.version_info[0] < 3:
from cStringIO import StringIO
else:
from io import BytesIO as StringIO
try:
import ujson as json
except ImportError:
import json
from .constants import (MYSTEM_BIN, MYSTEM_EXE, MYSTEM_DIR)
try:
broken_pipe = BrokenPipeError
except NameError:
broken_pipe = socket.error
_TARBALL_URLS = {
'linux': {
'64bit': "http://download.cdn.yandex.net/mystem/mystem-3.1-linux-64bit.tar.gz",
},
'darwin': "http://download.cdn.yandex.net/mystem/mystem-3.1-macosx.tar.gz",
'win': {
'64bit': "http://download.cdn.yandex.net/mystem/mystem-3.1-win-64bit.zip",
},
}
_NL = unicode('\n').encode('utf-8')
_POSIX = os.name == 'posix'
_PIPELINE_MODE = False
if _POSIX and '__pypy__' in sys.builtin_module_names:
_PIPELINE_MODE = sys.pypy_version_info >= (2, 5, 0)
elif _POSIX:
_PIPELINE_MODE = True
def autoinstall(out=sys.stderr):
    """
    Install mystem binary as :py:const:`~pymystem3.constants.MYSTEM_BIN`.
    Do nothing if already installed.
    """
    # Skip the download entirely when a binary is already in place.
    if not os.path.isfile(MYSTEM_BIN):
        install(out)
def install(out=sys.stderr):
    """
    Install mystem binary as :py:const:`~pymystem3.constants.MYSTEM_BIN`.
    Overwrite if already installed.
    """
    import requests
    import tempfile

    url = _get_tarball_url()
    print("Installing mystem to %s from %s" % (MYSTEM_BIN, url), file=out)

    if not os.path.isdir(MYSTEM_DIR):
        os.makedirs(MYSTEM_DIR)

    # Download into a temp file first, then unpack; the temp file is always
    # removed, even on failure.
    handle, archive_path = tempfile.mkstemp()
    try:
        response = requests.get(url, stream=True)
        with os.fdopen(handle, 'wb') as sink:
            for block in response.iter_content(64 * 1024):
                sink.write(block)
            sink.flush()

        if url.endswith('.tar.gz'):
            import tarfile
            tarball = tarfile.open(archive_path)
            try:
                # Only the mystem executable itself is extracted from the tar.
                tarball.extract(MYSTEM_EXE, MYSTEM_DIR)
            finally:
                tarball.close()
        elif url.endswith('.zip'):
            import zipfile
            bundle = zipfile.ZipFile(archive_path)
            try:
                bundle.extractall(MYSTEM_DIR)
            finally:
                bundle.close()
        else:
            raise NotImplementedError("Could not install mystem from %s" % url)
    finally:
        os.unlink(archive_path)
def _get_on_prefix(kvs, key):
for k, v in kvs.iteritems():
if key.startswith(k):
return v
return None
def _get_tarball_url():
    """Return the mystem tarball URL for the current platform/bitness.

    :raises NotImplementedError: when the platform or bitness has no
        known binary distribution.
    """
    bits, _ = platform.architecture()

    url = _get_on_prefix(_TARBALL_URLS, sys.platform)
    if url is None:
        raise NotImplementedError("Your system is not supported. Feel free to report bug or make a pull request.")

    # Leaf entries are plain URL strings; nested dicts are keyed by bitness.
    # (The original tested isinstance(url, basestring), a NameError on Py3.)
    if not isinstance(url, dict):
        return url

    url = url.get(bits, None)
    if url is None:
        raise NotImplementedError("Your system is not supported. Feel free to report bug or make a pull request.")
    return url
def _set_non_blocking(fd):
    """
    Switch the given file object's descriptor into non-blocking mode.

    No-op unless pipeline mode is enabled (fcntl is POSIX-only).
    """
    if not _PIPELINE_MODE:
        return
    import fcntl
    current = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, current | os.O_NONBLOCK)
class Mystem(object):
    """
    Wrap mystem binary to be able it use from Python.
    The two main methods you may use are the :py:meth:`__init__` initializer,
    and the :py:meth:`analyze` method to process your data and get mystem
    output results.
    :param mystem_bin: path to mystem binary
    :type mystem_bin: str
    :param grammar_info: glue grammatical information for same lemmas in output.
    :type grammar_info: bool
    :param disambiguation: apply disambiguation
    :type disambiguation: bool
    :param entire_input: copy entire input to output
    :type entire_input: bool
    :param weight: print context-independent lemma weight
    :type weight: bool
    :param generate_all: generate all possible hypotheses
    :type generate_all: bool
    :param fixlist: Use fixlist file; file path
    :type fixlist: str
    .. note:: Default value of :py:attr:`mystem_bin` can be overwritted by :envvar:`MYSTEM_BIN`.
    """

    def __init__(
            self,
            mystem_bin=None,
            grammar_info=True,
            disambiguation=True,
            entire_input=True,
            weight=True,
            generate_all=False,
            fixlist=None
    ):
        self._mystem_bin = mystem_bin
        self._grammar_info = grammar_info
        self._disambiguation = disambiguation
        self._entire_input = entire_input
        self._weight = weight
        self._generate_all = generate_all
        self._fixlist = fixlist
        # Subprocess plumbing; populated lazily by _start_mystem().
        self._procin = None
        self._procout = None
        self._procout_no = None
        self._proc = None
        if self._mystem_bin is None:
            self._mystem_bin = os.environ.get("MYSTEM_BIN", None)
        if self._mystem_bin is None:
            # Download the binary on first use when none is configured.
            autoinstall()
            self._mystem_bin = MYSTEM_BIN
        # Translate constructor flags into mystem CLI arguments.
        self._mystemargs = ["--format", "json"]
        if self._grammar_info is True:
            self._mystemargs.append('-gi')
        if self._disambiguation is True:
            self._mystemargs.append('-d')
        if self._entire_input is True:
            self._mystemargs.append('-c')
        if self._weight is True:
            self._mystemargs.append('--weight')
        if self._generate_all is True:
            self._mystemargs.append('--generate-all')
        if self._fixlist is not None:
            self._mystemargs.append('--fixlist')
            self._mystemargs.append(self._fixlist)

    def __del__(self):
        self.close()  # terminate process on exit

    def __enter__(self):
        if self._proc is None:
            self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def start(self):
        """
        Run mystem binary.
        .. note:: It is not mandatory to call it. Use it if you want to avoid waiting for mystem loads.
        """
        self._start_mystem()

    def close(self):
        """Terminate the mystem subprocess and release its pipes (idempotent)."""
        if self._proc is not None:
            self._proc.terminate()  # Send TERM signal to process
            self._procin.close()    # Then close stdin
            self._procout.close()   # And stdout
            self._proc.wait()       # Finally wait for terminaion

            self._procin = None
            self._procout = None
            self._procout_no = None
            self._proc = None

    def _start_mystem(self):
        # bufsize=0 keeps the pipes unbuffered so each line round-trips
        # immediately; close_fds is POSIX-only behavior we rely on.
        self._proc = subprocess.Popen([self._mystem_bin] + self._mystemargs,
                                      stdin=subprocess.PIPE,
                                      stdout=subprocess.PIPE,
                                      bufsize=0,
                                      close_fds=True if _POSIX else False)

        self._procin, self._procout = self._proc.stdin, self._proc.stdout
        self._procout_no = self._procout.fileno()
        _set_non_blocking(self._procout)

    def analyze(self, text):
        """
        Make morphology analysis for a text.
        :param text: text to analyze
        :type text: str
        :returns: result of morphology analysis.
        :rtype: dict
        """
        result = []
        for line in text.splitlines():
            try:
                result.extend(self._analyze_impl(line))
            except broken_pipe:
                # mystem died mid-stream; restart it once and retry the line.
                self.close()
                self.start()
                result.extend(self._analyze_impl(line))
        return result

    def lemmatize(self, text):
        """
        Make morphology analysis for a text and return list of lemmas.
        :param text: text to analyze
        :type text: str
        :returns: list of lemmas
        :rtype: list
        """
        need_encode = (sys.version_info[0] < 3 and isinstance(text, str))

        infos = self.analyze(text)
        # Bug fix: use the builtin filter/map instead of the Python-2-only
        # itertools.ifilter/imap; semantics are identical here.
        lemmas = list(filter(None, map(self._get_lemma, infos)))

        if need_encode is True:
            lemmas = [l.encode('utf-8') for l in lemmas]

        return lemmas

    if _PIPELINE_MODE:
        def _analyze_impl(self, text):
            # Bug fix: the original tested isinstance(text, unicode), which
            # is a NameError on Python 3. Anything that is not already bytes
            # is UTF-8 encoded, which matches the old behavior on Python 2.
            if not isinstance(text, bytes):
                text = text.encode('utf-8')

            if self._proc is None:
                self._start_mystem()

            self._procin.write(text)
            self._procin.write(_NL)
            self._procin.flush()

            sio = StringIO()
            out = None
            obj = None
            select.select([self._procout_no], [], [])
            while True:
                try:
                    out = self._procout.read()
                    # A non-blocking read may legitimately return None when
                    # no data is ready; skip the write in that case.
                    if out:
                        sio.write(out)
                    obj = json.loads(sio.getvalue().decode('utf-8'))
                    break
                except (IOError, ValueError):
                    # Partial JSON so far -- wait (up to 30s) for more output.
                    rd, _, _ = select.select([self._procout_no], [], [], 30)
                    if self._procout_no not in rd:
                        raise RuntimeError("Problem has been occured. Current state:\ntext:\n%r\nout:\n%r\nsio:\n%r" %
                                           (text, out, sio.getvalue()))

            return obj
    else:
        def _analyze_impl(self, text):
            # See pipeline variant: bytes check replaces the py2-only
            # isinstance(text, unicode) test.
            if not isinstance(text, bytes):
                text = text.encode('utf-8')

            if self._proc is None:
                self._start_mystem()

            self._procin.write(text)
            self._procin.write(_NL)
            # One-shot mode: each call spawns (and consumes) a process.
            out, _ = self._proc.communicate()
            self._proc = None
            try:
                obj = json.loads(out.decode('utf-8'))
            except (IOError, ValueError):
                raise RuntimeError("Problem has been occured. Current state:\ntext:\n%r\nout:\n%r" %
                                   (text, out))

            return obj

    @staticmethod
    def _get_lemma(o):
        """Return the first lemma of an analysis entry, falling back to the
        surface text, or None when neither is present."""
        try:
            return o['analysis'][0]['lex']
        except (KeyError, IndexError):
            return o['text'] if 'text' in o else None
|
[] |
[] |
[
"MYSTEM_BIN"
] |
[]
|
["MYSTEM_BIN"]
|
python
| 1 | 0 | |
ioctl/util/util.go
|
// Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package util
import (
"bytes"
"crypto/tls"
"fmt"
"math/big"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"github.com/spf13/cobra"
"go.uber.org/zap"
"golang.org/x/crypto/ssh/terminal"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
"github.com/iotexproject/iotex-core/ioctl/config"
"github.com/iotexproject/iotex-core/ioctl/output"
"github.com/iotexproject/iotex-core/ioctl/validator"
"github.com/iotexproject/iotex-core/pkg/log"
)
const (
// IotxDecimalNum defines the number of decimal digits for IoTeX
IotxDecimalNum = 18
// GasPriceDecimalNum defines the number of decimal digits for gas price
GasPriceDecimalNum = 12
)
// ExecuteCmd executes cmd with args, and return system output, e.g., help info, and error
func ExecuteCmd(cmd *cobra.Command, args ...string) (string, error) {
	// Capture both stdout and stderr of the command into one buffer.
	var captured bytes.Buffer
	cmd.SetOut(&captured)
	cmd.SetErr(&captured)
	cmd.SetArgs(args)
	execErr := cmd.Execute()
	return captured.String(), execErr
}
// ConnectToEndpoint starts a new connection
func ConnectToEndpoint(secure bool) (*grpc.ClientConn, error) {
	target := config.ReadConfig.Endpoint
	if target == "" {
		return nil, output.NewError(output.ConfigError, `use "ioctl config set endpoint" to config endpoint first`, nil)
	}
	// TLS is only negotiated when the caller asks for a secure channel.
	if secure {
		return grpc.Dial(target, grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})))
	}
	return grpc.Dial(target, grpc.WithInsecure())
}
// StringToRau converts different unit string into Rau big int.
// The amount may contain at most one '.'; the fractional part may not be
// longer than numDecimals digits. Negative amounts are rejected.
func StringToRau(amount string, numDecimals int) (*big.Int, error) {
    amountStrings := strings.Split(amount, ".")
    if len(amountStrings) != 1 {
        // More than one dot, or more fractional digits than allowed.
        if len(amountStrings) > 2 || len(amountStrings[1]) > numDecimals {
            return nil, output.NewError(output.ConvertError, "failed to convert string into big int", nil)
        }
        // Fold the fraction into the integer part and reduce the number of
        // zeros still to be appended accordingly.
        amountStrings[0] += amountStrings[1]
        numDecimals -= len(amountStrings[1])
    }
    // NOTE: this emptiness check runs after the fold above, so inputs like
    // ".5" are accepted while "." and "" are rejected.
    if len(amountStrings[0]) == 0 {
        return nil, output.NewError(output.ConvertError, "failed to convert string into big int", nil)
    }
    // Pad with the remaining decimal places to express the value in Rau.
    zeroString := strings.Repeat("0", numDecimals)
    amountStrings[0] += zeroString
    amountRau, ok := new(big.Int).SetString(amountStrings[0], 10)
    if !ok {
        return nil, output.NewError(output.ConvertError, "failed to convert string into big int", nil)
    }
    if amountRau.Sign() < 0 {
        return nil, output.NewError(output.ConvertError, "invalid number that is minus", nil)
    }
    return amountRau, nil
}
// RauToString converts Rau big int into Iotx string
func RauToString(amount *big.Int, numDecimals int) string {
if numDecimals == 0 {
return amount.String()
}
targetUnit := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(numDecimals)), nil)
amountInt, amountDec := big.NewInt(0), big.NewInt(0)
amountInt.DivMod(amount, targetUnit, amountDec)
if amountDec.Sign() != 0 {
decString := strings.TrimRight(amountDec.String(), "0")
zeroString := strings.Repeat("0", numDecimals-len(amountDec.String()))
decString = zeroString + decString
return amountInt.String() + "." + decString
}
return amountInt.String()
}
// StringToIOTX converts Rau string to Iotx string
func StringToIOTX(amount string) (string, error) {
	// Parse the raw Rau amount first, then re-render with IOTX decimals.
	rau, err := StringToRau(amount, 0)
	if err != nil {
		return "", output.NewError(output.ConvertError, "", err)
	}
	return RauToString(rau, IotxDecimalNum), nil
}
// ReadSecretFromStdin used to safely get password input.
// The terminal echo is disabled while reading; a helper goroutine restores
// the terminal state and exits with status 130 if the user hits Ctrl-C
// mid-prompt, so the shell is not left with echo turned off.
func ReadSecretFromStdin() (string, error) {
    signalListener := make(chan os.Signal, 1)
    signal.Notify(signalListener, os.Interrupt)
    routineTerminate := make(chan struct{})
    // Remember the terminal state so it can be restored on interrupt.
    sta, err := terminal.GetState(int(syscall.Stdin))
    if err != nil {
        return "", output.NewError(output.RuntimeError, "", err)
    }
    go func() {
        for {
            select {
            case <-signalListener:
                err = terminal.Restore(int(syscall.Stdin), sta)
                if err != nil {
                    log.L().Error("failed restore terminal", zap.Error(err))
                    return
                }
                // 130 = conventional exit status for SIGINT.
                os.Exit(130)
            case <-routineTerminate:
                return
            }
        }
    }()
    bytePass, err := terminal.ReadPassword(int(syscall.Stdin))
    // Stop the watcher goroutine before returning.
    close(routineTerminate)
    if err != nil {
        return "", output.NewError(output.RuntimeError, "failed to read password", nil)
    }
    return string(bytePass), nil
}
// GetAddress get address from address or alias or context
func GetAddress(in string) (string, error) {
	// Resolve aliases / defaults from the config first, then validate.
	resolved, err := config.GetAddressOrAlias(in)
	if err != nil {
		return "", output.NewError(output.AddressError, "", err)
	}
	return Address(resolved)
}
// Address returns the address corresponding to alias. if 'in' is an IoTeX address, returns 'in'
func Address(in string) (string, error) {
	// Anything shorter than a full io-address is treated as an alias.
	if len(in) < validator.IoAddrLen {
		if addr, ok := config.ReadConfig.Aliases[in]; ok {
			return addr, nil
		}
		return "", output.NewError(output.ConfigError, "cannot find address for alias "+in, nil)
	}
	if err := validator.ValidateAddress(in); err != nil {
		return "", output.NewError(output.ValidationError, in, err)
	}
	return in, nil
}
// JwtAuth used for ioctl set auth and send for every grpc request.
// It reads the token from ~/.config/ioctl/default/auth.jwt.
// Bug fix: the original concatenated os.Getenv("HOME"), which is empty on
// Windows and when HOME is unset; os.UserHomeDir is portable and returns an
// explicit error instead.
func JwtAuth() (jwt metadata.MD, err error) {
	home, err := os.UserHomeDir()
	if err != nil {
		return nil, err
	}
	jwtString, err := os.ReadFile(home + "/.config/ioctl/default/auth.jwt")
	if err != nil {
		return nil, err
	}
	return metadata.Pairs("authorization", "bearer "+string(jwtString)), nil
}
// CheckArgs used for check ioctl cmd arg(s)'s num
func CheckArgs(validNum ...int) cobra.PositionalArgs {
	return func(cmd *cobra.Command, args []string) error {
		received := len(args)
		// Accept the call when any permitted arity matches.
		for _, want := range validNum {
			if received == want {
				return nil
			}
		}
		// Render the accepted counts as "1 or 2 or ..." for the error text.
		nums := strings.Replace(strings.Trim(fmt.Sprint(validNum), "[]"), " ", " or ", -1)
		return fmt.Errorf("accepts "+nums+" arg(s), received %d", received)
	}
}
// TrimHexPrefix removes 0x prefix from a string if it has
func TrimHexPrefix(s string) string {
	if strings.HasPrefix(s, "0x") {
		return s[2:]
	}
	return s
}
// ParseHdwPath parse hdwallet path
func ParseHdwPath(addressOrAlias string) (uint32, uint32, uint32, error) {
// parse derive path
// for hdw::1/1/2, return 1, 1, 2
// for hdw::1/2, treat as default account = 0, return 0, 1, 2
args := strings.Split(addressOrAlias[5:], "/")
if len(args) < 2 || len(args) > 3 {
return 0, 0, 0, output.NewError(output.ValidationError, "derivation path error", nil)
}
arg := make([]uint32, 3)
j := 0
for i := 3 - len(args); i < 3; i++ {
u64, err := strconv.ParseUint(args[j], 10, 32)
if err != nil {
return 0, 0, 0, output.NewError(output.InputError, fmt.Sprintf("%v must be integer value", args[j]), err)
}
arg[i] = uint32(u64)
j++
}
return arg[0], arg[1], arg[2], nil
}
// AliasIsHdwalletKey check whether to use hdwallet key
func AliasIsHdwalletKey(addressOrAlias string) bool {
	// Case-insensitive match on the "hdw::" marker.
	lowered := strings.ToLower(addressOrAlias)
	return strings.HasPrefix(lowered, "hdw::")
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_vendored_deps/library/oc_serviceaccount_secret.py
|
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import fcntl
import json
import time
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/serviceaccount_secret -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_serviceaccount_secret
short_description: Module to manage openshift service account secrets
description:
- Manage openshift service account secrets programmatically.
options:
state:
description:
- If present, the service account will be linked with the secret if it is not already. If absent, the service account will be unlinked from the secret if it is already linked. If list, information about the service account secrets will be gathered and returned as part of the Ansible call results.
required: false
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: false
aliases: []
service_account:
description:
- Name of the service account.
required: true
default: None
aliases: []
namespace:
description:
- Namespace of the service account and secret.
required: true
default: None
aliases: []
secret:
description:
- The secret that should be linked to the service account.
required: false
default: None
aliases: []
author:
- "Kenny Woodson <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: get secrets of a service account
oc_serviceaccount_secret:
state: list
service_account: builder
namespace: default
register: sasecretout
- name: Link a service account to a specific secret
oc_serviceaccount_secret:
service_account: builder
secret: mynewsecret
namespace: default
register: sasecretout
'''
# -*- -*- -*- End included fragment: doc/serviceaccount_secret -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception):  # pragma: no cover
    '''Raised for any error produced while reading or editing YAML/JSON
    content through Yedit.'''
# pylint: disable=too-many-public-methods,too-many-instance-attributes
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
    # pylint: disable=too-many-arguments
    def __init__(self,
                 filename=None,
                 content=None,
                 content_type='yaml',
                 separator='.',
                 backup_ext=None,
                 backup=False):
        '''Create an editor over *filename* and/or in-memory *content*.

        :param filename: path of the yaml/json file to edit (may be None)
        :param content: pre-loaded content (dict, or a serialized string)
        :param content_type: 'yaml' or 'json'
        :param separator: key-path separator used by get/put/delete
        :param backup_ext: suffix for backup copies (defaults to a timestamp)
        :param backup: when True, copy the file aside before writing
        '''
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        if backup_ext is None:
            # Timestamped default keeps successive backups from clobbering
            # one another.
            self.backup_ext = ".{}".format(time.strftime("%Y%m%dT%H%M%S"))
        else:
            self.backup_ext = backup_ext
        # Parse file/content immediately; an empty document becomes {}.
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}
    @property
    def separator(self):
        '''Key-path separator currently in use (e.g. ``'.'``).'''
        return self._separator
    @separator.setter
    def separator(self, inc_sep):
        '''Replace the key-path separator used by subsequent operations.'''
        self._separator = inc_sep
    @property
    def yaml_dict(self):
        '''The parsed document (dict/list) this editor operates on.'''
        return self.__yaml_dict
    @yaml_dict.setter
    def yaml_dict(self, value):
        '''Replace the in-memory document wholesale.'''
        self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
    @staticmethod
    def valid_key(key, sep='.'):
        '''validate the incoming key

        Returns True when ``key`` matches the class-level ``re_valid_key``
        pattern, False otherwise.

        NOTE(review): ``re_valid_key`` contains a literal ``%s`` but is
        expanded with ``str.format`` (which only substitutes ``{}``), so the
        extra separator characters are never actually injected here --
        confirm whether ``%``-interpolation was intended.
        '''
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
            return False

        return True
    # pylint: disable=too-many-return-statements,too-many-branches
    @staticmethod
    def remove_entry(data, key, index=None, value=None, sep='.'):
        ''' remove data at location key

        With ``key == ''`` the whole top-level container is targeted:
        a dict drops ``value`` (or is cleared), a list drops ``value``,
        the ``index``-th item, or is emptied. Otherwise the key path is
        walked and only the final component is deleted.
        Returns True on success, False/None on failure.
        '''
        if key == '' and isinstance(data, dict):
            if value is not None:
                data.pop(value)
            elif index is not None:
                raise YeditException("remove_entry for a dictionary does not have an index {}".format(index))
            else:
                data.clear()

            return True

        elif key == '' and isinstance(data, list):
            ind = None
            if value is not None:
                try:
                    ind = data.index(value)
                except ValueError:
                    return False
            elif index is not None:
                ind = index
            else:
                del data[:]

            if ind is not None:
                data.pop(ind)

            return True

        if not (key and Yedit.valid_key(key, sep)) and \
           isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        # Walk everything but the last path component to find the parent.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True

        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True
    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Set ``item`` at the location described by ``key`` (notation a.b.c).

            d = {'a': {'b': 'c'}}
            key = a#b
            return c

        Intermediate dicts are created while walking; list indexes must
        already exist. Returns the stored item, or None/raises on failure.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        # Walk to the parent of the final component, vivifying missing dicts.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                    data = data[dict_key]
                    continue

                elif data and not isinstance(data, dict):
                    raise YeditException("Unexpected item type found while going through key " +
                                         "path: {} (at key: {})".format(key, dict_key))

                data[dict_key] = {}
                data = data[dict_key]

            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                raise YeditException("Unexpected item type found while going through key path: {}".format(key))

        if key == '':
            data = item

        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
            data[int(key_indexes[-1][0])] = item

        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item

        # didn't add/update to an existing list, nor add/update key to a dict
        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
        # non-existent array
        else:
            raise YeditException("Error adding to object at path: {}".format(key))

        return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
yfd.write(contents)
fcntl.flock(yfd, fcntl.LOCK_UN)
os.rename(tmp_filename, filename)
    def write(self):
        ''' write to file

        Serializes ``yaml_dict`` to ``self.filename`` (optionally copying a
        backup first). Returns ``(True, yaml_dict)``; raises YeditException
        when no filename is set or the content_type is unsupported.
        '''
        if not self.filename:
            raise YeditException('Please specify a filename.')

        if self.backup and self.file_exists():
            shutil.copy(self.filename, '{}{}'.format(self.filename, self.backup_ext))

        # Try to set format attributes if supported
        # (only ruamel.yaml documents carry the .fa attribute).
        try:
            self.yaml_dict.fa.set_block_style()
        except AttributeError:
            pass

        # Try to use RoundTripDumper if supported.
        # AttributeError means plain PyYAML is loaded; fall back to safe_dump.
        if self.content_type == 'yaml':
            try:
                Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
            except AttributeError:
                Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
        elif self.content_type == 'json':
            Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True))
        else:
            raise YeditException('Unsupported content_type: {}.'.format(self.content_type) +
                                 'Please specify a content_type of yaml or json.')

        return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
    def load(self, content_type='yaml'):
        ''' return yaml file

        Populates ``yaml_dict`` from ``self.content`` (dict or serialized
        string) or, failing that, from the backing file. Returns the parsed
        document or None when there is nothing to load.
        '''
        contents = self.read()
        if not contents and not self.content:
            return None

        if self.content:
            if isinstance(self.content, dict):
                # Already-parsed content wins outright.
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content

        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                # Try to set format attributes if supported
                # (.fa exists only on ruamel.yaml round-trip documents).
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass
                # Try to use RoundTripLoader if supported.
                # AttributeError means plain PyYAML; fall back to safe_load.
                try:
                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
                except AttributeError:
                    self.yaml_dict = yaml.safe_load(contents)

                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass

            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. {}'.format(err))

        return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path, index=None, value=None):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, index, value, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' put path, value into a dict

        For a dict entry, merge ``value`` (must be a dict) into it.
        For a list entry, replace the item at ``index`` (or the item equal
        to ``curr_value``); otherwise append ``value`` if it is not already
        present. Returns ``(changed, yaml_dict)``.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
                                     'value=[{}] type=[{}]'.format(value, type(value)))

            entry.update(value)
            return (True, self.yaml_dict)

        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            # Locate the item to replace, by value first, then by index.
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)

            elif index is not None:
                ind = index

            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)

            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)

            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)
    def put(self, path, value):
        ''' put path, value into a dict

        Edits are performed on a round-tripped copy so a failed add leaves
        ``yaml_dict`` untouched. Returns ``(changed, yaml_dict)``.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        # No-op when the value is already in place.
        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        # (AttributeError means round_trip_dump/RoundTripLoader are absent).
        try:
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)

        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass

        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if result is None:
            return (False, self.yaml_dict)

        # When path equals "" it is a special case.
        # "" refers to the root of the document
        # Only update the root path (entire document) when its a list or dict
        if path == '':
            if isinstance(result, list) or isinstance(result, dict):
                self.yaml_dict = result
                return (True, self.yaml_dict)

            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)
    def create(self, path, value):
        ''' create a yaml file

        Only acts when the backing file does not exist yet; sets ``value``
        at ``path`` on a round-tripped copy of the current document.
        Returns ``(changed, yaml_dict)``.
        '''
        if not self.file_exists():
            # deepcopy didn't work
            # Try to use ruamel.yaml and fallback to pyyaml
            # (AttributeError means round_trip_dump/RoundTripLoader are absent).
            try:
                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                          default_flow_style=False),
                                     yaml.RoundTripLoader)
            except AttributeError:
                tmp_copy = copy.deepcopy(self.yaml_dict)

            # set the format attributes if available
            try:
                tmp_copy.fa.set_block_style()
            except AttributeError:
                pass

            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result is not None:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)

        return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.safe_load(str(invalue))
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
    @staticmethod
    def parse_value(inc_value, vtype=''):
        '''Coerce an incoming module value to its intended Python type.

        Validates boolean strings when vtype requests a bool, stringifies
        bools when vtype requests a str, and otherwise yaml-loads plain
        strings so "1"/"true"/"[a]" become int/bool/list.
        '''
        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                      'on', 'On', 'ON', ]
        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                       'off', 'Off', 'OFF']
        # It came in as a string but you didn't specify value_type as string
        # we will convert to bool if it matches any of the above cases
        # NOTE: this branch only *validates*; the actual str->bool conversion
        # happens below in the yaml.safe_load branch.
        if isinstance(inc_value, str) and 'bool' in vtype:
            if inc_value not in true_bools and inc_value not in false_bools:
                raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
        elif isinstance(inc_value, bool) and 'str' in vtype:
            inc_value = str(inc_value)
        # There is a special case where '' will turn into None after yaml loading it so skip
        if isinstance(inc_value, str) and inc_value == '':
            pass
        # If vtype is not str then go ahead and attempt to yaml load it.
        elif isinstance(inc_value, str) and 'str' not in vtype:
            try:
                inc_value = yaml.safe_load(inc_value)
            except Exception:
                raise YeditException('Could not determine type of incoming value. ' +
                                     'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
        return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
    # pylint: disable=too-many-return-statements,too-many-branches
    @staticmethod
    def run_ansible(params):
        '''Perform the idempotent CRUD operations for the yedit ansible module.

        Dispatches on params['state'] ('list' / 'absent' / 'present') and
        returns an ansible-style result dict.
        '''
        yamlfile = Yedit(filename=params['src'],
                         backup=params['backup'],
                         content_type=params['content_type'],
                         backup_ext=params['backup_ext'],
                         separator=params['separator'])
        state = params['state']
        if params['src']:
            rval = yamlfile.load()
            if yamlfile.yaml_dict is None and state != 'present':
                return {'failed': True,
                        'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                               'file exists, that it is has correct permissions, and is valid yaml.'}
        if state == 'list':
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])
                yamlfile.yaml_dict = content
            if params['key']:
                rval = yamlfile.get(params['key'])
            # NOTE(review): when neither 'src' nor 'key' is set, rval is
            # referenced before assignment here -- confirm callers always
            # provide one of them.
            return {'changed': False, 'result': rval, 'state': state}
        elif state == 'absent':
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])
                yamlfile.yaml_dict = content
            # 'update' removes a specific value from a list; otherwise delete the key
            if params['update']:
                rval = yamlfile.pop(params['key'], params['value'])
            else:
                rval = yamlfile.delete(params['key'], params['index'], params['value'])
            if rval[0] and params['src']:
                yamlfile.write()
            return {'changed': rval[0], 'result': rval[1], 'state': state}
        elif state == 'present':
            # check if content is different than what is in the file
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])
                # We had no edits to make and the contents are the same
                if yamlfile.yaml_dict == content and \
                   params['value'] is None:
                    return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
                yamlfile.yaml_dict = content
            # If we were passed a key, value then
            # we enapsulate it in a list and process it
            # Key, Value passed to the module : Converted to Edits list #
            edits = []
            _edit = {}
            if params['value'] is not None:
                _edit['value'] = params['value']
                _edit['value_type'] = params['value_type']
                _edit['key'] = params['key']
                if params['update']:
                    _edit['action'] = 'update'
                    _edit['curr_value'] = params['curr_value']
                    _edit['curr_value_format'] = params['curr_value_format']
                    _edit['index'] = params['index']
                elif params['append']:
                    _edit['action'] = 'append'
                edits.append(_edit)
            elif params['edits'] is not None:
                edits = params['edits']
            if edits:
                results = Yedit.process_edits(edits, yamlfile)
                # if there were changes and a src provided to us we need to write
                if results['changed'] and params['src']:
                    yamlfile.write()
                return {'changed': results['changed'], 'result': results['results'], 'state': state}
            # no edits to make
            if params['src']:
                # pylint: disable=redefined-variable-type
                rval = yamlfile.write()
                return {'changed': rval[0],
                        'result': rval[1],
                        'state': state}
            # We were passed content but no src, key or value, or edits. Return contents in memory
            return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        # NOTE(review): 'Unkown' is a typo in a runtime message ('Unknown');
        # left unchanged here since this edit is documentation-only.
        return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
    '''Raised for errors produced by the openshiftcli wrapper classes.'''
    pass
# Extra directories searched for oc beyond $PATH.
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]


def locate_oc_binary():
    ''' Return the full path to the oc binary, or plain 'oc' when not found. '''
    # https://github.com/openshift/openshift-ansible/issues/3410
    # oc can be in /usr/local/bin in some cases, but that may not
    # be in $PATH due to ansible/sudo
    search_dirs = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
    binary = 'oc'
    try:
        located = shutil.which(binary, path=os.pathsep.join(search_dirs))
    except AttributeError:
        # Python 2: shutil.which is missing -- fall back to a naive scan.
        for directory in search_dirs:
            candidate = os.path.join(directory, binary)
            if os.path.exists(candidate):
                return candidate
        return binary
    return located if located is not None else binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Thin wrapper around the `oc` command line tool.

    Each helper builds an argv list and delegates to openshift_cmd(), which
    runs the binary with KUBECONFIG pointed at a private temp copy.
    '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI '''
        self.namespace = namespace
        self.verbose = verbose
        # work against a private temp copy so the original kubeconfig is never touched
        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
        self.all_namespaces = all_namespaces
        self.oc_binary = locate_oc_binary()
    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, edits=None, force=False, sep='.'):
        ''' replace the current object with the content

        Applies `content` (dict of yedit path -> value) or `edits` to the
        live object and calls `oc replace` only when something changed.
        '''
        res = self._get(resource, rname)
        if not res['results']:
            return res
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, res['results'][0], separator=sep)
        updated = False
        if content is not None:
            changes = []
            for key, value in content.items():
                changes.append(yed.put(key, value))
            if any([change[0] for change in changes]):
                updated = True
        elif edits is not None:
            results = Yedit.process_edits(edits, yed)
            if results['changed']:
                updated = True
        if updated:
            yed.write()
            atexit.register(Utils.cleanup, [fname])
            return self._replace(fname, force)
        return {'returncode': 0, 'updated': False}
    def _replace(self, fname, force=False):
        '''replace the current object with oc replace'''
        # We are removing the 'resourceVersion' to handle
        # a race condition when modifying oc objects
        yed = Yedit(fname)
        results = yed.delete('metadata.resourceVersion')
        if results[0]:
            yed.write()
        cmd = ['replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)
    def _create_from_content(self, rname, content):
        '''create a temporary file and then call oc create on it'''
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, content=content)
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self._create(fname)
    def _create(self, fname):
        '''call oc create on a filename'''
        return self.openshift_cmd(['create', '-f', fname])
    def _delete(self, resource, name=None, selector=None):
        '''call oc delete on a resource; selector takes precedence over name'''
        cmd = ['delete', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)
        else:
            raise OpenShiftCLIError('Either name or selector is required when calling delete.')
        return self.openshift_cmd(cmd)
    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
        '''process a template
        template_name: the name of the template to process
        create: whether to send to oc create after processing
        params: the parameters for the template
        template_data: the incoming template's data; instead of a file
        '''
        cmd = ['process']
        if template_data:
            # template body arrives on stdin instead of by name
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
            cmd.append('-p')
            cmd.extend(param_str)
        results = self.openshift_cmd(cmd, output=True, input_data=template_data)
        if results['returncode'] != 0 or not create:
            return results
        # create the processed output via a temp file
        fname = Utils.create_tmpfile(template_name + '-')
        yed = Yedit(fname, results['results'])
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self.openshift_cmd(['create', '-f', fname])
    def _get(self, resource, name=None, selector=None, field_selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        if field_selector is not None:
            cmd.append('--field-selector={}'.format(field_selector))
        # Name cannot be used with selector or field_selector.
        if selector is None and field_selector is None and name is not None:
            cmd.append(name)
        cmd.extend(['-o', 'json'])
        rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are retuned in an array
        if 'items' in rval:
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]
        return rval
    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))
        cmd.append('--schedulable={}'.format(schedulable))
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501
    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm list pods
        node: the node in which to list pods
        selector: the label selector filter if provided
        pod_selector: the pod selector filter if provided
        '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))
        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))
        cmd.extend(['--list-pods', '-o', 'json'])
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))
        if dry_run:
            cmd.append('--dry-run')
        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))
        if grace_period:
            cmd.append('--grace-period={}'.format(int(grace_period)))
        if force:
            cmd.append('--force')
        cmd.append('--evacuate')
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    def _version(self):
        ''' return the openshift version'''
        return self.openshift_cmd(['version'], output=True, output_type='raw')
    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']
        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)
        cmd.append(image)
        if url:
            cmd.append('--from={0}/{1}'.format(url, image))
        cmd.append('-n{0}'.format(self.namespace))
        cmd.append('--confirm')
        return self.openshift_cmd(cmd)
    def _run(self, cmds, input_data):
        ''' Actually executes the command. This makes mocking easier. '''
        curr_env = os.environ.copy()
        curr_env.update({'KUBECONFIG': self.kubeconfig})
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=curr_env)
        stdout, stderr = proc.communicate(input_data)
        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
    # pylint: disable=too-many-arguments,too-many-branches
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc

        Builds the final argv (binary, optional `adm`, namespace flags),
        runs it, and wraps returncode/stdout/stderr into a result dict.
        '''
        cmds = [self.oc_binary]
        if oadm:
            cmds.append('adm')
        cmds.extend(cmd)
        if self.all_namespaces:
            cmds.extend(['--all-namespaces'])
        # NOTE(review): 'emtpy' looks like a typo for 'empty' -- fixing it
        # would change which namespace strings skip the -n flag; confirm
        # intent before correcting.
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501
            cmds.extend(['-n', self.namespace])
        if self.verbose:
            print(' '.join(cmds))
        try:
            returncode, stdout, stderr = self._run(cmds, input_data)
        except OSError as ex:
            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
        rval = {"returncode": returncode,
                "cmd": ' '.join(cmds)}
        if output_type == 'json':
            rval['results'] = {}
            if output and stdout:
                try:
                    rval['results'] = json.loads(stdout)
                except ValueError as verr:
                    if "No JSON object could be decoded" in verr.args:
                        rval['err'] = verr.args
        elif output_type == 'raw':
            rval['results'] = stdout if output else ''
        if self.verbose:
            print("STDOUT: {0}".format(stdout))
            print("STDERR: {0}".format(stderr))
        if 'err' in rval or returncode != 0:
            rval.update({"stderr": stderr,
                         "stdout": stdout})
        return rval
class Utils(object):  # pragma: no cover
    ''' utilities for openshiftcli modules: temp-file handling, result
    lookups, version parsing and deep definition comparison '''
    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        with open(filename, 'w') as sfd:
            sfd.write(str(contents))
    @staticmethod
    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents'''
        tmp = Utils.create_tmpfile(prefix=rname)
        if ftype == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripDumper'):
                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
            else:
                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
        elif ftype == 'json':
            Utils._write(tmp, json.dumps(data))
        else:
            # unknown type: write raw
            Utils._write(tmp, data)
        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [tmp])
        return tmp
    @staticmethod
    def create_tmpfile_copy(inc_file):
        '''create a temporary copy of a file'''
        tmpfile = Utils.create_tmpfile('lib_openshift-')
        Utils._write(tmpfile, open(inc_file).read())
        # Cleanup the tmpfile
        atexit.register(Utils.cleanup, [tmpfile])
        return tmpfile
    @staticmethod
    def create_tmpfile(prefix='tmp'):
        ''' Generates and returns a temporary file name '''
        # delete=False so the caller can reuse the name after the handle closes
        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
            return tmp.name
    @staticmethod
    def create_tmp_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_tmp_file_from_contents(item['path'] + '-',
                                                       item['data'],
                                                       ftype=content_type)
            files.append({'name': os.path.basename(item['path']),
                          'path': path})
        return files
    @staticmethod
    def cleanup(files):
        '''Clean up on exit '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)
    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False
        if Utils.find_result(results, _name):
            return True
        return False
    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name'''
        rval = None
        for result in results:
            if 'metadata' in result and result['metadata']['name'] == _name:
                rval = result
                break
        return rval
    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()
        if sfile_type == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripLoader'):
                contents = yaml.load(contents, yaml.RoundTripLoader)
            else:
                contents = yaml.safe_load(contents)
        elif sfile_type == 'json':
            contents = json.loads(contents)
        return contents
    @staticmethod
    def filter_versions(stdout):
        ''' filter the oc version output '''
        version_dict = {}
        version_search = ['oc', 'openshift', 'kubernetes']
        for line in stdout.strip().split('\n'):
            for term in version_search:
                if not line:
                    continue
                if line.startswith(term):
                    version_dict[term] = line.split()[-1]
        # horrible hack to get openshift version in Openshift 3.2
        # By default "oc version in 3.2 does not return an "openshift" version
        # NOTE(review): raises KeyError when 'oc' itself is absent from the
        # output -- presumably never happens with real `oc version`; verify.
        if "openshift" not in version_dict:
            version_dict["openshift"] = version_dict["oc"]
        return version_dict
    @staticmethod
    def add_custom_versions(versions):
        ''' create custom versions strings '''
        versions_dict = {}
        for tech, version in versions.items():
            # clean up "-" from version
            if "-" in version:
                version = version.split("-")[0]
            if version.startswith('v'):
                version = version[1:]  # Remove the 'v' prefix
            versions_dict[tech + '_numeric'] = version.split('+')[0]
            # "3.3.0.33" is what we have, we want "3.3"
            # NOTE(review): assumes at least two dot-separated components;
            # a bare "3" would raise IndexError here -- confirm inputs.
            versions_dict[tech + '_short'] = "{}.{}".format(*version.split('.'))
        return versions_dict
    @staticmethod
    def openshift_installed():
        ''' check if openshift is installed '''
        import rpm
        transaction_set = rpm.TransactionSet()
        rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
        return rpmquery.count() > 0
    # Disabling too-many-branches. This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.

        Recursively compares dicts/lists, ignoring autogenerated keys
        ('metadata', 'status') plus any caller-supplied skip_keys.
        Returns True when every non-skipped key matches.
        '''
        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)
        for key, value in result_def.items():
            if key in skip:
                continue
            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False
                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False
                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False
                # compare lists element-wise; dict elements recurse
                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print('list compare returned false')
                            return False
                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(user_def[key])
                            print(value)
                        return False
            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False
                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(user_values)
                        print(api_values)
                    return False
                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False
            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False
        if debug:
            print('returning true')
        return True
class OpenShiftCLIConfig(object):
    '''Generic holder for a named resource's CLI options.

    Options are stored as {name: {'value': ..., 'include': bool}} and can be
    rendered into `--name=value` command-line arguments.
    '''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' return config options '''
        return self._options

    def to_option_list(self, ascommalist=''):
        '''return all options as a string
        if ascommalist is set to the name of a key, and
        the value of that key is a dict, format the dict
        as a list of comma delimited key=value pairs'''
        return self.stringify(ascommalist)

    def stringify(self, ascommalist=''):
        ''' return the options hash as cli params in a string
        if ascommalist is set to the name of a key, and
        the value of that key is a dict, format the dict
        as a list of comma delimited key=value pairs '''
        params = []
        for opt_name in sorted(self.config_options.keys()):
            opt = self.config_options[opt_name]
            # skip excluded options and options with no usable value
            if not opt['include']:
                continue
            if opt['value'] is None and not isinstance(opt['value'], int):
                continue
            if opt_name == ascommalist:
                rendered = ','.join('{}={}'.format(kk, vv)
                                    for kk, vv in sorted(opt['value'].items()))
            else:
                rendered = opt['value']
            params.append('--{}={}'.format(opt_name.replace('_', '-'), rendered))
        return params
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/serviceaccount.py -*- -*- -*-
class ServiceAccountConfig(object):
    '''Service account config class

    Stores the desired options and builds a default ServiceAccount
    resource dict in self.data.
    '''
    # pylint: disable=too-many-arguments
    def __init__(self, sname, namespace, kubeconfig, secrets=None, image_pull_secrets=None):
        self.name = sname
        self.kubeconfig = kubeconfig
        self.namespace = namespace
        self.secrets = secrets or []
        self.image_pull_secrets = image_pull_secrets or []
        self.data = {}
        self.create_dict()

    def create_dict(self):
        ''' populate self.data with a v1 ServiceAccount resource dict '''
        self.data['apiVersion'] = 'v1'
        self.data['kind'] = 'ServiceAccount'
        self.data['metadata'] = {'name': self.name,
                                 'namespace': self.namespace}
        # each secret name becomes an object reference: {"name": <secret>}
        self.data['secrets'] = [{"name": sec} for sec in self.secrets]
        self.data['imagePullSecrets'] = [{"name": sec} for sec in self.image_pull_secrets]
class ServiceAccount(Yedit):
    ''' Wrapper around a ServiceAccount resource document.

    Provides lazy accessors and add/find/delete helpers for the "secrets"
    and "imagePullSecrets" lists of the underlying yaml document.
    '''
    image_pull_secrets_path = "imagePullSecrets"
    secrets_path = "secrets"

    def __init__(self, content):
        '''ServiceAccount constructor'''
        super(ServiceAccount, self).__init__(content=content)
        self._secrets = None
        self._image_pull_secrets = None

    @property
    def image_pull_secrets(self):
        ''' lazily loaded "imagePullSecrets" list from the document '''
        if self._image_pull_secrets is None:
            self._image_pull_secrets = self.get(ServiceAccount.image_pull_secrets_path) or []
        return self._image_pull_secrets

    @image_pull_secrets.setter
    def image_pull_secrets(self, secrets):
        ''' setter for image_pull_secrets '''
        self._image_pull_secrets = secrets

    @property
    def secrets(self):
        ''' lazily loaded "secrets" list from the document

        Deliberately re-reads while the cached list is empty so entries
        added through put() become visible on the next access.
        '''
        if not self._secrets:
            self._secrets = self.get(ServiceAccount.secrets_path) or []
        return self._secrets

    @secrets.setter
    def secrets(self, secrets):
        ''' setter for secrets '''
        self._secrets = secrets

    def delete_secret(self, inc_secret):
        ''' remove a secret by name; return True when an entry was removed '''
        remove_idx = None
        for idx, sec in enumerate(self.secrets):
            if sec['name'] == inc_secret:
                remove_idx = idx
                break
        # BUGFIX: compare against None -- index 0 is falsy, so the previous
        # "if remove_idx:" silently failed to delete the first secret.
        if remove_idx is not None:
            del self.secrets[remove_idx]
            return True
        return False

    def delete_image_pull_secret(self, inc_secret):
        ''' remove an image_pull_secret by name; return True when removed '''
        remove_idx = None
        for idx, sec in enumerate(self.image_pull_secrets):
            if sec['name'] == inc_secret:
                remove_idx = idx
                break
        # BUGFIX: same first-element fix as delete_secret (0 is falsy).
        if remove_idx is not None:
            del self.image_pull_secrets[remove_idx]
            return True
        return False

    def find_secret(self, inc_secret):
        '''return the secret dict whose name matches inc_secret, else None'''
        for secret in self.secrets:
            if secret['name'] == inc_secret:
                return secret
        return None

    def find_image_pull_secret(self, inc_secret):
        '''return the image pull secret dict matching inc_secret, else None'''
        for secret in self.image_pull_secrets:
            if secret['name'] == inc_secret:
                return secret
        return None

    def add_secret(self, inc_secret):
        '''add a secret reference by name'''
        if self.secrets:
            self.secrets.append({"name": inc_secret})  # pylint: disable=no-member
        else:
            # no list yet: create it in the underlying document
            self.put(ServiceAccount.secrets_path, [{"name": inc_secret}])

    def add_image_pull_secret(self, inc_secret):
        '''add an image_pull_secret reference by name'''
        if self.image_pull_secrets:
            self.image_pull_secrets.append({"name": inc_secret})  # pylint: disable=no-member
        else:
            # no list yet: create it in the underlying document
            self.put(ServiceAccount.image_pull_secrets_path, [{"name": inc_secret}])
# -*- -*- -*- End included fragment: lib/serviceaccount.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_serviceaccount_secret.py -*- -*- -*-
class OCServiceAccountSecret(OpenShiftCLI):
    ''' Manage the secrets attached to a service account via `oc`. '''
    kind = 'sa'

    def __init__(self, config, verbose=False):
        ''' Constructor for OpenshiftOC '''
        super(OCServiceAccountSecret, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
        self.config = config
        self.verbose = verbose
        self._service_account = None

    @property
    def service_account(self):
        ''' lazily fetched ServiceAccount wrapper for the configured account '''
        if not self._service_account:
            self.get()
        return self._service_account

    @service_account.setter
    def service_account(self, data):
        ''' setter for the service account '''
        self._service_account = data

    def exists(self, in_secret):
        ''' verifies if secret exists in the service account '''
        result = self.service_account.find_secret(in_secret)
        if not result:
            return False
        return True

    def get(self):
        ''' get the service account definition from the master '''
        sao = self._get(OCServiceAccountSecret.kind, self.config.name)
        if sao['returncode'] == 0:
            self.service_account = ServiceAccount(content=sao['results'][0])
            # expose just the secrets list to callers
            sao['results'] = self.service_account.get('secrets')
        return sao

    def delete(self):
        ''' remove the configured secrets from the service account '''
        modified = []
        for rem_secret in self.config.secrets:
            modified.append(self.service_account.delete_secret(rem_secret))
        if any(modified):
            return self._replace_content(OCServiceAccountSecret.kind, self.config.name, self.service_account.yaml_dict)
        return {'returncode': 0, 'changed': False}

    def put(self):
        ''' place the configured secrets into the service account '''
        modified = False
        for add_secret in self.config.secrets:
            if not self.service_account.find_secret(add_secret):
                self.service_account.add_secret(add_secret)
                modified = True
        if modified:
            return self._replace_content(OCServiceAccountSecret.kind, self.config.name, self.service_account.yaml_dict)
        return {'returncode': 0, 'changed': False}

    @staticmethod
    # pylint: disable=too-many-return-statements,too-many-branches
    # TODO: This function should be refactored into its individual parts.
    def run_ansible(params, check_mode):
        ''' run the ansible idempotent code

        Dispatches on params['state'] ('list' / 'absent' / 'present') and
        returns an ansible-style result dict.
        '''
        sconfig = ServiceAccountConfig(params['service_account'],
                                       params['namespace'],
                                       params['kubeconfig'],
                                       [params['secret']],
                                       None)
        oc_sa_sec = OCServiceAccountSecret(sconfig, verbose=params['debug'])
        state = params['state']
        api_rval = oc_sa_sec.get()
        #####
        # Get
        #####
        if state == 'list':
            return {'changed': False, 'results': api_rval['results'], 'state': "list"}
        ########
        # Delete
        ########
        if state == 'absent':
            if oc_sa_sec.exists(params['secret']):
                if check_mode:
                    # BUGFIX: the original literal had mismatched quotes, so
                    # the message contained '" + ' and continuation whitespace.
                    return {'changed': True,
                            'msg': 'Would have removed the secret from the service account.'}
                api_rval = oc_sa_sec.delete()
                return {'changed': True, 'results': api_rval, 'state': "absent"}
            return {'changed': False, 'state': "absent"}
        if state == 'present':
            ########
            # Create
            ########
            if not oc_sa_sec.exists(params['secret']):
                if check_mode:
                    return {'changed': True,
                            'msg': 'Would have added the secret to the service account.'}
                # Create it here
                api_rval = oc_sa_sec.put()
                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}
                # return the created object
                api_rval = oc_sa_sec.get()
                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}
                return {'changed': True, 'results': api_rval, 'state': "present"}
            return {'changed': False, 'results': api_rval, 'state': "present"}
        return {'failed': True,
                'changed': False,
                'msg': 'Unknown state passed. %s' % state,
                'state': 'unknown'}
# -*- -*- -*- End included fragment: class/oc_serviceaccount_secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_serviceaccount_secret.py -*- -*- -*-
def main():
    '''
    ansible oc module to manage service account secrets.

    Declares the module argument spec, delegates the actual work to
    OCServiceAccountSecret.run_ansible, and exits via the ansible
    success/failure helpers.
    '''
    # AnsibleModule is expected to be injected by the ansible module_utils
    # machinery when this generated file is assembled -- not imported here.
    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            debug=dict(default=False, type='bool'),
            namespace=dict(default=None, required=True, type='str'),
            secret=dict(default=None, type='str'),
            service_account=dict(required=True, type='str'),
        ),
        supports_check_mode=True,
    )
    rval = OCServiceAccountSecret.run_ansible(module.params, module.check_mode)
    if 'failed' in rval:
        module.fail_json(**rval)
    module.exit_json(**rval)


if __name__ == '__main__':
    main()
# -*- -*- -*- End included fragment: ansible/oc_serviceaccount_secret.py -*- -*- -*-
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
pgwatch2/pgwatch2.go
|
package main
import (
"container/list"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/sha256"
go_sql "database/sql"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"net"
"net/http"
"os"
"path"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/coreos/go-systemd/daemon"
client "github.com/influxdata/influxdb1-client/v2"
"github.com/jessevdk/go-flags"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
"github.com/marpaia/graphite-golang"
"github.com/op/go-logging"
"github.com/shopspring/decimal"
"golang.org/x/crypto/pbkdf2"
"gopkg.in/yaml.v2"
)
// Build-time metadata, injected via -ldflags by the build scripts.
var commit = "" // Git hash. Will be set on build time by build_gatherer.sh / goreleaser
var date = ""   // Will be set on build time by build_gatherer.sh / goreleaser
// MonitoredDatabase describes one database (or instance) under monitoring:
// connection details, credentials, enabled metric sets and per-host
// gathering overrides. Values are populated from YAML config or the
// config DB via the yaml tags below.
type MonitoredDatabase struct {
	DBUniqueName         string `yaml:"unique_name"`
	DBUniqueNameOrig     string // to preserve belonging to a specific instance for continuous modes where DBUniqueName will be dynamic
	Group                string
	Host                 string
	Port                 string
	DBName               string
	User                 string
	Password             string
	PasswordType         string             `yaml:"password_type"`
	LibPQConnStr         string             `yaml:"libpq_conn_str"`
	SslMode              string
	SslRootCAPath        string             `yaml:"sslrootcert"`
	SslClientCertPath    string             `yaml:"sslcert"`
	SslClientKeyPath     string             `yaml:"sslkey"`
	Metrics              map[string]float64 `yaml:"custom_metrics"`
	MetricsStandby       map[string]float64 `yaml:"custom_metrics_standby"`
	StmtTimeout          int64              `yaml:"stmt_timeout"`
	DBType               string
	DBNameIncludePattern string            `yaml:"dbname_include_pattern"`
	DBNameExcludePattern string            `yaml:"dbname_exclude_pattern"`
	PresetMetrics        string            `yaml:"preset_metrics"`
	PresetMetricsStandby string            `yaml:"preset_metrics_standby"`
	IsSuperuser          bool              `yaml:"is_superuser"`
	IsEnabled            bool              `yaml:"is_enabled"`
	CustomTags           map[string]string `yaml:"custom_tags"` // ignored on graphite
	HostConfig           HostConfigAttrs   `yaml:"host_config"`
	OnlyIfMaster         bool              `yaml:"only_if_master"`
}
// HostConfigAttrs carries optional per-host settings: DCS (e.g. Patroni/etcd)
// discovery endpoints and credentials, log-tailing configuration, and
// time-windowed metric gathering overrides.
type HostConfigAttrs struct {
	DcsType                string   `yaml:"dcs_type"`
	DcsEndpoints           []string `yaml:"dcs_endpoints"`
	Scope                  string
	Namespace              string
	Username               string
	Password               string
	CAFile                 string                             `yaml:"ca_file"`
	CertFile               string                             `yaml:"cert_file"`
	KeyFile                string                             `yaml:"key_file"`
	LogsGlobPath           string                             `yaml:"logs_glob_path"`   // default $data_directory / $log_directory / *.csvlog
	LogsMatchRegex         string                             `yaml:"logs_match_regex"` // default is for CSVLOG format. needs to capture following named groups: log_time, user_name, database_name and error_severity
	PerMetricDisabledTimes []HostConfigPerMetricDisabledTimes `yaml:"per_metric_disabled_intervals"`
}
// HostConfigPerMetricDisabledTimes allows pausing gathering of the listed
// metrics during given time ranges / weekdays (metric gathering override per
// host / metric / time).
type HostConfigPerMetricDisabledTimes struct {
	Metrics       []string `yaml:"metrics"`
	DisabledTimes []string `yaml:"disabled_times"` // e.g. "11:00-13:00"
	DisabledDays  string   `yaml:"disabled_days"`  // Cron style day-of-week spec
}
// PatroniClusterMember is one cluster member entry as read from the DCS
// (etcd / zookeeper / consul) for a Patroni managed cluster.
type PatroniClusterMember struct {
	Scope   string
	Name    string
	ConnUrl string `yaml:"conn_url"`
	Role    string // e.g. master / replica
}
// PresetConfig is a named, reusable bundle of metrics with their gathering
// intervals, referable from monitored DB entries instead of custom configs.
type PresetConfig struct {
	Name        string
	Description string
	Metrics     map[string]float64 // metric name -> interval
}
// MetricColumnAttrs configures per-column handling when exposing a metric in
// Prometheus mode (gauge vs counter typing and column exclusion).
type MetricColumnAttrs struct {
	PrometheusGaugeColumns    []string `yaml:"prometheus_gauge_columns"`
	PrometheusIgnoredColumns  []string `yaml:"prometheus_ignored_columns"` // for cases where we don't want some columns to be exposed in Prom mode
	PrometheusAllGaugeColumns bool     `yaml:"prometheus_all_gauge_columns"`
}
// MetricAttrs holds metric level (as opposed to version level) settings that
// apply to all SQL versions of a metric definition.
type MetricAttrs struct {
	IsInstanceLevel           bool                 `yaml:"is_instance_level"` // instance level metrics can be cached / shared between DBs of one cluster
	MetricStorageName         string               `yaml:"metric_storage_name"`
	ExtensionVersionOverrides []ExtensionOverrides `yaml:"extension_version_based_overrides"`
	IsPrivate                 bool                 `yaml:"is_private"`                // used only for extension overrides currently and ignored otherwise
	DisabledDays              string               `yaml:"disabled_days"`             // Cron style, 0 = Sunday. Ranges allowed: 0,2-4
	DisableTimes              []string             `yaml:"disabled_times"`            // "11:00-13:00"
	StatementTimeoutSeconds   int64                `yaml:"statement_timeout_seconds"` // overrides per monitored DB settings
}
// MetricVersionProperties is one Postgres-version-specific variant of a metric
// definition: the normal and superuser SQL plus applicability flags.
type MetricVersionProperties struct {
	Sql         string // SQL run for regular monitoring users
	SqlSU       string // superuser variant, used when no helper functions are installed
	MasterOnly  bool
	StandbyOnly bool
	ColumnAttrs MetricColumnAttrs // Prometheus Metric Type (Counter is default) and ignore list
	MetricAttrs MetricAttrs
}
// ControlMessage instructs a per-DB gathering goroutine to change state or
// pick up a new metric interval configuration.
type ControlMessage struct {
	Action string // START, STOP, PAUSE
	Config map[string]float64 // metric name -> interval
}
// MetricFetchMessage is an internal work item telling the fetcher which metric
// to gather from which monitored DB.
type MetricFetchMessage struct {
	DBUniqueName        string
	DBUniqueNameOrig    string
	MetricName          string
	DBType              string
	Interval            time.Duration
	CreatedOn           time.Time
	StmtTimeoutOverride int64 // seconds; > 0 overrides the per-DB statement timeout
}
// MetricStoreMessage is a batch of fetched metric rows for one DB / metric on
// its way to the configured datastore(s).
type MetricStoreMessage struct {
	DBUniqueName            string
	DBType                  string
	MetricName              string
	CustomTags              map[string]string
	Data                    [](map[string]interface{}) // result rows as column -> value maps
	MetricDefinitionDetails MetricVersionProperties
	RealDbname              string
	SystemIdentifier        string
}
// MetricStoreMessagePostgres is one metric row flattened into the shape used
// for COPY-ing into the Postgres metric store tables.
type MetricStoreMessagePostgres struct {
	Time    time.Time
	DBName  string
	Metric  string
	Data    map[string]interface{} // stored as the "data" JSON column
	TagData map[string]interface{} // stored as the "tag_data" JSON column
}
// ChangeDetectionResults is for passing around DDL/index/config change
// detection results (counts per change category).
type ChangeDetectionResults struct {
	Created int
	Altered int
	Dropped int
}
// DBVersionMapEntry caches per-host version / capability information that is
// refreshed periodically (see db_pg_version_map).
type DBVersionMapEntry struct {
	LastCheckedOn    time.Time
	IsInRecovery     bool
	Version          decimal.Decimal
	VersionStr       string
	RealDbname       string
	SystemIdentifier string
	IsSuperuser      bool // if true and no helpers are installed, use superuser SQL version of metric if available
	Extensions       map[string]decimal.Decimal // installed extension name -> version
}
// ExistingPartitionInfo records the covered time bounds of already-created
// metric storage partitions so they are not re-created needlessly.
type ExistingPartitionInfo struct {
	StartTime time.Time
	EndTime   time.Time
}
// ExtensionOverrides redirects a metric to another metric definition when the
// listed extension version requirements are met on the monitored host.
type ExtensionOverrides struct {
	TargetMetric              string          `yaml:"target_metric"`
	ExpectedExtensionVersions []ExtensionInfo `yaml:"expected_extension_versions"`
}
// ExtensionInfo is a minimum version requirement on a named Postgres extension.
type ExtensionInfo struct {
	ExtName       string          `yaml:"ext_name"`
	ExtMinVersion decimal.Decimal `yaml:"ext_min_version"`
}
// Naming conventions expected in metric SQL result sets.
const EPOCH_COLUMN_NAME string = "epoch_ns" // this column (epoch in nanoseconds) is expected in every metric query
const TAG_PREFIX string = "tag_"            // result columns with this prefix are stored as tags / labels instead of plain values
const METRIC_DEFINITION_REFRESH_TIME int64 = 120 // min time before checking for new/changed metric definitions
const GRAPHITE_METRICS_PREFIX string = "pgwatch2"
const PERSIST_QUEUE_MAX_SIZE = 10000 // storage queue max elements. when reaching the limit, older metrics will be dropped.
// actual requirements depend a lot of metric type and nr. of obects in schemas,
// but 100k should be enough for 24h, assuming 5 hosts monitored with "exhaustive" preset config. this would also require ~2 GB RAM per one Influx host

// Supported metric datastore backends.
const DATASTORE_INFLUX = "influx"
const DATASTORE_GRAPHITE = "graphite"
const DATASTORE_JSON = "json"
const DATASTORE_POSTGRES = "postgres"
const DATASTORE_PROMETHEUS = "prometheus"

const PRESET_CONFIG_YAML_FILE = "preset-configs.yaml"
const FILE_BASED_METRIC_HELPERS_DIR = "00_helpers"
const PG_CONN_RECYCLE_SECONDS = 1800 // applies for monitored nodes
const APPLICATION_NAME = "pgwatch2"  // will be set on all opened PG connections for informative purposes
const MAX_PG_CONNECTIONS_PER_MONITORED_DB = 2 // for limiting max concurrent queries on a single DB, sql.DB maxPoolSize cannot be fully trusted
const GATHERER_STATUS_START = "START"
const GATHERER_STATUS_STOP = "STOP"
const METRICDB_IDENT = "metricDb" // special host_ident values marking the gatherer's own DBs (no conn cache eviction)
const CONFIGDB_IDENT = "configDb"
const CONTEXT_PROMETHEUS_SCRAPE = "prometheus-scrape"

// Supported Distributed Consensus Store types for Patroni discovery.
const DCS_TYPE_ETCD = "etcd"
const DCS_TYPE_ZOOKEEPER = "zookeeper"
const DCS_TYPE_CONSUL = "consul"

// Supported monitored entry "dbtype"-s.
const DBTYPE_PG = "postgres"
const DBTYPE_PG_CONT = "postgres-continuous-discovery"
const DBTYPE_BOUNCER = "pgbouncer"
const DBTYPE_PGPOOL = "pgpool"
const DBTYPE_PATRONI = "patroni"
const DBTYPE_PATRONI_CONT = "patroni-continuous-discovery"
const DBTYPE_PATRONI_NAMESPACE_DISCOVERY = "patroni-namespace-discovery"

const MONITORED_DBS_DATASTORE_SYNC_INTERVAL_SECONDS = 600               // write actively monitored DBs listing to metrics store after so many seconds
const MONITORED_DBS_DATASTORE_SYNC_METRIC_NAME = "configured_dbs"       // FYI - for Postgres datastore there's also the admin.all_unique_dbnames table with all recent DB unique names with some metric data
const RECO_PREFIX = "reco_"                                             // special handling for metrics with such prefix, data stored in RECO_METRIC_NAME
const RECO_METRIC_NAME = "recommendations"

// Metrics requiring special (non-generic) gathering / storage handling.
const SPECIAL_METRIC_CHANGE_EVENTS = "change_events"
const SPECIAL_METRIC_SERVER_LOG_EVENT_COUNTS = "server_log_event_counts"
const SPECIAL_METRIC_PGBOUNCER_STATS = "pgbouncer_stats"
const SPECIAL_METRIC_PGPOOL_STATS = "pgpool_stats"
// Accepted dbtype values and which metrics need special handling.
var dbTypeMap = map[string]bool{DBTYPE_PG: true, DBTYPE_PG_CONT: true, DBTYPE_BOUNCER: true, DBTYPE_PATRONI: true, DBTYPE_PATRONI_CONT: true, DBTYPE_PGPOOL: true, DBTYPE_PATRONI_NAMESPACE_DISCOVERY: true}
var dbTypes = []string{DBTYPE_PG, DBTYPE_PG_CONT, DBTYPE_BOUNCER, DBTYPE_PATRONI, DBTYPE_PATRONI_CONT, DBTYPE_PATRONI_NAMESPACE_DISCOVERY} // used for informational purposes
var specialMetrics = map[string]bool{RECO_METRIC_NAME: true, SPECIAL_METRIC_CHANGE_EVENTS: true, SPECIAL_METRIC_SERVER_LOG_EVENT_COUNTS: true}

// Shared connections / clients used by the gatherer goroutines.
var configDb *sqlx.DB
var metricDb *sqlx.DB
var graphiteConnection *graphite.Graphite
var log = logging.MustGetLogger("main")

// Metric definitions and per-host version caches. NB! the *_lock mutexes
// guard the map of the same name prefix and must be held for access.
var metric_def_map map[string]map[decimal.Decimal]MetricVersionProperties
var metric_def_map_lock = sync.RWMutex{}
var host_metric_interval_map = make(map[string]float64) // [db1_metric] = 30
var db_pg_version_map = make(map[string]DBVersionMapEntry)
var db_pg_version_map_lock = sync.RWMutex{}
var db_get_pg_version_map_lock = make(map[string]sync.RWMutex) // synchronize initial PG version detection to 1 instance for each defined host

// Monitored DB config and connection caches.
var monitored_db_cache map[string]MonitoredDatabase
var monitored_db_cache_lock sync.RWMutex
var monitored_db_conn_cache map[string]*sqlx.DB = make(map[string]*sqlx.DB)
var monitored_db_conn_cache_lock = sync.RWMutex{}
var db_conn_limiting_channel = make(map[string](chan bool)) // per-host token pools limiting concurrent queries
var db_conn_limiting_channel_lock = sync.RWMutex{}
var last_sql_fetch_error sync.Map
var influx_host_count = 1
var InfluxConnectStrings [2]string // Max. 2 Influx metrics stores currently supported
// secondary Influx meant for HA or Grafana load balancing for 100+ instances with lots of alerts

// Operating mode flags set from command line options at startup.
var fileBased = false
var adHocMode = false
var preset_metric_def_map map[string]map[string]float64 // read from metrics folder in "file mode"

/// internal statistics calculation (updated atomically from gatherer goroutines)
var lastSuccessfulDatastoreWriteTimeEpoch int64
var datastoreWriteFailuresCounter uint64
var datastoreWriteSuccessCounter uint64
var totalMetricFetchFailuresCounter uint64
var datastoreTotalWriteTimeMicroseconds uint64
var totalMetricsFetchedCounter uint64
var totalMetricsReusedFromCacheCounter uint64
var totalMetricsDroppedCounter uint64
var totalDatasetsFetchedCounter uint64
var metricPointsPerMinuteLast5MinAvg int64 = -1 // -1 means the summarization ticker has not yet run
var gathererStartTime time.Time = time.Now()
var useConnPooling bool

// Postgres metric store partitioning state.
var partitionMapMetric = make(map[string]ExistingPartitionInfo)                  // metric = min/max bounds
var partitionMapMetricDbname = make(map[string]map[string]ExistingPartitionInfo) // metric[dbname = min/max bounds]
var testDataGenerationModeWG sync.WaitGroup
var PGDummyMetricTables = make(map[string]time.Time)
var PGDummyMetricTablesLock = sync.RWMutex{}
var PGSchemaType string
var failedInitialConnectHosts = make(map[string]bool) // hosts that couldn't be connected to even once
var addRealDbname bool
var addSystemIdentifier bool
var forceRecreatePGMetricPartitions = false // to signal override PG metrics storage cache
var lastMonitoredDBsUpdate time.Time

// Instance-level metric result caching (shared between DBs of one cluster).
var instanceMetricCache = make(map[string]([]map[string]interface{})) // [dbUnique+metric]lastly_fetched_data
var instanceMetricCacheLock = sync.RWMutex{}
var instanceMetricCacheTimestamp = make(map[string]time.Time) // [dbUnique+metric]last_fetch_time
var instanceMetricCacheTimestampLock = sync.RWMutex{}
var MinExtensionInfoAvailable, _ = decimal.NewFromString("9.1")
var regexIsAlpha = regexp.MustCompile("^[a-zA-Z]+$")
var rBouncerAndPgpoolVerMatch = regexp.MustCompile(`\d+\.+\d+`) // extract $major.minor from "4.1.2 (karasukiboshi)" or "PgBouncer 1.12.0"
// IsPostgresDBType reports whether the given monitored entry type speaks the
// regular Postgres protocol, i.e. is anything except a pgbouncer or pgpool
// entry (which have their own pseudo-SQL stats interfaces).
func IsPostgresDBType(dbType string) bool {
	switch dbType {
	case DBTYPE_BOUNCER, DBTYPE_PGPOOL:
		return false
	default:
		return true
	}
}
// GetPostgresDBConnection opens (but does not ping) a Postgres connection pool.
// A non-empty libPqConnString takes precedence over the discrete host/port/...
// parameters; when it lacks an explicit sslmode, "sslmode=disable" is appended
// so behaviour matches the keyword-style branch default.
func GetPostgresDBConnection(libPqConnString, host, port, dbname, user, password, sslmode, sslrootcert, sslcert, sslkey string) (*sqlx.DB, error) {
	var err error
	var db *sqlx.DB
	if len(libPqConnString) > 0 {
		if strings.Contains(strings.ToLower(libPqConnString), "sslmode") {
			db, err = sqlx.Open("postgres", libPqConnString)
		} else {
			if strings.Contains(libPqConnString, "postgresql://") { // JDBC style
				if strings.Contains(libPqConnString, "?") { // a bit simplistic, regex would be better
					db, err = sqlx.Open("postgres", libPqConnString+"&sslmode=disable")
				} else {
					db, err = sqlx.Open("postgres", libPqConnString+"?sslmode=disable")
				}
			} else { // LibPQ style
				db, err = sqlx.Open("postgres", libPqConnString+" sslmode=disable")
			}
		}
	} else {
		conn_str := fmt.Sprintf("host=%s port=%s dbname='%s' sslmode=%s user=%s application_name=%s sslrootcert='%s' sslcert='%s' sslkey='%s'",
			host, port, dbname, sslmode, user, APPLICATION_NAME, sslrootcert, sslcert, sslkey)
		if password != "" { // having empty string as password effectively disables .pgpass so include only if password given
			// BUGFIX: a leading space is required - without it the string became
			// "...sslkey='x'password=y" which is not a valid libpq keyword/value list
			conn_str += " password=" + password
		}
		db, err = sqlx.Open("postgres", conn_str)
	}
	return db, err
}
// StringToBoolOrFail converts a user supplied string into a boolean,
// terminating the process via log.Fatalf when the input is not recognized.
// inputParamName, if given, is only used to produce a more helpful message.
func StringToBoolOrFail(boolAsString, inputParamName string) bool {
	switch strings.TrimSpace(strings.ToLower(boolAsString)) {
	case "true", "t", "on", "y", "yes", "require", "1":
		return true
	case "false", "f", "off", "n", "no", "disable", "0":
		return false
	}
	if inputParamName != "" {
		log.Fatalf("invalid input for boolean string parameter \"%s\": \"%s\". can be of: 1, t, T, TRUE, true, True, 0, f, F, FALSE, false, False", inputParamName, boolAsString)
	} else {
		log.Fatalf("invalid input for boolean string: %s. can be of: 1, t, T, TRUE, true, True, 0, f, F, FALSE, false, False", boolAsString)
	}
	return false // unreachable - log.Fatalf exits the process
}
// InitAndTestConfigStoreConnection opens the global configDb pool and verifies
// it with a ping, retrying up to 3 times with 5s pauses (~15s total). When
// failOnErr is set any final failure terminates the process via log.Fatal,
// otherwise the error is returned to the caller.
func InitAndTestConfigStoreConnection(host, port, dbname, user, password, requireSSL string, failOnErr bool) error {
	var err error
	SSLMode := "disable"
	var retries = 3 // ~15s
	if StringToBoolOrFail(requireSSL, "--pg-require-ssl") {
		SSLMode = "require"
	}
	for i := 0; i <= retries; i++ {
		// configDb is used by the main thread only. no verify-ca/verify-full support currently
		configDb, err = GetPostgresDBConnection("", host, port, dbname, user, password, SSLMode, "", "", "")
		if err != nil {
			if i < retries {
				// BUGFIX: the retry message wrongly referred to "metricDb" (copy-paste from the metric store twin)
				log.Errorf("could not open configDb connection. retrying in 5s. %d retries left. err: %v", retries-i, err)
				time.Sleep(time.Second * 5)
				continue
			}
			if failOnErr {
				log.Fatal("could not open configDb connection! exit.")
			} else {
				log.Error("could not open configDb connection!")
				return err
			}
		}
		err = configDb.Ping()
		if err != nil {
			if i < retries {
				log.Errorf("could not ping configDb! retrying in 5s. %d retries left. err: %v", retries-i, err)
				time.Sleep(time.Second * 5)
				continue
			}
			if failOnErr {
				log.Fatal("could not ping configDb! exit.", err)
			} else {
				log.Error("could not ping configDb!", err)
				return err
			}
		} else {
			log.Info("connect to configDb OK!")
			break
		}
	}
	// the config DB is only used by the main thread so keep the pool tiny
	configDb.SetMaxIdleConns(1)
	configDb.SetMaxOpenConns(2)
	configDb.SetConnMaxLifetime(time.Second * time.Duration(PG_CONN_RECYCLE_SECONDS))
	return nil
}
// InitAndTestMetricStoreConnection opens the global metricDb pool from a libpq
// connect string and verifies it with a ping, retrying up to 3 times (~15s).
// With failOnErr set any final failure terminates the process, otherwise the
// error is returned.
func InitAndTestMetricStoreConnection(connStr string, failOnErr bool) error {
	var err error
	var retries = 3 // ~15s
	for i := 0; i <= retries; i++ {
		metricDb, err = GetPostgresDBConnection(connStr, "", "", "", "", "", "", "", "", "")
		if err != nil {
			if i < retries {
				log.Errorf("could not open metricDb connection. retrying in 5s. %d retries left. err: %v", retries-i, err)
				time.Sleep(time.Second * 5)
				continue
			}
			if failOnErr {
				log.Fatal("could not open metricDb connection! exit. err:", err)
			} else {
				log.Error("could not open metricDb connection:", err)
				return err
			}
		}
		err = metricDb.Ping()
		if err != nil {
			if i < retries {
				log.Errorf("could not ping metricDb! retrying in 5s. %d retries left. err: %v", retries-i, err)
				time.Sleep(time.Second * 5)
				continue
			}
			if failOnErr {
				log.Fatal("could not ping metricDb! exit.", err)
			} else {
				// CONSISTENCY FIX: previously returned silently; log like the configDb twin does
				log.Error("could not ping metricDb!", err)
				return err
			}
		} else {
			log.Info("connect to metricDb OK!")
			break
		}
	}
	metricDb.SetMaxIdleConns(1)
	metricDb.SetMaxOpenConns(2)
	metricDb.SetConnMaxLifetime(time.Second * time.Duration(PG_CONN_RECYCLE_SECONDS))
	return nil
}
// DBExecRead executes the given SQL on conn and returns all result rows as
// column-name -> value maps. host_ident identifies the connection for logging
// and cache handling: on a query error against a monitored host (i.e. not the
// gatherer's own config / metric store DBs) the connection is closed and its
// monitored_db_conn_cache entry nil-ed so the next call re-connects.
func DBExecRead(conn *sqlx.DB, host_ident, sql string, args ...interface{}) ([](map[string]interface{}), error) {
	ret := make([]map[string]interface{}, 0)
	var rows *sqlx.Rows
	var err error
	if conn == nil {
		return nil, errors.New("nil connection")
	}
	rows, err = conn.Queryx(sql, args...)
	if err != nil {
		if !(host_ident == METRICDB_IDENT || host_ident == CONFIGDB_IDENT) {
			if conn != nil { // NB! always true here due to the nil check above; kept for safety
				conn.Close()
			}
			monitored_db_conn_cache_lock.Lock()
			defer monitored_db_conn_cache_lock.Unlock()
			if _, ok := monitored_db_conn_cache[host_ident]; ok {
				monitored_db_conn_cache[host_ident] = nil // evict so DBExecReadByDbUniqueName re-opens
			}
			// connection problems or bad queries etc are quite common so caller should decide if to output something
			log.Debug("failed to query", host_ident, "sql:", sql, "err:", err)
		}
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		row := make(map[string]interface{})
		err = rows.MapScan(row)
		if err != nil {
			log.Error("failed to MapScan a result row", host_ident, err)
			return nil, err
		}
		ret = append(ret, row)
	}
	// surface any error that terminated iteration early; partial rows are still returned
	err = rows.Err()
	if err != nil {
		log.Error("failed to fully process resultset for", host_ident, "sql:", sql, "err:", err)
	}
	return ret, err
}
// DBExecReadByDbUniqueName runs a metric SQL against the monitored DB known as
// dbUnique and returns the result rows, an error and the query duration.
// NB! unconventional return order - the error is the middle return value.
// Concurrency per host is limited via a token taken from (and returned to)
// db_conn_limiting_channel. With useCache a pooled connection from
// monitored_db_conn_cache is re-used (and re-opened if it was evicted after an
// error); otherwise a throwaway connection is opened and closed for this call.
func DBExecReadByDbUniqueName(dbUnique, metricName string, useCache bool, stmtTimeoutOverride int64, sql string, args ...interface{}) ([](map[string]interface{}), error, time.Duration) {
	var conn *sqlx.DB
	var libPQConnStr string
	var exists bool
	var md MonitoredDatabase
	var err error
	var duration time.Duration
	if strings.TrimSpace(sql) == "" {
		return nil, errors.New("empty SQL"), duration
	}
	md, err = GetMonitoredDatabaseByUniqueName(dbUnique)
	if err != nil {
		return nil, err, duration
	}
	db_conn_limiting_channel_lock.RLock()
	conn_limit_channel, ok := db_conn_limiting_channel[dbUnique]
	db_conn_limiting_channel_lock.RUnlock()
	if !ok {
		log.Fatal("db_conn_limiting_channel not initialized for ", dbUnique)
	}
	// blocks until a concurrency token for this host becomes available
	//log.Debugf("Waiting for SQL token [%s:%s]...", msg.DBUniqueName, msg.MetricName)
	token := <-conn_limit_channel
	defer func() {
		conn_limit_channel <- token // give the token back on any exit path
	}()
	libPQConnStr = md.LibPQConnStr
	if opts.AdHocConnString != "" {
		libPQConnStr = opts.AdHocConnString
	}
	if !useCache {
		if md.DBType == DBTYPE_BOUNCER {
			md.DBName = "pgbouncer" // pgbouncer exposes its stats via the special "pgbouncer" pseudo-DB
		}
		conn, err = GetPostgresDBConnection(libPQConnStr, md.Host, md.Port, md.DBName, md.User, md.Password,
			md.SslMode, md.SslRootCAPath, md.SslClientCertPath, md.SslClientKeyPath)
		if err != nil {
			return nil, err, duration
		}
		defer conn.Close()
	} else {
		var dbStats go_sql.DBStats
		monitored_db_conn_cache_lock.RLock()
		conn, exists = monitored_db_conn_cache[dbUnique]
		monitored_db_conn_cache_lock.RUnlock()
		if conn != nil {
			dbStats = conn.Stats()
		}
		// (re)open when not cached, evicted after an error, or the pool has gone idle
		if !exists || conn == nil || dbStats.OpenConnections == 0 {
			if md.DBType == DBTYPE_BOUNCER {
				md.DBName = "pgbouncer"
			}
			conn, err = GetPostgresDBConnection(libPQConnStr, md.Host, md.Port, md.DBName, md.User, md.Password,
				md.SslMode, md.SslRootCAPath, md.SslClientCertPath, md.SslClientKeyPath)
			if err != nil {
				return nil, err, duration
			}
			conn.SetMaxIdleConns(1)
			conn.SetMaxOpenConns(MAX_PG_CONNECTIONS_PER_MONITORED_DB)
			// recycling periodically makes sense as long sessions might bloat memory or maybe conn info (password) was changed
			conn.SetConnMaxLifetime(time.Second * time.Duration(PG_CONN_RECYCLE_SECONDS))
			monitored_db_conn_cache_lock.Lock()
			monitored_db_conn_cache[dbUnique] = conn
			monitored_db_conn_cache_lock.Unlock()
		}
	}
	// apply the per-DB (or per-call override) statement timeout for real Postgres targets
	if !adHocMode && IsPostgresDBType(md.DBType) {
		stmtTimeout := md.StmtTimeout
		if stmtTimeoutOverride > 0 {
			stmtTimeout = stmtTimeoutOverride
		}
		if stmtTimeout > 0 { // 0 = don't change, use DB level settings
			_, err = DBExecRead(conn, dbUnique, fmt.Sprintf("SET statement_timeout TO '%ds'", stmtTimeout))
		}
		if err != nil {
			atomic.AddUint64(&totalMetricFetchFailuresCounter, 1)
			return nil, err, duration
		}
	}
	t1 := time.Now()
	data, err := DBExecRead(conn, dbUnique, sql, args...)
	t2 := time.Now()
	if err != nil {
		atomic.AddUint64(&totalMetricFetchFailuresCounter, 1)
	}
	return data, err, t2.Sub(t1)
}
// GetAllActiveHostsFromConfigDB fetches all enabled monitored_db rows (with
// their effective primary / standby metric configs resolved from presets).
// It first tries the SQL matching the latest config schema and, on failure,
// falls back to the previous schema version so that an older config DB keeps
// working - while warning that a gatherer update is required.
func GetAllActiveHostsFromConfigDB() ([](map[string]interface{}), error) {
	sql_latest := `
	select /* pgwatch2_generated */
	md_unique_name, md_group, md_dbtype, md_hostname, md_port, md_dbname, md_user, coalesce(md_password, '') as md_password,
	coalesce(p.pc_config, md_config)::text as md_config, coalesce(s.pc_config, md_config_standby, '{}'::jsonb)::text as md_config_standby,
	md_statement_timeout_seconds, md_sslmode, md_is_superuser,
	coalesce(md_include_pattern, '') as md_include_pattern, coalesce(md_exclude_pattern, '') as md_exclude_pattern,
	coalesce(md_custom_tags::text, '{}') as md_custom_tags, md_root_ca_path, md_client_cert_path, md_client_key_path,
	md_password_type, coalesce(md_host_config, '{}')::text as md_host_config, md_only_if_master
	from
	pgwatch2.monitored_db
	left join
	pgwatch2.preset_config p on p.pc_name = md_preset_config_name /* primary preset if any */
	left join
	pgwatch2.preset_config s on s.pc_name = md_preset_config_name_standby /* standby preset if any */
	where
	md_is_enabled
	`
	// previous schema version: no standby config / standby preset columns yet
	sql_prev := `
	select /* pgwatch2_generated */
	md_unique_name, md_group, md_dbtype, md_hostname, md_port, md_dbname, md_user, coalesce(md_password, '') as md_password,
	coalesce(pc_config, md_config)::text as md_config, md_statement_timeout_seconds, md_sslmode, md_is_superuser,
	coalesce(md_include_pattern, '') as md_include_pattern, coalesce(md_exclude_pattern, '') as md_exclude_pattern,
	coalesce(md_custom_tags::text, '{}') as md_custom_tags, md_root_ca_path, md_client_cert_path, md_client_key_path,
	md_password_type, coalesce(md_host_config, '{}')::text as md_host_config, md_only_if_master
	from
	pgwatch2.monitored_db
	left join
	pgwatch2.preset_config on pc_name = md_preset_config_name
	where
	md_is_enabled
	`
	data, err := DBExecRead(configDb, CONFIGDB_IDENT, sql_latest)
	if err != nil {
		err1 := err
		log.Debugf("Failed to query the monitored DB-s config with latest SQL: %v ", err1)
		data, err = DBExecRead(configDb, CONFIGDB_IDENT, sql_prev)
		if err == nil {
			log.Warning("Fetching monitored DB-s config succeeded with SQL from previous schema version - gatherer update required!")
		} else {
			log.Errorf("Failed to query the monitored DB-s config: %v", err1) // show the original error
		}
	}
	return data, err
}
// GetMonitoredDatabasesFromConfigDB reads all enabled entries from the config
// DB, applies the --groups filter, decrypts passwords where configured and
// expands "continuous discovery" type entries (postgres-continuous-discovery,
// patroni*) into their individual member databases.
func GetMonitoredDatabasesFromConfigDB() ([]MonitoredDatabase, error) {
	monitoredDBs := make([]MonitoredDatabase, 0)
	activeHostData, err := GetAllActiveHostsFromConfigDB()
	groups := strings.Split(opts.Group, ",")
	skippedEntries := 0
	if err != nil {
		log.Errorf("Failed to read monitoring config from DB: %s", err)
		return monitoredDBs, err
	}
	for _, row := range activeHostData {
		if len(opts.Group) > 0 { // filter out rows with non-matching groups
			matched := false
			for _, g := range groups {
				if row["md_group"].(string) == g {
					matched = true
					break
				}
			}
			if !matched {
				skippedEntries += 1
				continue
			}
		}
		metricConfig, err := jsonTextToMap(row["md_config"].(string))
		if err != nil {
			log.Warningf("Cannot parse metrics JSON config for \"%s\": %v", row["md_unique_name"].(string), err)
			continue
		}
		metricConfigStandby := make(map[string]float64)
		if configStandby, ok := row["md_config_standby"]; ok { // column only present in the latest config schema
			metricConfigStandby, err = jsonTextToMap(configStandby.(string))
			if err != nil {
				log.Warningf("Cannot parse standby metrics JSON config for \"%s\". Ignoring standby config: %v", row["md_unique_name"].(string), err)
			}
		}
		customTags, err := jsonTextToStringMap(row["md_custom_tags"].(string))
		if err != nil {
			log.Warningf("Cannot parse custom tags JSON for \"%s\". Ignoring custom tags. Error: %v", row["md_unique_name"].(string), err)
			customTags = nil
		}
		hostConfigAttrs := HostConfigAttrs{}
		err = yaml.Unmarshal([]byte(row["md_host_config"].(string)), &hostConfigAttrs)
		if err != nil {
			log.Warningf("Cannot parse host config JSON for \"%s\". Ignoring host config. Error: %v", row["md_unique_name"].(string), err)
		}
		md := MonitoredDatabase{
			DBUniqueName:         row["md_unique_name"].(string),
			DBUniqueNameOrig:     row["md_unique_name"].(string),
			Host:                 row["md_hostname"].(string),
			Port:                 row["md_port"].(string),
			DBName:               row["md_dbname"].(string),
			User:                 row["md_user"].(string),
			IsSuperuser:          row["md_is_superuser"].(bool),
			Password:             row["md_password"].(string),
			PasswordType:         row["md_password_type"].(string),
			SslMode:              row["md_sslmode"].(string),
			SslRootCAPath:        row["md_root_ca_path"].(string),
			SslClientCertPath:    row["md_client_cert_path"].(string),
			SslClientKeyPath:     row["md_client_key_path"].(string),
			StmtTimeout:          row["md_statement_timeout_seconds"].(int64),
			Metrics:              metricConfig,
			MetricsStandby:       metricConfigStandby,
			DBType:               row["md_dbtype"].(string),
			DBNameIncludePattern: row["md_include_pattern"].(string),
			DBNameExcludePattern: row["md_exclude_pattern"].(string),
			Group:                row["md_group"].(string),
			HostConfig:           hostConfigAttrs,
			OnlyIfMaster:         row["md_only_if_master"].(bool),
			CustomTags:           customTags}
		if _, ok := dbTypeMap[md.DBType]; !ok {
			log.Warningf("Ignoring host \"%s\" - unknown dbtype: %s. Expected one of: %+v", md.DBUniqueName, md.DBType, dbTypes)
			continue
		}
		if md.PasswordType == "aes-gcm-256" && opts.AesGcmKeyphrase != "" {
			md.Password = decrypt(md.DBUniqueName, opts.AesGcmKeyphrase, md.Password)
		}
		if md.DBType == DBTYPE_PG_CONT {
			resolved, err := ResolveDatabasesFromConfigEntry(md)
			if err != nil {
				log.Errorf("Failed to resolve DBs for \"%s\": %s", md.DBUniqueName, err)
				if md.PasswordType == "aes-gcm-256" && opts.AesGcmKeyphrase == "" {
					log.Errorf("No decryption key set. Use the --aes-gcm-keyphrase or --aes-gcm-keyphrase params to set")
				}
				continue
			}
			temp_arr := make([]string, 0)
			for _, rdb := range resolved {
				monitoredDBs = append(monitoredDBs, rdb)
				temp_arr = append(temp_arr, rdb.DBName)
			}
			log.Debugf("Resolved %d DBs with prefix \"%s\": [%s]", len(resolved), md.DBUniqueName, strings.Join(temp_arr, ", "))
		} else if md.DBType == DBTYPE_PATRONI || md.DBType == DBTYPE_PATRONI_CONT || md.DBType == DBTYPE_PATRONI_NAMESPACE_DISCOVERY {
			resolved, err := ResolveDatabasesFromPatroni(md)
			if err != nil {
				log.Errorf("Failed to resolve DBs for \"%s\": %s", md.DBUniqueName, err)
				continue
			}
			temp_arr := make([]string, 0)
			for _, rdb := range resolved {
				monitoredDBs = append(monitoredDBs, rdb)
				temp_arr = append(temp_arr, rdb.DBName)
			}
			log.Debugf("Resolved %d DBs with prefix \"%s\": [%s]", len(resolved), md.DBUniqueName, strings.Join(temp_arr, ", "))
		} else {
			monitoredDBs = append(monitoredDBs, md)
		}
		// BUGFIX: removed the no-op "monitoredDBs = append(monitoredDBs)" statement
	}
	// BUGFIX: moved out of the row loop - log the filtering summary only once
	if skippedEntries > 0 {
		log.Infof("Filtered out %d config entries based on --groups input", skippedEntries)
	}
	return monitoredDBs, err
}
// SendToInflux writes a batch of metric store messages to one InfluxDB
// instance (conn_id is only used for logging). Rows get the monitored DB name
// and any custom tags attached; result columns prefixed with TAG_PREFIX become
// Influx tags, the EPOCH_COLUMN_NAME column supplies the point timestamp (the
// gatherer's own clock is used as fallback). One connection retry is done;
// write statistics counters are updated atomically on success / failure.
func SendToInflux(connect_str, conn_id string, storeMessages []MetricStoreMessage) error {
	if len(storeMessages) == 0 {
		return nil
	}
	ts_warning_printed := false
	retries := 1 // 1 retry
retry:
	c, err := client.NewHTTPClient(client.HTTPConfig{
		Addr:     connect_str,
		Username: opts.InfluxUser,
		Password: opts.InfluxPassword,
	})
	if err != nil {
		log.Error("Error connecting to Influx", conn_id, ": ", err)
		if retries > 0 {
			retries--
			time.Sleep(time.Millisecond * 200)
			goto retry
		}
		atomic.AddUint64(&datastoreWriteFailuresCounter, 1)
		return err
	}
	defer c.Close()
	bp, err := client.NewBatchPoints(client.BatchPointsConfig{Database: opts.InfluxDbname})
	if err != nil {
		atomic.AddUint64(&datastoreWriteFailuresCounter, 1)
		return err
	}
	rows_batched := 0
	total_rows := 0
	for _, msg := range storeMessages {
		if msg.Data == nil || len(msg.Data) == 0 {
			continue
		}
		log.Debugf("SendToInflux %s data[0] of %d [%s:%s]: %v", conn_id, len(msg.Data), msg.DBUniqueName, msg.MetricName, msg.Data[0])
		for _, dr := range msg.Data {
			// Create a point and add to batch
			var epoch_time time.Time
			var epoch_ns int64
			tags := make(map[string]string)
			fields := make(map[string]interface{})
			total_rows += 1
			tags["dbname"] = msg.DBUniqueName
			if msg.CustomTags != nil {
				for k, v := range msg.CustomTags {
					tags[k] = fmt.Sprintf("%v", v)
				}
			}
			for k, v := range dr {
				if v == nil || v == "" {
					continue // not storing NULLs
				}
				if k == EPOCH_COLUMN_NAME {
					epoch_ns = v.(int64)
				} else if strings.HasPrefix(k, TAG_PREFIX) {
					// CONSISTENCY FIX: derive the slice offset from TAG_PREFIX instead of hard-coding 4
					tag := k[len(TAG_PREFIX):]
					tags[tag] = fmt.Sprintf("%v", v)
				} else {
					fields[k] = v
				}
			}
			if epoch_ns == 0 {
				if !ts_warning_printed && msg.MetricName != SPECIAL_METRIC_PGBOUNCER_STATS {
					log.Warning("No timestamp_ns found, (gatherer) server time will be used. measurement:", msg.MetricName)
					ts_warning_printed = true
				}
				epoch_time = time.Now()
			} else {
				epoch_time = time.Unix(0, epoch_ns)
			}
			pt, err := client.NewPoint(msg.MetricName, tags, fields, epoch_time)
			if err != nil {
				log.Errorf("Calling NewPoint() of Influx driver failed. Datapoint \"%s\" dropped. Err: %s", dr, err)
				atomic.AddUint64(&totalMetricsDroppedCounter, 1)
				continue
			}
			bp.AddPoint(pt)
			rows_batched += 1
		}
	}
	t1 := time.Now()
	err = c.Write(bp)
	t_diff := time.Since(t1)
	if err == nil {
		if len(storeMessages) == 1 {
			log.Infof("wrote %d/%d rows to InfluxDB %s for [%s:%s] in %.1f ms", rows_batched, total_rows,
				conn_id, storeMessages[0].DBUniqueName, storeMessages[0].MetricName, float64(t_diff.Nanoseconds())/1000000.0)
		} else {
			log.Infof("wrote %d/%d rows from %d metric sets to InfluxDB %s in %.1f ms", rows_batched, total_rows,
				len(storeMessages), conn_id, float64(t_diff.Nanoseconds())/1000000.0)
		}
		atomic.StoreInt64(&lastSuccessfulDatastoreWriteTimeEpoch, t1.Unix())
		atomic.AddUint64(&datastoreTotalWriteTimeMicroseconds, uint64(t_diff.Microseconds()))
		atomic.AddUint64(&datastoreWriteSuccessCounter, 1)
	} else {
		atomic.AddUint64(&datastoreWriteFailuresCounter, 1)
	}
	return err
}
func SendToPostgres(storeMessages []MetricStoreMessage) error {
if len(storeMessages) == 0 {
return nil
}
ts_warning_printed := false
metricsToStorePerMetric := make(map[string][]MetricStoreMessagePostgres)
rows_batched := 0
total_rows := 0
pg_part_bounds := make(map[string]ExistingPartitionInfo) // metric=min/max
pg_part_bounds_dbname := make(map[string]map[string]ExistingPartitionInfo) // metric=[dbname=min/max]
var err error
if PGSchemaType == "custom" {
metricsToStorePerMetric["metrics"] = make([]MetricStoreMessagePostgres, 0) // everything inserted into "metrics".
// TODO warn about collision if someone really names some new metric "metrics"
}
for _, msg := range storeMessages {
if msg.Data == nil || len(msg.Data) == 0 {
continue
}
log.Debug("SendToPG data[0] of ", len(msg.Data), ":", msg.Data[0])
for _, dr := range msg.Data {
var epoch_time time.Time
var epoch_ns int64
tags := make(map[string]interface{})
fields := make(map[string]interface{})
total_rows += 1
if msg.CustomTags != nil {
for k, v := range msg.CustomTags {
tags[k] = fmt.Sprintf("%v", v)
}
}
for k, v := range dr {
if v == nil || v == "" {
continue // not storing NULLs
}
if k == EPOCH_COLUMN_NAME {
epoch_ns = v.(int64)
} else if strings.HasPrefix(k, TAG_PREFIX) {
tag := k[4:]
tags[tag] = fmt.Sprintf("%v", v)
} else {
fields[k] = v
}
}
if epoch_ns == 0 {
if !ts_warning_printed && msg.MetricName != SPECIAL_METRIC_PGBOUNCER_STATS {
log.Warning("No timestamp_ns found, server time will be used. measurement:", msg.MetricName)
ts_warning_printed = true
}
epoch_time = time.Now()
} else {
epoch_time = time.Unix(0, epoch_ns)
}
var metricsArr []MetricStoreMessagePostgres
var ok bool
var metricNameTemp string
if PGSchemaType == "custom" {
metricNameTemp = "metrics"
} else {
metricNameTemp = msg.MetricName
}
metricsArr, ok = metricsToStorePerMetric[metricNameTemp]
if !ok {
metricsToStorePerMetric[metricNameTemp] = make([]MetricStoreMessagePostgres, 0)
}
metricsArr = append(metricsArr, MetricStoreMessagePostgres{Time: epoch_time, DBName: msg.DBUniqueName,
Metric: msg.MetricName, Data: fields, TagData: tags})
metricsToStorePerMetric[metricNameTemp] = metricsArr
rows_batched += 1
if PGSchemaType == "metric" || PGSchemaType == "metric-time" || PGSchemaType == "timescale" {
// set min/max timestamps to check/create partitions
bounds, ok := pg_part_bounds[msg.MetricName]
if !ok || (ok && epoch_time.Before(bounds.StartTime)) {
bounds.StartTime = epoch_time
pg_part_bounds[msg.MetricName] = bounds
}
if !ok || (ok && epoch_time.After(bounds.EndTime)) {
bounds.EndTime = epoch_time
pg_part_bounds[msg.MetricName] = bounds
}
} else if PGSchemaType == "metric-dbname-time" {
_, ok := pg_part_bounds_dbname[msg.MetricName]
if !ok {
pg_part_bounds_dbname[msg.MetricName] = make(map[string]ExistingPartitionInfo)
}
bounds, ok := pg_part_bounds_dbname[msg.MetricName][msg.DBUniqueName]
if !ok || (ok && epoch_time.Before(bounds.StartTime)) {
bounds.StartTime = epoch_time
pg_part_bounds_dbname[msg.MetricName][msg.DBUniqueName] = bounds
}
if !ok || (ok && epoch_time.After(bounds.EndTime)) {
bounds.EndTime = epoch_time
pg_part_bounds_dbname[msg.MetricName][msg.DBUniqueName] = bounds
}
}
}
}
if PGSchemaType == "metric" {
err = EnsureMetric(pg_part_bounds, forceRecreatePGMetricPartitions)
} else if PGSchemaType == "metric-time" {
err = EnsureMetricTime(pg_part_bounds, forceRecreatePGMetricPartitions, false)
} else if PGSchemaType == "metric-dbname-time" {
err = EnsureMetricDbnameTime(pg_part_bounds_dbname, forceRecreatePGMetricPartitions)
} else if PGSchemaType == "timescale" {
err = EnsureMetricTimescale(pg_part_bounds, forceRecreatePGMetricPartitions)
} else {
log.Fatal("should never happen...")
}
if forceRecreatePGMetricPartitions {
forceRecreatePGMetricPartitions = false
}
if err != nil {
atomic.AddUint64(&datastoreWriteFailuresCounter, 1)
return err
}
// send data to PG, with a separate COPY for all metrics
log.Debugf("COPY-ing %d metrics to Postgres metricsDB...", rows_batched)
t1 := time.Now()
txn, err := metricDb.Begin()
if err != nil {
log.Error("Could not start Postgres metricsDB transaction:", err)
atomic.AddUint64(&datastoreWriteFailuresCounter, 1)
return err
}
defer func() {
if err == nil {
tx_err := txn.Commit()
if tx_err != nil {
log.Debug("COPY Commit to Postgres failed:", tx_err)
}
} else {
tx_err := txn.Rollback()
if tx_err != nil {
log.Debug("COPY Rollback to Postgres failed:", tx_err)
}
}
}()
for metricName, metrics := range metricsToStorePerMetric {
var stmt *go_sql.Stmt
if PGSchemaType == "custom" {
stmt, err = txn.Prepare(pq.CopyIn("metrics", "time", "dbname", "metric", "data", "tag_data"))
if err != nil {
log.Error("Could not prepare COPY to 'metrics' table:", err)
atomic.AddUint64(&datastoreWriteFailuresCounter, 1)
return err
}
} else {
log.Debugf("COPY-ing %d rows into '%s'...", len(metrics), metricName)
stmt, err = txn.Prepare(pq.CopyIn(metricName, "time", "dbname", "data", "tag_data"))
if err != nil {
log.Errorf("Could not prepare COPY to '%s' table: %v", metricName, err)
atomic.AddUint64(&datastoreWriteFailuresCounter, 1)
return err
}
}
for _, m := range metrics {
jsonBytes, err := mapToJson(m.Data)
if err != nil {
log.Errorf("Skipping 1 metric for [%s:%s] due to JSON conversion error: %s", m.DBName, m.Metric, err)
atomic.AddUint64(&totalMetricsDroppedCounter, 1)
continue
}
if len(m.TagData) > 0 {
jsonBytesTags, err := mapToJson(m.TagData)
if err != nil {
log.Errorf("Skipping 1 metric for [%s:%s] due to JSON conversion error: %s", m.DBName, m.Metric, err)
atomic.AddUint64(&datastoreWriteFailuresCounter, 1)
goto stmt_close
}
if PGSchemaType == "custom" {
_, err = stmt.Exec(m.Time, m.DBName, m.Metric, string(jsonBytes), string(jsonBytesTags))
} else {
_, err = stmt.Exec(m.Time, m.DBName, string(jsonBytes), string(jsonBytesTags))
}
if err != nil {
log.Errorf("Formatting metric %s data to COPY format failed for %s: %v ", m.Metric, m.DBName, err)
atomic.AddUint64(&datastoreWriteFailuresCounter, 1)
goto stmt_close
}
} else {
if PGSchemaType == "custom" {
_, err = stmt.Exec(m.Time, m.DBName, m.Metric, string(jsonBytes), nil)
} else {
_, err = stmt.Exec(m.Time, m.DBName, string(jsonBytes), nil)
}
if err != nil {
log.Errorf("Formatting metric %s data to COPY format failed for %s: %v ", m.Metric, m.DBName, err)
atomic.AddUint64(&datastoreWriteFailuresCounter, 1)
goto stmt_close
}
}
}
_, err = stmt.Exec()
if err != nil {
log.Error("COPY to Postgres failed:", err)
atomic.AddUint64(&datastoreWriteFailuresCounter, 1)
if strings.Contains(err.Error(), "no partition") {
log.Warning("Some metric partitions might have been removed, halting all metric storage. Trying to re-create all needed partitions on next run")
forceRecreatePGMetricPartitions = true
}
}
stmt_close:
err = stmt.Close()
if err != nil {
log.Error("stmt.Close() failed:", err)
}
}
t_diff := time.Since(t1)
if err == nil {
if len(storeMessages) == 1 {
log.Infof("wrote %d/%d rows to Postgres for [%s:%s] in %.1f ms", rows_batched, total_rows,
storeMessages[0].DBUniqueName, storeMessages[0].MetricName, float64(t_diff.Nanoseconds())/1000000)
} else {
log.Infof("wrote %d/%d rows from %d metric sets to Postgres in %.1f ms", rows_batched, total_rows,
len(storeMessages), float64(t_diff.Nanoseconds())/1000000)
}
atomic.StoreInt64(&lastSuccessfulDatastoreWriteTimeEpoch, t1.Unix())
atomic.AddUint64(&datastoreTotalWriteTimeMicroseconds, uint64(t_diff.Microseconds()))
atomic.AddUint64(&datastoreWriteSuccessCounter, 1)
}
return err
}
// OldPostgresMetricsDeleter is a background worker that periodically (every 12h,
// after an initial 1h startup delay) removes metrics data older than
// metricAgeDaysThreshold days from the Postgres metrics DB. The cleanup strategy
// depends on the storage schema:
//   - "metric": plain batched row deletes (no time partitions to drop)
//   - "timescale", or time-partitioned schemas where the listing helper is
//     missing (<= v1.8.1): one bulk admin.drop_old_time_partitions() call
//   - time-partitioned schemas with admin.get_old_time_partitions() available:
//     partitions are listed first and dropped one by one in separate
//     transactions, to avoid hitting "max_locks_per_transaction" on large setups
// Runs forever; errors only delay the next retry (300s), they never exit.
func OldPostgresMetricsDeleter(metricAgeDaysThreshold int64, schemaType string) {
	sqlDoesOldPartListingFuncExist := `SELECT count(*) FROM information_schema.routines WHERE routine_schema = 'admin' AND routine_name = 'get_old_time_partitions'`
	oldPartListingFuncExists := false // if func existing (>v1.8.1) then use it to drop old partitions in smaller batches
	// as for large setup (50+ DBs) one could reach the default "max_locks_per_transaction" otherwise
	ret, err := DBExecRead(metricDb, METRICDB_IDENT, sqlDoesOldPartListingFuncExist)
	if err == nil && len(ret) > 0 && ret[0]["count"].(int64) > 0 {
		oldPartListingFuncExists = true
	}

	time.Sleep(time.Hour * 1) // to reduce distracting log messages at startup

	for {
		// metric|metric-time|metric-dbname-time|custom
		if schemaType == "metric" {
			rows_deleted, err := DeleteOldPostgresMetrics(metricAgeDaysThreshold)
			if err != nil {
				log.Errorf("Failed to delete old (>%d days) metrics from Postgres: %v", metricAgeDaysThreshold, err)
				time.Sleep(time.Second * 300)
				continue
			}
			log.Infof("Deleted %d old metrics rows...", rows_deleted)
		} else if schemaType == "timescale" || (!oldPartListingFuncExists && (schemaType == "metric-time" || schemaType == "metric-dbname-time")) {
			parts_dropped, err := DropOldTimePartitions(metricAgeDaysThreshold)
			if err != nil {
				log.Errorf("Failed to drop old partitions (>%d days) from Postgres: %v", metricAgeDaysThreshold, err)
				time.Sleep(time.Second * 300)
				continue
			}
			log.Infof("Dropped %d old metric partitions...", parts_dropped)
		} else if oldPartListingFuncExists && (schemaType == "metric-time" || schemaType == "metric-dbname-time") {
			partsToDrop, err := GetOldTimePartitions(metricAgeDaysThreshold)
			if err != nil {
				log.Errorf("Failed to get a listing of old (>%d days) time partitions from Postgres metrics DB - check that the admin.get_old_time_partitions() function is rolled out: %v", metricAgeDaysThreshold, err)
				time.Sleep(time.Second * 300)
				continue
			}
			if len(partsToDrop) > 0 {
				log.Infof("Dropping %d old metric partitions one by one...", len(partsToDrop))
				for _, toDrop := range partsToDrop {
					sqlDropTable := fmt.Sprintf(`DROP TABLE IF EXISTS %s`, toDrop)
					log.Debugf("Dropping old metric data partition: %s", toDrop)

					_, err := DBExecRead(metricDb, METRICDB_IDENT, sqlDropTable)
					if err != nil {
						log.Errorf("Failed to drop old partition %s from Postgres metrics DB: %v", toDrop, err)
						time.Sleep(time.Second * 300) // back off longer on failure
					} else {
						time.Sleep(time.Second * 5) // throttle between successful drops
					}
				}
			} else {
				log.Infof("No old metric partitions found to drop...")
			}
		}
		time.Sleep(time.Hour * 12)
	}
}
// DeleteOldPostgresMetrics removes rows older than metricAgeDaysThreshold days
// from all top-level metric tables of the non-partitioned "metric" schema.
// To keep locking and WAL churn manageable, deletion proceeds per table in
// chunks of max 5000 rows (bounded via a ctid range), sleeping 500ms between
// chunks. Returns the total number of rows deleted; on error the count deleted
// so far is returned together with the error.
func DeleteOldPostgresMetrics(metricAgeDaysThreshold int64) (int64, error) {
	// for 'metric' schema i.e. no time partitions
	var total_dropped int64
	get_top_lvl_tables_sql := `
select 'public.' || quote_ident(c.relname) as table_full_name
from pg_class c
join pg_namespace n on n.oid = c.relnamespace
where relkind in ('r', 'p') and nspname = 'public'
and exists (select 1 from pg_attribute where attrelid = c.oid and attname = 'time')
and pg_catalog.obj_description(c.oid, 'pg_class') = 'pgwatch2-generated-metric-lvl'
order by 1
`
	// chunked delete: limit the scan to the ctid range of the oldest 5000
	// matching rows so each iteration stays small and fast
	delete_sql := `
with q_blocks_range as (
select min(ctid), max(ctid) from (
select ctid from %s
where time < (now() - '1day'::interval * %d)
order by ctid
limit 5000
) x
),
q_deleted as (
delete from %s
where ctid between (select min from q_blocks_range) and (select max from q_blocks_range)
and time < (now() - '1day'::interval * %d)
returning *
)
select count(*) from q_deleted;
`
	top_lvl_tables, err := DBExecRead(metricDb, METRICDB_IDENT, get_top_lvl_tables_sql)
	if err != nil {
		return total_dropped, err
	}

	for _, dr := range top_lvl_tables {
		log.Debugf("Dropping one chunk (max 5000 rows) of old data (if any found) from %v", dr["table_full_name"])
		sql := fmt.Sprintf(delete_sql, dr["table_full_name"].(string), metricAgeDaysThreshold, dr["table_full_name"].(string), metricAgeDaysThreshold)

		for {
			ret, err := DBExecRead(metricDb, METRICDB_IDENT, sql)
			if err != nil {
				return total_dropped, err
			}
			if ret[0]["count"].(int64) == 0 {
				break
			}
			total_dropped += ret[0]["count"].(int64)
			// BUGFIX: message used to claim "sleeping 100ms" while the code sleeps 500ms
			log.Debugf("Dropped %d rows from %v, sleeping 500ms...", ret[0]["count"].(int64), dr["table_full_name"])
			time.Sleep(time.Millisecond * 500)
		}
	}
	return total_dropped, nil
}
// DropOldTimePartitions drops metric time partitions older than the given
// day threshold via the admin.drop_old_time_partitions() DB helper and
// returns the number of partitions that were dropped.
func DropOldTimePartitions(metricAgeDaysThreshold int64) (int, error) {
	sql_old_part := `select admin.drop_old_time_partitions($1, $2)`

	ret, err := DBExecRead(metricDb, METRICDB_IDENT, sql_old_part, metricAgeDaysThreshold, false)
	if err != nil {
		log.Error("Failed to drop old time partitions from Postgres metricDB:", err)
		return 0, err
	}
	return int(ret[0]["drop_old_time_partitions"].(int64)), nil
}
// GetOldTimePartitions returns the names of metric time partitions older than
// the given day threshold, as listed by the admin.get_old_time_partitions()
// DB helper. On error an empty slice is returned together with the error.
func GetOldTimePartitions(metricAgeDaysThreshold int64) ([]string, error) {
	sqlGetOldParts := `select admin.get_old_time_partitions($1)`

	ret, err := DBExecRead(metricDb, METRICDB_IDENT, sqlGetOldParts, metricAgeDaysThreshold)
	if err != nil {
		log.Error("Failed to get a listing of old time partitions from Postgres metricDB:", err)
		return []string{}, err
	}

	partsToDrop := make([]string, 0, len(ret))
	for _, row := range ret {
		partsToDrop = append(partsToDrop, row["get_old_time_partitions"].(string))
	}
	return partsToDrop, nil
}
// CheckIfPGSchemaInitializedOrFail validates that the Postgres metrics DB has
// been initialized with one of the supported storage schemas and that the
// current session user has the privileges needed to write metrics into it.
// Returns the schema type string ("metric", "metric-time", "metric-dbname-time",
// "custom" or "timescale"); any problem is fatal (the process exits).
func CheckIfPGSchemaInitializedOrFail() string {
	var partFuncSignature string
	var pgSchemaType string

	schema_type_sql := `select schema_type from admin.storage_schema_type`
	ret, err := DBExecRead(metricDb, METRICDB_IDENT, schema_type_sql)
	if err != nil {
		log.Fatal("have you initialized the metrics schema, including a row in 'storage_schema_type' table, from schema_base.sql?", err)
	}
	if err == nil && len(ret) == 0 {
		log.Fatal("no metric schema selected, no row in table 'storage_schema_type'. see the README from the 'pgwatch2/sql/metric_store' folder on choosing a schema")
	}
	pgSchemaType = ret[0]["schema_type"].(string)
	if !(pgSchemaType == "metric" || pgSchemaType == "metric-time" || pgSchemaType == "metric-dbname-time" || pgSchemaType == "custom" || pgSchemaType == "timescale") {
		// BUGFIX: typo "Unknow" -> "Unknown" in the fatal message
		log.Fatalf("Unknown Postgres schema type found from Metrics DB: %s", pgSchemaType)
	}

	// verify INSERT privilege on the table that metric writes go through
	if pgSchemaType == "custom" {
		sql := `
SELECT has_table_privilege(session_user, 'public.metrics', 'INSERT') ok;
`
		ret, err := DBExecRead(metricDb, METRICDB_IDENT, sql)
		if err != nil || (err == nil && !ret[0]["ok"].(bool)) {
			log.Fatal("public.metrics table not existing or no INSERT privileges")
		}
	} else {
		sql := `
SELECT has_table_privilege(session_user, 'admin.metrics_template', 'INSERT') ok;
`
		ret, err := DBExecRead(metricDb, METRICDB_IDENT, sql)
		if err != nil || (err == nil && !ret[0]["ok"].(bool)) {
			log.Fatal("admin.metrics_template table not existing or no INSERT privileges")
		}
	}

	// each partitioned schema relies on a specific "ensure partition" function;
	// verify we are allowed to execute it ("custom" needs none)
	switch pgSchemaType {
	case "metric":
		partFuncSignature = "admin.ensure_partition_metric(text)"
	case "metric-time":
		partFuncSignature = "admin.ensure_partition_metric_time(text,timestamp with time zone,integer)"
	case "metric-dbname-time":
		partFuncSignature = "admin.ensure_partition_metric_dbname_time(text,text,timestamp with time zone,integer)"
	case "timescale":
		partFuncSignature = "admin.ensure_partition_timescale(text)"
	}

	if partFuncSignature != "" {
		sql := `
SELECT has_function_privilege(session_user,
'%s',
'execute') ok;
`
		ret, err := DBExecRead(metricDb, METRICDB_IDENT, fmt.Sprintf(sql, partFuncSignature))
		if err != nil || (err == nil && !ret[0]["ok"].(bool)) {
			log.Fatalf("%s function not existing or no EXECUTE privileges. Have you rolled out the schema correctly from pgwatch2/sql/metric_store?", partFuncSignature)
		}
	}
	return pgSchemaType
}
// AddDBUniqueMetricToListingTable inserts the (db_unique, metric) pair into
// the admin.all_distinct_dbname_metrics listing table unless it is already
// present; returns any DB error.
func AddDBUniqueMetricToListingTable(db_unique, metric string) error {
	sql := `insert into admin.all_distinct_dbname_metrics
select $1, $2
where not exists (
select * from admin.all_distinct_dbname_metrics where dbname = $1 and metric = $2
)`
	_, execErr := DBExecRead(metricDb, METRICDB_IDENT, sql, db_unique, metric)
	return execErr
}
// UniqueDbnamesListingMaintainer re-syncs the admin.all_distinct_dbname_metrics
// helper table with the dbnames actually present in the top-level metric
// tables. Needed because metric data deletion can leave the listing stale (a
// trigger-based approach was deliberately avoided). In daemonMode it loops
// forever, refreshing every 6h; otherwise it performs a single pass and returns.
func UniqueDbnamesListingMaintainer(daemonMode bool) {
	// due to metrics deletion the listing can go out of sync (a trigger not really wanted)
	sql_top_level_metrics := `SELECT table_name FROM admin.get_top_level_metric_tables()`
	// recursive "loose index scan" to get distinct dbnames cheaply from a
	// potentially huge metric table
	sql_distinct := `
WITH RECURSIVE t(dbname) AS (
SELECT MIN(dbname) AS dbname FROM %s
UNION
SELECT (SELECT MIN(dbname) FROM %s WHERE dbname > t.dbname) FROM t )
SELECT dbname FROM t WHERE dbname NOTNULL ORDER BY 1
`
	sql_delete := `DELETE FROM admin.all_distinct_dbname_metrics WHERE NOT dbname = ANY($1) and metric = $2 RETURNING *`
	sql_delete_all := `DELETE FROM admin.all_distinct_dbname_metrics WHERE metric = $1 RETURNING *`
	sql_add := `
INSERT INTO admin.all_distinct_dbname_metrics SELECT u, $2 FROM (select unnest($1::text[]) as u) x
WHERE NOT EXISTS (select * from admin.all_distinct_dbname_metrics where dbname = u and metric = $2)
RETURNING *;
`

	for {
		if daemonMode {
			time.Sleep(time.Hour * 6) // sleep first so startup is not burdened
		}

		log.Infof("Refreshing admin.all_distinct_dbname_metrics listing table...")
		all_distinct_metric_tables, err := DBExecRead(metricDb, METRICDB_IDENT, sql_top_level_metrics)
		if err != nil {
			log.Error("Could not refresh Postgres dbnames listing table:", err)
		} else {
			for _, dr := range all_distinct_metric_tables {
				found_dbnames_map := make(map[string]bool)
				found_dbnames_arr := make([]string, 0)
				metric_name := strings.Replace(dr["table_name"].(string), "public.", "", 1)

				log.Debugf("Refreshing all_distinct_dbname_metrics listing for metric: %s", metric_name)
				ret, err := DBExecRead(metricDb, METRICDB_IDENT, fmt.Sprintf(sql_distinct, dr["table_name"], dr["table_name"]))
				if err != nil {
					log.Errorf("Could not refresh Postgres all_distinct_dbname_metrics listing table for '%s': %s", metric_name, err)
					break
				}
				for _, dr_dbname := range ret {
					found_dbnames_map[dr_dbname["dbname"].(string)] = true // "set" behaviour, don't want duplicates
				}

				// delete all that are not known and add all that are not there
				for k := range found_dbnames_map {
					found_dbnames_arr = append(found_dbnames_arr, k)
				}
				if len(found_dbnames_arr) == 0 { // delete all entries for given metric
					log.Debugf("Deleting Postgres all_distinct_dbname_metrics listing table entries for metric '%s':", metric_name)
					_, err = DBExecRead(metricDb, METRICDB_IDENT, sql_delete_all, metric_name)
					if err != nil {
						log.Errorf("Could not delete Postgres all_distinct_dbname_metrics listing table entries for metric '%s': %s", metric_name, err)
					}
					continue
				}
				// drop stale entries not seen in the metric table...
				ret, err = DBExecRead(metricDb, METRICDB_IDENT, sql_delete, pq.Array(found_dbnames_arr), metric_name)
				if err != nil {
					log.Errorf("Could not refresh Postgres all_distinct_dbname_metrics listing table for metric '%s': %s", metric_name, err)
				} else if len(ret) > 0 {
					log.Infof("Removed %d stale entries from all_distinct_dbname_metrics listing table for metric: %s", len(ret), metric_name)
				}
				// ...and insert any that are missing from the listing
				ret, err = DBExecRead(metricDb, METRICDB_IDENT, sql_add, pq.Array(found_dbnames_arr), metric_name)
				if err != nil {
					log.Errorf("Could not refresh Postgres all_distinct_dbname_metrics listing table for metric '%s': %s", metric_name, err)
				} else if len(ret) > 0 {
					log.Infof("Added %d entry to the Postgres all_distinct_dbname_metrics listing table for metric: %s", len(ret), metric_name)
				}
			}
		}
		if !daemonMode {
			return
		}
	}
}
// EnsureMetricDummy makes sure a dummy metrics table exists for the given
// metric name when Postgres is the active datastore. Successful calls are
// cached (PGDummyMetricTables) so the admin.ensure_dummy_metrics_table()
// helper is invoked at most once per metric per hour.
func EnsureMetricDummy(metric string) {
	if opts.Datastore != DATASTORE_POSTGRES {
		return
	}
	sql_ensure := `
select admin.ensure_dummy_metrics_table($1) as created
`
	PGDummyMetricTablesLock.Lock()
	defer PGDummyMetricTablesLock.Unlock()

	// skip the DB call if we ensured this metric within the last hour
	if lastCall, cached := PGDummyMetricTables[metric]; cached && lastCall.After(time.Now().Add(-1*time.Hour)) {
		return
	}

	ret, err := DBExecRead(metricDb, METRICDB_IDENT, sql_ensure, metric)
	if err != nil {
		log.Errorf("Failed to create dummy partition of metric '%s': %v", metric, err)
		return
	}
	if ret[0]["created"].(bool) {
		log.Infof("Created a dummy partition of metric '%s'", metric)
	}
	PGDummyMetricTables[metric] = time.Now()
}
// EnsureMetric creates (if missing) a metric-level partition for every metric
// in pg_part_bounds via admin.ensure_partition_metric(). Created partitions
// are remembered in partitionMapMetric so the DB call is skipped on later
// invocations, unless force is set. Returns the first DB error encountered.
func EnsureMetric(pg_part_bounds map[string]ExistingPartitionInfo, force bool) error {
	sql_ensure := `
select * from admin.ensure_partition_metric($1)
`
	for m := range pg_part_bounds {
		// sequential access currently so no lock needed on the cache map
		if _, cached := partitionMapMetric[m]; cached && !force {
			continue
		}
		if _, err := DBExecRead(metricDb, METRICDB_IDENT, sql_ensure, m); err != nil {
			log.Errorf("Failed to create partition on metric '%s': %v", m, err)
			return err
		}
		partitionMapMetric[m] = ExistingPartitionInfo{}
	}
	return nil
}
// EnsureMetricTimescale creates (if missing) a TimescaleDB hypertable for every
// non-"_realtime" metric in pg_part_bounds via admin.ensure_partition_timescale(),
// caching created tables in partitionMapMetric. "_realtime" metrics are then
// handled as normal time partitions by delegating to EnsureMetricTime.
func EnsureMetricTimescale(pg_part_bounds map[string]ExistingPartitionInfo, force bool) error {
	sql_ensure := `
select * from admin.ensure_partition_timescale($1)
`
	for metric := range pg_part_bounds {
		if strings.HasSuffix(metric, "_realtime") {
			continue // realtime metrics use plain time partitions, handled below
		}
		if _, known := partitionMapMetric[metric]; known {
			continue
		}
		if _, err := DBExecRead(metricDb, METRICDB_IDENT, sql_ensure, metric); err != nil {
			log.Errorf("Failed to create a TimescaleDB table for metric '%s': %v", metric, err)
			return err
		}
		partitionMapMetric[metric] = ExistingPartitionInfo{}
	}

	return EnsureMetricTime(pg_part_bounds, force, true)
}
// EnsureMetricTime creates (if missing) time-based partitions covering each
// metric's requested [StartTime, EndTime] range by calling
// admin.ensure_partition_metric_time(). partitionMapMetric caches the range
// already known to be covered per metric, so the DB function is only invoked
// when the requested bounds fall outside of it (or force is set).
// realtime_only restricts processing to "_realtime"-suffixed metrics (used by
// the timescale schema, which handles regular metrics separately).
// Returns an error for zero time bounds or on DB failure.
func EnsureMetricTime(pg_part_bounds map[string]ExistingPartitionInfo, force bool, realtime_only bool) error {
	// TODO if less < 1d to part. end, precreate ?
	sql_ensure := `
select * from admin.ensure_partition_metric_time($1, $2)
`

	for metric, pb := range pg_part_bounds {
		if realtime_only && !strings.HasSuffix(metric, "_realtime") {
			continue
		}
		if pb.StartTime.IsZero() || pb.EndTime.IsZero() {
			return fmt.Errorf("zero StartTime/EndTime in partitioning request: [%s:%v]", metric, pb)
		}

		partInfo, ok := partitionMapMetric[metric]
		// ensure coverage from the requested start; also entered when the metric
		// is not cached at all (then partInfo gets initialized from the DB reply)
		if !ok || (ok && (pb.StartTime.Before(partInfo.StartTime))) || force {
			ret, err := DBExecRead(metricDb, METRICDB_IDENT, sql_ensure, metric, pb.StartTime)
			if err != nil {
				log.Error("Failed to create partition on 'metrics':", err)
				return err
			}
			if !ok {
				partInfo = ExistingPartitionInfo{}
			}
			// DB function reports the actual covered range of the ensured partition
			partInfo.StartTime = ret[0]["part_available_from"].(time.Time)
			partInfo.EndTime = ret[0]["part_available_to"].(time.Time)
			partitionMapMetric[metric] = partInfo
		}
		// ensure coverage through the requested end (inclusive boundary)
		if pb.EndTime.After(partInfo.EndTime) || pb.EndTime.Equal(partInfo.EndTime) || force {
			ret, err := DBExecRead(metricDb, METRICDB_IDENT, sql_ensure, metric, pb.EndTime)
			if err != nil {
				log.Error("Failed to create partition on 'metrics':", err)
				return err
			}
			partInfo.EndTime = ret[0]["part_available_to"].(time.Time)
			partitionMapMetric[metric] = partInfo
		}
	}
	return nil
}
// EnsureMetricDbnameTime creates (if missing) (metric, dbname)-scoped time
// partitions covering each requested [StartTime, EndTime] range by calling
// admin.ensure_partition_metric_dbname_time(). Works like EnsureMetricTime but
// with a second (dbname) partitioning dimension; the two-level
// partitionMapMetricDbname cache tracks the covered range per metric/dbname.
// Returns an error for zero time bounds or on DB failure.
func EnsureMetricDbnameTime(metric_dbname_part_bounds map[string]map[string]ExistingPartitionInfo, force bool) error {
	// TODO if less < 1d to part. end, precreate ?
	sql_ensure := `
select * from admin.ensure_partition_metric_dbname_time($1, $2, $3)
`

	for metric, dbnameTimestampMap := range metric_dbname_part_bounds {
		_, ok := partitionMapMetricDbname[metric]
		if !ok {
			partitionMapMetricDbname[metric] = make(map[string]ExistingPartitionInfo)
		}

		for dbname, pb := range dbnameTimestampMap {
			if pb.StartTime.IsZero() || pb.EndTime.IsZero() {
				return fmt.Errorf("zero StartTime/EndTime in partitioning request: [%s:%v]", metric, pb)
			}

			partInfo, ok := partitionMapMetricDbname[metric][dbname]
			// ensure coverage from the requested start (also first-seen case)
			if !ok || (ok && (pb.StartTime.Before(partInfo.StartTime))) || force {
				ret, err := DBExecRead(metricDb, METRICDB_IDENT, sql_ensure, metric, dbname, pb.StartTime)
				if err != nil {
					log.Errorf("Failed to create partition for [%s:%s]: %v", metric, dbname, err)
					return err
				}
				if !ok {
					partInfo = ExistingPartitionInfo{}
				}
				// DB function reports the actual covered range of the ensured partition
				partInfo.StartTime = ret[0]["part_available_from"].(time.Time)
				partInfo.EndTime = ret[0]["part_available_to"].(time.Time)
				partitionMapMetricDbname[metric][dbname] = partInfo
			}
			// ensure coverage through the requested end (inclusive boundary)
			if pb.EndTime.After(partInfo.EndTime) || pb.EndTime.Equal(partInfo.EndTime) || force {
				ret, err := DBExecRead(metricDb, METRICDB_IDENT, sql_ensure, metric, dbname, pb.EndTime)
				if err != nil {
					log.Errorf("Failed to create partition for [%s:%s]: %v", metric, dbname, err)
					return err
				}
				partInfo.EndTime = ret[0]["part_available_to"].(time.Time)
				partitionMapMetricDbname[metric][dbname] = partInfo
			}
		}
	}
	return nil
}
// InitGraphiteConnection establishes the global Graphite connection
// (graphiteConnection) to the given host/port; failure to connect is fatal.
func InitGraphiteConnection(host string, port int) {
	log.Debug("Connecting to Graphite...")

	conn, err := graphite.NewGraphite(host, port)
	if err != nil {
		log.Fatal("could not connect to Graphite:", err)
	}
	graphiteConnection = conn

	log.Debug("OK")
}
// SendToGraphite converts the fetched metric rows into Graphite metric points
// (named GRAPHITE_METRICS_PREFIX.<measurement>.<dbname>.<column>) and sends
// them over the global graphiteConnection. NULL/empty values are skipped, tag
// columns lose their tag prefix, and only numeric column values are sent.
// Updates the global datastore success/failure counters; returns the send error.
func SendToGraphite(dbname, measurement string, data [](map[string]interface{})) error {
	if len(data) == 0 {
		log.Warning("No data passed to SendToGraphite call")
		return nil
	}
	log.Debugf("Writing %d rows to Graphite", len(data))

	metric_base_prefix := GRAPHITE_METRICS_PREFIX + "." + measurement + "." + dbname + "."
	metrics := make([]graphite.Metric, 0, len(data)*len(data[0]))

	for _, dr := range data {
		var epoch_s int64

		// we loop over columns the first time just to find the timestamp
		for k, v := range dr {
			if v == nil || v == "" {
				continue // not storing NULLs
			} else if k == EPOCH_COLUMN_NAME {
				epoch_s = v.(int64) / 1e9 // epoch column is in nanoseconds
				break
			}
		}

		if epoch_s == 0 {
			log.Warning("No timestamp_ns found, server time will be used. measurement:", measurement)
			epoch_s = time.Now().Unix()
		}

		// second pass: emit one Graphite point per (numeric) data column
		for k, v := range dr {
			if v == nil || v == "" {
				continue // not storing NULLs
			}
			if k == EPOCH_COLUMN_NAME {
				continue
			} else {
				var metric graphite.Metric

				if strings.HasPrefix(k, TAG_PREFIX) { // ignore tags for Graphite
					metric.Name = metric_base_prefix + k[4:]
				} else {
					metric.Name = metric_base_prefix + k
				}

				// Graphite accepts only numeric values; everything else is dropped
				switch t := v.(type) {
				case int:
					metric.Value = fmt.Sprintf("%d", v)
				case int32:
					metric.Value = fmt.Sprintf("%d", v)
				case int64:
					metric.Value = fmt.Sprintf("%d", v)
				case float64:
					metric.Value = fmt.Sprintf("%f", v)
				default:
					log.Infof("Invalid (non-numeric) column type ignored: metric %s, column: %v, return type: %T", measurement, k, t)
					continue
				}
				metric.Timestamp = epoch_s
				metrics = append(metrics, metric)
			}
		}
	} // dr

	log.Debug("Sending", len(metrics), "metric points to Graphite...")
	t1 := time.Now()
	err := graphiteConnection.SendMetrics(metrics)
	t_diff := time.Since(t1)
	if err != nil {
		atomic.AddUint64(&datastoreWriteFailuresCounter, 1)
		log.Error("could not send metric to Graphite:", err)
	} else {
		atomic.StoreInt64(&lastSuccessfulDatastoreWriteTimeEpoch, t1.Unix())
		atomic.AddUint64(&datastoreTotalWriteTimeMicroseconds, uint64(t_diff.Microseconds()))
		atomic.AddUint64(&datastoreWriteSuccessCounter, 1)
		log.Debug("Sent in ", t_diff.Microseconds(), "us")
	}

	return err
}
// GetMonitoredDatabaseByUniqueName returns the cached MonitoredDatabase entry
// for the given unique name, or a zero value plus an error when unknown.
// Safe for concurrent use (read-locks the cache).
func GetMonitoredDatabaseByUniqueName(name string) (MonitoredDatabase, error) {
	monitored_db_cache_lock.RLock()
	defer monitored_db_cache_lock.RUnlock()

	md, found := monitored_db_cache[name]
	if !found {
		return MonitoredDatabase{}, errors.New("DBUnique not found")
	}
	return md, nil
}
// UpdateMonitoredDBCache atomically replaces the monitored DB lookup cache
// with the given entries, keyed by DBUniqueName. An empty slice is a no-op
// (the old cache is kept).
func UpdateMonitoredDBCache(data []MonitoredDatabase) {
	if len(data) == 0 {
		return
	}

	fresh := make(map[string]MonitoredDatabase, len(data))
	for _, md := range data {
		fresh[md.DBUniqueName] = md
	}

	monitored_db_cache_lock.Lock()
	monitored_db_cache = fresh
	monitored_db_cache_lock.Unlock()
}
// ProcessRetryQueue re-sends previously failed metric batches (oldest first)
// from retry_queue to the given datastore, removing each entry on success.
// Known-permanent Influx failures (unparseable data, max-values-per-tag
// exceeded) are dropped with a counter bump instead of retried forever; any
// other error aborts so the remaining queue is retried later. limit > 0 caps
// the number of entries processed in one call (0 = drain the whole queue).
func ProcessRetryQueue(data_source, conn_str, conn_ident string, retry_queue *list.List, limit int) error {
	var err error
	iterations_done := 0

	for retry_queue.Len() > 0 { // send over the whole re-try queue at once if connection works
		log.Debug("Processing retry_queue", conn_ident, ". Items in retry_queue: ", retry_queue.Len())
		msg := retry_queue.Back().Value.([]MetricStoreMessage)

		if data_source == DATASTORE_INFLUX {
			err = SendToInflux(conn_str, conn_ident, msg)
		} else if data_source == DATASTORE_POSTGRES {
			err = SendToPostgres(msg)
		} else if data_source == DATASTORE_GRAPHITE {
			for _, m := range msg {
				err = SendToGraphite(m.DBUniqueName, m.MetricName, m.Data) // TODO add baching
			}
		} else {
			log.Fatal("Invalid datastore:", data_source)
		}
		if err != nil {
			// permanent Influx errors: drop the batch rather than retry forever
			if data_source == DATASTORE_INFLUX && strings.Contains(err.Error(), "unable to parse") {
				if len(msg) == 1 { // can only pinpoint faulty input data without batching
					log.Errorf("Dropping metric [%s:%s] as Influx is unable to parse the data: %v",
						msg[0].DBUniqueName, msg[0].MetricName, msg[0].Data) // ignore data points consisting of anything else than strings and floats
					atomic.AddUint64(&totalMetricsDroppedCounter, 1)
				} else {
					log.Errorf("Dropping %d metric-sets as Influx is unable to parse the data: %s", len(msg), err)
					atomic.AddUint64(&totalMetricsDroppedCounter, uint64(len(msg)))
				}
			} else if data_source == DATASTORE_INFLUX && strings.Contains(err.Error(), "partial write: max-values-per-tag limit exceeded") {
				log.Errorf("Partial write into Influx for [%s:%s], check / increase the max-values-per-tag in InfluxDB config: %v",
					msg[0].DBUniqueName, msg[0].MetricName, err)
				atomic.AddUint64(&totalMetricsDroppedCounter, 1)
			} else {
				return err // still gone, retry later
			}
		}
		retry_queue.Remove(retry_queue.Back())
		iterations_done++
		if limit > 0 && limit == iterations_done {
			return nil
		}
	}

	return nil
}
// MetricsBatcher coalesces individually fetched metric sets from
// buffered_storage_ch into larger batches on storage_ch, flushing either when
// the accumulated datapoint count exceeds maxBatchSize or when
// batchingMaxDelayMillis has elapsed (ticker). Runs forever; a non-positive
// delay is a fatal configuration error.
func MetricsBatcher(data_store string, batchingMaxDelayMillis int64, buffered_storage_ch <-chan []MetricStoreMessage, storage_ch chan<- []MetricStoreMessage) {
	if batchingMaxDelayMillis <= 0 {
		// BUGFIX: Fatalf was previously called without a format verb for the value,
		// producing "%!(EXTRA int64=...)" output
		log.Fatalf("Check --batching-delay-ms, zero/negative batching delay: %d", batchingMaxDelayMillis)
	}
	var datapointCounter int = 0
	var maxBatchSize int = 1000            // flush on maxBatchSize metric points or batchingMaxDelayMillis passed
	batch := make([]MetricStoreMessage, 0) // no size limit here as limited in persister already
	ticker := time.NewTicker(time.Millisecond * time.Duration(batchingMaxDelayMillis))

	for {
		select {
		case <-ticker.C:
			if len(batch) > 0 {
				// copy so the receiver owns an independent slice
				flushed := make([]MetricStoreMessage, len(batch))
				copy(flushed, batch)
				log.Debugf("Flushing %d metric datasets due to batching timeout", len(batch))
				storage_ch <- flushed
				batch = make([]MetricStoreMessage, 0)
				datapointCounter = 0
			}
		case msg := <-buffered_storage_ch:
			for _, m := range msg { // in reality msg are sent by fetchers one by one though
				batch = append(batch, m)
				datapointCounter += len(m.Data)
				if datapointCounter > maxBatchSize { // flush. also set some last_sent_timestamp so that ticker would pass a round?
					flushed := make([]MetricStoreMessage, len(batch))
					copy(flushed, batch)
					log.Debugf("Flushing %d metric datasets due to maxBatchSize limit of %d datapoints", len(batch), maxBatchSize)
					storage_ch <- flushed
					batch = make([]MetricStoreMessage, 0)
					datapointCounter = 0
				}
			}
		}
	}
}
// WriteMetricsToJsonFile appends the given metric sets to a JSON-lines file at
// jsonPath (one JSON object per metric set, file created if needed). Optional
// real-dbname / system-identifier fields are included when globally enabled.
// Bumps the datastore failure counter and returns on the first error.
func WriteMetricsToJsonFile(msgArr []MetricStoreMessage, jsonPath string) error {
	if len(msgArr) == 0 {
		return nil
	}

	jsonOutFile, err := os.OpenFile(jsonPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0640)
	if err != nil {
		atomic.AddUint64(&datastoreWriteFailuresCounter, 1)
		return err
	}
	defer jsonOutFile.Close()

	log.Infof("Writing %d metric sets to JSON file at \"%s\"...", len(msgArr), jsonPath)
	encoder := json.NewEncoder(jsonOutFile)
	for _, msg := range msgArr {
		row := map[string]interface{}{
			"metric":      msg.MetricName,
			"data":        msg.Data,
			"dbname":      msg.DBUniqueName,
			"custom_tags": msg.CustomTags,
		}
		if addRealDbname && msg.RealDbname != "" {
			row[opts.RealDbnameField] = msg.RealDbname
		}
		if addSystemIdentifier && msg.SystemIdentifier != "" {
			row[opts.SystemIdentifierField] = msg.SystemIdentifier
		}
		if encErr := encoder.Encode(row); encErr != nil {
			atomic.AddUint64(&datastoreWriteFailuresCounter, 1)
			return encErr
		}
	}
	return nil
}
// MetricsPersister is the main storage writer loop: it receives metric batches
// from storage_ch and writes them to the configured datastore (Influx /
// Postgres / Graphite / JSON file). Per-datastore-instance retry queues
// (sized by influx_host_count, which is 1 for non-Influx stores) buffer
// batches while a store is erroring; queues are drained when the channel is
// idle, oldest entries are dropped once PERSIST_QUEUE_MAX_SIZE is reached,
// and known-permanent Influx parse errors are dropped instead of queued.
// Runs forever.
func MetricsPersister(data_store string, storage_ch <-chan []MetricStoreMessage) {
	var last_try = make([]time.Time, influx_host_count)          // if Influx errors out, don't retry before 10s
	var last_drop_warning = make([]time.Time, influx_host_count) // log metric points drops every 10s to not overflow logs in case Influx is down for longer
	var retry_queues = make([]*list.List, influx_host_count)     // separate queues for all Influx hosts
	var in_error = make([]bool, influx_host_count)
	var err error

	for i := 0; i < influx_host_count; i++ {
		retry_queues[i] = list.New()
	}

	for {
		select {
		case msg_arr := <-storage_ch:

			for i, retry_queue := range retry_queues {

				retry_queue_length := retry_queue.Len()

				if retry_queue_length > 0 { // datastore already in trouble: just queue, don't attempt a write
					if retry_queue_length == PERSIST_QUEUE_MAX_SIZE {
						// queue full: evict the oldest batch to make room
						dropped_msgs := retry_queue.Remove(retry_queue.Back())
						datasets_dropped := len(dropped_msgs.([]MetricStoreMessage))
						datapoints_dropped := 0
						for _, msg := range dropped_msgs.([]MetricStoreMessage) {
							datapoints_dropped += len(msg.Data)
						}
						atomic.AddUint64(&totalMetricsDroppedCounter, uint64(datapoints_dropped))
						// rate-limit the drop warning to once per 10s
						if last_drop_warning[i].IsZero() || last_drop_warning[i].Before(time.Now().Add(time.Second*-10)) {
							log.Warningf("Dropped %d oldest data sets with %d data points from queue %d as PERSIST_QUEUE_MAX_SIZE = %d exceeded",
								datasets_dropped, datapoints_dropped, i, PERSIST_QUEUE_MAX_SIZE)
							last_drop_warning[i] = time.Now()
						}
					}
					retry_queue.PushFront(msg_arr)
				} else { // healthy path: write straight to the datastore
					if data_store == DATASTORE_INFLUX {
						err = SendToInflux(InfluxConnectStrings[i], strconv.Itoa(i), msg_arr)
					} else if data_store == DATASTORE_POSTGRES {
						err = SendToPostgres(msg_arr)
						if err != nil && strings.Contains(err.Error(), "does not exist") {
							// in case data was cleaned by user externally
							log.Warning("re-initializing metric partition cache due to possible external data cleanup...")
							partitionMapMetric = make(map[string]ExistingPartitionInfo)
							partitionMapMetricDbname = make(map[string]map[string]ExistingPartitionInfo)
						}
					} else if data_store == DATASTORE_GRAPHITE {
						for _, m := range msg_arr {
							err = SendToGraphite(m.DBUniqueName, m.MetricName, m.Data) // TODO does Graphite library support batching?
							if err != nil {
								atomic.AddUint64(&datastoreWriteFailuresCounter, 1)
							}
						}
					} else if data_store == DATASTORE_JSON {
						err = WriteMetricsToJsonFile(msg_arr, opts.JsonStorageFile)
					} else {
						log.Fatal("Invalid datastore:", data_store)
					}
					last_try[i] = time.Now()

					if err != nil {
						if opts.Datastore == DATASTORE_INFLUX {
							// permanent Influx data errors are logged and dropped, not queued
							if strings.Contains(err.Error(), "unable to parse") { // TODO move to a separate func
								if len(msg_arr) == 1 {
									log.Errorf("Dropping metric [%s:%s] as Influx is unable to parse the data: %s",
										msg_arr[0].DBUniqueName, msg_arr[0].MetricName, msg_arr[0].Data) // ignore data points consisting of anything else than strings and floats
								} else {
									log.Errorf("Dropping %d metric-sets as Influx is unable to parse the data: %s", len(msg_arr), err)
									// TODO loop over single metrics in case of errors?
								}
							} else if strings.Contains(err.Error(), "partial write: max-values-per-tag limit exceeded") {
								if len(msg_arr) == 1 {
									log.Errorf("Partial write into Influx for [%s:%s], check / increase the max-values-per-tag in InfluxDB config: %v",
										msg_arr[0].DBUniqueName, msg_arr[0].MetricName, err)
								} else {
									log.Errorf("Partial write into Influx, check / increase the max-values-per-tag in InfluxDB config: %v", err)
								}
							}
						} else {
							// transient failure: mark erroring and queue for retry
							log.Errorf("Failed to write into datastore %d: %s", i, err)
							in_error[i] = true
							retry_queue.PushFront(msg_arr)
						}
					}
				}
			}
		default:
			// channel idle: try to drain retry queues (respecting a 10s backoff
			// per erroring datastore instance)
			for i, retry_queue := range retry_queues {
				if retry_queue.Len() > 0 && (!in_error[i] || last_try[i].Before(time.Now().Add(time.Second*-10))) {
					err := ProcessRetryQueue(data_store, InfluxConnectStrings[i], strconv.Itoa(i), retry_queue, 100)
					if err != nil {
						log.Error("Error processing retry queue", i, ":", err)
						in_error[i] = true
					} else {
						in_error[i] = false
					}
					last_try[i] = time.Now()
				} else {
					time.Sleep(time.Millisecond * 100) // nothing in queue nor in channel
				}
			}
		}
	}
}
// DBGetPGVersion determines version and capability info for one monitored DB:
// server version, recovery state, real DB name, superuser status, system
// identifier (PG 10+, when addSystemIdentifier is set) and installed
// extensions. Results are cached per dbUnique in db_pg_version_map for 2
// minutes; noCache=true forces a re-fetch. dbType selects the probing
// strategy (plain Postgres vs. pgbouncer vs. pgpool).
func DBGetPGVersion(dbUnique string, dbType string, noCache bool) (DBVersionMapEntry, error) {
	var ver DBVersionMapEntry    // previously cached entry (zero value on cache miss)
	var verNew DBVersionMapEntry // freshly determined entry being built
	var ok bool
	// extracts e.g. "12.1" / "9.6" from server_version, tolerating beta/devel suffixes
	sql := `
		select /* pgwatch2_generated */ (regexp_matches(
			regexp_replace(current_setting('server_version'), '(beta|devel).*', '', 'g'),
			E'\\d+\\.?\\d+?')
		)[1]::text as ver, pg_is_in_recovery(), current_database()::text;
		`
	sql_sysid := `select /* pgwatch2_generated */ system_identifier::text from pg_control_system();`
	// "superuser" here also covers membership in the AWS RDS 'rds_superuser' role
	sql_su := `select /* pgwatch2_generated */ rolsuper or exists (
		select * from pg_catalog.pg_auth_members m
		join pg_catalog.pg_roles b on (m.roleid = b.oid)
		where m.member = r.oid and b.rolname = 'rds_superuser') as rolsuper
		from pg_roles r where rolname = session_user;`
	sql_extensions := `select /* pgwatch2_generated */ extname::text, (regexp_matches(extversion, $$\d+\.?\d+?$$))[1]::text as extversion from pg_extension order by 1;`
	pgpool_version := `SHOW POOL_VERSION` // supported from pgpool2 v3.0

	db_pg_version_map_lock.RLock()
	get_ver_lock, ok := db_get_pg_version_map_lock[dbUnique]
	if !ok {
		// the per-DB mutex is expected to be created on host registration; a miss is a programming error
		log.Fatal("db_get_pg_version_map_lock uninitialized")
	}
	ver, ok = db_pg_version_map[dbUnique]
	db_pg_version_map_lock.RUnlock()

	if !noCache && ok && ver.LastCheckedOn.After(time.Now().Add(time.Minute*-2)) { // use cached version for 2 min
		//log.Debugf("using cached postgres version %s for %s", ver.Version.String(), dbUnique)
		return ver, nil
	} else {
		get_ver_lock.Lock() // limit to 1 concurrent version info fetch per DB
		defer get_ver_lock.Unlock()
		log.Debugf("[%s][%s] determining DB version and recovery status...", dbUnique, dbType)

		if verNew.Extensions == nil {
			verNew.Extensions = make(map[string]decimal.Decimal)
		}

		if dbType == DBTYPE_BOUNCER {
			data, err, _ := DBExecReadByDbUniqueName(dbUnique, "", false, 0, "show version")
			if err != nil {
				return verNew, err
			}
			if len(data) == 0 {
				// surprisingly pgbouncer 'show version' outputs in pre v1.12 is emitted as 'NOTICE' which cannot be accessed from Go lib/pg
				verNew.Version, _ = decimal.NewFromString("0")
				verNew.VersionStr = "0"
			} else {
				matches := rBouncerAndPgpoolVerMatch.FindStringSubmatch(data[0]["version"].(string))
				if len(matches) != 1 {
					log.Errorf("Unexpected PgBouncer version input: %s", data[0]["version"].(string))
					return ver, fmt.Errorf("Unexpected PgBouncer version input: %s", data[0]["version"].(string))
				}
				verNew.VersionStr = matches[0]
				verNew.Version, _ = decimal.NewFromString(matches[0])
			}
		} else if dbType == DBTYPE_PGPOOL {
			data, err, _ := DBExecReadByDbUniqueName(dbUnique, "", false, 0, pgpool_version)
			if err != nil {
				return verNew, err
			}
			if len(data) == 0 {
				// fall back to the minimum version supporting SHOW POOL_VERSION
				verNew.Version, _ = decimal.NewFromString("3.0")
				verNew.VersionStr = "3.0"
			} else {
				matches := rBouncerAndPgpoolVerMatch.FindStringSubmatch(string(data[0]["pool_version"].([]byte)))
				if len(matches) != 1 {
					log.Errorf("Unexpected PgPool version input: %s", data[0]["pool_version"].([]byte))
					return ver, fmt.Errorf("Unexpected PgPool version input: %s", data[0]["pool_version"].([]byte))
				}
				verNew.VersionStr = matches[0]
				verNew.Version, _ = decimal.NewFromString(matches[0])
			}
		} else {
			data, err, _ := DBExecReadByDbUniqueName(dbUnique, "", useConnPooling, 0, sql)
			if err != nil {
				if noCache {
					return ver, err
				} else {
					// NOTE(review): on a cache miss this returns a zero-value entry with a nil
					// error, not actually an "old cached value" — confirm callers tolerate that
					log.Info("DBGetPGVersion failed, using old cached value", err)
					return ver, nil
				}
			}
			verNew.Version, _ = decimal.NewFromString(data[0]["ver"].(string))
			verNew.VersionStr = data[0]["ver"].(string)
			verNew.IsInRecovery = data[0]["pg_is_in_recovery"].(bool)
			verNew.RealDbname = data[0]["current_database"].(string)

			if verNew.Version.GreaterThanOrEqual(decimal.NewFromFloat(10)) && addSystemIdentifier {
				log.Debugf("[%s] determining system identifier version (pg ver: %v)", dbUnique, verNew.VersionStr)
				data, err, _ := DBExecReadByDbUniqueName(dbUnique, "", useConnPooling, 0, sql_sysid)
				if err == nil && len(data) > 0 {
					verNew.SystemIdentifier = data[0]["system_identifier"].(string)
				}
			}

			log.Debugf("[%s] determining if monitoring user is a superuser...", dbUnique)
			data, err, _ = DBExecReadByDbUniqueName(dbUnique, "", useConnPooling, 0, sql_su)
			if err == nil {
				verNew.IsSuperuser = data[0]["rolsuper"].(bool)
			}
			log.Debugf("[%s] superuser=%v", dbUnique, verNew.IsSuperuser)

			if verNew.Version.GreaterThanOrEqual(MinExtensionInfoAvailable) {
				//log.Debugf("[%s] determining installed extensions info...", dbUnique)
				data, err, _ = DBExecReadByDbUniqueName(dbUnique, "", useConnPooling, 0, sql_extensions)
				if err != nil {
					log.Errorf("[%s] failed to determine installed extensions info: %v", dbUnique, err)
				} else {
					for _, dr := range data {
						extver, err := decimal.NewFromString(dr["extversion"].(string))
						if err != nil {
							log.Errorf("[%s] failed to determine extension version info for extension %s: %v", dbUnique, dr["extname"], err)
							continue
						}
						verNew.Extensions[dr["extname"].(string)] = extver
					}
					log.Debugf("[%s] installed extensions: %+v", dbUnique, verNew.Extensions)
				}
			}
		}

		verNew.LastCheckedOn = time.Now()
		db_pg_version_map_lock.Lock()
		db_pg_version_map[dbUnique] = verNew
		db_pg_version_map_lock.Unlock()
	}
	return verNew, nil
}
// Decimal implements sort.Interface for a slice of decimal.Decimal values,
// since Go's sort package has no built-in support for Numeric/Decimal types.
type Decimal []decimal.Decimal

// Len reports the number of elements in the slice.
func (a Decimal) Len() int {
	return len(a)
}

// Swap exchanges the elements at indexes i and j in place.
func (a Decimal) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}

// Less orders elements ascending by numeric value.
func (a Decimal) Less(i, j int) bool {
	return a[i].LessThan(a[j])
}
// GetMetricVersionProperties resolves which metric definition to use for the
// server described by vme: the highest defined SQL version that is still <=
// the server version (assumes upwards compatibility for metric definitions).
// If metricDefMap is nil, a deep-copied snapshot of the global metric_def_map
// is taken under its read lock. Extension-version based overrides declared in
// MetricAttrs may substitute the SQL text from another metric.
func GetMetricVersionProperties(metric string, vme DBVersionMapEntry, metricDefMap map[string]map[decimal.Decimal]MetricVersionProperties) (MetricVersionProperties, error) {
	var keys []decimal.Decimal
	var mdm map[string]map[decimal.Decimal]MetricVersionProperties

	if metricDefMap != nil {
		mdm = metricDefMap
	} else {
		metric_def_map_lock.RLock()
		mdm = deepCopyMetricDefinitionMap(metric_def_map) // copy of global cache
		metric_def_map_lock.RUnlock()
	}

	_, ok := mdm[metric]
	if !ok || len(mdm[metric]) == 0 {
		log.Debug("metric", metric, "not found")
		return MetricVersionProperties{}, errors.New("metric SQL not found")
	}

	for k := range mdm[metric] {
		keys = append(keys, k)
	}
	sort.Sort(Decimal(keys))

	var best_ver decimal.Decimal
	var min_ver decimal.Decimal
	var found bool
	// scan versions ascending: best_ver ends as the newest definition not
	// exceeding the server version, min_ver as the oldest defined version
	for _, ver := range keys {
		if vme.Version.GreaterThanOrEqual(ver) {
			best_ver = ver
			found = true
		}
		if min_ver.IsZero() || ver.LessThan(min_ver) {
			min_ver = ver
		}
	}

	if !found {
		if vme.Version.LessThan(min_ver) { // metric not yet available for given PG ver
			return MetricVersionProperties{}, fmt.Errorf("no suitable SQL found for metric \"%s\", server version \"%s\" too old. min defined SQL ver: %s", metric, vme.VersionStr, min_ver.String())
		}
		return MetricVersionProperties{}, fmt.Errorf("no suitable SQL found for metric \"%s\", version \"%s\"", metric, vme.VersionStr)
	}

	ret := mdm[metric][best_ver]

	// check if SQL def. override defined for some specific extension version and replace the metric SQL-s if so
	if ret.MetricAttrs.ExtensionVersionOverrides != nil && len(ret.MetricAttrs.ExtensionVersionOverrides) > 0 {
		if vme.Extensions != nil && len(vme.Extensions) > 0 {
			log.Debugf("[%s] extension version based override request found: %+v", metric, ret.MetricAttrs.ExtensionVersionOverrides)
			for _, extOverride := range ret.MetricAttrs.ExtensionVersionOverrides {
				var matching = true
				for _, extVer := range extOverride.ExpectedExtensionVersions { // "natural" sorting of metric definition assumed
					installedExtVer, ok := vme.Extensions[extVer.ExtName]
					if !ok || !installedExtVer.GreaterThanOrEqual(extVer.ExtMinVersion) {
						matching = false
					}
				}
				if matching { // all defined extensions / versions (if many) need to match
					_, ok := mdm[extOverride.TargetMetric]
					if !ok {
						log.Warningf("extension based override metric not found for metric %s. substitute metric name: %s", metric, extOverride.TargetMetric)
						continue
					}
					// recursive lookup resolves the substitute metric's own best version
					mvp, err := GetMetricVersionProperties(extOverride.TargetMetric, vme, mdm)
					if err != nil {
						log.Warningf("undefined extension based override for metric %s, substitute metric name: %s, version: %s not found", metric, extOverride.TargetMetric, best_ver)
						continue
					}
					log.Debugf("overriding metric %s based on the extension_version_based_overrides metric attribute with %s:%s", metric, extOverride.TargetMetric, best_ver)
					if mvp.Sql != "" {
						ret.Sql = mvp.Sql
					}
					if mvp.SqlSU != "" {
						ret.SqlSU = mvp.SqlSU
					}
				}
			}
		}
	}
	return ret, nil
}
// DetectSprocChanges diffs the md5 hashes of stored procedure definitions
// (the "sproc_hashes" metric) against the previous snapshot in
// host_state["sproc_hashes"] and sends detected create/alter/drop events to
// storage_ch. On the very first run only the baseline is recorded and nothing
// is reported. Returns per-category change counts.
func DetectSprocChanges(dbUnique string, vme DBVersionMapEntry, storage_ch chan<- []MetricStoreMessage, host_state map[string]map[string]string) ChangeDetectionResults {
	detected_changes := make([](map[string]interface{}), 0)
	var first_run bool
	var change_counts ChangeDetectionResults

	log.Debugf("[%s][%s] checking for sproc changes...", dbUnique, SPECIAL_METRIC_CHANGE_EVENTS)
	if _, ok := host_state["sproc_hashes"]; !ok {
		first_run = true
		host_state["sproc_hashes"] = make(map[string]string)
	}

	mvp, err := GetMetricVersionProperties("sproc_hashes", vme, nil)
	if err != nil {
		log.Error("could not get sproc_hashes sql:", err)
		return change_counts
	}

	data, err, _ := DBExecReadByDbUniqueName(dbUnique, "sproc_hashes", useConnPooling, mvp.MetricAttrs.StatementTimeoutSeconds, mvp.Sql)
	if err != nil {
		log.Error("could not read sproc_hashes from monitored host: ", dbUnique, ", err:", err)
		return change_counts
	}

	for _, dr := range data {
		// identity is name + oid so recreated / overloaded sprocs stay distinguishable
		obj_ident := dr["tag_sproc"].(string) + ":" + dr["tag_oid"].(string)

		prev_hash, ok := host_state["sproc_hashes"][obj_ident]
		if ok { // we have existing state
			if prev_hash != dr["md5"].(string) {
				log.Info("detected change in sproc:", dr["tag_sproc"], ", oid:", dr["tag_oid"])
				dr["event"] = "alter"
				detected_changes = append(detected_changes, dr)
				host_state["sproc_hashes"][obj_ident] = dr["md5"].(string)
				change_counts.Altered += 1
			}
		} else { // check for new / delete
			if !first_run {
				log.Info("detected new sproc:", dr["tag_sproc"], ", oid:", dr["tag_oid"])
				dr["event"] = "create"
				detected_changes = append(detected_changes, dr)
				change_counts.Created += 1
			}
			host_state["sproc_hashes"][obj_ident] = dr["md5"].(string)
		}
	}
	// detect deletes
	if !first_run && len(host_state["sproc_hashes"]) != len(data) {
		deleted_sprocs := make([]string, 0)
		// turn resultset to map => [oid]=true for faster checks
		current_oid_map := make(map[string]bool)
		for _, dr := range data {
			current_oid_map[dr["tag_sproc"].(string)+":"+dr["tag_oid"].(string)] = true
		}
		for sproc_ident := range host_state["sproc_hashes"] {
			_, ok := current_oid_map[sproc_ident]
			if !ok {
				splits := strings.Split(sproc_ident, ":")
				log.Info("detected delete of sproc:", splits[0], ", oid:", splits[1])
				// synthesize a drop event row; reuse the batch timestamp when available
				influx_entry := make(map[string]interface{})
				influx_entry["event"] = "drop"
				influx_entry["tag_sproc"] = splits[0]
				influx_entry["tag_oid"] = splits[1]
				if len(data) > 0 {
					influx_entry["epoch_ns"] = data[0]["epoch_ns"]
				} else {
					influx_entry["epoch_ns"] = time.Now().UnixNano()
				}
				detected_changes = append(detected_changes, influx_entry)
				deleted_sprocs = append(deleted_sprocs, sproc_ident)
				change_counts.Dropped += 1
			}
		}
		for _, deleted_sproc := range deleted_sprocs {
			delete(host_state["sproc_hashes"], deleted_sproc)
		}
	}
	log.Debugf("[%s][%s] detected %d sproc changes", dbUnique, SPECIAL_METRIC_CHANGE_EVENTS, len(detected_changes))
	if len(detected_changes) > 0 {
		md, _ := GetMonitoredDatabaseByUniqueName(dbUnique)
		storage_ch <- []MetricStoreMessage{MetricStoreMessage{DBUniqueName: dbUnique, MetricName: "sproc_changes", Data: detected_changes, CustomTags: md.CustomTags}}
	} else if opts.Datastore == DATASTORE_POSTGRES && first_run {
		EnsureMetricDummy("sproc_changes")
	}
	return change_counts
}
// DetectTableChanges diffs the md5 hashes of table/view DDL (the
// "table_hashes" metric) against the previous snapshot in
// host_state["table_hashes"] and sends detected create/alter/drop events to
// storage_ch. First run only records the baseline. Returns change counts.
func DetectTableChanges(dbUnique string, vme DBVersionMapEntry, storage_ch chan<- []MetricStoreMessage, host_state map[string]map[string]string) ChangeDetectionResults {
	detected_changes := make([](map[string]interface{}), 0)
	var first_run bool
	var change_counts ChangeDetectionResults

	log.Debugf("[%s][%s] checking for table changes...", dbUnique, SPECIAL_METRIC_CHANGE_EVENTS)
	if _, ok := host_state["table_hashes"]; !ok {
		first_run = true
		host_state["table_hashes"] = make(map[string]string)
	}

	mvp, err := GetMetricVersionProperties("table_hashes", vme, nil)
	if err != nil {
		log.Error("could not get table_hashes sql:", err)
		return change_counts
	}

	data, err, _ := DBExecReadByDbUniqueName(dbUnique, "table_hashes", useConnPooling, mvp.MetricAttrs.StatementTimeoutSeconds, mvp.Sql)
	if err != nil {
		log.Error("could not read table_hashes from monitored host:", dbUnique, ", err:", err)
		return change_counts
	}

	for _, dr := range data {
		obj_ident := dr["tag_table"].(string)

		prev_hash, ok := host_state["table_hashes"][obj_ident]
		//log.Debug("inspecting table:", obj_ident, "hash:", prev_hash)
		if ok { // we have existing state
			if prev_hash != dr["md5"].(string) {
				log.Info("detected DDL change in table:", dr["tag_table"])
				dr["event"] = "alter"
				detected_changes = append(detected_changes, dr)
				host_state["table_hashes"][obj_ident] = dr["md5"].(string)
				change_counts.Altered += 1
			}
		} else { // check for new / delete
			if !first_run {
				log.Info("detected new table:", dr["tag_table"])
				dr["event"] = "create"
				detected_changes = append(detected_changes, dr)
				change_counts.Created += 1
			}
			host_state["table_hashes"][obj_ident] = dr["md5"].(string)
		}
	}
	// detect deletes
	if !first_run && len(host_state["table_hashes"]) != len(data) {
		deleted_tables := make([]string, 0)
		// turn resultset to map => [table]=true for faster checks
		current_table_map := make(map[string]bool)
		for _, dr := range data {
			current_table_map[dr["tag_table"].(string)] = true
		}
		for table := range host_state["table_hashes"] {
			_, ok := current_table_map[table]
			if !ok {
				log.Info("detected drop of table:", table)
				// synthesize a drop event row; reuse the batch timestamp when available
				influx_entry := make(map[string]interface{})
				influx_entry["event"] = "drop"
				influx_entry["tag_table"] = table
				if len(data) > 0 {
					influx_entry["epoch_ns"] = data[0]["epoch_ns"]
				} else {
					influx_entry["epoch_ns"] = time.Now().UnixNano()
				}
				detected_changes = append(detected_changes, influx_entry)
				deleted_tables = append(deleted_tables, table)
				change_counts.Dropped += 1
			}
		}
		for _, deleted_table := range deleted_tables {
			delete(host_state["table_hashes"], deleted_table)
		}
	}
	log.Debugf("[%s][%s] detected %d table changes", dbUnique, SPECIAL_METRIC_CHANGE_EVENTS, len(detected_changes))
	if len(detected_changes) > 0 {
		md, _ := GetMonitoredDatabaseByUniqueName(dbUnique)
		storage_ch <- []MetricStoreMessage{MetricStoreMessage{DBUniqueName: dbUnique, MetricName: "table_changes", Data: detected_changes, CustomTags: md.CustomTags}}
	} else if opts.Datastore == DATASTORE_POSTGRES && first_run {
		EnsureMetricDummy("table_changes")
	}
	return change_counts
}
// DetectIndexChanges diffs index definitions (the "index_hashes" metric)
// against the previous snapshot in host_state["index_hashes"] and sends
// detected create/alter/drop events to storage_ch. The stored state is
// md5 + is_valid concatenated, so a validity flip (e.g. failed CONCURRENTLY
// build) also registers as an alter. First run only records the baseline.
func DetectIndexChanges(dbUnique string, vme DBVersionMapEntry, storage_ch chan<- []MetricStoreMessage, host_state map[string]map[string]string) ChangeDetectionResults {
	detected_changes := make([](map[string]interface{}), 0)
	var first_run bool
	var change_counts ChangeDetectionResults

	log.Debugf("[%s][%s] checking for index changes...", dbUnique, SPECIAL_METRIC_CHANGE_EVENTS)
	if _, ok := host_state["index_hashes"]; !ok {
		first_run = true
		host_state["index_hashes"] = make(map[string]string)
	}

	mvp, err := GetMetricVersionProperties("index_hashes", vme, nil)
	if err != nil {
		log.Error("could not get index_hashes sql:", err)
		return change_counts
	}

	data, err, _ := DBExecReadByDbUniqueName(dbUnique, "index_hashes", useConnPooling, mvp.MetricAttrs.StatementTimeoutSeconds, mvp.Sql)
	if err != nil {
		log.Error("could not read index_hashes from monitored host:", dbUnique, ", err:", err)
		return change_counts
	}

	for _, dr := range data {
		obj_ident := dr["tag_index"].(string)

		prev_hash, ok := host_state["index_hashes"][obj_ident]
		if ok { // we have existing state
			if prev_hash != (dr["md5"].(string) + dr["is_valid"].(string)) {
				log.Info("detected index change:", dr["tag_index"], ", table:", dr["table"])
				dr["event"] = "alter"
				detected_changes = append(detected_changes, dr)
				host_state["index_hashes"][obj_ident] = dr["md5"].(string) + dr["is_valid"].(string)
				change_counts.Altered += 1
			}
		} else { // check for new / delete
			if !first_run {
				log.Info("detected new index:", dr["tag_index"])
				dr["event"] = "create"
				detected_changes = append(detected_changes, dr)
				change_counts.Created += 1
			}
			host_state["index_hashes"][obj_ident] = dr["md5"].(string) + dr["is_valid"].(string)
		}
	}
	// detect deletes
	if !first_run && len(host_state["index_hashes"]) != len(data) {
		deleted_indexes := make([]string, 0)
		// turn resultset to map => [table]=true for faster checks
		current_index_map := make(map[string]bool)
		for _, dr := range data {
			current_index_map[dr["tag_index"].(string)] = true
		}
		for index_name := range host_state["index_hashes"] {
			_, ok := current_index_map[index_name]
			if !ok {
				log.Info("detected drop of index_name:", index_name)
				// synthesize a drop event row; reuse the batch timestamp when available
				influx_entry := make(map[string]interface{})
				influx_entry["event"] = "drop"
				influx_entry["tag_index"] = index_name
				if len(data) > 0 {
					influx_entry["epoch_ns"] = data[0]["epoch_ns"]
				} else {
					influx_entry["epoch_ns"] = time.Now().UnixNano()
				}
				detected_changes = append(detected_changes, influx_entry)
				deleted_indexes = append(deleted_indexes, index_name)
				change_counts.Dropped += 1
			}
		}
		for _, deleted_index := range deleted_indexes {
			delete(host_state["index_hashes"], deleted_index)
		}
	}
	log.Debugf("[%s][%s] detected %d index changes", dbUnique, SPECIAL_METRIC_CHANGE_EVENTS, len(detected_changes))
	if len(detected_changes) > 0 {
		md, _ := GetMonitoredDatabaseByUniqueName(dbUnique)
		storage_ch <- []MetricStoreMessage{MetricStoreMessage{DBUniqueName: dbUnique, MetricName: "index_changes", Data: detected_changes, CustomTags: md.CustomTags}}
	} else if opts.Datastore == DATASTORE_POSTGRES && first_run {
		EnsureMetricDummy("index_changes")
	}
	return change_counts
}
// DetectPrivilegeChanges tracks the set of object privileges (the
// "privilege_changes" metric: object_type / tag_role / tag_object /
// privilege_type rows) against the previous snapshot in
// host_state["object_privileges"] and emits GRANT / REVOKE events to
// storage_ch. First run only records the baseline. Returns change counts.
func DetectPrivilegeChanges(dbUnique string, vme DBVersionMapEntry, storage_ch chan<- []MetricStoreMessage, host_state map[string]map[string]string) ChangeDetectionResults {
	detected_changes := make([](map[string]interface{}), 0)
	var first_run bool
	var change_counts ChangeDetectionResults

	log.Debugf("[%s][%s] checking object privilege changes...", dbUnique, SPECIAL_METRIC_CHANGE_EVENTS)
	if _, ok := host_state["object_privileges"]; !ok {
		first_run = true
		host_state["object_privileges"] = make(map[string]string)
	}

	mvp, err := GetMetricVersionProperties("privilege_changes", vme, nil)
	if err != nil || mvp.Sql == "" {
		log.Warningf("[%s][%s] could not get SQL for 'privilege_changes'. cannot detect privilege changes", dbUnique, SPECIAL_METRIC_CHANGE_EVENTS)
		return change_counts
	}

	// returns rows of: object_type, tag_role, tag_object, privilege_type
	data, err, _ := DBExecReadByDbUniqueName(dbUnique, "privilege_changes", useConnPooling, mvp.MetricAttrs.StatementTimeoutSeconds, mvp.Sql)
	if err != nil {
		log.Errorf("[%s][%s] failed to fetch object privileges info: %v", dbUnique, SPECIAL_METRIC_CHANGE_EVENTS, err)
		return change_counts
	}

	current_state := make(map[string]bool)
	for _, dr := range data {
		// composite identity with "#:#" separator to keep the 4 parts splittable
		obj_ident := fmt.Sprintf("%s#:#%s#:#%s#:#%s", dr["object_type"], dr["tag_role"], dr["tag_object"], dr["privilege_type"])
		if first_run {
			host_state["object_privileges"][obj_ident] = ""
		} else {
			_, ok := host_state["object_privileges"][obj_ident]
			if !ok {
				log.Infof("[%s][%s] detected new object privileges: role=%s, object_type=%s, object=%s, privilege_type=%s",
					dbUnique, SPECIAL_METRIC_CHANGE_EVENTS, dr["tag_role"], dr["object_type"], dr["tag_object"], dr["privilege_type"])
				dr["event"] = "GRANT"
				detected_changes = append(detected_changes, dr)
				change_counts.Created += 1
				host_state["object_privileges"][obj_ident] = ""
			}
			current_state[obj_ident] = true
		}
	}
	// check revokes - exists in old state only
	// (len(current_state) > 0 implies data is non-empty, so data[0] below is safe)
	if !first_run && len(current_state) > 0 {
		for obj_prev_run := range host_state["object_privileges"] {
			if _, ok := current_state[obj_prev_run]; !ok {
				splits := strings.Split(obj_prev_run, "#:#")
				log.Infof("[%s][%s] detected removed object privileges: role=%s, object_type=%s, object=%s, privilege_type=%s",
					dbUnique, SPECIAL_METRIC_CHANGE_EVENTS, splits[1], splits[0], splits[2], splits[3])
				revoke_entry := make(map[string]interface{})
				if epoch_ns, ok := data[0]["epoch_ns"]; ok {
					revoke_entry["epoch_ns"] = epoch_ns
				} else {
					revoke_entry["epoch_ns"] = time.Now().UnixNano()
				}
				revoke_entry["object_type"] = splits[0]
				revoke_entry["tag_role"] = splits[1]
				revoke_entry["tag_object"] = splits[2]
				revoke_entry["privilege_type"] = splits[3]
				revoke_entry["event"] = "REVOKE"
				detected_changes = append(detected_changes, revoke_entry)
				change_counts.Dropped += 1
				delete(host_state["object_privileges"], obj_prev_run)
			}
		}
	}

	if opts.Datastore == DATASTORE_POSTGRES && first_run {
		EnsureMetricDummy("privilege_changes")
	}
	log.Debugf("[%s][%s] detected %d object privilege changes...", dbUnique, SPECIAL_METRIC_CHANGE_EVENTS, len(detected_changes))
	if len(detected_changes) > 0 {
		md, _ := GetMonitoredDatabaseByUniqueName(dbUnique)
		storage_ch <- []MetricStoreMessage{MetricStoreMessage{DBUniqueName: dbUnique, MetricName: "privilege_changes", Data: detected_changes, CustomTags: md.CustomTags}}
	}
	return change_counts
}
// DetectConfigurationChanges diffs server setting values (the
// "configuration_hashes" metric) against the previous snapshot in
// host_state["configuration_hashes"] and sends detected create/alter events
// to storage_ch. Deletes are intentionally not tracked (settings disappear
// only on pg_upgrade). First run only records the baseline.
// Returns per-category change counts.
//
// Fixes vs. previous revision: the GetMetricVersionProperties error message
// wrongly referred to "index_hashes" (copy-paste), and EnsureMetricDummy was
// called on every no-change run instead of only on the first run as all
// sibling Detect*Changes functions do.
func DetectConfigurationChanges(dbUnique string, vme DBVersionMapEntry, storage_ch chan<- []MetricStoreMessage, host_state map[string]map[string]string) ChangeDetectionResults {
	detected_changes := make([](map[string]interface{}), 0)
	var first_run bool
	var change_counts ChangeDetectionResults

	log.Debugf("[%s][%s] checking for configuration changes...", dbUnique, SPECIAL_METRIC_CHANGE_EVENTS)
	if _, ok := host_state["configuration_hashes"]; !ok {
		first_run = true
		host_state["configuration_hashes"] = make(map[string]string)
	}

	mvp, err := GetMetricVersionProperties("configuration_hashes", vme, nil)
	if err != nil {
		log.Error("could not get configuration_hashes sql:", err)
		return change_counts
	}

	data, err, _ := DBExecReadByDbUniqueName(dbUnique, "configuration_hashes", useConnPooling, mvp.MetricAttrs.StatementTimeoutSeconds, mvp.Sql)
	if err != nil {
		log.Error("could not read configuration_hashes from monitored host:", dbUnique, ", err:", err)
		return change_counts
	}

	for _, dr := range data {
		obj_ident := dr["tag_setting"].(string)

		prev_hash, ok := host_state["configuration_hashes"][obj_ident]
		if ok { // we have existing state
			if prev_hash != dr["value"].(string) {
				log.Warningf("detected settings change: %s = %s (prev: %s)",
					dr["tag_setting"], dr["value"], prev_hash)
				dr["event"] = "alter"
				detected_changes = append(detected_changes, dr)
				host_state["configuration_hashes"][obj_ident] = dr["value"].(string)
				change_counts.Altered += 1
			}
		} else { // check for new, delete not relevant here (pg_upgrade)
			if !first_run {
				log.Warning("detected new setting:", dr["tag_setting"])
				dr["event"] = "create"
				detected_changes = append(detected_changes, dr)
				change_counts.Created += 1
			}
			host_state["configuration_hashes"][obj_ident] = dr["value"].(string)
		}
	}

	log.Debugf("[%s][%s] detected %d configuration changes", dbUnique, SPECIAL_METRIC_CHANGE_EVENTS, len(detected_changes))
	if len(detected_changes) > 0 {
		md, _ := GetMonitoredDatabaseByUniqueName(dbUnique)
		storage_ch <- []MetricStoreMessage{MetricStoreMessage{DBUniqueName: dbUnique, MetricName: "configuration_changes", Data: detected_changes, CustomTags: md.CustomTags}}
	} else if opts.Datastore == DATASTORE_POSTGRES && first_run { // consistent with other Detect*Changes: dummy created once
		EnsureMetricDummy("configuration_changes")
	}
	return change_counts
}
// GetAllRecoMetricsForVersion collects the version-appropriate definitions of
// all "recommendation" metrics (names starting with RECO_PREFIX) from the
// global metric_def_map, skipping private ones and ones without SQL for the
// given server version (a warning is logged for the latter).
func GetAllRecoMetricsForVersion(vme DBVersionMapEntry) map[string]MetricVersionProperties {
	recos := make(map[string]MetricVersionProperties)

	metric_def_map_lock.RLock()
	defer metric_def_map_lock.RUnlock()

	for name := range metric_def_map {
		if !strings.HasPrefix(name, RECO_PREFIX) {
			continue
		}
		props, err := GetMetricVersionProperties(name, vme, metric_def_map)
		if err != nil {
			log.Warningf("Could not get SQL definition for metric \"%s\", PG %s", name, vme.VersionStr)
			continue
		}
		if !props.MetricAttrs.IsPrivate {
			recos[name] = props
		}
	}
	return recos
}
// GetRecommendations executes every recommendation metric against the given
// DB and returns the combined rows, each stamped with a shared fetch epoch
// and the major PG version. When nothing is collected a single dummy row is
// returned so that Grafana dropdowns always have data. Per-metric failures
// are only logged; the error return is always nil. The accumulated query
// time over all metrics is returned as the third value.
func GetRecommendations(dbUnique string, vme DBVersionMapEntry) ([]map[string]interface{}, error, time.Duration) {
	var elapsed time.Duration
	rows := make([]map[string]interface{}, 0)
	startEpochNs := time.Now().UnixNano()

	recoMetrics := GetAllRecoMetricsForVersion(vme)
	log.Debugf("Processing %d recommendation metrics for \"%s\"", len(recoMetrics), dbUnique)

	for name, props := range recoMetrics {
		data, err, dur := DBExecReadByDbUniqueName(dbUnique, name, useConnPooling, props.MetricAttrs.StatementTimeoutSeconds, props.Sql)
		elapsed += dur
		if err != nil {
			// missing exotic extensions are expected; keep the error log clean for those
			if strings.Contains(err.Error(), "does not exist") {
				log.Infof("[%s:%s] Could not execute recommendations SQL: %v", dbUnique, name, err)
			} else {
				log.Errorf("[%s:%s] Could not execute recommendations SQL: %v", dbUnique, name, err)
			}
			continue
		}
		for _, row := range data {
			row[EPOCH_COLUMN_NAME] = startEpochNs
			row["major_ver"] = PgVersionDecimalToMajorVerFloat(dbUnique, vme.Version)
			rows = append(rows, row)
		}
	}

	if len(rows) == 0 { // insert a dummy entry minimally so that Grafana can show at least a dropdown
		rows = append(rows, map[string]interface{}{
			"tag_reco_topic":  "dummy",
			"tag_object_name": "-",
			"recommendation":  "no recommendations",
			EPOCH_COLUMN_NAME: startEpochNs,
			"major_ver":       PgVersionDecimalToMajorVerFloat(dbUnique, vme.Version),
		})
	}
	return rows, nil, elapsed
}
// PgVersionDecimalToMajorVerFloat collapses a full version number to the
// major version as a float: from PG 10 onwards the major version is just the
// integer part, while pre-v10 versions keep their fraction (e.g. 9.6).
// The dbUnique parameter is currently unused but kept for interface stability.
func PgVersionDecimalToMajorVerFloat(dbUnique string, pgVer decimal.Decimal) float64 {
	verAsFloat, _ := pgVer.Float64()
	if verAsFloat < 10 {
		return verAsFloat // old numbering scheme: "9.6" is the major version
	}
	return math.Floor(verAsFloat)
}
// CheckForPGObjectChangesAndStore runs all DDL / configuration / privilege
// change detectors for one DB and, when anything changed, emits a single
// human-readable "object_changes" summary row. Everything goes out as one
// message because Grafana applies "last wins" to annotations sharing a
// timestamp, so separate rows would overwrite each other.
func CheckForPGObjectChangesAndStore(dbUnique string, vme DBVersionMapEntry, storage_ch chan<- []MetricStoreMessage, host_state map[string]map[string]string) {
	sprocCounts := DetectSprocChanges(dbUnique, vme, storage_ch, host_state) // TODO some of Detect*() code could be unified...
	tableCounts := DetectTableChanges(dbUnique, vme, storage_ch, host_state)
	indexCounts := DetectIndexChanges(dbUnique, vme, storage_ch, host_state)
	confCounts := DetectConfigurationChanges(dbUnique, vme, storage_ch, host_state)
	privCounts := DetectPrivilegeChanges(dbUnique, vme, storage_ch, host_state)

	if opts.Datastore == DATASTORE_POSTGRES {
		EnsureMetricDummy("object_changes")
	}

	summary := ""
	if sprocCounts.Altered > 0 || sprocCounts.Created > 0 || sprocCounts.Dropped > 0 {
		summary += fmt.Sprintf(" sprocs %d/%d/%d", sprocCounts.Created, sprocCounts.Altered, sprocCounts.Dropped)
	}
	if tableCounts.Altered > 0 || tableCounts.Created > 0 || tableCounts.Dropped > 0 {
		summary += fmt.Sprintf(" tables/views %d/%d/%d", tableCounts.Created, tableCounts.Altered, tableCounts.Dropped)
	}
	if indexCounts.Altered > 0 || indexCounts.Created > 0 || indexCounts.Dropped > 0 {
		summary += fmt.Sprintf(" indexes %d/%d/%d", indexCounts.Created, indexCounts.Altered, indexCounts.Dropped)
	}
	if confCounts.Altered > 0 || confCounts.Created > 0 {
		summary += fmt.Sprintf(" configuration %d/%d/%d", confCounts.Created, confCounts.Altered, confCounts.Dropped)
	}
	if privCounts.Dropped > 0 || privCounts.Created > 0 {
		summary += fmt.Sprintf(" privileges %d/%d/%d", privCounts.Created, privCounts.Altered, privCounts.Dropped)
	}

	if summary != "" {
		summary = "Detected changes for \"" + dbUnique + "\" [Created/Altered/Dropped]:" + summary
		log.Info(summary)
		entry := map[string]interface{}{
			"details":  summary,
			"epoch_ns": time.Now().UnixNano(),
		}
		md, _ := GetMonitoredDatabaseByUniqueName(dbUnique)
		storage_ch <- []MetricStoreMessage{{DBUniqueName: dbUnique, DBType: md.DBType, MetricName: "object_changes", Data: []map[string]interface{}{entry}, CustomTags: md.CustomTags}}
	}
}
// FilterPgbouncerData keeps only the pgbouncer stats rows belonging to
// database_to_keep (monitored_dbs.md_dbname) and normalizes counter types.
// From pgbouncer v1.12 counters are emitted as numeric (arriving as []uint8
// text), so they are parsed and truncated back to int64 for storage. Rows
// without a 'database' column are skipped with a warning; the 'database' key
// itself is dropped since 'dbname' is used by convention downstream.
// Returns an empty (non-nil) slice when database_to_keep is empty or a
// counter fails to parse.
//
// Fix vs. previous revision: the parse of the constant "1.12" and the version
// comparison were executed once per row; both are loop-invariant and are now
// hoisted out of the loop.
func FilterPgbouncerData(data []map[string]interface{}, database_to_keep string, vme DBVersionMapEntry) []map[string]interface{} {
	filtered_data := make([]map[string]interface{}, 0)
	if len(database_to_keep) == 0 {
		return filtered_data
	}

	// hoisted: invariant over all rows
	numericCounterVer, _ := decimal.NewFromString("1.12")
	hasNumericCounters := vme.Version.GreaterThanOrEqual(numericCounterVer) // v1.12 counters are of type numeric instead of int64

	for _, dr := range data {
		//log.Debugf("bouncer dr: %+v", dr)
		if _, ok := dr["database"]; !ok {
			log.Warning("Expected 'database' key not found from pgbouncer_stats, not storing data")
			continue
		}
		if dr["database"] != database_to_keep {
			continue // we only want pgbouncer stats for the DB specified in monitored_dbs.md_dbname
		}
		delete(dr, "database") // remove 'database' as we use 'dbname' by convention

		if hasNumericCounters {
			for k, v := range dr {
				decimalCounter, err := decimal.NewFromString(string(v.([]uint8)))
				if err != nil {
					log.Errorf("Could not parse \"%+v\" to Decimal: %s", string(v.([]uint8)), err)
					return filtered_data
				}
				dr[k] = decimalCounter.IntPart() // technically could cause overflow...but highly unlikely for 2^63
			}
		}
		filtered_data = append(filtered_data, dr)
	}

	return filtered_data
}
// some extra work needed as pgpool SHOW commands don't specify the return data types for some reason
// FetchMetricsPgpool executes the pgpool SHOW commands listed one-per-line in
// mvp.Sql and converts the text-only resultset into typed rows. Supported
// commands: SHOW POOL_NODES (must appear before SHOW POOL_PROCESSES; yields
// one row per backend node) and SHOW POOL_PROCESSES (aggregated into
// processes_total / processes_active fields added onto every node row).
// Returns the rows, a nil error, and the accumulated query duration.
func FetchMetricsPgpool(msg MetricFetchMessage, vme DBVersionMapEntry, mvp MetricVersionProperties) ([]map[string]interface{}, error, time.Duration) {
	var ret_data = make([]map[string]interface{}, 0)
	var duration time.Duration
	epoch_ns := time.Now().UnixNano()

	// uppercased so the HasPrefix checks below are case-insensitive wrt. the metric definition
	sql_lines := strings.Split(strings.ToUpper(mvp.Sql), "\n")

	for _, sql := range sql_lines {
		if strings.HasPrefix(sql, "SHOW POOL_NODES") {
			data, err, dur := DBExecReadByDbUniqueName(msg.DBUniqueName, msg.MetricName, useConnPooling, 0, sql)
			duration = duration + dur
			if err != nil {
				log.Errorf("[%s][%s] Could not fetch PgPool statistics: %v", msg.DBUniqueName, msg.MetricName, err)
				return data, err, duration
			}

			for _, row := range data {
				ret_row := make(map[string]interface{})
				ret_row[EPOCH_COLUMN_NAME] = epoch_ns
				for k, v := range row {
					vs := string(v.([]byte)) // pgpool returns all columns as text
					// need 1 tag so that Influx would not merge rows
					if k == "node_id" {
						ret_row["tag_node_id"] = vs
						continue
					}

					ret_row[k] = vs
					if k == "status" { // was changed from numeric to string at some pgpool version so leave the string
						// but also add "status_num" field
						if vs == "up" {
							ret_row["status_num"] = 1
						} else if vs == "down" {
							ret_row["status_num"] = 0
						} else {
							i, err := strconv.ParseInt(vs, 10, 64)
							if err == nil {
								ret_row["status_num"] = i
							}
						}
						continue
					}
					// everything is returned as text, so try to convert all numerics into ints / floats
					if k != "lb_weight" {
						i, err := strconv.ParseInt(vs, 10, 64)
						if err == nil {
							ret_row[k] = i
							continue
						}
					}
					f, err := strconv.ParseFloat(vs, 64)
					if err == nil {
						ret_row[k] = f
						continue
					}
				}
				ret_data = append(ret_data, ret_row)
			}
		} else if strings.HasPrefix(sql, "SHOW POOL_PROCESSES") {
			if len(ret_data) == 0 {
				log.Warningf("[%s][%s] SHOW POOL_NODES needs to be placed before SHOW POOL_PROCESSES. ignoring SHOW POOL_PROCESSES", msg.DBUniqueName, msg.MetricName)
				continue
			}

			data, err, dur := DBExecReadByDbUniqueName(msg.DBUniqueName, msg.MetricName, useConnPooling, 0, sql)
			duration = duration + dur
			if err != nil {
				log.Errorf("[%s][%s] Could not fetch PgPool statistics: %v", msg.DBUniqueName, msg.MetricName, err)
				continue
			}

			// summarize processes_total / processes_active over all rows
			processes_total := 0
			processes_active := 0
			for _, row := range data {
				processes_total++
				v, ok := row["database"]
				if !ok {
					log.Infof("[%s][%s] column 'database' not found from data returned by SHOW POOL_PROCESSES, check pool version / SQL definition", msg.DBUniqueName, msg.MetricName)
					continue
				}
				// a non-empty 'database' column marks a process currently serving a client
				if len(v.([]byte)) > 0 {
					processes_active++
				}
			}

			for _, ret_row := range ret_data {
				ret_row["processes_total"] = processes_total
				ret_row["processes_active"] = processes_active
			}
		}
	}

	//log.Fatalf("%+v", ret_data)
	return ret_data, nil, duration
}
// FetchMetrics executes a single metric fetch for one monitored host and
// returns the rows wrapped as MetricStoreMessage-s (or nil when nothing is to
// be stored). Flow: resolve PG version -> pick the version-matching metric SQL
// (retrying once with the superuser variant when defined and the normal SQL
// fails) -> special-case change-events / recommendations / pgpool metrics ->
// optionally serve from / fill the instance-level cache for cacheable metrics.
// host_state is passed through to change-event detection only; context
// distinguishes e.g. Prometheus scrapes (CONTEXT_PROMETHEUS_SCRAPE), which
// skip the stateful special metrics.
func FetchMetrics(msg MetricFetchMessage, host_state map[string]map[string]string, storage_ch chan<- []MetricStoreMessage, context string) ([]MetricStoreMessage, error) {
	var vme DBVersionMapEntry
	var db_pg_version decimal.Decimal
	var err, firstErr error
	var sql string
	var retryWithSuperuserSQL = true
	var data, cachedData []map[string]interface{}
	var duration time.Duration
	var md MonitoredDatabase
	var fromCache, isCacheable bool

	vme, err = DBGetPGVersion(msg.DBUniqueName, msg.DBType, false)
	if err != nil {
		log.Error("failed to fetch pg version for ", msg.DBUniqueName, msg.MetricName, err)
		return nil, err
	}
	db_pg_version = vme.Version
	if msg.DBType == DBTYPE_BOUNCER {
		db_pg_version = decimal.Decimal{} // version is 0.0 for all pgbouncer sql per convention
	}

	mvp, err := GetMetricVersionProperties(msg.MetricName, vme, nil)
	if err != nil && msg.MetricName != RECO_METRIC_NAME {
		// rate-limit "no SQL found" complaints per metric+version key
		epoch, ok := last_sql_fetch_error.Load(msg.MetricName + ":" + db_pg_version.String())
		if !ok || ((time.Now().Unix() - epoch.(int64)) > 3600) { // complain only 1x per hour
			log.Infof("Failed to get SQL for metric '%s', version '%s': %v", msg.MetricName, vme.VersionStr, err)
			last_sql_fetch_error.Store(msg.MetricName+":"+db_pg_version.String(), time.Now().Unix())
		}
		if strings.Contains(err.Error(), "too old") {
			return nil, nil // server version below the metric's minimum — skip silently
		}
		return nil, err
	}

	// serve from the instance-level cache when the metric qualifies and the
	// caching window is shorter than the gather interval
	isCacheable = IsCacheableMetric(msg, mvp)
	if isCacheable && opts.InstanceLevelCacheMaxSeconds > 0 && msg.Interval.Seconds() > float64(opts.InstanceLevelCacheMaxSeconds) {
		cachedData = GetFromInstanceCacheIfNotOlderThanSeconds(msg, opts.InstanceLevelCacheMaxSeconds)
		if len(cachedData) > 0 {
			fromCache = true
			goto send_to_storage_channel
		}
	}

retry_with_superuser_sql: // if 1st fetch with normal SQL fails, try with SU SQL if it's defined
	sql = mvp.Sql

	if (vme.IsSuperuser || (retryWithSuperuserSQL && firstErr != nil)) && mvp.SqlSU != "" {
		sql = mvp.SqlSU
		retryWithSuperuserSQL = false // only one SU retry round
	}
	if sql == "" && !(msg.MetricName == SPECIAL_METRIC_CHANGE_EVENTS || msg.MetricName == RECO_METRIC_NAME) {
		// let's ignore dummy SQL-s
		log.Debugf("[%s:%s] Ignoring fetch message - got an empty/dummy SQL string", msg.DBUniqueName, msg.MetricName)
		return nil, nil
	}

	if (mvp.MasterOnly && vme.IsInRecovery) || (mvp.StandbyOnly && !vme.IsInRecovery) {
		log.Debugf("[%s:%s] Skipping fetching of  as server not in wanted state (IsInRecovery=%v)", msg.DBUniqueName, msg.MetricName, vme.IsInRecovery)
		return nil, nil
	}

	if msg.MetricName == SPECIAL_METRIC_CHANGE_EVENTS && context != CONTEXT_PROMETHEUS_SCRAPE { // special handling, multiple queries + stateful
		CheckForPGObjectChangesAndStore(msg.DBUniqueName, vme, storage_ch, host_state) // TODO no host_state for Prometheus currently
	} else if msg.MetricName == RECO_METRIC_NAME && context != CONTEXT_PROMETHEUS_SCRAPE {
		data, _, duration = GetRecommendations(msg.DBUniqueName, vme)
	} else if msg.DBType == DBTYPE_PGPOOL {
		data, _, duration = FetchMetricsPgpool(msg, vme, mvp)
	} else {
		data, err, duration = DBExecReadByDbUniqueName(msg.DBUniqueName, msg.MetricName, useConnPooling, msg.StmtTimeoutOverride, sql)

		if err != nil {
			// let's soften errors to "info" from functions that expect the server to be a primary to reduce noise
			if strings.Contains(err.Error(), "recovery is in progress") {
				db_pg_version_map_lock.RLock()
				ver := db_pg_version_map[msg.DBUniqueName]
				db_pg_version_map_lock.RUnlock()
				if ver.IsInRecovery {
					log.Debugf("[%s:%s] failed to fetch metrics: %s", msg.DBUniqueName, msg.MetricName, err)
					return nil, err
				}
			}
			if retryWithSuperuserSQL && mvp.SqlSU != "" {
				// remember the original error and try once more with the SU variant
				firstErr = err
				log.Infof("[%s:%s] Normal fetch failed, re-trying to fetch with SU SQL", msg.DBUniqueName, msg.MetricName)
				goto retry_with_superuser_sql
			} else {
				if firstErr != nil {
					log.Infof("[%s:%s] failed to fetch metrics also with SU SQL so initial error will be returned. Current err: %s", msg.DBUniqueName, msg.MetricName, err)
					return nil, firstErr // returning the initial error
				} else {
					log.Infof("[%s:%s] failed to fetch metrics: %s", msg.DBUniqueName, msg.MetricName, err)
				}
			}
			return nil, err
		} else {
			md, err = GetMonitoredDatabaseByUniqueName(msg.DBUniqueName)
			if err != nil {
				log.Errorf("[%s:%s] could not get monitored DB details", msg.DBUniqueName, err)
				return nil, err
			}

			log.Infof("[%s:%s] fetched %d rows in %.1f ms", msg.DBUniqueName, msg.MetricName, len(data), float64(duration.Nanoseconds())/1000000)
			if msg.MetricName == SPECIAL_METRIC_PGBOUNCER_STATS { // clean unwanted pgbouncer pool stats here as not possible in SQL
				data = FilterPgbouncerData(data, md.DBName, vme)
			}
		}
	}

	// same condition as the cache-read above — fill the cache for siblings of this instance
	if isCacheable && opts.InstanceLevelCacheMaxSeconds > 0 && msg.Interval.Seconds() > float64(opts.InstanceLevelCacheMaxSeconds) {
		PutToInstanceCache(msg, data)
	}

send_to_storage_channel:

	if (addRealDbname || addSystemIdentifier) && msg.DBType == DBTYPE_PG {
		db_pg_version_map_lock.RLock()
		ver := db_pg_version_map[msg.DBUniqueName]
		db_pg_version_map_lock.RUnlock()
		data = AddDbnameSysinfoIfNotExistsToQueryResultData(msg, data, ver)
	}

	if mvp.MetricAttrs.MetricStorageName != "" {
		// metric attributes may redirect storage to a differently named series/table
		log.Debugf("[%s] rerouting metric %s data to %s based on metric attributes", msg.DBUniqueName, msg.MetricName, mvp.MetricAttrs.MetricStorageName)
		msg.MetricName = mvp.MetricAttrs.MetricStorageName
	}

	if fromCache {
		md, err = GetMonitoredDatabaseByUniqueName(msg.DBUniqueName)
		if err != nil {
			log.Errorf("[%s:%s] could not get monitored DB details", msg.DBUniqueName, err)
			return nil, err
		}
		log.Infof("[%s:%s] loaded %d rows from the instance cache", msg.DBUniqueName, msg.MetricName, len(cachedData))
		atomic.AddUint64(&totalMetricsReusedFromCacheCounter, uint64(len(cachedData)))
		return []MetricStoreMessage{MetricStoreMessage{DBUniqueName: msg.DBUniqueName, MetricName: msg.MetricName, Data: cachedData, CustomTags: md.CustomTags,
			MetricDefinitionDetails: mvp, RealDbname: vme.RealDbname, SystemIdentifier: vme.SystemIdentifier}}, nil
	} else {
		atomic.AddUint64(&totalMetricsFetchedCounter, uint64(len(data)))
		return []MetricStoreMessage{MetricStoreMessage{DBUniqueName: msg.DBUniqueName, MetricName: msg.MetricName, Data: data, CustomTags: md.CustomTags,
			MetricDefinitionDetails: mvp, RealDbname: vme.RealDbname, SystemIdentifier: vme.SystemIdentifier}}, nil
	}
}
// GetFromInstanceCacheIfNotOlderThanSeconds returns a deep copy of the cached
// instance-level result set for the (DBUniqueNameOrig + MetricName) key, or
// nil when there is no entry or the entry is older than maxAgeSeconds.
// Timestamps and data live in separate maps guarded by separate RW-locks;
// the deep copy is taken while still holding the data read-lock so concurrent
// cache writers cannot mutate rows mid-copy.
func GetFromInstanceCacheIfNotOlderThanSeconds(msg MetricFetchMessage, maxAgeSeconds int64) []map[string]interface{} {
	var clonedData []map[string]interface{}
	// freshness check first, against the timestamp map
	instanceMetricCacheTimestampLock.RLock()
	instanceMetricTS, ok := instanceMetricCacheTimestamp[msg.DBUniqueNameOrig+msg.MetricName]
	instanceMetricCacheTimestampLock.RUnlock()
	if !ok {
		//log.Debugf("[%s:%s] no instance cache entry", msg.DBUniqueNameOrig, msg.MetricName)
		return nil
	}

	if time.Now().Unix()-instanceMetricTS.Unix() > maxAgeSeconds {
		//log.Debugf("[%s:%s] instance cache entry too old", msg.DBUniqueNameOrig, msg.MetricName)
		return nil
	}

	log.Debugf("[%s:%s] reading metric data from instance cache of \"%s\"", msg.DBUniqueName, msg.MetricName, msg.DBUniqueNameOrig)
	instanceMetricCacheLock.RLock()
	instanceMetricData, ok := instanceMetricCache[msg.DBUniqueNameOrig+msg.MetricName]
	if !ok {
		instanceMetricCacheLock.RUnlock()
		return nil
	}
	// copy under the read lock so the caller may freely mutate the result
	clonedData = deepCopyMetricData(instanceMetricData)
	instanceMetricCacheLock.RUnlock()

	return clonedData
}
// PutToInstanceCache stores a deep copy of the fetched rows into the
// instance-level cache, keyed by the original unique name + metric name, and
// records the storage timestamp. Empty result sets are not cached.
func PutToInstanceCache(msg MetricFetchMessage, data []map[string]interface{}) {
	if len(data) == 0 {
		return
	}
	cacheKey := msg.DBUniqueNameOrig + msg.MetricName
	rowsCopy := deepCopyMetricData(data)
	log.Debugf("[%s:%s] filling instance cache", msg.DBUniqueNameOrig, msg.MetricName)

	instanceMetricCacheLock.Lock()
	instanceMetricCache[cacheKey] = rowsCopy
	instanceMetricCacheLock.Unlock()

	instanceMetricCacheTimestampLock.Lock()
	instanceMetricCacheTimestamp[cacheKey] = time.Now()
	instanceMetricCacheTimestampLock.Unlock()
}
// IsCacheableMetric reports whether the metric's data may be served from the
// instance-level cache: only continuous-discovery cluster DB types qualify,
// and the metric itself must be flagged as instance-level in its attributes.
func IsCacheableMetric(msg MetricFetchMessage, mvp MetricVersionProperties) bool {
	switch msg.DBType {
	case DBTYPE_PG_CONT, DBTYPE_PATRONI_CONT:
		return mvp.MetricAttrs.IsInstanceLevel
	default:
		return false
	}
}
// AddDbnameSysinfoIfNotExistsToQueryResultData stamps every result row with
// the real DB name and/or the system identifier tag — when the corresponding
// global toggle is on, the value is known, and the row does not already carry
// a non-empty value for that tag. Rows are mutated in place and returned in
// the original order.
func AddDbnameSysinfoIfNotExistsToQueryResultData(msg MetricFetchMessage, data []map[string]interface{}, ver DBVersionMapEntry) []map[string]interface{} {
	enriched_data := make([]map[string]interface{}, 0)

	log.Debugf("Enriching all rows of [%s:%s] with sysinfo (%s) / real dbname (%s) if set. ", msg.DBUniqueName, msg.MetricName, ver.SystemIdentifier, ver.RealDbname)
	for _, row := range data {
		if addRealDbname && ver.RealDbname != "" {
			tagKey := TAG_PREFIX + opts.RealDbnameField
			if prev, found := row[tagKey]; !found || prev == "" {
				row[tagKey] = ver.RealDbname
			}
		}
		if addSystemIdentifier && ver.SystemIdentifier != "" {
			tagKey := TAG_PREFIX + opts.SystemIdentifierField
			if prev, found := row[tagKey]; !found || prev == "" {
				row[tagKey] = ver.SystemIdentifier
			}
		}
		enriched_data = append(enriched_data, row)
	}
	return enriched_data
}
// StoreMetrics forwards a non-empty batch of messages to the storage channel,
// bumping the global datasets counter, and returns the number of messages sent.
// Empty batches are a no-op.
func StoreMetrics(metrics []MetricStoreMessage, storage_ch chan<- []MetricStoreMessage) (int, error) {
	if len(metrics) == 0 {
		return 0, nil
	}
	atomic.AddUint64(&totalDatasetsFetchedCounter, 1)
	storage_ch <- metrics
	return len(metrics), nil
}
// deepCopyMetricStoreMessages clones the messages so callers (e.g. test-data
// generation) can mutate rows without affecting the originals. Data rows and
// custom tags are copied key by key; values are copied shallowly.
// NOTE(review): MetricDefinitionDetails / RealDbname / SystemIdentifier are
// not carried over into the copies — confirm that is intentional.
func deepCopyMetricStoreMessages(metricStoreMessages []MetricStoreMessage) []MetricStoreMessage {
	copies := make([]MetricStoreMessage, 0)
	for _, msm := range metricStoreMessages {
		clonedRows := make([]map[string]interface{}, 0)
		for _, row := range msm.Data {
			rowCopy := make(map[string]interface{})
			for key, val := range row {
				rowCopy[key] = val
			}
			clonedRows = append(clonedRows, rowCopy)
		}
		clonedTags := make(map[string]string)
		for key, val := range msm.CustomTags {
			clonedTags[key] = val
		}
		copies = append(copies, MetricStoreMessage{
			DBUniqueName: msm.DBUniqueName,
			MetricName:   msm.MetricName,
			DBType:       msm.DBType,
			Data:         clonedRows,
			CustomTags:   clonedTags,
		})
	}
	return copies
}
// deepCopyMetricData returns a row-by-row, key-by-key copy of a result set so
// the copy can be mutated independently of the original (values themselves are
// copied shallowly).
func deepCopyMetricData(data []map[string]interface{}) []map[string]interface{} {
	cloned := make([]map[string]interface{}, len(data))
	for i := range data {
		rowCopy := make(map[string]interface{}, len(data[i]))
		for key, val := range data[i] {
			rowCopy[key] = val
		}
		cloned[i] = rowCopy
	}
	return cloned
}
// deepCopyMetricDefinitionMap clones the metric-name -> version -> properties
// map one level deep; the MetricVersionProperties structs are copied by value.
func deepCopyMetricDefinitionMap(mdm map[string]map[decimal.Decimal]MetricVersionProperties) map[string]map[decimal.Decimal]MetricVersionProperties {
	cloned := make(map[string]map[decimal.Decimal]MetricVersionProperties, len(mdm))
	for metricName, versions := range mdm {
		versionsCopy := make(map[decimal.Decimal]MetricVersionProperties, len(versions))
		for ver, props := range versions {
			versionsCopy[ver] = props
		}
		cloned[metricName] = versionsCopy
	}
	return cloned
}
// ControlMessage notifies of shutdown + interval change
//
// MetricGathererLoop is the long-running per-(host, metric) gatherer goroutine.
// It fetches the metric every `interval` seconds, pushes results to store_ch,
// and reacts to START (interval change) / STOP commands on control_ch.
// Side duties visible below: re-checking the DB version (and any metric-level
// statement-timeout override) every 5 minutes, time/day-based metric
// disabling, "server restarted" detection via the db_stats
// postmaster_uptime_s column, and a test-data generation mode
// (opts.TestdataDays != 0) that fabricates historic data and then exits.
func MetricGathererLoop(dbUniqueName, dbUniqueNameOrig, dbType, metricName string, config_map map[string]float64, control_ch <-chan ControlMessage, store_ch chan<- []MetricStoreMessage) {
	config := config_map
	interval := config[metricName]
	ticker := time.NewTicker(time.Millisecond * time.Duration(interval*1000))
	host_state := make(map[string]map[string]string)
	var last_uptime_s int64 = -1 // used for "server restarted" event detection
	var last_error_notification_time time.Time
	var vme DBVersionMapEntry
	failed_fetches := 0
	metricNameForStorage := metricName
	lastDBVersionFetchTime := time.Unix(0, 0) // check DB ver. ev. 5 min
	var stmtTimeoutOverride int64

	if opts.TestdataDays != 0 {
		// these two metrics are stateful / log-based and excluded from test-data generation
		if metricName == SPECIAL_METRIC_SERVER_LOG_EVENT_COUNTS || metricName == SPECIAL_METRIC_CHANGE_EVENTS {
			return
		}
		testDataGenerationModeWG.Add(1)
	}
	if opts.Datastore == DATASTORE_POSTGRES && opts.TestdataDays == 0 {
		if _, is_special_metric := specialMetrics[metricName]; !is_special_metric {
			// resolve a possible storage-name re-route so the listing table gets the final name
			// NOTE(review): this vme/err pair shadows the outer vme on purpose (best-effort lookup)
			vme, err := DBGetPGVersion(dbUniqueName, dbType, false)
			if err != nil {
				log.Warningf("[%s][%s] Failed to determine possible re-routing name, Grafana dashboards with re-routed metrics might not show all hosts", dbUniqueName, metricName)
			} else {
				mvp, err := GetMetricVersionProperties(metricName, vme, nil)
				if err != nil && !strings.Contains(err.Error(), "too old") {
					log.Warningf("[%s][%s] Failed to determine possible re-routing name, Grafana dashboards with re-routed metrics might not show all hosts", dbUniqueName, metricName)
				} else if mvp.MetricAttrs.MetricStorageName != "" {
					metricNameForStorage = mvp.MetricAttrs.MetricStorageName
				}
			}
		}

		err := AddDBUniqueMetricToListingTable(dbUniqueName, metricNameForStorage)
		if err != nil {
			log.Errorf("Could not add newly found gatherer [%s:%s] to the 'all_distinct_dbname_metrics' listing table: %v", dbUniqueName, metricName, err)
		}

		EnsureMetricDummy(metricNameForStorage) // ensure that there is at least an empty top-level table not to get ugly Grafana notifications
	}

	if metricName == SPECIAL_METRIC_SERVER_LOG_EVENT_COUNTS {
		logparseLoop(dbUniqueName, metricName, config_map, control_ch, store_ch) // no return
		return
	}

	for {
		// refresh the cached PG version and statement-timeout override at most every 5 min
		if lastDBVersionFetchTime.Add(time.Minute * time.Duration(5)).Before(time.Now()) {
			vme, _ = DBGetPGVersion(dbUniqueName, dbType, false) // in case of errors just ignore metric "disabled" time ranges
			lastDBVersionFetchTime = time.Now()

			mvp, err := GetMetricVersionProperties(metricName, vme, nil)
			if err == nil && mvp.MetricAttrs.StatementTimeoutSeconds > 0 {
				stmtTimeoutOverride = mvp.MetricAttrs.StatementTimeoutSeconds
			} else {
				stmtTimeoutOverride = 0
			}
		}

		metricCurrentlyDisabled := IsMetricCurrentlyDisabledForHost(metricName, vme, dbUniqueName)
		if metricCurrentlyDisabled && opts.TestdataDays == 0 {
			log.Debugf("[%s][%s] Ignoring fetch as metric disabled for current time range", dbUniqueName, metricName)
		} else {
			t1 := time.Now()
			metricStoreMessages, err := FetchMetrics(
				MetricFetchMessage{DBUniqueName: dbUniqueName, DBUniqueNameOrig: dbUniqueNameOrig, MetricName: metricName, DBType: dbType, Interval: time.Second * time.Duration(interval), StmtTimeoutOverride: stmtTimeoutOverride},
				host_state,
				store_ch,
				"")
			t2 := time.Now()

			// warn when a single fetch takes longer than the configured interval
			if t2.Sub(t1) > (time.Second * time.Duration(interval)) {
				log.Warningf("Total fetching time of %vs bigger than %vs interval for [%s:%s]", t2.Sub(t1).Truncate(time.Millisecond*100).Seconds(), interval, dbUniqueName, metricName)
			}

			if err != nil {
				failed_fetches += 1
				// complain only 1x per 10min per host/metric...
				if last_error_notification_time.IsZero() || last_error_notification_time.Add(time.Second*time.Duration(600)).Before(time.Now()) {
					log.Errorf("Failed to fetch metric data for [%s:%s]: %v", dbUniqueName, metricName, err)
					if failed_fetches > 1 {
						log.Errorf("Total failed fetches for [%s:%s]: %d", dbUniqueName, metricName, failed_fetches)
					}
					last_error_notification_time = time.Now()
				}
			} else if metricStoreMessages != nil && len(metricStoreMessages[0].Data) > 0 {

				// pick up "server restarted" events here to avoid doing extra selects from CheckForPGObjectChangesAndStore code
				if metricName == "db_stats" {
					postmaster_uptime_s, ok := (metricStoreMessages[0].Data)[0]["postmaster_uptime_s"]
					if ok {
						if last_uptime_s != -1 {
							if postmaster_uptime_s.(int64) < last_uptime_s { // restart (or possibly also failover when host is routed) happened
								message := "Detected server restart (or failover) of \"" + dbUniqueName + "\""
								log.Warning(message)
								detected_changes_summary := make([](map[string]interface{}), 0)
								entry := map[string]interface{}{"details": message, "epoch_ns": (metricStoreMessages[0].Data)[0]["epoch_ns"]}
								detected_changes_summary = append(detected_changes_summary, entry)
								metricStoreMessages = append(metricStoreMessages,
									MetricStoreMessage{DBUniqueName: dbUniqueName, DBType: dbType,
										MetricName: "object_changes", Data: detected_changes_summary, CustomTags: metricStoreMessages[0].CustomTags})
							}
						}
						last_uptime_s = postmaster_uptime_s.(int64)
					}
				}

				if opts.TestdataDays != 0 {
					// test-data mode: replay the fetched rows over the requested time span
					// for TestdataMultiplier fake hosts, then exit the goroutine
					orig_msms := deepCopyMetricStoreMessages(metricStoreMessages)
					log.Warningf("Generating %d days of data for [%s:%s]", opts.TestdataDays, dbUniqueName, metricName)
					test_metrics_stored := 0
					simulated_time := t1
					end_time := t1.Add(time.Hour * time.Duration(opts.TestdataDays*24))

					if opts.TestdataDays < 0 {
						simulated_time, end_time = end_time, simulated_time
					}

					for simulated_time.Before(end_time) {
						log.Debugf("Metric [%s], simulating time: %v", metricName, simulated_time)
						for host_nr := 1; host_nr <= opts.TestdataMultiplier; host_nr++ {
							fake_dbname := fmt.Sprintf("%s-%d", dbUniqueName, host_nr)
							msgs_copy_tmp := deepCopyMetricStoreMessages(orig_msms)

							for i := 0; i < len(msgs_copy_tmp[0].Data); i++ {
								(msgs_copy_tmp[0].Data)[i][EPOCH_COLUMN_NAME] = (simulated_time.UnixNano() + int64(1000*i))
							}
							msgs_copy_tmp[0].DBUniqueName = fake_dbname
							//log.Debugf("fake data for [%s:%s]: %v", metricName, fake_dbname, msgs_copy_tmp[0].Data)
							_, _ = StoreMetrics(msgs_copy_tmp, store_ch)
							test_metrics_stored += len(msgs_copy_tmp[0].Data)
						}
						time.Sleep(time.Duration(opts.TestdataMultiplier * 10000000)) // 10ms * multiplier (in nanosec).
						// would generate more metrics than persister can write and eat up RAM
						simulated_time = simulated_time.Add(time.Second * time.Duration(interval))
					}
					log.Warningf("exiting MetricGathererLoop for [%s], %d total data points generated for %d hosts",
						metricName, test_metrics_stored, opts.TestdataMultiplier)
					testDataGenerationModeWG.Done()
					return
				} else {
					_, _ = StoreMetrics(metricStoreMessages, store_ch)
				}
			}

			if opts.TestdataDays != 0 { // covers errors & no data
				testDataGenerationModeWG.Done()
				return
			}
		}

		select {
		case msg := <-control_ch:
			log.Debug("got control msg", dbUniqueName, metricName, msg)
			if msg.Action == GATHERER_STATUS_START {
				config = msg.Config
				interval = config[metricName]
				if ticker != nil {
					ticker.Stop()
				}
				ticker = time.NewTicker(time.Millisecond * time.Duration(interval*1000))
				log.Debug("started MetricGathererLoop for ", dbUniqueName, metricName, " interval:", interval)
			} else if msg.Action == GATHERER_STATUS_STOP {
				log.Debug("exiting MetricGathererLoop for ", dbUniqueName, metricName, " interval:", interval)
				return
			}
		case <-ticker.C:
			log.Debugf("MetricGathererLoop for [%s:%s] slept for %s", dbUniqueName, metricName, time.Second*time.Duration(interval))
		}
	}
}
// IsStringInSlice reports whether target occurs in slice (exact match).
func IsStringInSlice(target string, slice []string) bool {
	for i := range slice {
		if slice[i] == target {
			return true
		}
	}
	return false
}
// IsMetricCurrentlyDisabledForHost decides whether gathering of the given
// metric should be skipped right now, based on time/day "disabled" windows
// defined either in the metric's own attributes or — taking precedence — in
// the host's per-metric host-config overrides. Any lookup failure results in
// "not disabled" (fail-open) so metrics keep flowing.
func IsMetricCurrentlyDisabledForHost(metricName string, vme DBVersionMapEntry, dbUniqueName string) bool {
	_, isSpecialMetric := specialMetrics[metricName]

	mvp, err := GetMetricVersionProperties(metricName, vme, nil)
	if err != nil {
		// special metrics and "too old" servers have no attrs — never disabled
		if isSpecialMetric || strings.Contains(err.Error(), "too old") {
			return false
		}
		log.Warningf("[%s][%s] Ignoring any possible time based gathering restrictions, could not get metric details", dbUniqueName, metricName)
		return false
	}

	md, err := GetMonitoredDatabaseByUniqueName(dbUniqueName) // TODO caching?
	if err != nil {
		log.Warningf("[%s][%s] Ignoring any possible time based gathering restrictions, could not get DB details", dbUniqueName, metricName)
		return false
	}

	// fast path: no restrictions anywhere
	if md.HostConfig.PerMetricDisabledTimes == nil && mvp.MetricAttrs.DisabledDays == "" && len(mvp.MetricAttrs.DisableTimes) == 0 {
		//log.Debugf("[%s][%s] No time based gathering restrictions defined", dbUniqueName, metricName)
		return false
	}

	metricHasOverrides := false
	if md.HostConfig.PerMetricDisabledTimes != nil {
		for _, hcdt := range md.HostConfig.PerMetricDisabledTimes {
			if IsStringInSlice(metricName, hcdt.Metrics) && (hcdt.DisabledDays != "" || len(hcdt.DisabledTimes) > 0) {
				metricHasOverrides = true
				break
			}
		}
		// host-config exists but neither it nor metric attrs restrict this metric
		if !metricHasOverrides && mvp.MetricAttrs.DisabledDays == "" && len(mvp.MetricAttrs.DisableTimes) == 0 {
			//log.Debugf("[%s][%s] No time based gathering restrictions defined", dbUniqueName, metricName)
			return false
		}
	}

	return IsInDisabledTimeDayRange(time.Now(), mvp.MetricAttrs.DisabledDays, mvp.MetricAttrs.DisableTimes, md.HostConfig.PerMetricDisabledTimes, metricName, dbUniqueName)
}
// days: 0 = Sun, ranges allowed
// IsInDaySpan reports whether locTime's weekday falls into the Cron-style
// "days" specification. An empty spec never matches. metric / dbUnique are
// carried only for logging context.
func IsInDaySpan(locTime time.Time, days, metric, dbUnique string) bool {
	//log.Debug("IsInDaySpan", locTime, days, metric, dbUnique)
	if days == "" {
		return false
	}
	weekday := int(locTime.Weekday())
	_, matched := DaysStringToIntMap(days)[weekday]
	return matched
}
// DaysStringToIntMap parses a Cron-like comma-separated day list (e.g.
// "1,3-5,0") into a set of weekday numbers (0 = Sunday). Both single days and
// dash ranges are accepted; a 7 additionally sets 0 since Cron allows either
// for Sunday. Invalid entries are logged and skipped.
// TODO validate with some regex when reading in configs, have dbname info then
func DaysStringToIntMap(days string) map[int]bool {
	ret := make(map[int]bool)
	for _, s := range strings.Split(days, ",") {
		if strings.Contains(s, "-") {
			dayRange := strings.Split(s, "-")
			if len(dayRange) != 2 {
				log.Warningf("Ignoring invalid day range specification: %s. Check config", s)
				continue
			}
			startDay, err := strconv.Atoi(dayRange[0])
			endDay, err2 := strconv.Atoi(dayRange[1])
			if err != nil || err2 != nil {
				log.Warningf("Ignoring invalid day range specification: %s. Check config", s)
				continue
			}
			// clamp to the valid 0..7 weekday window
			for i := startDay; i <= endDay && i >= 0 && i <= 7; i++ {
				ret[i] = true
			}

		} else {
			day, err := strconv.Atoi(s)
			if err != nil {
				// fix: log the offending token `s`, not the whole `days` string
				// (the range branch above already did so — kept consistent)
				log.Warningf("Ignoring invalid day range specification: %s. Check config", s)
				continue
			}
			ret[day] = true
		}
	}
	if _, ok := ret[7]; ok { // Cron allows either 0 or 7 for Sunday
		ret[0] = true
	}
	return ret
}
// IsInTimeSpan reports whether checkTime's clock time falls inside a
// "HH:MM-HH:MM [tz]" range. The optional trailing time zone may be an
// abbreviation (e.g. "CET") or a numeric offset; without one, UTC is assumed.
// Ranges crossing midnight (t1 > t2) are handled by extending t2 to next day.
// Malformed input is logged and treated as "not in span".
func IsInTimeSpan(checkTime time.Time, timeRange, metric, dbUnique string) bool {
	layout := "15:04"
	var t1, t2 time.Time
	var err error

	timeRange = strings.TrimSpace(timeRange)
	if len(timeRange) < 11 {
		log.Warningf("[%s][%s] invalid time range: %s. Check config", dbUnique, metric, timeRange)
		return false
	}
	s1 := timeRange[0:5]
	s2 := timeRange[6:11]
	tz := strings.TrimSpace(timeRange[11:])

	if len(tz) > 1 { // time zone specified
		if regexIsAlpha.MatchString(tz) {
			layout = "15:04 MST"
		} else {
			layout = "15:04 -0700"
		}
		t1, err = time.Parse(layout, s1+" "+tz)
		if err == nil {
			t2, err = time.Parse(layout, s2+" "+tz)
		}
	} else { // no time zone
		t1, err = time.Parse(layout, s1)
		if err == nil {
			t2, err = time.Parse(layout, s2)
		}
	}
	if err != nil {
		// fix: "Erorr" typo in the log message
		log.Warningf("[%s][%s] Ignoring invalid disabled time range: %s. Check config. Error: %v", dbUnique, metric, timeRange, err)
		return false
	}

	// fix: the "15:04" layout requires a zero-padded minute, but strconv.Itoa
	// produced e.g. "9:5" for 09:05, making parsing fail (and the whole check
	// silently return false) for the first 10 minutes of every hour
	check, err := time.Parse("15:04 -0700", fmt.Sprintf("%02d:%02d", checkTime.Hour(), checkTime.Minute())+" "+t1.Format("-0700")) // UTC by default
	if err != nil {
		log.Warningf("[%s][%s] Ignoring invalid disabled time range: %s. Check config. Error: %v", dbUnique, metric, timeRange, err)
		return false
	}

	if t1.After(t2) {
		t2 = t2.AddDate(0, 0, 1)
	}

	return check.Before(t2) && check.After(t1)
}
// IsInDisabledTimeDayRange reports whether localTime falls into any configured
// "disabled" window. Host-config per-metric windows take precedence; the
// metric-attribute level windows are only consulted when no host-config entry
// listed the metric at all. An entry is considered a match when both its day
// spec (or absence thereof) and one of its time ranges (or absence thereof)
// match the current moment.
func IsInDisabledTimeDayRange(localTime time.Time, metricAttrsDisabledDays string, metricAttrsDisabledTimes []string, hostConfigPerMetricDisabledTimes []HostConfigPerMetricDisabledTimes, metric, dbUnique string) bool {
	hostConfigMetricMatch := false
	for _, hcdi := range hostConfigPerMetricDisabledTimes { // host config takes precedence when both specified
		dayMatchFound := false
		timeMatchFound := false
		if IsStringInSlice(metric, hcdi.Metrics) {
			hostConfigMetricMatch = true
			if !dayMatchFound && hcdi.DisabledDays != "" && IsInDaySpan(localTime, hcdi.DisabledDays, metric, dbUnique) {
				dayMatchFound = true
			}
			for _, dt := range hcdi.DisabledTimes {
				if IsInTimeSpan(localTime, dt, metric, dbUnique) {
					timeMatchFound = true
					break
				}
			}
		}
		// NOTE(review): hostConfigMetricMatch stays true for all remaining loop
		// iterations once set, so a later entry that does NOT list this metric
		// but has empty DisabledTimes and DisabledDays would satisfy this
		// condition and return true — confirm whether a per-entry match flag
		// was intended here.
		if hostConfigMetricMatch && (timeMatchFound || len(hcdi.DisabledTimes) == 0) && (dayMatchFound || hcdi.DisabledDays == "") {
			//log.Debugf("[%s][%s] Host config ignored time/day match, skipping fetch", dbUnique, metric)
			return true
		}
	}

	if !hostConfigMetricMatch && (metricAttrsDisabledDays != "" || len(metricAttrsDisabledTimes) > 0) {
		dayMatchFound := IsInDaySpan(localTime, metricAttrsDisabledDays, metric, dbUnique)
		timeMatchFound := false
		for _, timeRange := range metricAttrsDisabledTimes {
			if IsInTimeSpan(localTime, timeRange, metric, dbUnique) {
				timeMatchFound = true
				break
			}
		}
		if (timeMatchFound || len(metricAttrsDisabledTimes) == 0) && (dayMatchFound || metricAttrsDisabledDays == "") {
			//log.Debugf("[%s][%s] MetricAttrs ignored time/day match, skipping fetch", dbUnique, metric)
			return true
		}
	}
	return false
}
// UpdateMetricDefinitionMap swaps in a freshly read metric definition map
// under the global write lock. The new map replaces the old one wholesale;
// no merging is done.
func UpdateMetricDefinitionMap(newMetrics map[string]map[decimal.Decimal]MetricVersionProperties) {
	metric_def_map_lock.Lock()
	metric_def_map = newMetrics
	metric_def_map_lock.Unlock()
	//log.Debug("metric_def_map:", metric_def_map)
	log.Debug("metrics definitions refreshed - nr. found:", len(newMetrics))
}
// ReadMetricDefinitionMapFromPostgres loads all active metric definitions
// (with their column and metric attributes) from the pgwatch2 config DB into a
// metric-name -> pg-version -> properties map. With failOnError the process
// exits on a query failure; otherwise the error is logged and the current
// global map is returned unchanged.
func ReadMetricDefinitionMapFromPostgres(failOnError bool) (map[string]map[decimal.Decimal]MetricVersionProperties, error) {
	metric_def_map_new := make(map[string]map[decimal.Decimal]MetricVersionProperties)
	// NOTE(review): m_column_attrs is selected twice with the same alias —
	// looks like a copy/paste leftover; harmless but worth cleaning up
	sql := `select /* pgwatch2_generated */ m_name, m_pg_version_from::text, m_sql, m_master_only, m_standby_only,
			  coalesce(m_column_attrs::text, '') as m_column_attrs, coalesce(m_column_attrs::text, '') as m_column_attrs,
			  coalesce(ma_metric_attrs::text, '') as ma_metric_attrs, m_sql_su
			from
			  pgwatch2.metric
			  left join
			  pgwatch2.metric_attribute on (ma_metric_name = m_name)
			where
			  m_is_active
		    order by
		      1, 2`

	log.Info("updating metrics definitons from ConfigDB...")
	data, err := DBExecRead(configDb, CONFIGDB_IDENT, sql)
	if err != nil {
		if failOnError {
			log.Fatal(err)
		} else {
			log.Error(err)
			// fall back to the currently active global map on read failure
			return metric_def_map, err
		}
	}
	if len(data) == 0 {
		log.Warning("no active metric definitions found from config DB")
		return metric_def_map_new, err
	}

	log.Debug(len(data), "active metrics found from config db (pgwatch2.metric)")
	for _, row := range data {
		_, ok := metric_def_map_new[row["m_name"].(string)]
		if !ok {
			metric_def_map_new[row["m_name"].(string)] = make(map[decimal.Decimal]MetricVersionProperties)
		}
		d, _ := decimal.NewFromString(row["m_pg_version_from"].(string))
		ca := MetricColumnAttrs{}
		if row["m_column_attrs"].(string) != "" {
			ca = ParseMetricColumnAttrsFromString(row["m_column_attrs"].(string))
		}
		ma := MetricAttrs{}
		if row["ma_metric_attrs"].(string) != "" {
			ma = ParseMetricAttrsFromString(row["ma_metric_attrs"].(string))
		}
		metric_def_map_new[row["m_name"].(string)][d] = MetricVersionProperties{
			Sql:         row["m_sql"].(string),
			SqlSU:       row["m_sql_su"].(string),
			MasterOnly:  row["m_master_only"].(bool),
			StandbyOnly: row["m_standby_only"].(bool),
			ColumnAttrs: ca,
			MetricAttrs: ma,
		}
	}
	return metric_def_map_new, err
}
// jsonTextToMap unmarshals a JSON object of numeric values (e.g. per-metric
// interval configs) into a map[string]float64. Empty input yields an empty
// map. Returns an error when the input is not a valid JSON object or when a
// value is not numeric — previously a non-numeric value caused a panic via an
// unchecked type assertion.
func jsonTextToMap(jsonText string) (map[string]float64, error) {
	retmap := make(map[string]float64)
	if jsonText == "" {
		return retmap, nil
	}
	var host_config map[string]interface{}
	if err := json.Unmarshal([]byte(jsonText), &host_config); err != nil {
		return nil, err
	}
	for k, v := range host_config {
		f, ok := v.(float64) // encoding/json decodes all JSON numbers as float64
		if !ok {
			return nil, fmt.Errorf("expected a numeric value for key %q, got %T", k, v)
		}
		retmap[k] = f
	}
	return retmap, nil
}
// jsonTextToStringMap unmarshals an arbitrary JSON object into a string map,
// rendering every value with fmt's default "%v" formatting. Empty input
// yields an empty map.
func jsonTextToStringMap(jsonText string) (map[string]string, error) {
	result := make(map[string]string)
	if jsonText == "" {
		return result, nil
	}
	var parsed map[string]interface{}
	if err := json.Unmarshal([]byte(jsonText), &parsed); err != nil {
		return nil, err
	}
	for key, val := range parsed {
		result[key] = fmt.Sprintf("%v", val)
	}
	return result, nil
}
// mapToJson serializes the given map with the standard library JSON encoder.
func mapToJson(metricsMap map[string]interface{}) ([]byte, error) {
	return json.Marshal(metricsMap)
}
// queryInfluxDB convenience function to query the database
// Runs cmd against the configured Influx database and returns the result set;
// both transport errors and errors embedded in the response are surfaced.
func queryInfluxDB(clnt client.Client, cmd string) (res []client.Result, err error) {
	q := client.Query{
		Command:  cmd,
		Database: opts.InfluxDbname,
	}
	response, err := clnt.Query(q)
	if err != nil {
		return res, err
	}
	if respErr := response.Error(); respErr != nil {
		return res, respErr
	}
	res = response.Results
	return res, nil
}
// InitAndTestInfluxConnection builds an HTTP(S) InfluxDB client from the given
// connection parameters, verifies connectivity (retrying "SHOW DATABASES" up
// to 3 times, 5s apart), creates the target database with the requested
// retention policy if missing, and aligns an existing database's retention
// policy with RetentionPeriod (days) when it differs. Returns the connect
// string used, for logging/reuse by the caller.
func InitAndTestInfluxConnection(HostId, InfluxHost, InfluxPort, InfluxDbname, InfluxUser, InfluxPassword, InfluxSSL, SkipSSLCertVerify string, RetentionPeriod int64) (string, error) {
	log.Infof("Testing Influx connection to host %s: %s, port: %s, DB: %s", HostId, InfluxHost, InfluxPort, InfluxDbname)
	var connect_string string
	var pgwatchDbExists bool = false
	skipSSLCertVerify, _ := strconv.ParseBool(SkipSSLCertVerify) // defaults to false on parse error
	retries := 3

	if b, _ := strconv.ParseBool(InfluxSSL); b {
		connect_string = fmt.Sprintf("https://%s:%s", InfluxHost, InfluxPort)
	} else {
		connect_string = fmt.Sprintf("http://%s:%s", InfluxHost, InfluxPort)
	}

	// Make client
	c, err := client.NewHTTPClient(client.HTTPConfig{
		Addr:               connect_string,
		Username:           InfluxUser,
		Password:           InfluxPassword,
		InsecureSkipVerify: skipSSLCertVerify,
	})
	if err != nil {
		log.Fatal("Getting Influx client failed", err)
	}

retry:
	res, err := queryInfluxDB(c, "SHOW DATABASES")

	if err != nil {
		if retries > 0 {
			log.Error("SHOW DATABASES failed, retrying in 5s (max 3x)...", err)
			time.Sleep(time.Second * 5)
			retries = retries - 1
			goto retry
		} else {
			return connect_string, err
		}
	}

	// NOTE(review): assumes SHOW DATABASES always returns at least one series —
	// res[0].Series[0] would panic on an empty response; confirm against the
	// Influx client's guarantees
	for _, db_arr := range res[0].Series[0].Values {
		log.Debug("Found db:", db_arr[0])
		if InfluxDbname == db_arr[0] {
			log.Info(fmt.Sprintf("Database '%s' existing", InfluxDbname))
			pgwatchDbExists = true
			break
		}
	}
	if pgwatchDbExists && RetentionPeriod > 0 {
		var currentRetentionAsString string
		// get current retention period
		res, err := queryInfluxDB(c, fmt.Sprintf("SHOW RETENTION POLICIES ON %s", InfluxDbname))
		if err != nil {
			log.Errorf("Could not check Influx retention policies: %v", err)
			return connect_string, err
		}
		for _, rp := range res[0].Series[0].Values {
			log.Debugf("Found retention policy: %+v", rp)
			if opts.InfluxRetentionName == rp[0].(string) {
				// duration is represented as "720h0m0s" so construct similar string from --iretentiondays input
				currentRetentionAsString = rp[1].(string)
				break
			}
		}
		targetRetentionAsString := fmt.Sprintf("%dh0m0s", RetentionPeriod*24)
		if currentRetentionAsString != targetRetentionAsString {
			log.Warningf("InfluxDB retention policy change detected, changing from %s to %s ...", currentRetentionAsString, targetRetentionAsString)
			isql := fmt.Sprintf("ALTER RETENTION POLICY %s ON %s DURATION %dd REPLICATION 1 SHARD DURATION 1d", opts.InfluxRetentionName, InfluxDbname, RetentionPeriod)
			log.Warningf("Executing: %s", isql)
			_, err = queryInfluxDB(c, isql)
			if err != nil {
				// fix: "manul" typo in the log message
				log.Errorf("Could not change InfluxDB retention policy - manual review / correction recommended: %v", err)
			}
		}
		return connect_string, nil
	} else if !pgwatchDbExists {
		log.Warningf("Database '%s' not found! Creating with %d days retention and retention policy name \"%s\"...", InfluxDbname, RetentionPeriod, opts.InfluxRetentionName)
		isql := fmt.Sprintf("CREATE DATABASE %s WITH DURATION %dd REPLICATION 1 SHARD DURATION 1d NAME %s", InfluxDbname, RetentionPeriod, opts.InfluxRetentionName)
		_, err = queryInfluxDB(c, isql)
		if err != nil {
			log.Fatal(err)
		} else {
			log.Infof("Database 'pgwatch2' created on InfluxDB host %s:%s", InfluxHost, InfluxPort)
		}
	}

	return connect_string, nil
}
// DoesFunctionExists checks pg_proc on the monitored DB for a public-schema
// function with the given name. NB! functionName is interpolated into the SQL
// unescaped, so only trusted, config-originated names must be passed in.
func DoesFunctionExists(dbUnique, functionName string) bool {
	log.Debug("Checking for function existence", dbUnique, functionName)
	sql := fmt.Sprintf("select /* pgwatch2_generated */ 1 from pg_proc join pg_namespace n on pronamespace = n.oid where proname = '%s' and n.nspname = 'public'", functionName)
	data, err, _ := DBExecReadByDbUniqueName(dbUnique, "", useConnPooling, 0, sql)
	if err != nil {
		log.Error("Failed to check for function existence", dbUnique, functionName, err)
		return false
	}
	if len(data) == 0 {
		return false
	}
	log.Debugf("Function %s exists on %s", functionName, dbUnique)
	return true
}
// Called once on daemon startup to try to create "metric fething helper" functions automatically
// Helper definitions come either from the metrics folder (file-based mode) or
// from the pgwatch2.metric config table. Windows-specific helpers are skipped
// (manual rollout required); already-existing functions are left untouched.
// Individual rollout failures are logged but do not abort the loop.
func TryCreateMetricsFetchingHelpers(dbUnique string) error {
	db_pg_version, err := DBGetPGVersion(dbUnique, DBTYPE_PG, false)
	if err != nil {
		log.Errorf("Failed to fetch pg version for \"%s\": %s", dbUnique, err)
		return err
	}

	if fileBased {
		helpers, err := ReadMetricsFromFolder(path.Join(opts.MetricsFolder, FILE_BASED_METRIC_HELPERS_DIR), false)
		if err != nil {
			log.Errorf("Failed to fetch helpers from \"%s\": %s", path.Join(opts.MetricsFolder, FILE_BASED_METRIC_HELPERS_DIR), err)
			return err
		}
		// fix: was log.Debug with a format string, so the %d/%s were never substituted
		log.Debugf("%d helper definitions found from \"%s\"...", len(helpers), path.Join(opts.MetricsFolder, FILE_BASED_METRIC_HELPERS_DIR))

		for helperName := range helpers {
			if strings.Contains(helperName, "windows") {
				log.Infof("Skipping %s rollout. Windows helpers need to be rolled out manually", helperName)
				continue
			}
			if !DoesFunctionExists(dbUnique, helperName) {

				log.Debug("Trying to create metric fetching helpers for", dbUnique, helperName)
				mvp, err := GetMetricVersionProperties(helperName, db_pg_version, helpers)
				if err != nil {
					log.Warning("Could not find query text for", dbUnique, helperName)
					continue
				}
				_, err, _ = DBExecReadByDbUniqueName(dbUnique, "", useConnPooling, 0, mvp.Sql)
				if err != nil {
					log.Warning("Failed to create a metric fetching helper for", dbUnique, helperName)
					log.Warning(err)
				} else {
					log.Info("Successfully created metric fetching helper for", dbUnique, helperName)
				}
			}
		}

	} else {
		sql_helpers := "select /* pgwatch2_generated */ distinct m_name from pgwatch2.metric where m_is_active and m_is_helper" // m_name is a helper function name
		data, err := DBExecRead(configDb, CONFIGDB_IDENT, sql_helpers)
		if err != nil {
			log.Error(err)
			return err
		}

		for _, row := range data {
			metric := row["m_name"].(string)

			if strings.Contains(metric, "windows") {
				log.Infof("Skipping %s rollout. Windows helpers need to be rolled out manually", metric)
				continue
			}
			if !DoesFunctionExists(dbUnique, metric) {

				log.Debug("Trying to create metric fetching helpers for", dbUnique, metric)
				mvp, err := GetMetricVersionProperties(metric, db_pg_version, nil)
				if err != nil {
					log.Warning("Could not find query text for", dbUnique, metric)
					continue
				}
				_, err, _ = DBExecReadByDbUniqueName(dbUnique, "", true, 0, mvp.Sql)
				if err != nil {
					log.Warning("Failed to create a metric fetching helper for", dbUnique, metric)
					log.Warning(err)
				} else {
					// fix: success was logged at Warning level here, while the
					// file-based branch above uses Info — made consistent
					log.Info("Successfully created metric fetching helper for", dbUnique, metric)
				}
			}
		}
	}
	return nil
}
// Expects "preset metrics" definition file named preset-config.yaml to be present in provided --metrics folder
func ReadPresetMetricsConfigFromFolder(folder string, failOnError bool) (map[string]map[string]float64, error) {
pmm := make(map[string]map[string]float64)
log.Infof("Reading preset metric config from path %s ...", path.Join(folder, PRESET_CONFIG_YAML_FILE))
preset_metrics, err := ioutil.ReadFile(path.Join(folder, PRESET_CONFIG_YAML_FILE))
if err != nil {
log.Errorf("Failed to read preset metric config definition at: %s", folder)
return pmm, err
}
pcs := make([]PresetConfig, 0)
err = yaml.Unmarshal(preset_metrics, &pcs)
if err != nil {
log.Errorf("Unmarshaling error reading preset metric config: %v", err)
return pmm, err
}
for _, pc := range pcs {
pmm[pc.Name] = pc.Metrics
}
log.Infof("%d preset metric definitions found", len(pcs))
return pmm, err
}
// ParseMetricColumnAttrsFromYAML reads and parses the column attributes YAML file at yamlPath.
// On read or parse failure the error is only logged and a zero-valued MetricColumnAttrs is returned.
func ParseMetricColumnAttrsFromYAML(yamlPath string) MetricColumnAttrs {
	c := MetricColumnAttrs{}
	yamlFile, err := ioutil.ReadFile(yamlPath)
	if err != nil {
		// fix: was logging yamlFile (the nil byte slice) instead of the actual path
		log.Errorf("Error reading file %s: %s", yamlPath, err)
		return c
	}
	err = yaml.Unmarshal(yamlFile, &c)
	if err != nil {
		log.Errorf("Unmarshaling error: %v", err)
	}
	return c
}
// ParseMetricAttrsFromYAML reads and parses the metric attributes YAML file at yamlPath.
// On read or parse failure the error is only logged and a zero-valued MetricAttrs is returned.
func ParseMetricAttrsFromYAML(yamlPath string) MetricAttrs {
	c := MetricAttrs{}
	yamlFile, err := ioutil.ReadFile(yamlPath)
	if err != nil {
		// fix: was logging yamlFile (the nil byte slice) instead of the actual path
		log.Errorf("Error reading file %s: %s", yamlPath, err)
		return c
	}
	err = yaml.Unmarshal(yamlFile, &c)
	if err != nil {
		log.Errorf("Unmarshaling error: %v", err)
	}
	return c
}
// ParseMetricColumnAttrsFromString parses an inline YAML/JSON attributes snippet into
// MetricColumnAttrs. Parse errors are logged and a zero-valued struct is returned.
func ParseMetricColumnAttrsFromString(jsonAttrs string) MetricColumnAttrs {
	var attrs MetricColumnAttrs
	if err := yaml.Unmarshal([]byte(jsonAttrs), &attrs); err != nil {
		log.Errorf("Unmarshaling error: %v", err)
	}
	return attrs
}
// ParseMetricAttrsFromString parses an inline YAML/JSON attributes snippet into
// MetricAttrs. Parse errors are logged and a zero-valued struct is returned.
func ParseMetricAttrsFromString(jsonAttrs string) MetricAttrs {
	var attrs MetricAttrs
	if err := yaml.Unmarshal([]byte(jsonAttrs), &attrs); err != nil {
		log.Errorf("Unmarshaling error: %v", err)
	}
	return attrs
}
// expected is following structure: metric_name/pg_ver/metric(_master|standby).sql
//
// ReadMetricsFromFolder scans "folder" and builds a lookup of
// metric name -> (minimum Postgres version -> SQL text plus optional column/metric
// attributes read from column_attrs.yaml / metric_attrs.yaml).
// failOnError aborts the process only when the top-level folder itself cannot be read;
// deeper problems are logged and the entry skipped (or the partial map returned).
func ReadMetricsFromFolder(folder string, failOnError bool) (map[string]map[decimal.Decimal]MetricVersionProperties, error) {
	metrics_map := make(map[string]map[decimal.Decimal]MetricVersionProperties)
	rIsDigitOrPunctuation := regexp.MustCompile(`^[\d\.]+$`) // version sub-folders: digits and dots only
	metricNamePattern := `^[a-z0-9_\.]+$`
	rMetricNameFilter := regexp.MustCompile(metricNamePattern)
	log.Infof("Searching for metrics from path %s ...", folder)
	metric_folders, err := ioutil.ReadDir(folder)
	if err != nil {
		if failOnError {
			log.Fatalf("Could not read path %s: %s", folder, err)
		}
		log.Error(err)
		return metrics_map, err
	}
	for _, f := range metric_folders {
		if f.IsDir() {
			if f.Name() == FILE_BASED_METRIC_HELPERS_DIR {
				continue // helpers are pulled in when needed
			}
			if !rMetricNameFilter.MatchString(f.Name()) {
				log.Warningf("Ignoring metric '%s' as name not fitting pattern: %s", f.Name(), metricNamePattern)
				continue
			}
			//log.Debugf("Processing metric: %s", f.Name())
			pgVers, err := ioutil.ReadDir(path.Join(folder, f.Name()))
			if err != nil {
				log.Error(err)
				return metrics_map, err
			}
			// optional metric-level attributes applying to all versions of this metric
			var metricAttrs MetricAttrs
			if _, err = os.Stat(path.Join(folder, f.Name(), "metric_attrs.yaml")); err == nil {
				metricAttrs = ParseMetricAttrsFromYAML(path.Join(folder, f.Name(), "metric_attrs.yaml"))
				//log.Debugf("Discovered following metric attributes for metric %s: %v", f.Name(), metricAttrs)
			}
			// optional column-level attributes shared by all versions of this metric
			var metricColumnAttrs MetricColumnAttrs
			if _, err = os.Stat(path.Join(folder, f.Name(), "column_attrs.yaml")); err == nil {
				metricColumnAttrs = ParseMetricColumnAttrsFromYAML(path.Join(folder, f.Name(), "column_attrs.yaml"))
				//log.Debugf("Discovered following column attributes for metric %s: %v", f.Name(), metricColumnAttrs)
			}
			for _, pgVer := range pgVers {
				// skip docs and the attribute files handled above
				if strings.HasSuffix(pgVer.Name(), ".md") || pgVer.Name() == "column_attrs.yaml" || pgVer.Name() == "metric_attrs.yaml" {
					continue
				}
				if !rIsDigitOrPunctuation.MatchString(pgVer.Name()) {
					log.Warningf("Invalid metric structure - version folder names should consist of only numerics/dots, found: %s", pgVer.Name())
					continue
				}
				d, err := decimal.NewFromString(pgVer.Name())
				if err != nil {
					log.Errorf("Could not parse \"%s\" to Decimal: %s", pgVer.Name(), err)
					continue
				}
				//log.Debugf("Found %s", pgVer.Name())
				metricDefs, err := ioutil.ReadDir(path.Join(folder, f.Name(), pgVer.Name()))
				if err != nil {
					log.Error(err)
					continue
				}
				foundMetricDefFiles := make(map[string]bool) // to warn on accidental duplicates
				for _, md := range metricDefs {
					if strings.HasPrefix(md.Name(), "metric") && strings.HasSuffix(md.Name(), ".sql") {
						p := path.Join(folder, f.Name(), pgVer.Name(), md.Name())
						metric_sql, err := ioutil.ReadFile(p)
						if err != nil {
							log.Errorf("Failed to read metric definition at: %s", p)
							continue
						}
						_, exists := foundMetricDefFiles[md.Name()]
						if exists {
							log.Warningf("Multiple definitions found for metric [%s:%s], using the last one (%s)...", f.Name(), pgVer.Name(), md.Name())
						}
						foundMetricDefFiles[md.Name()] = true
						//log.Debugf("Metric definition for \"%s\" ver %s: %s", f.Name(), pgVer.Name(), metric_sql)
						// NOTE(review): when the outer map entry is first created below, mvpVer stays nil;
						// the following read from the nil map is safe in Go and yields the zero value
						mvpVer, ok := metrics_map[f.Name()]
						var mvp MetricVersionProperties
						if !ok {
							metrics_map[f.Name()] = make(map[decimal.Decimal]MetricVersionProperties)
						}
						mvp, ok = mvpVer[d]
						if !ok {
							mvp = MetricVersionProperties{Sql: string(metric_sql[:]), ColumnAttrs: metricColumnAttrs, MetricAttrs: metricAttrs}
						}
						// file name suffixes toggle role-specific variants of the same version
						if strings.Contains(md.Name(), "_master") {
							mvp.MasterOnly = true
						}
						if strings.Contains(md.Name(), "_standby") {
							mvp.StandbyOnly = true
						}
						if strings.Contains(md.Name(), "_su") {
							mvp.SqlSU = string(metric_sql[:])
						}
						metrics_map[f.Name()][d] = mvp
					}
				}
			}
		}
	}
	return metrics_map, nil
}
// ConfigFileToMonitoredDatabases parses one YAML config file (with environment variables
// expanded) into the list of enabled MonitoredDatabase entries, applying default values
// for missing Group / StmtTimeout attributes.
func ConfigFileToMonitoredDatabases(configFilePath string) ([]MonitoredDatabase, error) {
	hostList := make([]MonitoredDatabase, 0)
	log.Debugf("Converting monitoring YAML config to MonitoredDatabase from path %s ...", configFilePath)
	yamlFile, err := ioutil.ReadFile(configFilePath)
	if err != nil {
		log.Errorf("Error reading file %s: %s", configFilePath, err)
		return hostList, err
	}
	// TODO check mod timestamp or hash, from a global "caching map"
	var entries []MonitoredDatabase // a single file may hold multiple configs
	expanded := []byte(os.ExpandEnv(string(yamlFile)))
	if err = yaml.Unmarshal(expanded, &entries); err != nil {
		log.Errorf("Unmarshaling error: %v", err)
		return hostList, err
	}
	for _, entry := range entries {
		if !entry.IsEnabled {
			continue
		}
		log.Debugf("Found active monitoring config entry: %#v", entry)
		if entry.Group == "" {
			entry.Group = "default"
		}
		if entry.StmtTimeout == 0 {
			entry.StmtTimeout = 5 // NOTE(review): presumably seconds (statement_timeout) — confirm against usage
		}
		hostList = append(hostList, entry)
	}
	if len(hostList) == 0 {
		log.Warningf("Could not find any valid monitoring configs from file: %s", configFilePath)
	}
	return hostList, nil
}
// reads through the YAML files containing descriptions on which hosts to monitor
func ReadMonitoringConfigFromFileOrFolder(fileOrFolder string) ([]MonitoredDatabase, error) {
	hostList := make([]MonitoredDatabase, 0)
	fi, err := os.Stat(fileOrFolder)
	if err != nil {
		log.Errorf("Could not Stat() path: %s", fileOrFolder)
		return hostList, err
	}
	switch {
	case fi.Mode().IsRegular():
		// single config file given directly
		return ConfigFileToMonitoredDatabases(fileOrFolder)
	case fi.Mode().IsDir():
		log.Infof("Reading monitoring config from path %s ...", fileOrFolder)
		walkErr := filepath.Walk(fileOrFolder, func(fullPath string, info os.FileInfo, walkErr error) error {
			if walkErr != nil {
				return walkErr // abort on first failure
			}
			lowerName := strings.ToLower(info.Name())
			if info.Mode().IsRegular() && (strings.HasSuffix(lowerName, ".yaml") || strings.HasSuffix(lowerName, ".yml")) {
				log.Debug("Found YAML config file:", info.Name())
				if mdbs, cfgErr := ConfigFileToMonitoredDatabases(fullPath); cfgErr == nil {
					hostList = append(hostList, mdbs...)
				}
			}
			return nil
		})
		if walkErr != nil {
			log.Errorf("Could not successfully Walk() path %s: %s", fileOrFolder, walkErr)
			return hostList, walkErr
		}
	}
	return hostList, nil
}
// "resolving" reads all the DB names from the given host/port, additionally matching/not matching specified regex patterns
func ResolveDatabasesFromConfigEntry(ce MonitoredDatabase) ([]MonitoredDatabase, error) {
var c *sqlx.DB
var err error
md := make([]MonitoredDatabase, 0)
// some cloud providers limit access to template1 for some reason, so try with postgres and defaultdb (Aiven)
templateDBsToTry := []string{"template1", "postgres", "defaultdb"}
for _, templateDB := range templateDBsToTry {
c, err = GetPostgresDBConnection(ce.LibPQConnStr, ce.Host, ce.Port, templateDB, ce.User, ce.Password,
ce.SslMode, ce.SslRootCAPath, ce.SslClientCertPath, ce.SslClientKeyPath)
if err != nil {
return md, err
}
err = c.Ping()
if err == nil {
break
} else {
c.Close()
}
}
if err != nil {
return md, fmt.Errorf("Failed to connect to any of the template DBs: %v", templateDBsToTry)
}
defer c.Close()
sql := `select /* pgwatch2_generated */ datname::text as datname,
quote_ident(datname)::text as datname_escaped
from pg_database
where not datistemplate
and datallowconn
and has_database_privilege (datname, 'CONNECT')
and case when length(trim($1)) > 0 then datname ~ $2 else true end
and case when length(trim($3)) > 0 then not datname ~ $4 else true end`
data, err := DBExecRead(c, ce.DBUniqueName, sql, ce.DBNameIncludePattern, ce.DBNameIncludePattern, ce.DBNameExcludePattern, ce.DBNameExcludePattern)
if err != nil {
return md, err
}
for _, d := range data {
md = append(md, MonitoredDatabase{
DBUniqueName: ce.DBUniqueName + "_" + d["datname_escaped"].(string),
DBUniqueNameOrig: ce.DBUniqueName,
DBName: d["datname"].(string),
Host: ce.Host,
Port: ce.Port,
User: ce.User,
Password: ce.Password,
PasswordType: ce.PasswordType,
SslMode: ce.SslMode,
SslRootCAPath: ce.SslRootCAPath,
SslClientCertPath: ce.SslClientCertPath,
SslClientKeyPath: ce.SslClientKeyPath,
StmtTimeout: ce.StmtTimeout,
Metrics: ce.Metrics,
MetricsStandby: ce.MetricsStandby,
PresetMetrics: ce.PresetMetrics,
PresetMetricsStandby: ce.PresetMetricsStandby,
IsSuperuser: ce.IsSuperuser,
CustomTags: ce.CustomTags,
HostConfig: ce.HostConfig,
OnlyIfMaster: ce.OnlyIfMaster,
DBType: ce.DBType})
}
return md, err
}
// Resolves regexes if exact DBs were not specified exact
//
// Expands each raw config entry into concrete monitored DBs: substitutes preset metric
// definitions, decrypts AES-GCM passwords when a keyphrase is configured, and resolves
// "continuous discovery" / Patroni entries into their member databases.
// Reads package globals preset_metric_def_map, dbTypeMap/dbTypes and opts.
func GetMonitoredDatabasesFromMonitoringConfig(mc []MonitoredDatabase) []MonitoredDatabase {
	md := make([]MonitoredDatabase, 0)
	if len(mc) == 0 {
		return md
	}
	for _, e := range mc {
		//log.Debugf("Processing config item: %#v", e)
		if e.Metrics == nil && len(e.PresetMetrics) > 0 {
			// replace the preset name with its concrete metric definition map
			mdef, ok := preset_metric_def_map[e.PresetMetrics]
			if !ok {
				log.Errorf("Failed to resolve preset config \"%s\" for \"%s\"", e.PresetMetrics, e.DBUniqueName)
				continue
			}
			e.Metrics = mdef
		}
		if _, ok := dbTypeMap[e.DBType]; !ok {
			log.Warningf("Ignoring host \"%s\" - unknown dbtype: %s. Expected one of: %+v", e.DBUniqueName, e.DBType, dbTypes)
			continue
		}
		if e.IsEnabled && e.PasswordType == "aes-gcm-256" && opts.AesGcmKeyphrase != "" {
			e.Password = decrypt(e.DBUniqueName, opts.AesGcmKeyphrase, e.Password)
		}
		if e.DBType == DBTYPE_PATRONI && e.DBName == "" {
			log.Warningf("Ignoring host \"%s\" as \"dbname\" attribute not specified but required by dbtype=patroni", e.DBUniqueName)
			continue
		}
		if e.DBType == DBTYPE_PG && e.DBName == "" {
			log.Warningf("Ignoring host \"%s\" as \"dbname\" attribute not specified but required by dbtype=postgres", e.DBUniqueName)
			continue
		}
		// entries without a fixed DB name, and all "continuous"/Patroni types, are expanded to every matching DB
		if len(e.DBName) == 0 || e.DBType == DBTYPE_PG_CONT || e.DBType == DBTYPE_PATRONI || e.DBType == DBTYPE_PATRONI_CONT || e.DBType == DBTYPE_PATRONI_NAMESPACE_DISCOVERY {
			if e.DBType == DBTYPE_PG_CONT {
				log.Debugf("Adding \"%s\" (host=%s, port=%s) to continuous monitoring ...", e.DBUniqueName, e.Host, e.Port)
			}
			var found_dbs []MonitoredDatabase
			var err error
			if e.DBType == DBTYPE_PATRONI || e.DBType == DBTYPE_PATRONI_CONT || e.DBType == DBTYPE_PATRONI_NAMESPACE_DISCOVERY {
				found_dbs, err = ResolveDatabasesFromPatroni(e)
			} else {
				found_dbs, err = ResolveDatabasesFromConfigEntry(e)
			}
			if err != nil {
				log.Errorf("Failed to resolve DBs for \"%s\": %s", e.DBUniqueName, err)
				continue
			}
			temp_arr := make([]string, 0) // resolved DB names, only for the debug log below
			for _, r := range found_dbs {
				md = append(md, r)
				temp_arr = append(temp_arr, r.DBName)
			}
			log.Debugf("Resolved %d DBs with prefix \"%s\": [%s]", len(found_dbs), e.DBUniqueName, strings.Join(temp_arr, ", "))
		} else {
			md = append(md, e)
		}
	}
	return md
}
// StatsServerHandler serves a JSON snapshot of the gatherer's internal statistics.
// All counters are read atomically from package-level variables.
func StatsServerHandler(w http.ResponseWriter, req *http.Request) {
	jsonResponseTemplate := `
{
"secondsFromLastSuccessfulDatastoreWrite": %d,
"totalMetricsFetchedCounter": %d,
"totalMetricsReusedFromCacheCounter": %d,
"totalDatasetsFetchedCounter": %d,
"metricPointsPerMinuteLast5MinAvg": %v,
"metricsDropped": %d,
"totalMetricFetchFailuresCounter": %d,
"datastoreWriteFailuresCounter": %d,
"datastoreSuccessfulWritesCounter": %d,
"datastoreAvgSuccessfulWriteTimeMillis": %.1f,
"gathererUptimeSeconds": %d
}
`
	now := time.Now()
	secondsFromLastSuccessfulDatastoreWrite := atomic.LoadInt64(&lastSuccessfulDatastoreWriteTimeEpoch)
	totalMetrics := atomic.LoadUint64(&totalMetricsFetchedCounter)
	cacheMetrics := atomic.LoadUint64(&totalMetricsReusedFromCacheCounter)
	totalDatasets := atomic.LoadUint64(&totalDatasetsFetchedCounter)
	metricsDropped := atomic.LoadUint64(&totalMetricsDroppedCounter)
	metricFetchFailuresCounter := atomic.LoadUint64(&totalMetricFetchFailuresCounter)
	datastoreFailures := atomic.LoadUint64(&datastoreWriteFailuresCounter)
	datastoreSuccess := atomic.LoadUint64(&datastoreWriteSuccessCounter)
	datastoreTotalTimeMicros := atomic.LoadUint64(&datastoreTotalWriteTimeMicroseconds) // successful writes only
	gathererUptimeSeconds := uint64(now.Sub(gathererStartTime).Seconds())
	// fix: guard division by zero before the first successful write — would have
	// printed "NaN", producing invalid JSON for consumers
	var datastoreAvgSuccessfulWriteTimeMillis float64
	if datastoreSuccess > 0 {
		datastoreAvgSuccessfulWriteTimeMillis = float64(datastoreTotalTimeMicros) / float64(datastoreSuccess) / 1000.0
	}
	metricPointsPerMinute := atomic.LoadInt64(&metricPointsPerMinuteLast5MinAvg)
	if metricPointsPerMinute == -1 { // calculate avg. on the fly if 1st summarization hasn't happened yet
		// fix: guard integer division by zero when the handler is hit within the gatherer's first second
		if gathererUptimeSeconds > 0 {
			metricPointsPerMinute = int64((totalMetrics * 60) / gathererUptimeSeconds)
		} else {
			metricPointsPerMinute = 0
		}
	}
	_, _ = io.WriteString(w, fmt.Sprintf(jsonResponseTemplate, time.Now().Unix()-secondsFromLastSuccessfulDatastoreWrite, totalMetrics, cacheMetrics, totalDatasets, metricPointsPerMinute, metricsDropped, metricFetchFailuresCounter, datastoreFailures, datastoreSuccess, datastoreAvgSuccessfulWriteTimeMillis, gathererUptimeSeconds))
}
// StartStatsServer runs the internal statistics HTTP endpoint on the given port,
// retrying (after a pause) if the listener ever returns. Never returns itself;
// intended to run as a goroutine.
func StartStatsServer(port int64) {
	http.HandleFunc("/", StatsServerHandler)
	for {
		// fix: Errorf was called with an extra value but no format verb for it
		// ("go vet": missing argument), garbling the logged error
		log.Errorf("Failure in StatsServerHandler: %v", http.ListenAndServe(fmt.Sprintf(":%d", port), nil))
		time.Sleep(time.Second * 60)
	}
}
// Calculates 1min avg metric fetching statistics for last 5min for StatsServerHandler to display
func StatsSummarizer() {
	var prevCount uint64
	lastTick := gathererStartTime
	ticker := time.NewTicker(time.Minute * 5)
	for tickTime := range ticker.C {
		curCount := atomic.LoadUint64(&totalMetricsFetchedCounter)
		elapsedSeconds := tickTime.Sub(lastTick).Seconds()
		// metrics fetched since the previous tick, scaled to a per-minute rate
		perMinute := math.Round(float64(curCount-prevCount) * 60 / elapsedSeconds)
		atomic.StoreInt64(&metricPointsPerMinuteLast5MinAvg, int64(perMinute))
		prevCount = curCount
		lastTick = tickTime
	}
}
// FilterMonitoredDatabasesByGroup keeps only the entries whose Group matches one of
// the comma-separated group names. Returns the filtered list and the count of
// entries that were dropped.
func FilterMonitoredDatabasesByGroup(monitoredDBs []MonitoredDatabase, group string) ([]MonitoredDatabase, int) {
	wantedGroups := strings.Split(group, ",")
	kept := make([]MonitoredDatabase, 0)
	for _, mdb := range monitoredDBs {
		for _, wanted := range wantedGroups {
			if mdb.Group == wanted {
				kept = append(kept, mdb)
				break
			}
		}
	}
	return kept, len(monitoredDBs) - len(kept)
}
// encrypt AES-GCM-256-encrypts plaintext with a key derived from the passphrase,
// returning "salthex-noncehex-cipherhex". Called when --password-to-encrypt set.
func encrypt(passphrase, plaintext string) string {
	key, salt := deriveKey(passphrase, nil)
	nonce := make([]byte, 12) // 96-bit nonce, the GCM standard size
	_, _ = rand.Read(nonce)
	blockCipher, _ := aes.NewCipher(key)
	gcm, _ := cipher.NewGCM(blockCipher)
	sealed := gcm.Seal(nil, nonce, []byte(plaintext), nil)
	parts := []string{hex.EncodeToString(salt), hex.EncodeToString(nonce), hex.EncodeToString(sealed)}
	return strings.Join(parts, "-")
}
// deriveKey derives a 32-byte AES key from the passphrase via PBKDF2-SHA256
// (1000 iterations). A nil salt means "generate a fresh random 8-byte salt";
// the key and the salt actually used are both returned.
func deriveKey(passphrase string, salt []byte) ([]byte, []byte) {
	if salt == nil {
		salt = make([]byte, 8)
		_, _ = rand.Read(salt)
	}
	key := pbkdf2.Key([]byte(passphrase), salt, 1000, 32, sha256.New)
	return key, salt
}
// decrypt reverses encrypt(): expects "salthex-ivhex-cipherhex" input.
// Malformed (non 3-part) input is returned as-is; on decryption/authentication
// failure an empty string is returned, as before, but the error is now logged.
func decrypt(dbUnique, passphrase, ciphertext string) string {
	arr := strings.Split(ciphertext, "-")
	if len(arr) != 3 {
		log.Warningf("Aes-gcm-256 encrypted password for \"%s\" should consist of 3 parts - using 'as is'", dbUnique)
		return ciphertext
	}
	salt, _ := hex.DecodeString(arr[0])
	iv, _ := hex.DecodeString(arr[1])
	data, _ := hex.DecodeString(arr[2])
	key, _ := deriveKey(passphrase, salt)
	b, _ := aes.NewCipher(key)
	aesgcm, _ := cipher.NewGCM(b)
	data, err := aesgcm.Open(nil, iv, data, nil)
	if err != nil {
		// fix: previously swallowed — e.g. a wrong keyphrase silently yielded an empty password
		log.Warningf("Failed to decrypt password for \"%s\" (wrong keyphrase?): %v", dbUnique, err)
	}
	//log.Debug("decoded", string(data))
	return string(data)
}
// SyncMonitoredDBsToDatastore pushes one synthetic metric entry per monitored DB to the
// persistence channel so the datastore knows the currently active hosts and their tags.
func SyncMonitoredDBsToDatastore(monitored_dbs []MonitoredDatabase, persistence_channel chan []MetricStoreMessage) {
	if len(monitored_dbs) == 0 {
		return
	}
	// fix: was make([]MetricStoreMessage, len(...)) followed by append, which prepended
	// len(...) zero-value messages to every sync batch; allocate capacity only
	msms := make([]MetricStoreMessage, 0, len(monitored_dbs))
	now := time.Now()
	for _, mdb := range monitored_dbs {
		var db = make(map[string]interface{})
		db["tag_group"] = mdb.Group
		db["master_only"] = mdb.OnlyIfMaster
		db["epoch_ns"] = now.UnixNano()
		db["continuous_discovery_prefix"] = mdb.DBUniqueNameOrig
		for k, v := range mdb.CustomTags {
			db["tag_"+k] = v
		}
		var data = [](map[string]interface{}){db}
		msms = append(msms, MetricStoreMessage{DBUniqueName: mdb.DBUniqueName, MetricName: MONITORED_DBS_DATASTORE_SYNC_METRIC_NAME,
			Data: data})
	}
	persistence_channel <- msms
}
// Options holds every command line flag / environment variable understood by the gatherer.
// Parsed by go-flags in main(); struct tags define flag names, defaults and env mappings.
type Options struct {
	// Slice of bool will append 'true' each time the option
	// is encountered (can be set multiple times, like -vvv)
	Verbose string `short:"v" long:"verbose" description:"Chat level [DEBUG|INFO|WARN]. Default: WARN" env:"PW2_VERBOSE"`
	// --- config DB (Postgres) connection ---
	Host         string `long:"host" description:"PG config DB host" default:"localhost" env:"PW2_PGHOST"`
	Port         string `short:"p" long:"port" description:"PG config DB port" default:"5432" env:"PW2_PGPORT"`
	Dbname       string `short:"d" long:"dbname" description:"PG config DB dbname" default:"pgwatch2" env:"PW2_PGDATABASE"`
	User         string `short:"u" long:"user" description:"PG config DB user" default:"pgwatch2" env:"PW2_PGUSER"`
	Password     string `long:"password" description:"PG config DB password" env:"PW2_PGPASSWORD"`
	PgRequireSSL string `long:"pg-require-ssl" description:"PG config DB SSL connection only" default:"false" env:"PW2_PGSSL"`
	Group        string `short:"g" long:"group" description:"Group (or groups, comma separated) for filtering which DBs to monitor. By default all are monitored" env:"PW2_GROUP"`
	// --- datastore selection and per-datastore settings ---
	Datastore            string `long:"datastore" description:"[influx|postgres|prometheus|graphite|json]" default:"influx" env:"PW2_DATASTORE"`
	PGMetricStoreConnStr string `long:"pg-metric-store-conn-str" description:"PG Metric Store" env:"PW2_PG_METRIC_STORE_CONN_STR"`
	PGRetentionDays      int64  `long:"pg-retention-days" description:"If set, metrics older than that will be deleted" default:"14" env:"PW2_PG_RETENTION_DAYS"`
	PrometheusPort       int64  `long:"prometheus-port" description:"Prometheus port. Effective with --datastore=prometheus" default:"9187" env:"PW2_PROMETHEUS_PORT"`
	PrometheusListenAddr string `long:"prometheus-listen-addr" description:"Network interface to listen on" default:"0.0.0.0" env:"PW2_PROMETHEUS_LISTEN_ADDR"`
	PrometheusNamespace  string `long:"prometheus-namespace" description:"Prefix for all non-process (thus Postgres) metrics" default:"pgwatch2" env:"PW2_PROMETHEUS_NAMESPACE"`
	// InfluxDB primary (…) and optional secondary (…2) instances
	InfluxHost           string `long:"ihost" description:"Influx host" default:"localhost" env:"PW2_IHOST"`
	InfluxPort           string `long:"iport" description:"Influx port" default:"8086" env:"PW2_IPORT"`
	InfluxDbname         string `long:"idbname" description:"Influx DB name" default:"pgwatch2" env:"PW2_IDATABASE"`
	InfluxUser           string `long:"iuser" description:"Influx user" default:"root" env:"PW2_IUSER"`
	InfluxPassword       string `long:"ipassword" description:"Influx password" default:"root" env:"PW2_IPASSWORD"`
	InfluxSSL            string `long:"issl" description:"Influx require SSL" env:"PW2_ISSL"`
	InfluxSSLSkipVerify  string `long:"issl-skip-verify" description:"Skip Influx Cert validation i.e. allows self-signed certs" default:"true" env:"PW2_ISSL_SKIP_VERIFY"`
	InfluxHost2          string `long:"ihost2" description:"Influx host II" env:"PW2_IHOST2"`
	InfluxPort2          string `long:"iport2" description:"Influx port II" env:"PW2_IPORT2"`
	InfluxDbname2        string `long:"idbname2" description:"Influx DB name II" default:"pgwatch2" env:"PW2_IDATABASE2"`
	InfluxUser2          string `long:"iuser2" description:"Influx user II" default:"root" env:"PW2_IUSER2"`
	InfluxPassword2      string `long:"ipassword2" description:"Influx password II" default:"root" env:"PW2_IPASSWORD2"`
	InfluxSSL2           string `long:"issl2" description:"Influx require SSL II" env:"PW2_ISSL2"`
	InfluxSSLSkipVerify2 string `long:"issl-skip-verify2" description:"Skip Influx Cert validation i.e. allows self-signed certs" default:"true" env:"PW2_ISSL_SKIP_VERIFY2"`
	InfluxRetentionDays  int64  `long:"iretentiondays" description:"Retention period in days. Set to 0 to use database defaults for an existing DB [default: 30]" default:"30" env:"PW2_IRETENTIONDAYS"`
	InfluxRetentionName  string `long:"iretentionname" description:"Retention policy name. [Default: pgwatch_def_ret]" default:"pgwatch_def_ret" env:"PW2_IRETENTIONNAME"`
	GraphiteHost         string `long:"graphite-host" description:"Graphite host" env:"PW2_GRAPHITEHOST"`
	GraphitePort         string `long:"graphite-port" description:"Graphite port" env:"PW2_GRAPHITEPORT"`
	JsonStorageFile      string `long:"json-storage-file" description:"Path to file where metrics will be stored when --datastore=json, one metric set per line" env:"PW2_JSON_STORAGE_FILE"`
	// Params for running based on local config files, enabled distributed "push model" based metrics gathering. Metrics are sent directly to Influx/Graphite.
	Config             string `short:"c" long:"config" description:"File or folder of YAML files containing info on which DBs to monitor and where to store metrics" env:"PW2_CONFIG"`
	MetricsFolder      string `short:"m" long:"metrics-folder" description:"Folder of metrics definitions" env:"PW2_METRICS_FOLDER" default:"/etc/pgwatch2/metrics"`
	BatchingDelayMs    int64  `long:"batching-delay-ms" description:"Max milliseconds to wait for a batched metrics flush. [Default: 250]" default:"250" env:"PW2_BATCHING_MAX_DELAY_MS"`
	AdHocConnString    string `long:"adhoc-conn-str" description:"Ad-hoc mode: monitor a single Postgres DB specified by a standard Libpq connection string" env:"PW2_ADHOC_CONN_STR"`
	AdHocDBType        string `long:"adhoc-dbtype" description:"Ad-hoc mode: postgres|postgres-continuous-discovery" default:"postgres" env:"PW2_ADHOC_DBTYPE"`
	AdHocConfig        string `long:"adhoc-config" description:"Ad-hoc mode: a preset config name or a custom JSON config" env:"PW2_ADHOC_CONFIG"`
	AdHocCreateHelpers string `long:"adhoc-create-helpers" description:"Ad-hoc mode: try to auto-create helpers. Needs superuser to succeed [Default: false]" default:"false" env:"PW2_ADHOC_CREATE_HELPERS"`
	AdHocUniqueName    string `long:"adhoc-name" description:"Ad-hoc mode: Unique 'dbname' for Influx. [Default: adhoc]" default:"adhoc" env:"PW2_ADHOC_NAME"`
	InternalStatsPort  int64  `long:"internal-stats-port" description:"Port for inquiring monitoring status in JSON format. [Default: 8081]" default:"8081" env:"PW2_INTERNAL_STATS_PORT"`
	ConnPooling        string `long:"conn-pooling" description:"Enable re-use of metrics fetching connections [Default: off]" default:"off" env:"PW2_CONN_POOLING"`
	// password encryption for YAML-mode configs
	AesGcmKeyphrase         string `long:"aes-gcm-keyphrase" description:"Decryption key for AES-GCM-256 passwords" env:"PW2_AES_GCM_KEYPHRASE"`
	AesGcmKeyphraseFile     string `long:"aes-gcm-keyphrase-file" description:"File with decryption key for AES-GCM-256 passwords" env:"PW2_AES_GCM_KEYPHRASE_FILE"`
	AesGcmPasswordToEncrypt string `long:"aes-gcm-password-to-encrypt" description:"A special mode, returns the encrypted plain-text string and quits. Keyphrase(file) must be set. Useful for YAML mode" env:"PW2_AES_GCM_PASSWORD_TO_ENCRYPT"`
	// NB! "Test data" mode needs to be combined with "ad-hoc" mode to get an initial set of metrics from a real source
	TestdataMultiplier           int    `long:"testdata-multiplier" description:"For how many hosts to generate data" env:"PW2_TESTDATA_MULTIPLIER"`
	TestdataDays                 int    `long:"testdata-days" description:"For how many days to generate data" env:"PW2_TESTDATA_DAYS"`
	AddRealDbname                string `long:"add-real-dbname" description:"Add real DB name to each captured metric" env:"PW2_ADD_REAL_DBNAME" default:"false"`
	RealDbnameField              string `long:"real-dbname-field" description:"Tag key for real DB name if --add-real-dbname enabled" env:"PW2_REAL_DBNAME_FIELD" default:"real_dbname"`
	AddSystemIdentifier          string `long:"add-system-identifier" description:"Add system identifier to each captured metric" env:"PW2_ADD_SYSTEM_IDENTIFIER" default:"false"`
	SystemIdentifierField        string `long:"system-identifier-field" description:"Tag key for system identifier value if --add-system-identifier" env:"PW2_SYSTEM_IDENTIFIER_FIELD" default:"sys_id"`
	ServersRefreshLoopSeconds    int    `long:"servers-refresh-loop-seconds" description:"Sleep time for the main loop" env:"PW2_SERVERS_REFRESH_LOOP_SECONDS" default:"120"`
	InstanceLevelCacheMaxSeconds int64  `long:"instance-level-cache-max-seconds" description:"Max allowed staleness for instance level metric data shared between DBs of an instance. Affects 'continuous' host types only. Set to 0 to disable" env:"PW2_INSTANCE_LEVEL_CACHE_MAX_SECONDS" default:"30"`
	Version                      bool   `long:"version" description:"Show Git build version and exit" env:"PW2_VERSION"`
	Ping                         bool   `long:"ping" description:"Try to connect to all configured DB-s, report errors and then exit" env:"PW2_PING"`
}

// opts is the global, parsed command line / env configuration used throughout the package.
var opts Options
func main() {
parser := flags.NewParser(&opts, flags.Default)
if _, err := parser.Parse(); err != nil {
return
}
if opts.Version {
if commit == "" {
fmt.Println("Git version not set! Use the 'build_gatherer.sh' script to build the binary or specify 'commit' and 'date' via -ldflags...")
os.Exit(1)
}
fmt.Printf("%s (%s)\n", commit, date)
os.Exit(0)
}
if strings.ToUpper(opts.Verbose) == "DEBUG" {
logging.SetLevel(logging.DEBUG, "main")
} else if strings.ToUpper(opts.Verbose) == "INFO" {
logging.SetLevel(logging.INFO, "main")
} else if strings.HasPrefix(strings.ToUpper(opts.Verbose), "WARN") {
logging.SetLevel(logging.WARNING, "main")
} else {
if len(opts.Verbose) >= 2 {
logging.SetLevel(logging.DEBUG, "main")
} else if len(opts.Verbose) == 1 {
logging.SetLevel(logging.INFO, "main")
} else {
logging.SetLevel(logging.WARNING, "main")
}
}
logging.SetFormatter(logging.MustStringFormatter(`%{level:.4s} %{shortfunc}: %{message}`))
log.Debugf("opts: %+v", opts)
if opts.ServersRefreshLoopSeconds <= 1 {
log.Fatal("--servers-refresh-loop-seconds must be greater than 1")
}
if len(opts.AesGcmKeyphraseFile) > 0 {
_, err := os.Stat(opts.AesGcmKeyphraseFile)
if os.IsNotExist(err) {
log.Warningf("Failed to read aes_gcm_keyphrase_file at %s, thus cannot monitor hosts with encrypted passwords", opts.AesGcmKeyphraseFile)
} else {
keyBytes, err := ioutil.ReadFile(opts.AesGcmKeyphraseFile)
if err != nil {
log.Fatalf("Failed to read aes_gcm_keyphrase_file at %s: %v", opts.AesGcmKeyphraseFile, err)
}
if keyBytes[len(keyBytes)-1] == 10 {
log.Warning("Removing newline character from keyphrase input string...")
opts.AesGcmKeyphrase = string(keyBytes[:len(keyBytes)-1]) // remove line feed
} else {
opts.AesGcmKeyphrase = string(keyBytes)
}
}
}
if opts.AesGcmPasswordToEncrypt != "" { // special flag - encrypt and exit
if opts.AesGcmKeyphrase == "" {
log.Fatal("--aes-gcm-password-to-encrypt requires --aes-gcm-keyphrase(-file)")
}
fmt.Println(encrypt(opts.AesGcmKeyphrase, opts.AesGcmPasswordToEncrypt))
os.Exit(0)
}
// ad-hoc mode
if len(opts.AdHocConnString) > 0 || len(opts.AdHocConfig) > 0 {
if len(opts.AdHocConnString) == 0 || len(opts.AdHocConfig) == 0 {
log.Fatal("--adhoc-conn-str and --adhoc-config params both need to be specified for Ad-hoc mode to work")
}
if len(opts.Config) > 0 {
log.Fatal("Conflicting flags! --adhoc-conn-str and --config cannot be both set")
}
if len(opts.MetricsFolder) == 0 {
opts.MetricsFolder = "/etc/pgwatch2/metrics"
log.Warningf("--metrics-folder path not specified, using %s", opts.MetricsFolder)
}
_, err := ioutil.ReadDir(opts.MetricsFolder)
if err != nil {
// try Docker image default file based metrics path
opts.MetricsFolder = "/pgwatch2/metrics"
_, err = ioutil.ReadDir(opts.MetricsFolder)
if err != nil {
log.Fatal("--adhoc-conn-str requires also --metrics-folder param")
}
}
if len(opts.User) > 0 && len(opts.Password) > 0 {
log.Fatal("Conflicting flags! --adhoc-conn-str and --user/--password cannot be both set")
}
if !(opts.AdHocDBType == DBTYPE_PG || opts.AdHocDBType == DBTYPE_PG_CONT) {
log.Fatalf("--adhoc-dbtype can be of: [ %s (single DB) | %s (all non-template DB-s on an instance) ]. Default: %s", DBTYPE_PG, DBTYPE_PG_CONT, DBTYPE_PG)
}
if opts.AdHocUniqueName == "adhoc" {
log.Warning("In ad-hoc mode: using default unique name 'adhoc' for metrics storage. use --adhoc-name to override.")
}
adHocMode = true
}
if opts.TestdataDays != 0 || opts.TestdataMultiplier > 0 {
if len(opts.AdHocConnString) == 0 {
log.Fatal("Test mode requires --adhoc-conn-str!")
}
if opts.TestdataMultiplier == 0 {
log.Fatal("Test mode requires --testdata-multiplier!")
}
if opts.TestdataDays == 0 {
log.Fatal("Test mode requires --testdata-days!")
}
}
if opts.AddRealDbname != "" {
addRealDbname = StringToBoolOrFail(opts.AddRealDbname, "--add-real-dbname")
if opts.RealDbnameField == "" {
log.Fatal("--real-dbname-field cannot be empty when --add-real-dbname enabled")
}
}
if opts.AddSystemIdentifier != "" {
addSystemIdentifier = StringToBoolOrFail(opts.AddSystemIdentifier, "--add-system-identifier")
if opts.SystemIdentifierField == "" {
log.Fatal("--system-identifier-field cannot be empty when --add-system-identifier enabled")
}
}
// running in config file based mode?
if len(opts.Config) > 0 {
if len(opts.MetricsFolder) == 0 {
opts.MetricsFolder = "/etc/pgwatch2/metrics" // prebuilt packages default location
log.Warningf("--metrics-folder path not specified, using %s", opts.MetricsFolder)
}
// verify that metric/config paths are readable
_, err := ioutil.ReadDir(opts.MetricsFolder)
if err != nil {
log.Fatalf("Could not read --metrics-folder path %s: %s", opts.MetricsFolder, err)
}
if !adHocMode {
fi, err := os.Stat(opts.Config)
if err != nil {
log.Fatalf("Could not Stat() path %s: %s", opts.Config, err)
}
switch mode := fi.Mode(); {
case mode.IsDir():
_, err := ioutil.ReadDir(opts.Config)
if err != nil {
log.Fatalf("Could not read path %s: %s", opts.Config, err)
}
case mode.IsRegular():
_, err := ioutil.ReadFile(opts.Config)
if err != nil {
log.Fatalf("Could not read path %s: %s", opts.Config, err)
}
}
}
fileBased = true
} else if !adHocMode {
// make sure all PG params are there
if opts.User == "" {
opts.User = os.Getenv("USER")
}
if opts.Host == "" || opts.Port == "" || opts.Dbname == "" || opts.User == "" {
fmt.Println("Check config DB parameters")
return
}
_ = InitAndTestConfigStoreConnection(opts.Host, opts.Port, opts.Dbname, opts.User, opts.Password, opts.PgRequireSSL, true)
}
// validate that input is boolean is set
if len(strings.TrimSpace(opts.InfluxSSL)) > 0 {
if _, err := strconv.ParseBool(opts.InfluxSSL); err != nil {
fmt.Println("Check --issl parameter - can be of: 1, t, T, TRUE, true, True, 0, f, F, FALSE, false, False")
return
}
} else {
opts.InfluxSSL = "false"
}
if len(strings.TrimSpace(opts.InfluxSSL2)) > 0 {
if _, err := strconv.ParseBool(opts.InfluxSSL2); err != nil {
fmt.Println("Check --issl2 parameter - can be of: 1, t, T, TRUE, true, True, 0, f, F, FALSE, false, False")
return
}
} else {
opts.InfluxSSL2 = "false"
}
if opts.BatchingDelayMs < 0 || opts.BatchingDelayMs > 3600000 {
log.Fatal("--batching-delay-ms must be between 0 and 3600000")
}
useConnPooling = StringToBoolOrFail(opts.ConnPooling, "--conn-pooling")
if opts.InternalStatsPort > 0 && !opts.Ping {
l, err := net.Listen("tcp", fmt.Sprintf(":%d", opts.InternalStatsPort))
if err != nil {
log.Fatalf("Could not start the internal statistics interface on port %d. Set --internal-stats-port to an open port or to 0 to disable. Err: %v", opts.InternalStatsPort, err)
}
err = l.Close()
if err != nil {
log.Fatalf("Could not cleanly stop the temporary listener on port %d: %v", opts.InternalStatsPort, err)
}
log.Infof("Starting the internal statistics interface on port %d...", opts.InternalStatsPort)
go StartStatsServer(opts.InternalStatsPort)
go StatsSummarizer()
}
control_channels := make(map[string](chan ControlMessage)) // [db1+metric1]=chan
persist_ch := make(chan []MetricStoreMessage, 10000)
var buffered_persist_ch chan []MetricStoreMessage
if !opts.Ping {
if opts.BatchingDelayMs > 0 && opts.Datastore != DATASTORE_PROMETHEUS {
buffered_persist_ch = make(chan []MetricStoreMessage, 10000) // "staging area" for metric storage batching, when enabled
log.Info("starting MetricsBatcher...")
go MetricsBatcher(DATASTORE_INFLUX, opts.BatchingDelayMs, buffered_persist_ch, persist_ch)
}
if opts.Datastore == DATASTORE_GRAPHITE {
if opts.GraphiteHost == "" || opts.GraphitePort == "" {
log.Fatal("--graphite-host/port needed!")
}
graphite_port, _ := strconv.ParseInt(opts.GraphitePort, 10, 64)
InitGraphiteConnection(opts.GraphiteHost, int(graphite_port))
log.Info("starting GraphitePersister...")
go MetricsPersister(DATASTORE_GRAPHITE, persist_ch)
} else if opts.Datastore == DATASTORE_INFLUX {
// check connection and store connection string
conn_str, err := InitAndTestInfluxConnection("1", opts.InfluxHost, opts.InfluxPort, opts.InfluxDbname, opts.InfluxUser,
opts.InfluxPassword, opts.InfluxSSL, opts.InfluxSSLSkipVerify, opts.InfluxRetentionDays)
if err != nil {
log.Fatal("Could not initialize InfluxDB", err)
}
InfluxConnectStrings[0] = conn_str
if len(opts.InfluxHost2) > 0 { // same check for Influx host
if len(opts.InfluxPort2) == 0 {
log.Fatal("Invalid Influx II connect info")
}
conn_str, err = InitAndTestInfluxConnection("2", opts.InfluxHost2, opts.InfluxPort2, opts.InfluxDbname2, opts.InfluxUser2,
opts.InfluxPassword2, opts.InfluxSSL2, opts.InfluxSSLSkipVerify2, opts.InfluxRetentionDays)
if err != nil {
log.Fatal("Could not initialize InfluxDB II", err)
}
InfluxConnectStrings[1] = conn_str
influx_host_count = 2
}
log.Info("InfluxDB connection(s) OK")
log.Info("starting InfluxPersister...")
go MetricsPersister(DATASTORE_INFLUX, persist_ch)
} else if opts.Datastore == DATASTORE_JSON {
if len(opts.JsonStorageFile) == 0 {
log.Fatal("--datastore=json requires --json-storage-file to be set")
}
jsonOutFile, err := os.Create(opts.JsonStorageFile) // test file path writeability
if err != nil {
log.Fatalf("Could not create JSON storage file: %s", err)
}
err = jsonOutFile.Close()
if err != nil {
log.Fatal(err)
}
log.Warningf("In JSON output mode. Gathered metrics will be written to \"%s\"...", opts.JsonStorageFile)
go MetricsPersister(DATASTORE_JSON, persist_ch)
} else if opts.Datastore == DATASTORE_POSTGRES {
if len(opts.PGMetricStoreConnStr) == 0 {
log.Fatal("--datastore=postgres requires --pg-metric-store-conn-str to be set")
}
_ = InitAndTestMetricStoreConnection(opts.PGMetricStoreConnStr, true)
PGSchemaType = CheckIfPGSchemaInitializedOrFail()
log.Info("starting PostgresPersister...")
go MetricsPersister(DATASTORE_POSTGRES, persist_ch)
log.Info("starting UniqueDbnamesListingMaintainer...")
go UniqueDbnamesListingMaintainer(true)
if opts.PGRetentionDays > 0 && (PGSchemaType == "metric" ||
PGSchemaType == "metric-time" || PGSchemaType == "metric-dbname-time") && opts.TestdataDays == 0 {
log.Info("starting old Postgres metrics cleanup job...")
go OldPostgresMetricsDeleter(opts.PGRetentionDays, PGSchemaType)
}
} else if opts.Datastore == DATASTORE_PROMETHEUS {
if opts.TestdataDays != 0 || opts.TestdataMultiplier > 0 {
log.Fatal("Test data generation mode cannot be used with Prometheus data store")
}
go StartPrometheusExporter(opts.PrometheusPort)
} else {
log.Fatal("Unknown datastore. Check the --datastore param")
}
_, _ = daemon.SdNotify(false, "READY=1") // Notify systemd, does nothing outside of systemd
}
first_loop := true
var monitored_dbs []MonitoredDatabase
var last_metrics_refresh_time int64
var err error
var metrics map[string]map[decimal.Decimal]MetricVersionProperties
var hostLastKnownStatusInRecovery = make(map[string]bool) // isInRecovery
var metric_config map[string]float64 // set to host.Metrics or host.MetricsStandby (in case optional config defined and in recovery state
for { //main loop
hostsToShutDownDueToRoleChange := make(map[string]bool) // hosts went from master to standby and have "only if master" set
if time.Now().Unix()-last_metrics_refresh_time > METRIC_DEFINITION_REFRESH_TIME {
//metrics
if fileBased {
metrics, err = ReadMetricsFromFolder(opts.MetricsFolder, first_loop)
} else {
metrics, err = ReadMetricDefinitionMapFromPostgres(first_loop)
}
if err == nil {
UpdateMetricDefinitionMap(metrics)
last_metrics_refresh_time = time.Now().Unix()
} else {
log.Errorf("Could not refresh metric definitions: %s", err)
}
}
if fileBased || adHocMode {
pmc, err := ReadPresetMetricsConfigFromFolder(opts.MetricsFolder, false)
if err != nil {
if first_loop {
log.Fatalf("Could not read preset metric config from \"%s\": %s", path.Join(opts.MetricsFolder, PRESET_CONFIG_YAML_FILE), err)
} else {
log.Errorf("Could not read preset metric config from \"%s\": %s", path.Join(opts.MetricsFolder, PRESET_CONFIG_YAML_FILE), err)
}
} else {
preset_metric_def_map = pmc
log.Debugf("Loaded preset metric config: %#v", pmc)
}
if adHocMode {
config, ok := pmc[opts.AdHocConfig]
if !ok {
log.Warningf("Could not find a preset metric config named \"%s\", assuming JSON config...", opts.AdHocConfig)
config, err = jsonTextToMap(opts.AdHocConfig)
if err != nil {
log.Fatalf("Could not parse --adhoc-config(%s): %v", opts.AdHocConfig, err)
}
}
md := MonitoredDatabase{DBUniqueName: opts.AdHocUniqueName, DBType: opts.AdHocDBType, Metrics: config, LibPQConnStr: opts.AdHocConnString}
if opts.AdHocDBType == DBTYPE_PG {
monitored_dbs = []MonitoredDatabase{md}
} else {
resolved, err := ResolveDatabasesFromConfigEntry(md)
if err != nil {
if first_loop {
log.Fatalf("Failed to resolve DBs for ConnStr \"%s\": %s", opts.AdHocConnString, err)
} else { // keep previously found list
log.Errorf("Failed to resolve DBs for ConnStr \"%s\": %s", opts.AdHocConnString, err)
}
} else {
monitored_dbs = resolved
}
}
} else {
mc, err := ReadMonitoringConfigFromFileOrFolder(opts.Config)
if err == nil {
log.Debugf("Found %d monitoring config entries", len(mc))
if len(opts.Group) > 0 {
var removed_count int
mc, removed_count = FilterMonitoredDatabasesByGroup(mc, opts.Group)
log.Infof("Filtered out %d config entries based on --groups=%s", removed_count, opts.Group)
}
monitored_dbs = GetMonitoredDatabasesFromMonitoringConfig(mc)
log.Debugf("Found %d databases to monitor from %d config items...", len(monitored_dbs), len(mc))
} else {
if first_loop {
log.Fatalf("Could not read/parse monitoring config from path: %s", opts.Config)
} else {
log.Errorf("Could not read/parse monitoring config from path: %s", opts.Config)
}
time.Sleep(time.Second * time.Duration(opts.ServersRefreshLoopSeconds))
continue
}
}
} else {
monitored_dbs, err = GetMonitoredDatabasesFromConfigDB()
if err != nil {
if first_loop {
log.Fatal("could not fetch active hosts - check config!", err)
} else {
log.Error("could not fetch active hosts:", err)
time.Sleep(time.Second * time.Duration(opts.ServersRefreshLoopSeconds))
continue
}
}
}
UpdateMonitoredDBCache(monitored_dbs)
if lastMonitoredDBsUpdate.IsZero() || lastMonitoredDBsUpdate.Before(time.Now().Add(-1*time.Second*MONITORED_DBS_DATASTORE_SYNC_INTERVAL_SECONDS)) {
monitored_dbs_copy := make([]MonitoredDatabase, len(monitored_dbs))
copy(monitored_dbs_copy, monitored_dbs)
if opts.BatchingDelayMs > 0 {
go SyncMonitoredDBsToDatastore(monitored_dbs_copy, buffered_persist_ch)
} else {
go SyncMonitoredDBsToDatastore(monitored_dbs_copy, persist_ch)
}
lastMonitoredDBsUpdate = time.Now()
}
if first_loop && (len(monitored_dbs) == 0 || len(metric_def_map) == 0) {
log.Warningf("host info refreshed, nr. of enabled entries in configuration: %d, nr. of distinct metrics: %d", len(monitored_dbs), len(metric_def_map))
} else {
log.Infof("host info refreshed, nr. of enabled entries in configuration: %d, nr. of distinct metrics: %d", len(monitored_dbs), len(metric_def_map))
}
if first_loop {
first_loop = false // only used for failing when 1st config reading fails
}
for _, host := range monitored_dbs {
log.Debugf("processing database: %s, metric config: %v, custom tags: %v, host config: %#v", host.DBUniqueName, host.Metrics, host.CustomTags, host.HostConfig)
db_unique := host.DBUniqueName
db_unique_orig := host.DBUniqueNameOrig
db_type := host.DBType
metric_config = host.Metrics
if host.PasswordType == "aes-gcm-256" && len(opts.AesGcmKeyphrase) == 0 && len(opts.AesGcmKeyphraseFile) == 0 {
// Warn if any encrypted hosts found but no keyphrase given
log.Warningf("Encrypted password type found for host \"%s\", but no decryption keyphrase specified. Use --aes-gcm-keyphrase or --aes-gcm-keyphrase-file params", db_unique)
}
db_conn_limiting_channel_lock.RLock()
_, exists := db_conn_limiting_channel[db_unique]
db_conn_limiting_channel_lock.RUnlock()
if !exists { // new host, initialize DB connection limiting structure
db_conn_limiting_channel_lock.Lock()
db_conn_limiting_channel[db_unique] = make(chan bool, MAX_PG_CONNECTIONS_PER_MONITORED_DB)
i := 0
for i < MAX_PG_CONNECTIONS_PER_MONITORED_DB {
//log.Debugf("initializing db_conn_limiting_channel %d for [%s]", i, db_unique)
db_conn_limiting_channel[db_unique] <- true
i++
}
db_conn_limiting_channel_lock.Unlock()
db_pg_version_map_lock.Lock()
db_get_pg_version_map_lock[db_unique] = sync.RWMutex{}
db_pg_version_map_lock.Unlock()
}
_, connectFailedSoFar := failedInitialConnectHosts[db_unique]
if !exists || connectFailedSoFar {
var err error
var ver DBVersionMapEntry
metric_config = make(map[string]float64)
if connectFailedSoFar {
log.Infof("retrying to connect to uninitialized DB \"%s\"...", db_unique)
} else {
log.Infof("new host \"%s\" found, checking connectivity...", db_unique)
}
ver, err = DBGetPGVersion(db_unique, db_type, true)
if err != nil {
log.Errorf("could not start metric gathering for DB \"%s\" due to connection problem: %s", db_unique, err)
if opts.AdHocConnString != "" {
log.Errorf("will retry in %ds...", opts.ServersRefreshLoopSeconds)
}
failedInitialConnectHosts[db_unique] = true
continue
} else {
log.Infof("Connect OK. [%s] is on version %s (in recovery: %v)", db_unique, ver.VersionStr, ver.IsInRecovery)
if connectFailedSoFar {
delete(failedInitialConnectHosts, db_unique)
}
if ver.IsInRecovery && host.OnlyIfMaster {
log.Infof("[%s] not added to monitoring due to 'master only' property", db_unique)
continue
}
metric_config = host.Metrics
hostLastKnownStatusInRecovery[db_unique] = ver.IsInRecovery
if ver.IsInRecovery && len(host.MetricsStandby) > 0 {
metric_config = host.MetricsStandby
}
}
if !opts.Ping && (host.IsSuperuser || (adHocMode && StringToBoolOrFail(opts.AdHocCreateHelpers, "--adhoc-create-helpers"))) && IsPostgresDBType(db_type) && !ver.IsInRecovery {
log.Infof("Trying to create helper functions if missing for \"%s\"...", db_unique)
_ = TryCreateMetricsFetchingHelpers(db_unique)
}
if opts.Datastore != DATASTORE_PROMETHEUS && !opts.Ping {
time.Sleep(time.Millisecond * 100) // not to cause a huge load spike when starting the daemon with 100+ monitored DBs
}
}
if opts.Datastore == DATASTORE_PROMETHEUS || opts.Ping {
continue
}
if IsPostgresDBType(host.DBType) {
ver, err := DBGetPGVersion(db_unique, host.DBType, false)
if err == nil { // ok to ignore error, re-tried on next loop
lastKnownStatusInRecovery := hostLastKnownStatusInRecovery[db_unique]
if ver.IsInRecovery && host.OnlyIfMaster {
log.Infof("[%s] to be removed from monitoring due to 'master only' property and status change", db_unique)
hostsToShutDownDueToRoleChange[db_unique] = true
continue
} else if lastKnownStatusInRecovery != ver.IsInRecovery {
if ver.IsInRecovery && len(host.MetricsStandby) > 0 {
log.Warningf("Switching metrics collection for \"%s\" to standby config...", db_unique)
metric_config = host.MetricsStandby
hostLastKnownStatusInRecovery[db_unique] = true
} else {
log.Warningf("Switching metrics collection for \"%s\" to primary config...", db_unique)
metric_config = host.Metrics
hostLastKnownStatusInRecovery[db_unique] = false
}
}
}
}
for metric_name := range metric_config {
metric := metric_name
metric_def_ok := false
if strings.HasPrefix(metric, RECO_PREFIX) {
metric = RECO_METRIC_NAME
}
interval := metric_config[metric]
if metric == RECO_METRIC_NAME {
metric_def_ok = true
} else {
metric_def_map_lock.RLock()
_, metric_def_ok = metric_def_map[metric]
metric_def_map_lock.RUnlock()
}
var db_metric string = db_unique + ":" + metric
_, ch_ok := control_channels[db_metric]
if metric_def_ok && !ch_ok { // initialize a new per db/per metric control channel
if interval > 0 {
host_metric_interval_map[db_metric] = interval
log.Infof("starting gatherer for [%s:%s] with interval %v s", db_unique, metric, interval)
control_channels[db_metric] = make(chan ControlMessage, 1)
if opts.BatchingDelayMs > 0 {
go MetricGathererLoop(db_unique, db_unique_orig, db_type, metric, metric_config, control_channels[db_metric], buffered_persist_ch)
} else {
go MetricGathererLoop(db_unique, db_unique_orig, db_type, metric, metric_config, control_channels[db_metric], persist_ch)
}
}
} else if (!metric_def_ok && ch_ok) || interval <= 0 {
// metric definition files were recently removed or interval set to zero
log.Warning("shutting down metric", metric, "for", host.DBUniqueName)
control_channels[db_metric] <- ControlMessage{Action: GATHERER_STATUS_STOP}
delete(control_channels, db_metric)
} else if !metric_def_ok {
epoch, ok := last_sql_fetch_error.Load(metric)
if !ok || ((time.Now().Unix() - epoch.(int64)) > 3600) { // complain only 1x per hour
log.Warningf("metric definition \"%s\" not found for \"%s\"", metric, db_unique)
last_sql_fetch_error.Store(metric, time.Now().Unix())
}
} else {
// check if interval has changed
if host_metric_interval_map[db_metric] != interval {
log.Warning("sending interval update for", db_unique, metric)
control_channels[db_metric] <- ControlMessage{Action: GATHERER_STATUS_START, Config: metric_config}
host_metric_interval_map[db_metric] = interval
}
}
}
}
if opts.Ping {
if len(failedInitialConnectHosts) > 0 {
log.Errorf("Could not reach %d configured DB host out of %d", len(failedInitialConnectHosts), len(monitored_dbs))
os.Exit(len(failedInitialConnectHosts))
}
log.Infof("All configured %d DB hosts were reachable", len(monitored_dbs))
os.Exit(0)
}
if opts.Datastore == DATASTORE_PROMETHEUS { // special behaviour, no "ahead of time" metric collection
log.Debugf("main sleeping %ds...", opts.ServersRefreshLoopSeconds)
time.Sleep(time.Second * time.Duration(opts.ServersRefreshLoopSeconds))
continue
}
if opts.TestdataDays != 0 {
log.Info("Waiting for all metrics generation goroutines to stop ...")
time.Sleep(time.Second * 10) // with that time all different metric fetchers should have started
testDataGenerationModeWG.Wait()
for {
pqlen := len(persist_ch)
if pqlen == 0 {
if opts.Datastore == DATASTORE_POSTGRES {
UniqueDbnamesListingMaintainer(false) // refresh Grafana listing table
}
log.Warning("All generators have exited and data stored. Exit")
os.Exit(0)
}
log.Infof("Waiting for generated metrics to be stored (%d still in queue) ...", pqlen)
time.Sleep(time.Second * 1)
}
}
// loop over existing channels and stop workers if DB or metric removed from config
log.Debug("checking if any workers need to be shut down...")
control_channel_list := make([]string, len(control_channels))
i := 0
for key := range control_channels {
control_channel_list[i] = key
i++
}
gatherers_shut_down := 0
next_chan:
for _, db_metric := range control_channel_list {
splits := strings.Split(db_metric, ":")
db := splits[0]
metric := splits[1]
_, ok := hostsToShutDownDueToRoleChange[db]
if !ok { // maybe some single metric was disabled
for _, host := range monitored_dbs {
if host.DBUniqueName == db {
metricConfig := metric_config
for metric_key := range metricConfig {
if metric_key == metric && metricConfig[metric_key] > 0 {
continue next_chan
}
}
}
}
}
log.Infof("shutting down gatherer for [%s:%s] ...", db, metric)
control_channels[db_metric] <- ControlMessage{Action: GATHERER_STATUS_STOP}
delete(control_channels, db_metric)
log.Debugf("control channel for [%s:%s] deleted", db, metric)
gatherers_shut_down++
}
if gatherers_shut_down > 0 {
log.Warningf("sent STOP message to %d gatherers (it might take some minutes for them to stop though)", gatherers_shut_down)
}
log.Debugf("main sleeping %ds...", opts.ServersRefreshLoopSeconds)
time.Sleep(time.Second * time.Duration(opts.ServersRefreshLoopSeconds))
}
}
|
[
"\"USER\""
] |
[] |
[
"USER"
] |
[]
|
["USER"]
|
go
| 1 | 0 | |
docs/conf.py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
"""
Sphinx documentation builder
"""
import os
# Set env flag so that we can doc functions that may otherwise not be loaded
# see for example interactive visualizations in qiskit.visualization.
os.environ['QISKIT_DOCS'] = 'TRUE'
# -- Project information -----------------------------------------------------
project = 'Qiskit'
copyright = '2019, Qiskit Development Team' # pylint: disable=redefined-builtin
author = 'Qiskit Development Team'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.16.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.napoleon',
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinx.ext.extlinks',
    'sphinx_tabs.tabs',
    'jupyter_sphinx',
    'sphinx_autodoc_typehints',
    'reno.sphinxext',
]
# Static assets and templates live next to this file; style.css/custom.css
# are applied on top of the theme's own stylesheets.
html_static_path = ['_static']
templates_path = ['_templates']
html_css_files = ['style.css', 'custom.css']
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
autosummary_generate = True
# -----------------------------------------------------------------------------
# Autodoc
# -----------------------------------------------------------------------------
autodoc_default_options = {
    'inherited-members': None,
}
# If true, figures, tables and code-blocks are automatically numbered if they
# have a caption.
numfig = True
# A dictionary mapping 'figure', 'table', 'code-block' and 'section' to
# strings that are used for format of figure numbers. As a special character,
# %s will be replaced to figure number.
numfig_format = {
    'table': 'Table %s'
}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx releases warn on language=None and prefer 'en';
# confirm the pinned Sphinx version before changing this.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'colorful'
# A boolean that decides whether module names are prepended to all object names
# (for object types where a “module” of some kind is defined), e.g. for
# py:function directives.
add_module_names = False
# A list of prefixes that are ignored for sorting the Python module index
# (e.g., if this is set to ['foo.'], then foo.bar is shown under B, not F).
# This can be handy if you document a project that consists of a single
# package. Works only for the HTML builder currently.
modindex_common_prefix = ['qiskit.']
# -- Configuration for extlinks extension ------------------------------------
# Refer to https://www.sphinx-doc.org/en/master/usage/extensions/extlinks.html
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme' # use the theme in subdir 'theme'
html_logo = 'images/logo.png'
#html_sidebars = {'**': ['globaltoc.html']}
html_last_updated_fmt = '%Y/%m/%d'
html_theme_options = {
    'logo_only': True,
    'display_version': True,
    'prev_next_buttons_location': 'bottom',
    'style_external_links': True,
    'style_nav_header_background': '#212121',
}
# Pull documentation from both the class docstring and __init__ docstring.
autoclass_content = 'both'
|
[] |
[] |
[
"QISKIT_DOCS"
] |
[]
|
["QISKIT_DOCS"]
|
python
| 1 | 0 | |
final_plots/read_aws.py
|
import psycopg2
import psycopg2.extras
import pandas as pd
import os
import time
from pathlib import Path
from dotenv import load_dotenv
def read_only_connect_aws(
        host="bikeshare-restored.cs9te7lm3pt2.us-east-1.rds.amazonaws.com",
        port=5432,
        database="bikeshare"):
    """Open a read-only connection to the bikeshare Postgres database on AWS.

    Credentials are loaded from ``env_readonly.env`` via dotenv and read from
    the AWS_READONLY_USER / AWS_READONLY_PASS environment variables.

    Args:
        host: database host name (defaults to the production RDS endpoint).
        port: database port.
        database: database name.

    Returns:
        An open psycopg2 connection. Caller is responsible for closing it.
    """
    env_path = 'env_readonly.env'
    load_dotenv(dotenv_path=env_path)
    user = os.environ.get("AWS_READONLY_USER")
    password = os.environ.get("AWS_READONLY_PASS")
    # Connect to aws postgres DB
    conn = psycopg2.connect(
        host=host, user=user, port=port, password=password,
        database=database)
    return conn
# Function to load cabi data from AWS. Leaving room to add different load
# types. Right now only allowing a load of all the database
class QueryTool:
    """Canned queries against the bikeshare database.

    Attributes:
        connection: an open DBAPI connection, passed to ``pandas.read_sql``.
        table: default table name used by queries that target one table.
    """

    def __init__(self, connection, table=None):
        self.connection = connection
        self.table = table

    def basic(self):
        """Return every row of ``self.table`` as a DataFrame."""
        query = (
            'SELECT * from ') + self.table
        dataframe = pd.read_sql(query, con=self.connection)
        return dataframe

    def missing_check(self):
        """Return trip counts per operator from ``dockless_trips``."""
        query = ("""
        SELECT
        COUNT(*) as total_count,
        dt.operator as operator
        FROM dockless_trips as dt
        GROUP BY
        operator;""")
        dataframe = pd.read_sql(query, con=self.connection)
        return dataframe

    def geo_metric(self, cut):
        """Return trip counts grouped by start/end region and a date part.

        Args:
            cut: a date part understood by SQL ``extract()``, e.g. 'month'.
        """
        self.cut = cut
        # BUG FIX: the format arguments previously referenced a bare ``table``
        # name, which is undefined in this scope (NameError at call time).
        # The instance attribute ``self.table`` is the intended table name.
        query = ("""
        SELECT
        stations.end_region_code,
        stations.start_region_code,
        extract({0} from subq_trip.start_date) as {0},
        COUNT(*) as total_trips
        FROM
        (SELECT * FROM {1} LIMIT 25) as subq_trip
        LEFT JOIN cabi_stations_geo_temp AS stations
        ON subq_trip.start_station = stations.start_short_name
        AND subq_trip.end_station = stations.end_short_name
        GROUP BY
        stations.end_region_code,
        stations.start_region_code,
        extract({0} from subq_trip.start_date);""").format(cut, self.table)
        dataframe = pd.read_sql(query, con=self.connection)
        return dataframe

    def annual(self, year):
        """Return all ``cabi_trips`` rows whose start_date falls in ``year``."""
        self.year = year
        start_string = (
            'SELECT * from cabi_trips '
            'WHERE EXTRACT(YEAR FROM start_date)=')
        query = start_string + str(self.year)
        dataframe = pd.read_sql(query, con=self.connection)
        return dataframe

    def describe_data(self):
        """Print schema/table/column/type for every non-system column.

        NOTE: uses a psycopg2 DictCursor, so this method is Postgres-specific.
        """
        cur = self.connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
        cur.execute("""select *
            from information_schema.columns
            where table_schema NOT IN (
            'information_schema', 'pg_catalog')
            order by table_schema, table_name""")
        for row in cur:
            print("schema: {schema}, table: {table}, column: {col}, \
            type: {type}".format(
                schema=row['table_schema'], table=row['table_name'],
                col=row['column_name'], type=row['data_type']))
if __name__ == '__main__':
    # Smoke-test entry point: connect to the AWS read-only database and dump
    # the schema of every non-system column to stdout.
    print('Running')
    conn = read_only_connect_aws()
    CABI_TRIPS = QueryTool(conn, 'cabi_trips')
    CABI_TRIPS.describe_data()
|
[] |
[] |
[
"AWS_READONLY_USER",
"AWS_READONLY_PASS"
] |
[]
|
["AWS_READONLY_USER", "AWS_READONLY_PASS"]
|
python
| 2 | 0 | |
driver/minio/minio_test.go
|
// Copyright 2020 The goftp Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package minio
import (
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/jlaffaye/ftp"
"github.com/stretchr/testify/assert"
"goftp.io/server/core"
)
// runServer starts an FTP server with the given options and notifiers,
// runs execute() against it, and then shuts the server down, asserting
// that startup and shutdown behave as expected.
func runServer(t *testing.T, opt *core.ServerOpts, notifiers []core.Notifier, execute func()) {
	server := core.NewServer(opt)
	for _, n := range notifiers {
		server.RegisterNotifer(n)
	}
	// ListenAndServe blocks until Shutdown; it is expected to return
	// ErrServerClosed once the deferred shutdown below runs.
	go func() {
		assert.EqualError(t, server.ListenAndServe(), core.ErrServerClosed.Error())
	}()
	execute()
	assert.NoError(t, server.Shutdown())
}
// TestDriver runs an end-to-end exercise of the MinIO-backed FTP driver:
// login/auth, store/retrieve, listing, delete, rename, directory creation,
// directory traversal and offset reads. It is skipped unless
// MINIO_SERVER_ENDPOINT is set, so it only runs against a real MinIO
// instance configured via the MINIO_SERVER_* environment variables.
func TestDriver(t *testing.T) {
	endpoint := os.Getenv("MINIO_SERVER_ENDPOINT")
	if endpoint == "" {
		t.Skip()
		return
	}
	accessKeyID := os.Getenv("MINIO_SERVER_ACCESS_KEY_ID")
	secretKey := os.Getenv("MINIO_SERVER_SECRET_KEY")
	location := os.Getenv("MINIO_SERVER_LOCATION")
	bucket := os.Getenv("MINIO_SERVER_BUCKET")
	useSSL, _ := strconv.ParseBool(os.Getenv("MINIO_SERVER_USE_SSL"))
	opt := &core.ServerOpts{
		Name:    "test ftpd",
		Factory: NewDriverFactory(endpoint, accessKeyID, secretKey, location, bucket, useSSL, core.NewSimplePerm("root", "root")),
		Port:    2120,
		Auth: &core.SimpleAuth{
			Name:     "admin",
			Password: "admin",
		},
		Logger: new(core.DiscardLogger),
	}
	runServer(t, opt, nil, func() {
		// Retry the initial connect: the server goroutine may not be
		// listening yet when this closure first runs.
		tries := 5
		for {
			f, err := ftp.Connect(fmt.Sprintf("localhost:%d", opt.Port))
			if err != nil {
				fmt.Printf("Connect error: %v\n", err)
				// Give server 0.5 seconds to get to the listening state
				time.Sleep(500 * time.Millisecond)
				tries--
				if tries == 0 {
					// NOTE(review): "connnect" typo below is a runtime
					// message string, deliberately left untouched here.
					assert.True(t, false, "Cannot connnect ftp server")
					return
				}
				continue
			}
			assert.NoError(t, err)
			assert.NotNil(t, f)
			// Auth: good credentials succeed, empty password fails.
			assert.NoError(t, f.Login("admin", "admin"))
			assert.Error(t, f.Login("admin", ""))
			curDir, err := f.CurrentDir()
			assert.NoError(t, err)
			assert.EqualValues(t, "/", curDir)
			err = f.RemoveDir("/")
			assert.NoError(t, err)
			// Store a file and read it back verbatim.
			var content = `test`
			assert.NoError(t, f.Stor("server_test.go", strings.NewReader(content)))
			r, err := f.Retr("server_test.go")
			assert.NoError(t, err)
			buf, err := ioutil.ReadAll(r)
			assert.NoError(t, err)
			r.Close()
			assert.EqualValues(t, content, buf)
			// Listing reflects the stored file and its size.
			entries, err := f.List("/")
			assert.NoError(t, err)
			assert.EqualValues(t, 1, len(entries))
			assert.EqualValues(t, "server_test.go", entries[0].Name)
			assert.EqualValues(t, ftp.EntryTypeFile, entries[0].Type)
			assert.EqualValues(t, len(buf), entries[0].Size)
			size, err := f.FileSize("/server_test.go")
			assert.NoError(t, err)
			assert.EqualValues(t, 4, size)
			// Delete empties the listing again.
			assert.NoError(t, f.Delete("/server_test.go"))
			entries, err = f.List("/")
			assert.NoError(t, err)
			assert.EqualValues(t, 0, len(entries))
			// Removing "/" wipes stored files.
			assert.NoError(t, f.Stor("server_test2.go", strings.NewReader(content)))
			err = f.RemoveDir("/")
			assert.NoError(t, err)
			entries, err = f.List("/")
			assert.NoError(t, err)
			assert.EqualValues(t, 0, len(entries))
			// Rename keeps content/size, changes the name.
			assert.NoError(t, f.Stor("server_test3.go", strings.NewReader(content)))
			err = f.Rename("/server_test3.go", "/test.go")
			assert.NoError(t, err)
			entries, err = f.List("/")
			assert.NoError(t, err)
			assert.EqualValues(t, 1, len(entries))
			assert.EqualValues(t, "test.go", entries[0].Name)
			assert.EqualValues(t, 4, entries[0].Size)
			assert.EqualValues(t, ftp.EntryTypeFile, entries[0].Type)
			// Directory creation and traversal.
			err = f.MakeDir("/src")
			assert.NoError(t, err)
			err = f.ChangeDir("/src")
			assert.NoError(t, err)
			curDir, err = f.CurrentDir()
			assert.NoError(t, err)
			assert.EqualValues(t, "/src", curDir)
			// Nested directory creation; listing shows the child folder.
			err = f.MakeDir("/new/1/2")
			assert.NoError(t, err)
			entries, err = f.List("/new/1")
			assert.NoError(t, err)
			assert.EqualValues(t, 1, len(entries))
			assert.EqualValues(t, "2/", entries[0].Name)
			assert.EqualValues(t, 0, entries[0].Size)
			assert.EqualValues(t, ftp.EntryTypeFolder, entries[0].Type)
			// RetrFrom reads from the given byte offset ("te|st" -> "st").
			assert.NoError(t, f.Stor("/test/1/2/server_test3.go", strings.NewReader(content)))
			r, err = f.RetrFrom("/test/1/2/server_test3.go", 2)
			assert.NoError(t, err)
			buf, err = ioutil.ReadAll(r)
			r.Close()
			assert.NoError(t, err)
			assert.EqualValues(t, "st", string(buf))
			curDir, err = f.CurrentDir()
			assert.NoError(t, err)
			assert.EqualValues(t, "/src", curDir)
			// Relative Stor resolves against the current dir (/src).
			assert.NoError(t, f.Stor("server_test.go", strings.NewReader(content)))
			r, err = f.Retr("/src/server_test.go")
			assert.NoError(t, err)
			buf, err = ioutil.ReadAll(r)
			r.Close()
			assert.NoError(t, err)
			assert.EqualValues(t, "test", string(buf))
			break
		}
	})
}
|
[
"\"MINIO_SERVER_ENDPOINT\"",
"\"MINIO_SERVER_ACCESS_KEY_ID\"",
"\"MINIO_SERVER_SECRET_KEY\"",
"\"MINIO_SERVER_LOCATION\"",
"\"MINIO_SERVER_BUCKET\"",
"\"MINIO_SERVER_USE_SSL\""
] |
[] |
[
"MINIO_SERVER_ACCESS_KEY_ID",
"MINIO_SERVER_BUCKET",
"MINIO_SERVER_USE_SSL",
"MINIO_SERVER_ENDPOINT",
"MINIO_SERVER_SECRET_KEY",
"MINIO_SERVER_LOCATION"
] |
[]
|
["MINIO_SERVER_ACCESS_KEY_ID", "MINIO_SERVER_BUCKET", "MINIO_SERVER_USE_SSL", "MINIO_SERVER_ENDPOINT", "MINIO_SERVER_SECRET_KEY", "MINIO_SERVER_LOCATION"]
|
go
| 6 | 0 | |
telemetry/telemetry/internal/platform/android_device.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import subprocess
from telemetry.core import util
from telemetry.internal.platform import cros_device
from telemetry.internal.platform import device
from telemetry.internal.platform.profiler import monsoon
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
def _KillStrayADBProcesses():
  """Best-effort kill of leftover adb processes via `killall adb`."""
  killer = subprocess.Popen(['killall', 'adb'])
  killer.communicate()
  # killall exits non-zero when no matching process existed.
  if killer.returncode:
    logging.info('No adb process was killed')
  else:
    logging.info('Some adb process was killed')
class AndroidDevice(device.Device):
  """Connection information for a single Android device.

  Attributes:
    device_id: the device's serial string created by adb to uniquely
      identify an emulator/device instance (as shown by `adb devices`).
    enable_performance_mode: when True, the platform is switched to high
      performance mode after the browser starts.
  """

  def __init__(self, device_id, enable_performance_mode=True):
    super(AndroidDevice, self).__init__(
        name='Android device %s' % device_id, guid=device_id)
    self._device_id = device_id
    self._enable_performance_mode = enable_performance_mode

  @classmethod
  def GetAllConnectedDevices(cls, blacklist):
    """Return an AndroidDevice for every healthy online serial."""
    return [cls(serial) for serial in GetDeviceSerials(blacklist)]

  @property
  def device_id(self):
    return self._device_id

  @property
  def enable_performance_mode(self):
    return self._enable_performance_mode
def _ListSerialsOfHealthyOnlineDevices(blacklist):
  """Return serials of healthy devices that are currently online."""
  serials = []
  for dev in device_utils.DeviceUtils.HealthyDevices(blacklist):
    if dev.IsOnline():
      serials.append(dev.adb.GetDeviceSerial())
  return serials
def GetDeviceSerials(blacklist):
  """Return the list of device serials of healthy devices.

  If a preferred device has been set with ANDROID_SERIAL, it will be first in
  the returned list. The arguments specify what devices to include in the list.
  """
  try:
    device_serials = _ListSerialsOfHealthyOnlineDevices(blacklist)
  # Sometimes stray adb processes can interfere with using adb.
  except device_errors.AdbCommandFailedError:
    _KillStrayADBProcesses()
    device_serials = _ListSerialsOfHealthyOnlineDevices(blacklist)
  # The monsoon provides power for the device, so for devices with no
  # real battery, we need to turn them on after the monsoon enables voltage
  # output to the device.
  if not device_serials:
    try:
      m = monsoon.Monsoon(wait=False)
      m.SetUsbPassthrough(1)
      m.SetVoltage(3.8)
      m.SetMaxCurrent(8)
      logging.warn("""
Monsoon power monitor detected, but no Android devices.
The Monsoon's power output has been enabled. Please now ensure that:
1. The Monsoon's front and back USB are connected to the host.
2. The device is connected to the Monsoon's main and USB channels.
3. The device is turned on.
Waiting for device...
""")
      # BUG FIX: util.WaitFor polls a *callable* condition. The previous code
      # evaluated _ListSerialsOfHealthyOnlineDevices immediately and passed
      # the resulting list, which WaitFor cannot invoke. Wrap it in a lambda
      # so WaitFor can re-evaluate it until a device appears or 600s elapse.
      util.WaitFor(lambda: _ListSerialsOfHealthyOnlineDevices(blacklist), 600)
      device_serials = _ListSerialsOfHealthyOnlineDevices(blacklist)
    except IOError:
      # No monsoon attached either; report no devices.
      return []
  preferred_device = os.environ.get('ANDROID_SERIAL')
  if preferred_device in device_serials:
    logging.warn(
        'ANDROID_SERIAL is defined. Put %s in the first of the'
        'discovered devices list.' % preferred_device)
    device_serials.remove(preferred_device)
    device_serials.insert(0, preferred_device)
  return device_serials
def GetDevice(finder_options):
"""Return a Platform instance for the device specified by |finder_options|."""
android_platform_options = finder_options.remote_platform_options
if not CanDiscoverDevices():
logging.info(
'No adb command found. Will not try searching for Android browsers.')
return None
if android_platform_options.android_blacklist_file:
blacklist = device_blacklist.Blacklist(
android_platform_options.android_blacklist_file)
else:
blacklist = None
if (android_platform_options.device
and android_platform_options.device in GetDeviceSerials(blacklist)):
return AndroidDevice(
android_platform_options.device,
enable_performance_mode=not finder_options.no_performance_mode)
devices = AndroidDevice.GetAllConnectedDevices(blacklist)
if len(devices) == 0:
logging.warn('No android devices found.')
return None
if len(devices) > 1:
logging.warn(
'Multiple devices attached. Please specify one of the following:\n' +
'\n'.join([' --device=%s' % d.device_id for d in devices]))
return None
return devices[0]
def _HasValidAdb():
"""Returns true if adb is present.
Note that this currently will return True even if the adb that's present
cannot run on this system.
"""
if os.name != 'posix' or cros_device.IsRunningOnCrOS():
return False
try:
adb_path = adb_wrapper.AdbWrapper.GetAdbPath()
except device_errors.NoAdbError:
return False
if os.path.isabs(adb_path) and not os.path.exists(adb_path):
return False
return True
def CanDiscoverDevices():
"""Returns true if devices are discoverable via adb."""
if not _HasValidAdb():
return False
try:
device_utils.DeviceUtils.HealthyDevices(None)
return True
except (device_errors.CommandFailedError, device_errors.CommandTimeoutError,
device_errors.NoAdbError, OSError):
return False
def FindAllAvailableDevices(options):
"""Returns a list of available devices.
"""
# Disable Android device discovery when remote testing a CrOS device
if options.cros_remote:
return []
android_platform_options = options.remote_platform_options
devices = []
try:
if CanDiscoverDevices():
blacklist = None
if android_platform_options.android_blacklist_file:
blacklist = device_blacklist.Blacklist(
android_platform_options.android_blacklist_file)
devices = AndroidDevice.GetAllConnectedDevices(blacklist)
finally:
if not devices and _HasValidAdb():
try:
adb_wrapper.AdbWrapper.KillServer()
except device_errors.NoAdbError as e:
logging.warning(
'adb reported as present, but NoAdbError thrown: %s', str(e))
return devices
|
[] |
[] |
[
"ANDROID_SERIAL"
] |
[]
|
["ANDROID_SERIAL"]
|
python
| 1 | 0 | |
rome/compute_u.py
|
import os
from pathlib import Path
from typing import Dict
import torch
from dotenv import load_dotenv
from transformers import AutoModelForCausalLM, AutoTokenizer
from rome import repr_tools
from util.generate import generate_fast
from .layer_stats import layer_stats
from .rome_hparams import ROMEHyperParams
# Cache variables
inv_mom2_cache = {}
context_templates_cache = None
# Load directory configurations
load_dotenv()
STATS_DIR = Path(os.getenv("STATS_DIR"))
def get_inv_cov(
model: AutoModelForCausalLM,
tok: AutoTokenizer,
layer_name: str,
mom2_dataset: str,
mom2_n_samples: str,
mom2_dtype: str,
) -> torch.Tensor:
"""
Retrieves covariance statistics, then computes the algebraic inverse.
Caches result for future use.
"""
global inv_mom2_cache
model_name = model.config._name_or_path.replace("/", "_")
key = (model_name, layer_name)
if key not in inv_mom2_cache:
print(
f"Retrieving inverse covariance statistics for {model_name} @ {layer_name}. "
f"The result will be cached to avoid repetitive computation."
)
stat = layer_stats(
model,
tok,
layer_name,
STATS_DIR,
mom2_dataset,
sample_size=mom2_n_samples,
precision=mom2_dtype,
)
inv_mom2_cache[key] = torch.inverse(
stat.mom2.moment().to("cuda")
).float() # Cast back to float32
return inv_mom2_cache[key]
def compute_u(
model: AutoModelForCausalLM,
tok: AutoTokenizer,
request: Dict,
hparams: ROMEHyperParams,
layer: int,
) -> torch.Tensor:
"""
Computes the right vector used in constructing the rank-1 update matrix.
"""
global context_templates_cache
print("Computing left vector (u)...")
# Compute projection token
word_repr_args = dict(
model=model,
tok=tok,
layer=layer,
module_template=hparams.rewrite_module_tmp,
track="in",
)
if hparams.fact_token == "subject_last":
# Sample some prefixes to get the contextual embedding of subject
word = request["subject"]
# context_templates = ["{}"]
if context_templates_cache is None:
context_templates_cache = [
x + " {}"
for x in sum(
(
generate_fast(
model,
tok,
["The"],
n_gen_per_prompt=n_gen,
max_out_len=length,
)
for length, n_gen in [(2, 20), (5, 20), (10, 10)]
),
[],
)
] + ["{}"]
print(f"Cached context templates {context_templates_cache}")
print(f"Selected u projection token {word}")
cur_repr = torch.stack(
[ # TODO batch this to drastically improve performance
repr_tools.get_repr_at_word_last_token(
context_template=templ, word=word, **word_repr_args
)
for templ in context_templates_cache
],
dim=0,
).mean(0)
elif hparams.fact_token == "last":
# Heuristic to choose last word. Not a huge deal if there's a minor
# edge case (e.g. multi-token word) because the function below will
# take the last token.
cur_repr = repr_tools.get_repr_at_idxs(
context=request["prompt"].format(request["subject"]),
idxs=[-1],
**word_repr_args,
)
print("Selected u projection token with last token")
else:
raise ValueError(f"fact_token={hparams.fact_token} not recognized")
# Apply covariance estimate
u = cur_repr
if hparams.mom2_adjustment:
u = get_inv_cov(
model,
tok,
hparams.rewrite_module_tmp.format(layer),
hparams.mom2_dataset,
hparams.mom2_n_samples,
hparams.mom2_dtype,
) @ u.unsqueeze(1)
u = u.squeeze()
return u / u.norm()
|
[] |
[] |
[
"STATS_DIR"
] |
[]
|
["STATS_DIR"]
|
python
| 1 | 0 | |
profiles_project/settings.py
|
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'cl67rw0*dpbp61uv+vv7hu+f^+m#z43$(k77nh2kw$wyv-!f$h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.environ.get('DEBUG', 1)))
ALLOWED_HOSTS = [
'ec2-3-20-227-82.us-east-2.compute.amazonaws.com',
'127.0.0.1'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
STATIC_ROOT = 'static/'
|
[] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
python
| 1 | 0 | |
pkg/webhook/approval/approval.go
|
package approval
import (
"context"
"io/ioutil"
"k8s.io/apimachinery/pkg/types"
"os"
"path"
"strconv"
"time"
admissionRegistrationV1 "k8s.io/api/admissionregistration/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
certResources "knative.dev/pkg/webhook/certificates/resources"
"approval-operator/internal"
)
const (
DefaultPort = 443
CertDir = "/tmp/approval-webhook"
ValidationPath = "/validate-approvals"
ValidationConfigName = "validating.approval.tmax.io"
)
func Port() int {
envPort := os.Getenv("WEBHOOK_PORT")
if envPort == "" {
return DefaultPort
} else {
port, err := strconv.Atoi(envPort)
if err != nil {
log.Log.Error(err, "Cannot parse port number")
os.Exit(1)
}
return port
}
}
// Create and Store certificates for webhook server
// server key / server cert is stored as file in CertDir
// CA bundle is stored in ValidatingWebhookConfigurations
func CreateCert(ctx context.Context, client client.Client) error {
// Make directory recursively
if err := os.MkdirAll(CertDir, os.ModePerm); err != nil {
return err
}
// Get service name and namespace
svc := internal.WebhookServiceName()
ns, err := internal.Namespace()
if err != nil {
return err
}
// Create certs
tlsKey, tlsCrt, caCrt, err := certResources.CreateCerts(ctx, svc, ns, time.Now().AddDate(1, 0, 0))
if err != nil {
return err
}
// Write certs to file
keyPath := path.Join(CertDir, "tls.key")
err = ioutil.WriteFile(keyPath, tlsKey, 0644)
if err != nil {
return err
}
crtPath := path.Join(CertDir, "tls.crt")
err = ioutil.WriteFile(crtPath, tlsCrt, 0644)
if err != nil {
return err
}
// Update validatingWebhookConfigurations
conf := &admissionRegistrationV1.ValidatingWebhookConfiguration{}
if err = client.Get(ctx, types.NamespacedName{Name: ValidationConfigName}, conf); err != nil {
// Return error, even if it is 'not found' error
// ValidationWebhookConfiguration object should be created at installation time
return err
}
for i := range conf.Webhooks {
conf.Webhooks[i].ClientConfig.CABundle = caCrt
}
if err = client.Update(ctx, conf); err != nil {
return err
}
return nil
}
|
[
"\"WEBHOOK_PORT\""
] |
[] |
[
"WEBHOOK_PORT"
] |
[]
|
["WEBHOOK_PORT"]
|
go
| 1 | 0 | |
windmill-backend/app/services/aws/aws.go
|
package aws
import (
"errors"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/joho/godotenv"
"log"
"mime/multipart"
"os"
"path"
)
var AWS_S3_BASE string
var AWS_S3_REGION string
var AWS_S3_BUCKET string
func initEnv() {
if err := godotenv.Load(); err != nil {
log.Print("No .env file found")
}
AWS_S3_BASE = os.Getenv("AWS_S3_BASE")
AWS_S3_REGION = os.Getenv("AWS_S3_REGION")
AWS_S3_BUCKET = os.Getenv("AWS_S3_BUCKET")
}
var sess = connectAWS()
func connectAWS() *session.Session {
initEnv()
sess, err := session.NewSession(
&aws.Config{
Region: aws.String(AWS_S3_REGION),
})
if err != nil {
panic(err)
}
return sess
}
func UpdateDisplayPicture(file multipart.File, filename string, userId string) (string, error) {
uploader := s3manager.NewUploader(sess)
_, err := uploader.Upload(&s3manager.UploadInput{
Bucket: aws.String(AWS_S3_BUCKET),
Key: aws.String(path.Join("/users/" + userId + "/profile", filename)),
Body: file,
})
if err != nil {
// Do your error handling here
return "", errors.New("error uploading to server")
}
return AWS_S3_BASE + userId + "/profile/" + filename, nil
}
func UploadVideoToS3(file multipart.File, videoId string, userId string) (string, error) {
uploader := s3manager.NewUploader(sess)
_, err := uploader.Upload(&s3manager.UploadInput{
Bucket: aws.String(AWS_S3_BUCKET),
Key: aws.String(path.Join("/users/" + userId + "/videos", videoId + ".mp4")),
Body: file,
})
if err != nil {
return "", errors.New("error uploading to server")
}
fmt.Println("success")
return AWS_S3_BASE + userId + "/videos/" + videoId + ".mp4", nil
}
func GetUserDisplayPicture(dpPath string) (*os.File, error){
item := "displaypic.jpg"
file, err := os.Create(item)
if err != nil {
fmt.Println(err)
}
downloader := s3manager.NewDownloader(sess)
numBytes, err := downloader.Download(file,
&s3.GetObjectInput{
Bucket: aws.String(AWS_S3_BUCKET),
Key: aws.String(dpPath),
})
if err != nil {
return nil, errors.New("couldn't download profile picture")
}
fmt.Println("Downloaded", file.Name(), numBytes, "bytes")
return file, nil
}
func UploadVideoThumbnail(file multipart.File, userId string, videoId string) (string, error) {
uploader := s3manager.NewUploader(sess)
_, err := uploader.Upload(&s3manager.UploadInput{
Bucket: aws.String(AWS_S3_BUCKET),
ContentType: aws.String("image/jpeg"),
Key: aws.String(path.Join("/users/" + userId + "/videoThumbnails", videoId + ".jpg")),
Body: file,
})
if err != nil {
return "", errors.New("error uploading to server")
}
fmt.Println("success")
return AWS_S3_BASE + userId + "/videoThumbnails/" + videoId+ ".jpg", nil
}
|
[
"\"AWS_S3_BASE\"",
"\"AWS_S3_REGION\"",
"\"AWS_S3_BUCKET\""
] |
[] |
[
"AWS_S3_BASE",
"AWS_S3_BUCKET",
"AWS_S3_REGION"
] |
[]
|
["AWS_S3_BASE", "AWS_S3_BUCKET", "AWS_S3_REGION"]
|
go
| 3 | 0 | |
ucloud-sdk-java-pathx/src/test/java/cn/ucloud/pathx/client/GetGlobalSSHTrafficTest.java
|
package cn.ucloud.pathx.client;
import cn.ucloud.common.pojo.Account;
import cn.ucloud.pathx.model.GetGlobalSSHTrafficParam;
import cn.ucloud.pathx.model.GetGlobalSSHTrafficResult;
import cn.ucloud.pathx.pojo.PATHXConfig;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertNull;
/**
* @Description : PATHX.GetGlobalSSHTraffic 测试
* @Author : ucloud-sdk-generator
* @Date : 2019-03-13 10:02
**/
public class GetGlobalSSHTrafficTest {
private PATHXClient client;
private GetGlobalSSHTrafficParam param;
@Before
public void setUp() throws Exception {
client = new DefaultPATHXClient(new PATHXConfig(
new Account(System.getenv("UCloudPrivateKey"),
System.getenv("UCloudPublicKey"))));
String projectId = "org-izug1m";
String uGAId = "uga-eskzpkk2";
Integer beginTime = 1552460481;
Integer endTime = 1552546881;
param = new GetGlobalSSHTrafficParam(projectId, uGAId, beginTime, endTime);
}
@Test
public void getGlobalSSHTraffic() {
try {
GetGlobalSSHTrafficResult result = client.getGlobalSSHTraffic(param);
JSONComparator.jsonComparator(result);
} catch (Exception e) {
assertNull(e);
}
}
}
|
[
"\"UCloudPrivateKey\"",
"\"UCloudPublicKey\""
] |
[] |
[
"UCloudPrivateKey",
"UCloudPublicKey"
] |
[]
|
["UCloudPrivateKey", "UCloudPublicKey"]
|
java
| 2 | 0 | |
libgo/go/cmd/go/go_test.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main_test
import (
"bytes"
"context"
"debug/elf"
"debug/macho"
"flag"
"fmt"
"go/format"
"internal/race"
"internal/testenv"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"testing"
"time"
"cmd/go/internal/cache"
"cmd/go/internal/cfg"
"cmd/go/internal/robustio"
"cmd/internal/sys"
)
var (
canRun = true // whether we can run go or ./testgo
canRace = false // whether we can run the race detector
canCgo = false // whether we can use cgo
canMSan = false // whether we can run the memory sanitizer
exeSuffix string // ".exe" on Windows
skipExternal = false // skip external tests
)
func tooSlow(t *testing.T) {
if testing.Short() {
// In -short mode; skip test, except run it on the {darwin,linux,windows}/amd64 builders.
if testenv.Builder() != "" && runtime.GOARCH == "amd64" && (runtime.GOOS == "linux" || runtime.GOOS == "darwin" || runtime.GOOS == "windows") {
return
}
t.Skip("skipping test in -short mode")
}
}
func init() {
switch runtime.GOOS {
case "android", "js":
canRun = false
case "darwin":
switch runtime.GOARCH {
case "arm", "arm64":
canRun = false
}
case "linux":
switch runtime.GOARCH {
case "arm":
// many linux/arm machines are too slow to run
// the full set of external tests.
skipExternal = true
case "mips", "mipsle", "mips64", "mips64le":
// Also slow.
skipExternal = true
if testenv.Builder() != "" {
// On the builders, skip the cmd/go
// tests. They're too slow and already
// covered by other ports. There's
// nothing os/arch specific in the
// tests.
canRun = false
}
}
case "freebsd":
switch runtime.GOARCH {
case "arm":
// many freebsd/arm machines are too slow to run
// the full set of external tests.
skipExternal = true
canRun = false
}
case "plan9":
switch runtime.GOARCH {
case "arm":
// many plan9/arm machines are too slow to run
// the full set of external tests.
skipExternal = true
}
case "windows":
exeSuffix = ".exe"
}
}
// testGOROOT is the GOROOT to use when running testgo, a cmd/go binary
// build from this process's current GOROOT, but run from a different
// (temp) directory.
var testGOROOT string
var testCC string
var testGOCACHE string
var testGo string
var testTmpDir string
var testBin string
// testCtx is canceled when the test binary is about to time out.
//
// If https://golang.org/issue/28135 is accepted, uses of this variable in test
// functions should be replaced by t.Context().
var testCtx = context.Background()
// The TestMain function creates a go command for testing purposes and
// deletes it after the tests have been run.
func TestMain(m *testing.M) {
// $GO_GCFLAGS a compiler debug flag known to cmd/dist, make.bash, etc.
// It is not a standard go command flag; use os.Getenv, not cfg.Getenv.
if os.Getenv("GO_GCFLAGS") != "" {
fmt.Fprintf(os.Stderr, "testing: warning: no tests to run\n") // magic string for cmd/go
fmt.Printf("cmd/go test is not compatible with $GO_GCFLAGS being set\n")
fmt.Printf("SKIP\n")
return
}
os.Unsetenv("GOROOT_FINAL")
flag.Parse()
timeoutFlag := flag.Lookup("test.timeout")
if timeoutFlag != nil {
// TODO(golang.org/issue/28147): The go command does not pass the
// test.timeout flag unless either -timeout or -test.timeout is explicitly
// set on the command line.
if d := timeoutFlag.Value.(flag.Getter).Get().(time.Duration); d != 0 {
aBitShorter := d * 95 / 100
var cancel context.CancelFunc
testCtx, cancel = context.WithTimeout(testCtx, aBitShorter)
defer cancel()
}
}
if *proxyAddr != "" {
StartProxy()
select {}
}
// Run with a temporary TMPDIR to check that the tests don't
// leave anything behind.
topTmpdir, err := ioutil.TempDir("", "cmd-go-test-")
if err != nil {
log.Fatal(err)
}
if !*testWork {
defer removeAll(topTmpdir)
}
os.Setenv(tempEnvName(), topTmpdir)
dir, err := ioutil.TempDir(topTmpdir, "tmpdir")
if err != nil {
log.Fatal(err)
}
testTmpDir = dir
if !*testWork {
defer removeAll(testTmpDir)
}
testGOCACHE = cache.DefaultDir()
if canRun {
testBin = filepath.Join(testTmpDir, "testbin")
if err := os.Mkdir(testBin, 0777); err != nil {
log.Fatal(err)
}
testGo = filepath.Join(testBin, "go"+exeSuffix)
args := []string{"build", "-tags", "testgo", "-o", testGo}
if race.Enabled {
args = append(args, "-race")
}
gotool, err := testenv.GoTool()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(2)
}
goEnv := func(name string) string {
out, err := exec.Command(gotool, "env", name).CombinedOutput()
if err != nil {
fmt.Fprintf(os.Stderr, "go env %s: %v\n%s", name, err, out)
os.Exit(2)
}
return strings.TrimSpace(string(out))
}
testGOROOT = goEnv("GOROOT")
// The whole GOROOT/pkg tree was installed using the GOHOSTOS/GOHOSTARCH
// toolchain (installed in GOROOT/pkg/tool/GOHOSTOS_GOHOSTARCH).
// The testgo.exe we are about to create will be built for GOOS/GOARCH,
// which means it will use the GOOS/GOARCH toolchain
// (installed in GOROOT/pkg/tool/GOOS_GOARCH).
// If these are not the same toolchain, then the entire standard library
// will look out of date (the compilers in those two different tool directories
// are built for different architectures and have different build IDs),
// which will cause many tests to do unnecessary rebuilds and some
// tests to attempt to overwrite the installed standard library.
// Bail out entirely in this case.
hostGOOS := goEnv("GOHOSTOS")
hostGOARCH := goEnv("GOHOSTARCH")
if hostGOOS != runtime.GOOS || hostGOARCH != runtime.GOARCH {
fmt.Fprintf(os.Stderr, "testing: warning: no tests to run\n") // magic string for cmd/go
fmt.Printf("cmd/go test is not compatible with GOOS/GOARCH != GOHOSTOS/GOHOSTARCH (%s/%s != %s/%s)\n", runtime.GOOS, runtime.GOARCH, hostGOOS, hostGOARCH)
fmt.Printf("SKIP\n")
return
}
buildCmd := exec.Command(gotool, args...)
buildCmd.Env = append(os.Environ(), "GOFLAGS=-mod=vendor")
out, err := buildCmd.CombinedOutput()
if err != nil {
fmt.Fprintf(os.Stderr, "building testgo failed: %v\n%s", err, out)
os.Exit(2)
}
out, err = exec.Command(gotool, "env", "CC").CombinedOutput()
if err != nil {
fmt.Fprintf(os.Stderr, "could not find testing CC: %v\n%s", err, out)
os.Exit(2)
}
testCC = strings.TrimSpace(string(out))
if out, err := exec.Command(testGo, "env", "CGO_ENABLED").Output(); err != nil {
fmt.Fprintf(os.Stderr, "running testgo failed: %v\n", err)
canRun = false
} else {
canCgo, err = strconv.ParseBool(strings.TrimSpace(string(out)))
if err != nil {
fmt.Fprintf(os.Stderr, "can't parse go env CGO_ENABLED output: %v\n", strings.TrimSpace(string(out)))
}
}
out, err = exec.Command(gotool, "env", "GOCACHE").CombinedOutput()
if err != nil {
fmt.Fprintf(os.Stderr, "could not find testing GOCACHE: %v\n%s", err, out)
os.Exit(2)
}
testGOCACHE = strings.TrimSpace(string(out))
canMSan = canCgo && sys.MSanSupported(runtime.GOOS, runtime.GOARCH)
canRace = canCgo && sys.RaceDetectorSupported(runtime.GOOS, runtime.GOARCH)
// The race detector doesn't work on Alpine Linux:
// golang.org/issue/14481
// gccgo does not support the race detector.
if isAlpineLinux() || runtime.Compiler == "gccgo" {
canRace = false
}
}
// Don't let these environment variables confuse the test.
os.Setenv("GOENV", "off")
os.Unsetenv("GOBIN")
os.Unsetenv("GOPATH")
os.Unsetenv("GIT_ALLOW_PROTOCOL")
os.Setenv("HOME", "/test-go-home-does-not-exist")
// On some systems the default C compiler is ccache.
// Setting HOME to a non-existent directory will break
// those systems. Disable ccache and use real compiler. Issue 17668.
os.Setenv("CCACHE_DISABLE", "1")
if cfg.Getenv("GOCACHE") == "" {
os.Setenv("GOCACHE", testGOCACHE) // because $HOME is gone
}
r := m.Run()
if !*testWork {
removeAll(testTmpDir) // os.Exit won't run defer
}
if !*testWork {
// There shouldn't be anything left in topTmpdir.
dirf, err := os.Open(topTmpdir)
if err != nil {
log.Fatal(err)
}
names, err := dirf.Readdirnames(0)
if err != nil {
log.Fatal(err)
}
if len(names) > 0 {
log.Fatalf("unexpected files left in tmpdir: %v", names)
}
removeAll(topTmpdir)
}
os.Exit(r)
}
func isAlpineLinux() bool {
if runtime.GOOS != "linux" {
return false
}
fi, err := os.Lstat("/etc/alpine-release")
return err == nil && fi.Mode().IsRegular()
}
// The length of an mtime tick on this system. This is an estimate of
// how long we need to sleep to ensure that the mtime of two files is
// different.
// We used to try to be clever but that didn't always work (see golang.org/issue/12205).
var mtimeTick time.Duration = 1 * time.Second
// Manage a single run of the testgo binary.
type testgoData struct {
t *testing.T
temps []string
wd string
env []string
tempdir string
ran bool
inParallel bool
stdout, stderr bytes.Buffer
execDir string // dir for tg.run
}
// skipIfGccgo skips the test if using gccgo.
func skipIfGccgo(t *testing.T, msg string) {
if runtime.Compiler == "gccgo" {
t.Skipf("skipping test not supported on gccgo: %s", msg)
}
}
// testgo sets up for a test that runs testgo.
func testgo(t *testing.T) *testgoData {
t.Helper()
testenv.MustHaveGoBuild(t)
if skipExternal {
t.Skipf("skipping external tests on %s/%s", runtime.GOOS, runtime.GOARCH)
}
return &testgoData{t: t}
}
// must gives a fatal error if err is not nil.
func (tg *testgoData) must(err error) {
tg.t.Helper()
if err != nil {
tg.t.Fatal(err)
}
}
// check gives a test non-fatal error if err is not nil.
func (tg *testgoData) check(err error) {
tg.t.Helper()
if err != nil {
tg.t.Error(err)
}
}
// parallel runs the test in parallel by calling t.Parallel.
func (tg *testgoData) parallel() {
tg.t.Helper()
if tg.ran {
tg.t.Fatal("internal testsuite error: call to parallel after run")
}
if tg.wd != "" {
tg.t.Fatal("internal testsuite error: call to parallel after cd")
}
for _, e := range tg.env {
if strings.HasPrefix(e, "GOROOT=") || strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
val := e[strings.Index(e, "=")+1:]
if strings.HasPrefix(val, "testdata") || strings.HasPrefix(val, "./testdata") {
tg.t.Fatalf("internal testsuite error: call to parallel with testdata in environment (%s)", e)
}
}
}
tg.inParallel = true
tg.t.Parallel()
}
// pwd returns the current directory.
func (tg *testgoData) pwd() string {
tg.t.Helper()
wd, err := os.Getwd()
if err != nil {
tg.t.Fatalf("could not get working directory: %v", err)
}
return wd
}
// cd changes the current directory to the named directory. Note that
// using this means that the test must not be run in parallel with any
// other tests.
func (tg *testgoData) cd(dir string) {
tg.t.Helper()
if tg.inParallel {
tg.t.Fatal("internal testsuite error: changing directory when running in parallel")
}
if tg.wd == "" {
tg.wd = tg.pwd()
}
abs, err := filepath.Abs(dir)
tg.must(os.Chdir(dir))
if err == nil {
tg.setenv("PWD", abs)
}
}
// sleep sleeps for one tick, where a tick is a conservative estimate
// of how long it takes for a file modification to get a different
// mtime.
func (tg *testgoData) sleep() {
time.Sleep(mtimeTick)
}
// setenv sets an environment variable to use when running the test go
// command.
func (tg *testgoData) setenv(name, val string) {
tg.t.Helper()
if tg.inParallel && (name == "GOROOT" || name == "GOPATH" || name == "GOBIN") && (strings.HasPrefix(val, "testdata") || strings.HasPrefix(val, "./testdata")) {
tg.t.Fatalf("internal testsuite error: call to setenv with testdata (%s=%s) after parallel", name, val)
}
tg.unsetenv(name)
tg.env = append(tg.env, name+"="+val)
}
// unsetenv removes an environment variable.
func (tg *testgoData) unsetenv(name string) {
if tg.env == nil {
tg.env = append([]string(nil), os.Environ()...)
tg.env = append(tg.env, "GO111MODULE=off")
}
for i, v := range tg.env {
if strings.HasPrefix(v, name+"=") {
tg.env = append(tg.env[:i], tg.env[i+1:]...)
break
}
}
}
func (tg *testgoData) goTool() string {
return testGo
}
// doRun runs the test go command, recording stdout and stderr and
// returning exit status.
func (tg *testgoData) doRun(args []string) error {
tg.t.Helper()
if !canRun {
panic("testgoData.doRun called but canRun false")
}
if tg.inParallel {
for _, arg := range args {
if strings.HasPrefix(arg, "testdata") || strings.HasPrefix(arg, "./testdata") {
tg.t.Fatal("internal testsuite error: parallel run using testdata")
}
}
}
hasGoroot := false
for _, v := range tg.env {
if strings.HasPrefix(v, "GOROOT=") {
hasGoroot = true
break
}
}
prog := tg.goTool()
if !hasGoroot {
tg.setenv("GOROOT", testGOROOT)
}
tg.t.Logf("running testgo %v", args)
cmd := exec.Command(prog, args...)
tg.stdout.Reset()
tg.stderr.Reset()
cmd.Dir = tg.execDir
cmd.Stdout = &tg.stdout
cmd.Stderr = &tg.stderr
cmd.Env = tg.env
status := cmd.Run()
if tg.stdout.Len() > 0 {
tg.t.Log("standard output:")
tg.t.Log(tg.stdout.String())
}
if tg.stderr.Len() > 0 {
tg.t.Log("standard error:")
tg.t.Log(tg.stderr.String())
}
tg.ran = true
return status
}
// run runs the test go command, and expects it to succeed.
func (tg *testgoData) run(args ...string) {
tg.t.Helper()
if status := tg.doRun(args); status != nil {
wd, _ := os.Getwd()
tg.t.Logf("go %v failed unexpectedly in %s: %v", args, wd, status)
tg.t.FailNow()
}
}
// runFail runs the test go command, and expects it to fail.
func (tg *testgoData) runFail(args ...string) {
tg.t.Helper()
if status := tg.doRun(args); status == nil {
tg.t.Fatal("testgo succeeded unexpectedly")
} else {
tg.t.Log("testgo failed as expected:", status)
}
}
// runGit runs a git command, and expects it to succeed.
func (tg *testgoData) runGit(dir string, args ...string) {
tg.t.Helper()
cmd := exec.Command("git", args...)
tg.stdout.Reset()
tg.stderr.Reset()
cmd.Stdout = &tg.stdout
cmd.Stderr = &tg.stderr
cmd.Dir = dir
cmd.Env = tg.env
status := cmd.Run()
if tg.stdout.Len() > 0 {
tg.t.Log("git standard output:")
tg.t.Log(tg.stdout.String())
}
if tg.stderr.Len() > 0 {
tg.t.Log("git standard error:")
tg.t.Log(tg.stderr.String())
}
if status != nil {
tg.t.Logf("git %v failed unexpectedly: %v", args, status)
tg.t.FailNow()
}
}
// getStdout returns standard output of the testgo run as a string.
func (tg *testgoData) getStdout() string {
tg.t.Helper()
if !tg.ran {
tg.t.Fatal("internal testsuite error: stdout called before run")
}
return tg.stdout.String()
}
// getStderr returns standard error of the testgo run as a string.
func (tg *testgoData) getStderr() string {
tg.t.Helper()
if !tg.ran {
tg.t.Fatal("internal testsuite error: stdout called before run")
}
return tg.stderr.String()
}
// doGrepMatch looks for a regular expression in a buffer, and returns
// whether it is found. The regular expression is matched against
// each line separately, as with the grep command.
func (tg *testgoData) doGrepMatch(match string, b *bytes.Buffer) bool {
tg.t.Helper()
if !tg.ran {
tg.t.Fatal("internal testsuite error: grep called before run")
}
re := regexp.MustCompile(match)
for _, ln := range bytes.Split(b.Bytes(), []byte{'\n'}) {
if re.Match(ln) {
return true
}
}
return false
}
// doGrep looks for a regular expression in a buffer and fails if it
// is not found. The name argument is the name of the output we are
// searching, "output" or "error". The msg argument is logged on
// failure.
func (tg *testgoData) doGrep(match string, b *bytes.Buffer, name, msg string) {
tg.t.Helper()
if !tg.doGrepMatch(match, b) {
tg.t.Log(msg)
tg.t.Logf("pattern %v not found in standard %s", match, name)
tg.t.FailNow()
}
}
// grepStdout looks for a regular expression in the test run's
// standard output and fails, logging msg, if it is not found.
func (tg *testgoData) grepStdout(match, msg string) {
tg.t.Helper()
tg.doGrep(match, &tg.stdout, "output", msg)
}
// grepStderr looks for a regular expression in the test run's
// standard error and fails, logging msg, if it is not found.
func (tg *testgoData) grepStderr(match, msg string) {
tg.t.Helper()
tg.doGrep(match, &tg.stderr, "error", msg)
}
// grepBoth looks for a regular expression in the test run's standard
// output or stand error and fails, logging msg, if it is not found.
func (tg *testgoData) grepBoth(match, msg string) {
tg.t.Helper()
if !tg.doGrepMatch(match, &tg.stdout) && !tg.doGrepMatch(match, &tg.stderr) {
tg.t.Log(msg)
tg.t.Logf("pattern %v not found in standard output or standard error", match)
tg.t.FailNow()
}
}
// doGrepNot looks for a regular expression in a buffer and fails if
// it is found. The name and msg arguments are as for doGrep.
func (tg *testgoData) doGrepNot(match string, b *bytes.Buffer, name, msg string) {
tg.t.Helper()
if tg.doGrepMatch(match, b) {
tg.t.Log(msg)
tg.t.Logf("pattern %v found unexpectedly in standard %s", match, name)
tg.t.FailNow()
}
}
// grepStdoutNot looks for a regular expression in the test run's
// standard output and fails, logging msg, if it is found.
func (tg *testgoData) grepStdoutNot(match, msg string) {
	tg.t.Helper()
	out := &tg.stdout
	tg.doGrepNot(match, out, "output", msg)
}
// grepStderrNot looks for a regular expression in the test run's
// standard error and fails, logging msg, if it is found.
func (tg *testgoData) grepStderrNot(match, msg string) {
	tg.t.Helper()
	errOut := &tg.stderr
	tg.doGrepNot(match, errOut, "error", msg)
}
// grepBothNot looks for a regular expression in the test run's
// standard output or standard error and fails, logging msg, if it is
// found.
func (tg *testgoData) grepBothNot(match, msg string) {
	tg.t.Helper()
	// stdout is checked first; stderr only if stdout had no match.
	inStdout := tg.doGrepMatch(match, &tg.stdout)
	if inStdout || tg.doGrepMatch(match, &tg.stderr) {
		tg.t.Log(msg)
		tg.t.Fatalf("pattern %v found unexpectedly in standard output or standard error", match)
	}
}
// doGrepCount counts the number of times a regexp is seen in a buffer,
// matching line by line as with the grep command.
func (tg *testgoData) doGrepCount(match string, b *bytes.Buffer) int {
	tg.t.Helper()
	// Counting only makes sense against the output of a recorded run.
	if !tg.ran {
		tg.t.Fatal("internal testsuite error: doGrepCount called before run")
	}
	pattern := regexp.MustCompile(match)
	count := 0
	lines := bytes.Split(b.Bytes(), []byte{'\n'})
	for i := range lines {
		if pattern.Match(lines[i]) {
			count++
		}
	}
	return count
}
// grepCountBoth returns the total number of times a regexp is seen in
// standard output plus standard error.
func (tg *testgoData) grepCountBoth(match string) int {
	tg.t.Helper()
	n := tg.doGrepCount(match, &tg.stdout)
	n += tg.doGrepCount(match, &tg.stderr)
	return n
}
// creatingTemp records that the test plans to create a temporary file
// or directory. If the file or directory exists already, it will be
// removed. When the test completes, the file or directory will be
// removed if it exists.
func (tg *testgoData) creatingTemp(path string) {
	tg.t.Helper()
	// Absolute paths must live under tg.tempdir; anything else would let
	// a test delete files outside its own sandbox.
	if filepath.IsAbs(path) && !strings.HasPrefix(path, tg.tempdir) {
		tg.t.Fatalf("internal testsuite error: creatingTemp(%q) with absolute path not in temporary directory", path)
	}
	// If we have changed the working directory, make sure we have
	// an absolute path, because we are going to change directory
	// back before we remove the temporary.
	if !filepath.IsAbs(path) {
		// A relative path with no recorded wd (or a wd inside GOROOT/src)
		// would resolve against the checked-out sources — refuse it.
		if tg.wd == "" || strings.HasPrefix(tg.wd, testGOROOT) {
			tg.t.Fatalf("internal testsuite error: creatingTemp(%q) within GOROOT/src", path)
		}
		path = filepath.Join(tg.wd, path)
	}
	// Remove any pre-existing file now, and remember the path so that
	// cleanup removes it again after the test.
	tg.must(robustio.RemoveAll(path))
	tg.temps = append(tg.temps, path)
}
// makeTempdir makes a temporary directory for a run of testgo. If
// the temporary directory was already created, this does nothing.
func (tg *testgoData) makeTempdir() {
	tg.t.Helper()
	if tg.tempdir != "" {
		return // already created
	}
	dir, err := ioutil.TempDir("", "gotest")
	tg.must(err)
	tg.tempdir = dir
}
// tempFile adds a temporary file for a run of testgo, creating any
// needed parent directories under the temporary directory. Contents of
// .go files are passed through go/format when possible, so tests may
// use unformatted source literals.
func (tg *testgoData) tempFile(path, contents string) {
	tg.t.Helper()
	tg.makeTempdir()
	tg.must(os.MkdirAll(filepath.Join(tg.tempdir, filepath.Dir(path)), 0755))
	// Named data (not bytes) to avoid shadowing the imported bytes package.
	data := []byte(contents)
	if strings.HasSuffix(path, ".go") {
		formatted, err := format.Source(data)
		if err == nil {
			// Formatting is best-effort: unformattable content is written as-is.
			data = formatted
		}
	}
	tg.must(ioutil.WriteFile(filepath.Join(tg.tempdir, path), data, 0644))
}
// tempDir adds a temporary directory for a run of testgo. It is not an
// error for the directory to exist already.
func (tg *testgoData) tempDir(path string) {
	tg.t.Helper()
	tg.makeTempdir()
	err := os.MkdirAll(filepath.Join(tg.tempdir, path), 0755)
	if err != nil && !os.IsExist(err) {
		tg.t.Fatal(err)
	}
}
// path returns the absolute pathname of name within the temporary
// directory; name "." returns the temporary directory itself.
func (tg *testgoData) path(name string) string {
	tg.t.Helper()
	switch {
	case tg.tempdir == "":
		tg.t.Fatalf("internal testsuite error: path(%q) with no tempdir", name)
	case name == ".":
		return tg.tempdir
	}
	return filepath.Join(tg.tempdir, name)
}
// mustExist fails the test if path does not exist (or cannot be
// stat'ed at all).
func (tg *testgoData) mustExist(path string) {
	tg.t.Helper()
	_, err := os.Stat(path)
	if err == nil {
		return
	}
	if os.IsNotExist(err) {
		tg.t.Fatalf("%s does not exist but should", path)
	}
	tg.t.Fatalf("%s stat failed: %v", path, err)
}
// mustNotExist fails the test if path exists, or if stat fails for any
// reason other than non-existence.
func (tg *testgoData) mustNotExist(path string) {
	tg.t.Helper()
	_, err := os.Stat(path)
	if err == nil || !os.IsNotExist(err) {
		tg.t.Fatalf("%s exists but should not (%v)", path, err)
	}
}
// mustHaveContent succeeds if filePath is a path to a file,
// and that file is readable and not empty.
func (tg *testgoData) mustHaveContent(filePath string) {
	// Mark as helper so failures point at the caller,
	// consistent with the other must* helpers in this file.
	tg.t.Helper()
	tg.mustExist(filePath)
	f, err := os.Stat(filePath)
	if err != nil {
		tg.t.Fatal(err)
	}
	if f.Size() == 0 {
		tg.t.Fatalf("expected %s to have data, but is empty", filePath)
	}
}
// wantExecutable fails with msg if path does not exist or, on
// non-Windows systems, has no executable permission bits set.
func (tg *testgoData) wantExecutable(path, msg string) {
	tg.t.Helper()
	st, err := os.Stat(path)
	if err != nil {
		// Only log unexpected stat errors; plain non-existence is
		// already described by msg.
		if !os.IsNotExist(err) {
			tg.t.Log(err)
		}
		tg.t.Fatal(msg)
	}
	// Windows has no Unix permission bits, so skip the mode check there.
	if runtime.GOOS != "windows" && st.Mode()&0111 == 0 {
		tg.t.Fatalf("binary %s exists but is not executable", path)
	}
}
// wantArchive fails if the file at path does not begin with the
// Unix ar archive magic "!<arch>\n".
func (tg *testgoData) wantArchive(path string) {
	tg.t.Helper()
	f, err := os.Open(path)
	if err != nil {
		tg.t.Fatal(err)
	}
	header := make([]byte, 100)
	// Best-effort read: a short file simply leaves the rest zeroed,
	// which cannot match the magic below.
	io.ReadFull(f, header)
	f.Close()
	if !bytes.HasPrefix(header, []byte("!<arch>\n")) {
		tg.t.Fatalf("file %s exists but is not an archive", path)
	}
}
// isStale reports whether pkg is stale, and why, by parsing the
// "Stale:StaleReason" output of go list.
func (tg *testgoData) isStale(pkg string) (bool, string) {
	tg.t.Helper()
	tg.run("list", "-f", "{{.Stale}}:{{.StaleReason}}", pkg)
	out := strings.TrimSpace(tg.getStdout())
	if parts := strings.SplitN(out, ":", 2); len(parts) == 2 {
		if parts[0] == "true" {
			return true, parts[1]
		}
		if parts[0] == "false" {
			return false, parts[1]
		}
	}
	// Anything other than true/false is a malformed answer.
	tg.t.Fatalf("unexpected output checking staleness of package %v: %v", pkg, out)
	panic("unreachable")
}
// wantStale fails with msg if pkg is not stale, and reports an error
// if the staleness reason does not mention reason.
func (tg *testgoData) wantStale(pkg, reason, msg string) {
	tg.t.Helper()
	stale, why := tg.isStale(pkg)
	if !stale {
		tg.t.Fatal(msg)
	}
	// We always accept the reason as being "not installed but
	// available in build cache", because when that is the case go
	// list doesn't try to sort out the underlying reason why the
	// package is not installed.
	cacheReason := strings.Contains(why, "not installed but available in build cache")
	wrongReason := !strings.Contains(why, reason) && !cacheReason
	if (reason == "" && why != "") || wrongReason {
		tg.t.Errorf("wrong reason for Stale=true: %q, want %q", why, reason)
	}
}
// wantNotStale fails with msg if pkg is stale, and reports an error
// if the (non-)staleness reason does not mention reason.
func (tg *testgoData) wantNotStale(pkg, reason, msg string) {
	tg.t.Helper()
	stale, why := tg.isStale(pkg)
	if stale {
		tg.t.Fatal(msg)
	}
	wrongReason := !strings.Contains(why, reason)
	if (reason == "" && why != "") || wrongReason {
		tg.t.Errorf("wrong reason for Stale=false: %q, want %q", why, reason)
	}
}
// If -testwork is specified, the test prints the name of the temp directory
// and does not remove it when done, so that a programmer can
// poke at the test file tree afterward.
// Checked by cleanup below.
var testWork = flag.Bool("testwork", false, "")
// cleanup cleans up a test that runs testgo: it restores the original
// working directory, then removes the recorded temporary files and the
// temporary directory (unless -testwork is set).
func (tg *testgoData) cleanup() {
	tg.t.Helper()
	// Restore the working directory first: the recorded temp paths may
	// be relative to it, and the temp dir itself may be the current dir.
	if tg.wd != "" {
		wd, _ := os.Getwd()
		tg.t.Logf("ended in %s", wd)
		if err := os.Chdir(tg.wd); err != nil {
			// We are unlikely to be able to continue.
			fmt.Fprintln(os.Stderr, "could not restore working directory, crashing:", err)
			os.Exit(2)
		}
	}
	// With -testwork, keep everything on disk for inspection.
	if *testWork {
		tg.t.Logf("TESTWORK=%s\n", tg.path("."))
		return
	}
	for _, path := range tg.temps {
		tg.check(removeAll(path))
	}
	if tg.tempdir != "" {
		tg.check(removeAll(tg.tempdir))
	}
}
// removeAll removes dir and its contents, first making read-only
// directories (such as 0444 module cache directories) writable so
// that their contents can be deleted.
func removeAll(dir string) error {
	filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		// Ignore errors walking the file system and keep going.
		if err == nil && info.IsDir() {
			os.Chmod(path, 0777)
		}
		return nil
	})
	return robustio.RemoveAll(dir)
}
// failSSH puts an ssh executable in the PATH that always fails.
// This is to stub out uses of ssh by go get.
func (tg *testgoData) failSSH() {
	tg.t.Helper()
	cwd, err := os.Getwd()
	if err != nil {
		tg.t.Fatal(err)
	}
	stub := filepath.Join(cwd, "testdata/failssh")
	// Prepend the stub directory so its ssh wins the PATH lookup.
	newPath := fmt.Sprintf("%v%c%v", stub, filepath.ListSeparator, os.Getenv("PATH"))
	tg.setenv("PATH", newPath)
}
// TestNewReleaseRebuildsStalePackagesInGOPATH checks that a package in
// GOPATH becomes stale when the content of a copied GOROOT source file
// changes, but not when only its mtime changes.
func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping lengthy test in short mode")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	// Copy the runtime packages into a temporary GOROOT
	// so that we can change files.
	for _, copydir := range []string{
		"src/runtime",
		"src/internal/bytealg",
		"src/internal/cpu",
		"src/math/bits",
		"src/unsafe",
		filepath.Join("pkg", runtime.GOOS+"_"+runtime.GOARCH),
		filepath.Join("pkg/tool", runtime.GOOS+"_"+runtime.GOARCH),
		"pkg/include",
	} {
		srcdir := filepath.Join(testGOROOT, copydir)
		tg.tempDir(filepath.Join("goroot", copydir))
		err := filepath.Walk(srcdir,
			func(path string, info os.FileInfo, err error) error {
				if err != nil {
					return err
				}
				if info.IsDir() {
					return nil
				}
				srcrel, err := filepath.Rel(srcdir, path)
				if err != nil {
					return err
				}
				dest := filepath.Join("goroot", copydir, srcrel)
				data, err := ioutil.ReadFile(path)
				if err != nil {
					return err
				}
				tg.tempFile(dest, string(data))
				// Add write permission so addVar below can modify the copy.
				if err := os.Chmod(tg.path(dest), info.Mode()|0200); err != nil {
					return err
				}
				return nil
			})
		if err != nil {
			t.Fatal(err)
		}
	}
	tg.setenv("GOROOT", tg.path("goroot"))
	// addVar appends a dummy var declaration to file name and returns a
	// function that restores the file's original content.
	addVar := func(name string, idx int) (restore func()) {
		data, err := ioutil.ReadFile(name)
		if err != nil {
			t.Fatal(err)
		}
		old := data
		data = append(data, fmt.Sprintf("var DummyUnusedVar%d bool\n", idx)...)
		if err := ioutil.WriteFile(name, append(data, '\n'), 0666); err != nil {
			t.Fatal(err)
		}
		tg.sleep()
		return func() {
			if err := ioutil.WriteFile(name, old, 0666); err != nil {
				t.Fatal(err)
			}
		}
	}
	// Every main package depends on the "runtime".
	tg.tempFile("d1/src/p1/p1.go", `package main; func main(){}`)
	tg.setenv("GOPATH", tg.path("d1"))
	// Pass -i flag to rebuild everything outdated.
	tg.run("install", "-i", "p1")
	tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, before any changes")
	// Changing mtime of runtime/internal/sys/sys.go
	// should have no effect: only the content matters.
	// In fact this should be true even outside a release branch.
	sys := tg.path("goroot/src/runtime/internal/sys/sys.go")
	tg.sleep()
	restore := addVar(sys, 0)
	restore()
	tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after updating mtime of runtime/internal/sys/sys.go")
	// But changing content of any file should have an effect.
	// Previously zversion.go was the only one that mattered;
	// now they all matter, so keep using sys.go.
	restore = addVar(sys, 1)
	defer restore()
	tg.wantStale("p1", "stale dependency: runtime/internal/sys", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go")
	restore()
	tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after changing back to old release")
	addVar(sys, 2)
	tg.wantStale("p1", "stale dependency: runtime", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go again")
	tg.run("install", "-i", "p1")
	tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with new release")
	// Restore to "old" release.
	restore()
	tg.wantStale("p1", "stale dependency: runtime/internal/sys", "./testgo list claims p1 is NOT stale, incorrectly, after restoring sys.go")
	tg.run("install", "-i", "p1")
	tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with old release")
}
// TestInternalPackagesInGOROOTAreRespected checks that building a
// package that imports net/http/internal from outside its tree fails
// with the "use of internal package ... not allowed" error.
func TestInternalPackagesInGOROOTAreRespected(t *testing.T) {
	skipIfGccgo(t, "gccgo does not have GOROOT")
	tg := testgo(t)
	defer tg.cleanup()
	tg.runFail("build", "-v", "./testdata/testinternal")
	tg.grepBoth(`testinternal(\/|\\)p\.go\:3\:8\: use of internal package net/http/internal not allowed`, "wrong error message for testdata/testinternal")
}
// TestInternalPackagesOutsideGOROOTAreRespected checks that the
// internal-package visibility rule is also enforced for packages
// outside GOROOT.
func TestInternalPackagesOutsideGOROOTAreRespected(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.runFail("build", "-v", "./testdata/testinternal2")
	// "wrong", not "wrote" — message matches the sibling GOROOT test above.
	tg.grepBoth(`testinternal2(\/|\\)p\.go\:3\:8\: use of internal package .*internal/w not allowed`, "wrong error message for testdata/testinternal2")
}
// TestInternalPackageErrorsAreHandled checks that go list succeeds on
// testdata/testinternal3 rather than crashing on internal-package errors.
func TestInternalPackageErrorsAreHandled(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("list", "./testdata/testinternal3")
}
// TestInternalCache checks that building package p in
// testdata/testinternal4 fails with an internal-package error.
func TestInternalCache(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata/testinternal4"))
	tg.runFail("build", "p")
	tg.grepStderr("internal", "did not fail to build p")
}
// cmd/go: custom import path checking should not apply to Go packages without import comment.
// The test clones the repo, rewrites its origin URL, and checks that a
// subsequent go get -u still succeeds. Requires network and git.
func TestIssue10952(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	testenv.MustHaveExecPath(t, "git")
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src")
	tg.setenv("GOPATH", tg.path("."))
	const importPath = "github.com/zombiezen/go-get-issue-10952"
	tg.run("get", "-d", "-u", importPath)
	repoDir := tg.path("src/" + importPath)
	// Point origin at the https form of the same repo; -u must still work.
	tg.runGit(repoDir, "remote", "set-url", "origin", "https://"+importPath+".git")
	tg.run("get", "-d", "-u", importPath)
}
// TestIssue16471 checks that go get -u detects a checkout whose remote
// does not match the repository's custom import path. Requires network
// and git.
func TestIssue16471(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	testenv.MustHaveExecPath(t, "git")
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src")
	tg.setenv("GOPATH", tg.path("."))
	// Clone the wrong repo into the rsc.io path to provoke the mismatch.
	tg.must(os.MkdirAll(tg.path("src/rsc.io/go-get-issue-10952"), 0755))
	tg.runGit(tg.path("src/rsc.io"), "clone", "https://github.com/zombiezen/go-get-issue-10952")
	tg.runFail("get", "-u", "rsc.io/go-get-issue-10952")
	tg.grepStderr("rsc.io/go-get-issue-10952 is a custom import path for https://github.com/rsc/go-get-issue-10952, but .* is checked out from https://github.com/zombiezen/go-get-issue-10952", "did not detect updated import path")
}
// Test git clone URL that uses SCP-like syntax and custom import path checking.
// Requires network and git.
func TestIssue11457(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	testenv.MustHaveExecPath(t, "git")
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src")
	tg.setenv("GOPATH", tg.path("."))
	const importPath = "rsc.io/go-get-issue-11457"
	tg.run("get", "-d", "-u", importPath)
	repoDir := tg.path("src/" + importPath)
	tg.runGit(repoDir, "remote", "set-url", "origin", "[email protected]:rsc/go-get-issue-11457")
	// At this time, custom import path checking compares remotes verbatim (rather than
	// just the host and path, skipping scheme and user), so we expect go get -u to fail.
	// However, the goal of this test is to verify that gitRemoteRepo correctly parsed
	// the SCP-like syntax, and we expect it to appear in the error message.
	tg.runFail("get", "-d", "-u", importPath)
	want := " is checked out from ssh://[email protected]/rsc/go-get-issue-11457"
	if !strings.HasSuffix(strings.TrimSpace(tg.getStderr()), want) {
		t.Error("expected clone URL to appear in stderr")
	}
}
// TestGetGitDefaultBranch checks that go get and go get -u keep the
// repository's own default branch rather than forcing master.
// Requires network and git.
func TestGetGitDefaultBranch(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	testenv.MustHaveExecPath(t, "git")
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src")
	tg.setenv("GOPATH", tg.path("."))
	// This repo has two branches, master and another-branch.
	// The another-branch is the default that you get from 'git clone'.
	// The go get command variants should not override this.
	const importPath = "github.com/rsc/go-get-default-branch"
	tg.run("get", "-d", importPath)
	repoDir := tg.path("src/" + importPath)
	tg.runGit(repoDir, "branch", "--contains", "HEAD")
	tg.grepStdout(`\* another-branch`, "not on correct default branch")
	tg.run("get", "-d", "-u", importPath)
	tg.runGit(repoDir, "branch", "--contains", "HEAD")
	tg.grepStdout(`\* another-branch`, "not on correct default branch")
}
// Security issue. Don't disable. See golang.org/issue/22125.
// Checks that go get refuses a git checkout nested under an svn parent.
// Requires network and git.
func TestAccidentalGitCheckout(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	testenv.MustHaveExecPath(t, "git")
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src")
	tg.setenv("GOPATH", tg.path("."))
	tg.runFail("get", "-u", "vcs-test.golang.org/go/test1-svn-git")
	tg.grepStderr("src[\\\\/]vcs-test.* uses git, but parent .*src[\\\\/]vcs-test.* uses svn", "get did not fail for right reason")
	if _, err := os.Stat(tg.path("SrC")); err == nil {
		// This case only triggers on a case-insensitive file system.
		tg.runFail("get", "-u", "vcs-test.golang.org/go/test2-svn-git/test2main")
		tg.grepStderr("src[\\\\/]vcs-test.* uses git, but parent .*src[\\\\/]vcs-test.* uses svn", "get did not fail for right reason")
	}
}
// TestRelativeImportsGoTest checks that go test works on a package
// using relative imports.
func TestRelativeImportsGoTest(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "./testdata/testimport")
}
// TestRelativeImportsGoTestDashI checks that go test -i works on a
// package using relative imports.
func TestRelativeImportsGoTestDashI(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// don't let test -i overwrite runtime
	tg.wantNotStale("runtime", "", "must be non-stale before test -i")
	tg.run("test", "-i", "./testdata/testimport")
}
// TestRelativeImportsInCommandLinePackage checks that go test works
// when the package is given as a list of files on the command line.
func TestRelativeImportsInCommandLinePackage(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	files, err := filepath.Glob("./testdata/testimport/*.go")
	tg.must(err)
	tg.run(append([]string{"test"}, files...)...)
}
// TestVersionControlErrorMessageIncludesCorrectDirectory checks that
// the VCS error from go get -u names the directory that caused it.
func TestVersionControlErrorMessageIncludesCorrectDirectory(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata/shadow/root1"))
	tg.runFail("get", "-u", "foo")
	// TODO(iant): We should not have to use strconv.Quote here.
	// The code in vcs.go should be changed so that it is not required.
	quoted := strconv.Quote(filepath.Join("testdata", "shadow", "root1", "src", "foo"))
	// Strip the surrounding double quotes added by Quote.
	quoted = quoted[1 : len(quoted)-1]
	tg.grepStderr(regexp.QuoteMeta(quoted), "go get -u error does not mention shadow/root1/src/foo")
}
// TestInstallFailsWithNoBuildableFiles checks that installing a cgo
// package with CGO_ENABLED=0 reports that build constraints exclude
// all Go files.
func TestInstallFailsWithNoBuildableFiles(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.setenv("CGO_ENABLED", "0")
	tg.runFail("install", "cgotest")
	tg.grepStderr("build constraints exclude all Go files", "go install cgotest did not report 'build constraints exclude all Go files'")
}
// Issue 21895
// TestMSanAndRaceRequireCgo checks that -race and -msan each fail with
// a message naming the right flag when cgo is disabled.
func TestMSanAndRaceRequireCgo(t *testing.T) {
	if !canMSan && !canRace {
		t.Skip("skipping because both msan and the race detector are not supported")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.tempFile("triv.go", `package main; func main() {}`)
	tg.setenv("CGO_ENABLED", "0")
	if canRace {
		tg.runFail("install", "-race", "triv.go")
		tg.grepStderr("-race requires cgo", "did not correctly report that -race requires cgo")
		tg.grepStderrNot("-msan", "reported that -msan instead of -race requires cgo")
	}
	if canMSan {
		tg.runFail("install", "-msan", "triv.go")
		tg.grepStderr("-msan requires cgo", "did not correctly report that -msan requires cgo")
		tg.grepStderrNot("-race", "reported that -race instead of -msan requires cgo")
	}
}
// TestRelativeGOBINFail checks that go install fails when GOBIN is a
// relative path.
func TestRelativeGOBINFail(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.tempFile("triv.go", `package main; func main() {}`)
	tg.setenv("GOBIN", ".")
	// One cd is enough; the original called cd twice with the same path.
	tg.cd(tg.path("."))
	tg.runFail("install")
	tg.grepStderr("cannot install, GOBIN must be an absolute path", "go install must fail if $GOBIN is a relative path")
}
// TestPackageMainTestCompilerFlags checks that compiling the test for
// a main package uses the package path p1, not main, as the compile -p
// argument.
func TestPackageMainTestCompilerFlags(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	tg.tempFile("src/p1/p1.go", "package main\n")
	tg.tempFile("src/p1/p1_test.go", "package main\nimport \"testing\"\nfunc Test(t *testing.T){}\n")
	tg.run("test", "-c", "-n", "p1")
	tg.grepBothNot(`([\\/]compile|gccgo).* (-p main|-fgo-pkgpath=main).*p1\.go`, "should not have run compile -p main p1.go")
	tg.grepStderr(`([\\/]compile|gccgo).* (-p p1|-fgo-pkgpath=p1).*p1\.go`, "should have run compile -p p1 p1.go")
}
// Issue 12690
// TestPackageNotStaleWithTrailingSlash checks that a trailing slash in
// GOROOT does not make standard packages appear stale.
func TestPackageNotStaleWithTrailingSlash(t *testing.T) {
	skipIfGccgo(t, "gccgo does not have GOROOT")
	tg := testgo(t)
	defer tg.cleanup()
	// Make sure the packages below are not stale.
	tg.wantNotStale("runtime", "", "must be non-stale before test runs")
	tg.wantNotStale("os", "", "must be non-stale before test runs")
	tg.wantNotStale("io", "", "must be non-stale before test runs")
	goroot := runtime.GOROOT()
	tg.setenv("GOROOT", goroot+"/")
	tg.wantNotStale("runtime", "", "with trailing slash in GOROOT, runtime listed as stale")
	tg.wantNotStale("os", "", "with trailing slash in GOROOT, os listed as stale")
	tg.wantNotStale("io", "", "with trailing slash in GOROOT, io listed as stale")
}
// TestGoGetNonPkg checks that go get of a repository root with no Go
// files reports "no Go files" for each get variant. Requires network
// and git.
func TestGoGetNonPkg(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	testenv.MustHaveExecPath(t, "git")
	tg := testgo(t)
	defer tg.cleanup()
	tg.tempDir("gobin")
	tg.setenv("GOPATH", tg.path("."))
	tg.setenv("GOBIN", tg.path("gobin"))
	tg.runFail("get", "-d", "golang.org/x/tools")
	tg.grepStderr("golang.org/x/tools: no Go files", "missing error")
	tg.runFail("get", "-d", "-u", "golang.org/x/tools")
	tg.grepStderr("golang.org/x/tools: no Go files", "missing error")
	// Repeat the plain -d form now that the repo is already downloaded.
	tg.runFail("get", "-d", "golang.org/x/tools")
	tg.grepStderr("golang.org/x/tools: no Go files", "missing error")
}
// TestGoGetTestOnlyPkg checks that go get works, with and without -t,
// on packages that contain only tests. Requires network and git.
func TestGoGetTestOnlyPkg(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	testenv.MustHaveExecPath(t, "git")
	tg := testgo(t)
	defer tg.cleanup()
	tg.tempDir("gopath")
	tg.setenv("GOPATH", tg.path("gopath"))
	tg.run("get", "golang.org/x/tour/content...")
	tg.run("get", "-t", "golang.org/x/tour/content...")
}
// Issue 4104.
// TestGoTestWithPackageListedMultipleTimes checks that a package named
// several times on the command line is tested only once (one line of
// output).
func TestGoTestWithPackageListedMultipleTimes(t *testing.T) {
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.run("test", "errors", "errors", "errors", "errors", "errors")
	if strings.Contains(strings.TrimSpace(tg.getStdout()), "\n") {
		t.Error("go test errors errors errors errors errors tested the same package multiple times")
	}
}
// TestGoListHasAConsistentOrder checks that two runs of go list std
// produce identical output.
func TestGoListHasAConsistentOrder(t *testing.T) {
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.run("list", "std")
	first := tg.getStdout()
	tg.run("list", "std")
	if first != tg.getStdout() {
		t.Error("go list std ordering is inconsistent")
	}
}
// TestGoListStdDoesNotIncludeCommands checks that go list std omits
// cmd/ packages.
func TestGoListStdDoesNotIncludeCommands(t *testing.T) {
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.run("list", "std")
	tg.grepStdoutNot("cmd/", "go list std shows commands")
}
// TestGoListCmdOnlyShowsCommands checks that every line of go list cmd
// output names a cmd/ package.
func TestGoListCmdOnlyShowsCommands(t *testing.T) {
	skipIfGccgo(t, "gccgo does not have GOROOT")
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.run("list", "cmd")
	out := strings.TrimSpace(tg.getStdout())
	for _, line := range strings.Split(out, "\n") {
		if !strings.Contains(line, "cmd/") {
			// One bad line is enough; no need to report every one.
			t.Error("go list cmd shows non-commands")
			break
		}
	}
}
// TestGoListDedupsPackages checks that listing the same package by
// import path and by directory yields a single entry.
func TestGoListDedupsPackages(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.run("list", "xtestonly", "./testdata/src/xtestonly/...")
	got := strings.TrimSpace(tg.getStdout())
	const want = "xtestonly"
	if got != want {
		t.Errorf("got %q; want %q", got, want)
	}
}
// TestGoListDeps checks that {{.Deps}} and -deps report transitive
// dependencies, and (for gc) that -deps output is in dependency order.
func TestGoListDeps(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	// Chain p1 -> p2 -> p3 -> p4 so p4 is a transitive dep of p1.
	tg.tempDir("src/p1/p2/p3/p4")
	tg.setenv("GOPATH", tg.path("."))
	tg.tempFile("src/p1/p.go", "package p1\nimport _ \"p1/p2\"\n")
	tg.tempFile("src/p1/p2/p.go", "package p2\nimport _ \"p1/p2/p3\"\n")
	tg.tempFile("src/p1/p2/p3/p.go", "package p3\nimport _ \"p1/p2/p3/p4\"\n")
	tg.tempFile("src/p1/p2/p3/p4/p.go", "package p4\n")
	tg.run("list", "-f", "{{.Deps}}", "p1")
	tg.grepStdout("p1/p2/p3/p4", "Deps(p1) does not mention p4")
	tg.run("list", "-deps", "p1")
	tg.grepStdout("p1/p2/p3/p4", "-deps p1 does not mention p4")
	if runtime.Compiler != "gccgo" {
		// Check the list is in dependency order.
		tg.run("list", "-deps", "math")
		want := "internal/cpu\nunsafe\nmath/bits\nmath\n"
		out := tg.stdout.String()
		if !strings.Contains(out, "internal/cpu") {
			// Some systems don't use internal/cpu.
			want = "unsafe\nmath/bits\nmath\n"
		}
		if tg.stdout.String() != want {
			t.Fatalf("list -deps math: wrong order\nhave %q\nwant %q", tg.stdout.String(), want)
		}
	}
}
// TestGoListTest checks which real, test-main, and per-test package
// variants appear in go list -test output, with and without -deps.
func TestGoListTest(t *testing.T) {
	skipIfGccgo(t, "gccgo does not have standard packages")
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOCACHE", tg.tempdir)
	tg.run("list", "-test", "-deps", "sort")
	tg.grepStdout(`^sort.test$`, "missing test main")
	tg.grepStdout(`^sort$`, "missing real sort")
	tg.grepStdout(`^sort \[sort.test\]$`, "missing test copy of sort")
	tg.grepStdout(`^testing \[sort.test\]$`, "missing test copy of testing")
	tg.grepStdoutNot(`^testing$`, "unexpected real copy of testing")
	// Without -deps, the test copies of dependencies are not listed.
	tg.run("list", "-test", "sort")
	tg.grepStdout(`^sort.test$`, "missing test main")
	tg.grepStdout(`^sort$`, "missing real sort")
	tg.grepStdout(`^sort \[sort.test\]$`, "unexpected test copy of sort")
	tg.grepStdoutNot(`^testing \[sort.test\]$`, "unexpected test copy of testing")
	tg.grepStdoutNot(`^testing$`, "unexpected real copy of testing")
	tg.run("list", "-test", "cmd/dist", "cmd/doc")
	tg.grepStdout(`^cmd/dist$`, "missing cmd/dist")
	tg.grepStdout(`^cmd/doc$`, "missing cmd/doc")
	tg.grepStdout(`^cmd/doc\.test$`, "missing cmd/doc test")
	tg.grepStdoutNot(`^cmd/dist\.test$`, "unexpected cmd/dist test")
	tg.grepStdoutNot(`^testing`, "unexpected testing")
	tg.run("list", "-test", "runtime/cgo")
	tg.grepStdout(`^runtime/cgo$`, "missing runtime/cgo")
	tg.run("list", "-deps", "-f", "{{if .DepOnly}}{{.ImportPath}}{{end}}", "sort")
	tg.grepStdout(`^internal/reflectlite$`, "missing internal/reflectlite")
	tg.grepStdoutNot(`^sort`, "unexpected sort")
}
// TestGoListCompiledCgo checks that .CgoFiles never points into the
// build cache, while .CompiledGoFiles with -compiled does, and that
// every listed compiled file actually exists.
func TestGoListCompiledCgo(t *testing.T) {
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOCACHE", tg.tempdir)
	tg.run("list", "-f", `{{join .CgoFiles "\n"}}`, "net")
	if tg.stdout.String() == "" {
		t.Skip("net does not use cgo")
	}
	if strings.Contains(tg.stdout.String(), tg.tempdir) {
		t.Fatalf(".CgoFiles unexpectedly mentioned cache %s", tg.tempdir)
	}
	tg.run("list", "-compiled", "-f", `{{.Dir}}{{"\n"}}{{join .CompiledGoFiles "\n"}}`, "net")
	if !strings.Contains(tg.stdout.String(), tg.tempdir) {
		t.Fatalf(".CompiledGoFiles with -compiled did not mention cache %s", tg.tempdir)
	}
	// The first output line is the package directory ({{.Dir}});
	// subsequent lines are file names, possibly relative to it.
	dir := ""
	for _, file := range strings.Split(tg.stdout.String(), "\n") {
		if file == "" {
			continue
		}
		if dir == "" {
			dir = file
			continue
		}
		if !strings.Contains(file, "/") && !strings.Contains(file, `\`) {
			file = filepath.Join(dir, file)
		}
		if _, err := os.Stat(file); err != nil {
			t.Fatalf("cannot find .CompiledGoFiles result %s: %v", file, err)
		}
	}
}
// TestGoListExport checks that .Export is empty without -export and,
// with -export, names an existing file.
func TestGoListExport(t *testing.T) {
	skipIfGccgo(t, "gccgo does not have standard packages")
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOCACHE", tg.tempdir)
	tg.run("list", "-f", "{{.Export}}", "strings")
	if tg.stdout.String() != "" {
		t.Fatalf(".Export without -export unexpectedly set")
	}
	tg.run("list", "-export", "-f", "{{.Export}}", "strings")
	file := strings.TrimSpace(tg.stdout.String())
	if file == "" {
		t.Fatalf(".Export with -export was empty")
	}
	if _, err := os.Stat(file); err != nil {
		t.Fatalf("cannot find .Export result %s: %v", file, err)
	}
}
// Issue 4096. Validate the output of unsuccessful go install foo/quxx.
// The "cannot find package" line must appear exactly once.
func TestUnsuccessfulGoInstallShouldMentionMissingPackage(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.runFail("install", "foo/quxx")
	if tg.grepCountBoth(`cannot find package "foo/quxx" in any of`) != 1 {
		t.Error(`go install foo/quxx expected error: .*cannot find package "foo/quxx" in any of`)
	}
}
// TestGOROOTSearchFailureReporting checks that the missing-package
// error annotates the GOROOT candidate with "(from $GOROOT)" exactly
// once.
func TestGOROOTSearchFailureReporting(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.runFail("install", "foo/quxx")
	if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("foo", "quxx"))+` \(from \$GOROOT\)$`) != 1 {
		t.Error(`go install foo/quxx expected error: .*foo/quxx (from $GOROOT)`)
	}
}
// TestMultipleGOPATHEntriesReportedSeparately checks that the
// missing-package error lists each GOPATH entry's candidate directory.
func TestMultipleGOPATHEntriesReportedSeparately(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	sep := string(filepath.ListSeparator)
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b"))
	tg.runFail("install", "foo/quxx")
	// One line per GOPATH entry: testdata/a and testdata/b.
	if tg.grepCountBoth(`testdata[/\\].[/\\]src[/\\]foo[/\\]quxx`) != 2 {
		t.Error(`go install foo/quxx expected error: .*testdata/a/src/foo/quxx (from $GOPATH)\n.*testdata/b/src/foo/quxx`)
	}
}
// Test (from $GOPATH) annotation is reported for the first GOPATH entry,
func TestMentionGOPATHInFirstGOPATHEntry(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	sep := string(filepath.ListSeparator)
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b"))
	tg.runFail("install", "foo/quxx")
	// Only the testdata/a candidate carries the "(from $GOPATH)" suffix.
	if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("testdata", "a", "src", "foo", "quxx"))+` \(from \$GOPATH\)$`) != 1 {
		t.Error(`go install foo/quxx expected error: .*testdata/a/src/foo/quxx (from $GOPATH)`)
	}
}
// but not on the second.
func TestMentionGOPATHNotOnSecondEntry(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	sep := string(filepath.ListSeparator)
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b"))
	tg.runFail("install", "foo/quxx")
	// The testdata/b candidate line ends without any annotation.
	if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("testdata", "b", "src", "foo", "quxx"))+`$`) != 1 {
		t.Error(`go install foo/quxx expected error: .*testdata/b/src/foo/quxx`)
	}
}
func homeEnvName() string {
switch runtime.GOOS {
case "windows":
return "USERPROFILE"
case "plan9":
return "home"
default:
return "HOME"
}
}
func tempEnvName() string {
switch runtime.GOOS {
case "windows":
return "TMP"
case "plan9":
return "TMPDIR" // actually plan 9 doesn't have one at all but this is fine
default:
return "TMPDIR"
}
}
// TestDefaultGOPATH checks that GOPATH defaults to $HOME/go, and is
// left unset when GOROOT is that same directory (with or without a
// trailing slash).
func TestDefaultGOPATH(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("home/go")
	tg.setenv(homeEnvName(), tg.path("home"))
	tg.run("env", "GOPATH")
	tg.grepStdout(regexp.QuoteMeta(tg.path("home/go")), "want GOPATH=$HOME/go")
	tg.setenv("GOROOT", tg.path("home/go"))
	tg.run("env", "GOPATH")
	tg.grepStdoutNot(".", "want unset GOPATH because GOROOT=$HOME/go")
	tg.setenv("GOROOT", tg.path("home/go")+"/")
	tg.run("env", "GOPATH")
	tg.grepStdoutNot(".", "want unset GOPATH because GOROOT=$HOME/go/")
}
// TestDefaultGOPATHGet checks go get's handling of the default GOPATH:
// it warns when creating $HOME/go, stays quiet when the directory
// already exists, and fails when $HOME/go is a file. Requires network
// and git.
func TestDefaultGOPATHGet(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	testenv.MustHaveExecPath(t, "git")
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.setenv("GOPATH", "")
	tg.tempDir("home")
	tg.setenv(homeEnvName(), tg.path("home"))
	// warn for creating directory
	tg.run("get", "-v", "github.com/golang/example/hello")
	tg.grepStderr("created GOPATH="+regexp.QuoteMeta(tg.path("home/go"))+"; see 'go help gopath'", "did not create GOPATH")
	// no warning if directory already exists
	tg.must(robustio.RemoveAll(tg.path("home/go")))
	tg.tempDir("home/go")
	tg.run("get", "github.com/golang/example/hello")
	tg.grepStderrNot(".", "expected no output on standard error")
	// error if $HOME/go is a file
	tg.must(robustio.RemoveAll(tg.path("home/go")))
	tg.tempFile("home/go", "")
	tg.runFail("get", "github.com/golang/example/hello")
	tg.grepStderr(`mkdir .*[/\\]go: .*(not a directory|cannot find the path)`, "expected error because $HOME/go is a file")
}
// TestDefaultGOPATHPrintedSearchList checks that with GOPATH unset,
// the missing-package error mentions the default $HOME/go search path.
func TestDefaultGOPATHPrintedSearchList(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.setenv("GOPATH", "")
	tg.tempDir("home")
	tg.setenv(homeEnvName(), tg.path("home"))
	tg.runFail("install", "github.com/golang/example/hello")
	tg.grepStderr(regexp.QuoteMeta(tg.path("home/go/src/github.com/golang/example/hello"))+`.*from \$GOPATH`, "expected default GOPATH")
}
// Issue 4186. go get cannot be used to download packages to $GOROOT.
// Test that without GOPATH set, go get should fail.
// Each case also repeats with trailing slashes to check path cleaning.
func TestGoGetIntoGOROOT(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src")
	// Fails because GOROOT=GOPATH
	tg.setenv("GOPATH", tg.path("."))
	tg.setenv("GOROOT", tg.path("."))
	tg.runFail("get", "-d", "github.com/golang/example/hello")
	tg.grepStderr("warning: GOPATH set to GOROOT", "go should detect GOPATH=GOROOT")
	tg.grepStderr(`\$GOPATH must not be set to \$GOROOT`, "go should detect GOPATH=GOROOT")
	// Fails because GOROOT=GOPATH after cleaning.
	tg.setenv("GOPATH", tg.path(".")+"/")
	tg.setenv("GOROOT", tg.path("."))
	tg.runFail("get", "-d", "github.com/golang/example/hello")
	tg.grepStderr("warning: GOPATH set to GOROOT", "go should detect GOPATH=GOROOT")
	tg.grepStderr(`\$GOPATH must not be set to \$GOROOT`, "go should detect GOPATH=GOROOT")
	tg.setenv("GOPATH", tg.path("."))
	tg.setenv("GOROOT", tg.path(".")+"/")
	tg.runFail("get", "-d", "github.com/golang/example/hello")
	tg.grepStderr("warning: GOPATH set to GOROOT", "go should detect GOPATH=GOROOT")
	tg.grepStderr(`\$GOPATH must not be set to \$GOROOT`, "go should detect GOPATH=GOROOT")
	// Fails because GOROOT=$HOME/go so default GOPATH unset.
	tg.tempDir("home/go")
	tg.setenv(homeEnvName(), tg.path("home"))
	tg.setenv("GOPATH", "")
	tg.setenv("GOROOT", tg.path("home/go"))
	tg.runFail("get", "-d", "github.com/golang/example/hello")
	tg.grepStderr(`\$GOPATH not set`, "expected GOPATH not set")
	tg.setenv(homeEnvName(), tg.path("home")+"/")
	tg.setenv("GOPATH", "")
	tg.setenv("GOROOT", tg.path("home/go"))
	tg.runFail("get", "-d", "github.com/golang/example/hello")
	tg.grepStderr(`\$GOPATH not set`, "expected GOPATH not set")
	tg.setenv(homeEnvName(), tg.path("home"))
	tg.setenv("GOPATH", "")
	tg.setenv("GOROOT", tg.path("home/go")+"/")
	tg.runFail("get", "-d", "github.com/golang/example/hello")
	tg.grepStderr(`\$GOPATH not set`, "expected GOPATH not set")
}
// TestLdflagsArgumentsWithSpacesIssue3941 verifies that a -ldflags -X value
// containing a space survives quoting all the way to the linker (issue 3941).
func TestLdflagsArgumentsWithSpacesIssue3941(t *testing.T) {
skipIfGccgo(t, "gccgo does not support -ldflags -X")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("main.go", `package main
var extern string
func main() {
println(extern)
}`)
tg.run("run", "-ldflags", `-X "main.extern=hello world"`, tg.path("main.go"))
// println writes to stderr, so the injected value is expected there.
tg.grepStderr("^hello world", `ldflags -X "main.extern=hello world"' failed`)
}
// TestGoTestCpuprofileLeavesBinaryBehind checks that -cpuprofile keeps the
// test binary (errors.test) in the current directory for later analysis.
func TestGoTestCpuprofileLeavesBinaryBehind(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
// TODO: tg.parallel()
tg.makeTempdir()
tg.cd(tg.path("."))
tg.run("test", "-cpuprofile", "errors.prof", "errors")
tg.wantExecutable("errors.test"+exeSuffix, "go test -cpuprofile did not create errors.test")
}
// TestGoTestCpuprofileDashOControlsBinaryLocation checks that -o overrides
// where the -cpuprofile test binary is written.
func TestGoTestCpuprofileDashOControlsBinaryLocation(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
// TODO: tg.parallel()
tg.makeTempdir()
tg.cd(tg.path("."))
tg.run("test", "-cpuprofile", "errors.prof", "-o", "myerrors.test"+exeSuffix, "errors")
tg.wantExecutable("myerrors.test"+exeSuffix, "go test -cpuprofile -o myerrors.test did not create myerrors.test")
}
// TestGoTestMutexprofileLeavesBinaryBehind is the -mutexprofile analogue of
// TestGoTestCpuprofileLeavesBinaryBehind: the test binary must remain on disk.
func TestGoTestMutexprofileLeavesBinaryBehind(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
// TODO: tg.parallel()
tg.makeTempdir()
tg.cd(tg.path("."))
tg.run("test", "-mutexprofile", "errors.prof", "errors")
tg.wantExecutable("errors.test"+exeSuffix, "go test -mutexprofile did not create errors.test")
}
// TestGoTestMutexprofileDashOControlsBinaryLocation checks that -o overrides
// where the -mutexprofile test binary is written.
func TestGoTestMutexprofileDashOControlsBinaryLocation(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
// TODO: tg.parallel()
tg.makeTempdir()
tg.cd(tg.path("."))
tg.run("test", "-mutexprofile", "errors.prof", "-o", "myerrors.test"+exeSuffix, "errors")
tg.wantExecutable("myerrors.test"+exeSuffix, "go test -mutexprofile -o myerrors.test did not create myerrors.test")
}
// TestGoTestDashCDashOControlsBinaryLocation checks that "go test -c -o path"
// writes the compiled test binary to the requested path.
func TestGoTestDashCDashOControlsBinaryLocation(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.run("test", "-c", "-o", tg.path("myerrors.test"+exeSuffix), "errors")
tg.wantExecutable(tg.path("myerrors.test"+exeSuffix), "go test -c -o myerrors.test did not create myerrors.test")
}
// TestGoTestDashOWritesBinary checks that "go test -o path" (without -c)
// still writes the test binary to the requested path.
func TestGoTestDashOWritesBinary(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.run("test", "-o", tg.path("myerrors.test"+exeSuffix), "errors")
tg.wantExecutable(tg.path("myerrors.test"+exeSuffix), "go test -o myerrors.test did not create myerrors.test")
}
// TestGoTestDashIDashOWritesBinary checks that "go test -i -o path" writes
// the binary without actually running the test.
func TestGoTestDashIDashOWritesBinary(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
// don't let test -i overwrite runtime
tg.wantNotStale("runtime", "", "must be non-stale before test -i")
tg.run("test", "-v", "-i", "-o", tg.path("myerrors.test"+exeSuffix), "errors")
tg.grepBothNot("PASS|FAIL", "test should not have run")
tg.wantExecutable(tg.path("myerrors.test"+exeSuffix), "go test -o myerrors.test did not create myerrors.test")
}
// Issue 4568. go list must report the correct Root for a package
// reached through a symlinked directory inside GOPATH.
func TestSymlinksList(t *testing.T) {
testenv.MustHaveSymlink(t)
tg := testgo(t)
defer tg.cleanup()
// TODO: tg.parallel()
tg.tempDir("src")
tg.must(os.Symlink(tg.path("."), tg.path("src/dir1")))
tg.tempFile("src/dir1/p.go", "package p")
tg.setenv("GOPATH", tg.path("."))
tg.cd(tg.path("src"))
tg.run("list", "-f", "{{.Root}}", "dir1")
if strings.TrimSpace(tg.getStdout()) != tg.path(".") {
t.Error("confused by symlinks")
}
}
// Issue 14054. vendor directories must be resolved correctly when the
// package directory is reached through a symlink.
func TestSymlinksVendor(t *testing.T) {
testenv.MustHaveSymlink(t)
tg := testgo(t)
defer tg.cleanup()
// TODO: tg.parallel()
tg.tempDir("gopath/src/dir1/vendor/v")
tg.tempFile("gopath/src/dir1/p.go", "package main\nimport _ `v`\nfunc main(){}")
tg.tempFile("gopath/src/dir1/vendor/v/v.go", "package v")
tg.must(os.Symlink(tg.path("gopath/src/dir1"), tg.path("symdir1")))
tg.setenv("GOPATH", tg.path("gopath"))
tg.cd(tg.path("symdir1"))
tg.run("list", "-f", "{{.Root}}", ".")
if strings.TrimSpace(tg.getStdout()) != tg.path("gopath") {
t.Error("list confused by symlinks")
}
// All of these should succeed, not die in vendor-handling code.
tg.run("run", "p.go")
tg.run("build")
tg.run("install")
}
// Issue 15201. A vendor directory that is itself a symlink (pointing into
// an _vendor tree that loops back via another symlink) must not break list.
func TestSymlinksVendor15201(t *testing.T) {
testenv.MustHaveSymlink(t)
tg := testgo(t)
defer tg.cleanup()
tg.tempDir("gopath/src/x/y/_vendor/src/x")
tg.must(os.Symlink("../../..", tg.path("gopath/src/x/y/_vendor/src/x/y")))
tg.tempFile("gopath/src/x/y/w/w.go", "package w\nimport \"x/y/z\"\n")
tg.must(os.Symlink("../_vendor/src", tg.path("gopath/src/x/y/w/vendor")))
tg.tempFile("gopath/src/x/y/z/z.go", "package z\n")
tg.setenv("GOPATH", tg.path("gopath/src/x/y/_vendor")+string(filepath.ListSeparator)+tg.path("gopath"))
tg.cd(tg.path("gopath/src"))
tg.run("list", "./...")
}
// TestSymlinksInternal is the internal-package analogue of TestSymlinksVendor:
// internal imports must resolve when the package is reached via a symlink.
func TestSymlinksInternal(t *testing.T) {
testenv.MustHaveSymlink(t)
tg := testgo(t)
defer tg.cleanup()
tg.tempDir("gopath/src/dir1/internal/v")
tg.tempFile("gopath/src/dir1/p.go", "package main\nimport _ `dir1/internal/v`\nfunc main(){}")
tg.tempFile("gopath/src/dir1/internal/v/v.go", "package v")
tg.must(os.Symlink(tg.path("gopath/src/dir1"), tg.path("symdir1")))
tg.setenv("GOPATH", tg.path("gopath"))
tg.cd(tg.path("symdir1"))
tg.run("list", "-f", "{{.Root}}", ".")
if strings.TrimSpace(tg.getStdout()) != tg.path("gopath") {
t.Error("list confused by symlinks")
}
// All of these should succeed, not die in internal-handling code.
tg.run("run", "p.go")
tg.run("build")
tg.run("install")
}
// Issue 4515. go install/list must honor -tags both for explicitly named
// packages and for wildcard patterns.
func TestInstallWithTags(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("bin")
tg.tempFile("src/example/a/main.go", `package main
func main() {}`)
tg.tempFile("src/example/b/main.go", `// +build mytag
package main
func main() {}`)
tg.setenv("GOPATH", tg.path("."))
tg.run("install", "-tags", "mytag", "example/a", "example/b")
tg.wantExecutable(tg.path("bin/a"+exeSuffix), "go install example/a example/b did not install binaries")
tg.wantExecutable(tg.path("bin/b"+exeSuffix), "go install example/a example/b did not install binaries")
tg.must(os.Remove(tg.path("bin/a" + exeSuffix)))
tg.must(os.Remove(tg.path("bin/b" + exeSuffix)))
tg.run("install", "-tags", "mytag", "example/...")
tg.wantExecutable(tg.path("bin/a"+exeSuffix), "go install example/... did not install binaries")
tg.wantExecutable(tg.path("bin/b"+exeSuffix), "go install example/... did not install binaries")
tg.run("list", "-tags", "mytag", "example/b...")
if strings.TrimSpace(tg.getStdout()) != "example/b" {
t.Error("go list example/b did not find example/b")
}
}
// Issue 4773. go list/build must diagnose case-insensitive collisions both
// between import paths (pkg vs Pkg) and between file names (file.go vs FILE.go).
func TestCaseCollisions(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src/example/a/pkg")
tg.tempDir("src/example/a/Pkg")
tg.tempDir("src/example/b")
tg.setenv("GOPATH", tg.path("."))
tg.tempFile("src/example/a/a.go", `package p
import (
_ "example/a/pkg"
_ "example/a/Pkg"
)`)
tg.tempFile("src/example/a/pkg/pkg.go", `package pkg`)
tg.tempFile("src/example/a/Pkg/pkg.go", `package pkg`)
tg.run("list", "-json", "example/a")
tg.grepStdout("case-insensitive import collision", "go list -json example/a did not report import collision")
tg.runFail("build", "example/a")
tg.grepStderr("case-insensitive import collision", "go build example/a did not report import collision")
tg.tempFile("src/example/b/file.go", `package b`)
tg.tempFile("src/example/b/FILE.go", `package b`)
// Probe the host file system: on a case-insensitive FS only one of the
// two files exists on disk, so the arguments must differ accordingly.
f, err := os.Open(tg.path("src/example/b"))
tg.must(err)
names, err := f.Readdirnames(0)
tg.must(err)
tg.check(f.Close())
args := []string{"list"}
if len(names) == 2 {
// case-sensitive file system, let directory read find both files
args = append(args, "example/b")
} else {
// case-insensitive file system, list files explicitly on command line
args = append(args, tg.path("src/example/b/file.go"), tg.path("src/example/b/FILE.go"))
}
tg.runFail(args...)
tg.grepStderr("case-insensitive file name collision", "go list example/b did not report file name collision")
tg.runFail("list", "example/a/pkg", "example/a/Pkg")
tg.grepStderr("case-insensitive import collision", "go list example/a/pkg example/a/Pkg did not report import collision")
tg.run("list", "-json", "-e", "example/a/pkg", "example/a/Pkg")
tg.grepStdout("case-insensitive import collision", "go list -json -e example/a/pkg example/a/Pkg did not report import collision")
tg.runFail("build", "example/a/pkg", "example/a/Pkg")
tg.grepStderr("case-insensitive import collision", "go build example/a/pkg example/a/Pkg did not report import collision")
}
// Issue 17451, 17662. Wildcard matching does not follow symlinked
// directories: an exact-prefix pattern stays silent about the symlink,
// while a broader pattern reports "ignoring symlink".
func TestSymlinkWarning(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", tg.path("."))
tg.tempDir("src/example/xx")
tg.tempDir("yy/zz")
tg.tempFile("yy/zz/zz.go", "package zz\n")
if err := os.Symlink(tg.path("yy"), tg.path("src/example/xx/yy")); err != nil {
t.Skipf("symlink failed: %v", err)
}
tg.run("list", "example/xx/z...")
tg.grepStdoutNot(".", "list should not have matched anything")
tg.grepStderr("matched no packages", "list should have reported that pattern matched no packages")
tg.grepStderrNot("symlink", "list should not have reported symlink")
tg.run("list", "example/xx/...")
tg.grepStdoutNot(".", "list should not have matched anything")
tg.grepStderr("matched no packages", "list should have reported that pattern matched no packages")
tg.grepStderr("ignoring symlink", "list should have reported symlink")
}
// Issue 8181. go get -t must also download the test dependencies of the
// named packages (here, x/build/gerrit).
func TestGoGetDashTIssue8181(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", tg.path("."))
tg.run("get", "-v", "-t", "github.com/rsc/go-get-issue-8181/a", "github.com/rsc/go-get-issue-8181/b")
tg.run("list", "...")
tg.grepStdout("x/build/gerrit", "missing expected x/build/gerrit")
}
// TestIssue11307 verifies that "go get -u" works from outside the
// repository's checkout directory.
func TestIssue11307(t *testing.T) {
// go get -u was not working except in checkout directory
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", tg.path("."))
tg.run("get", "github.com/rsc/go-get-issue-11307")
tg.run("get", "-u", "github.com/rsc/go-get-issue-11307") // was failing
}
// TestShadowingLogic checks go list's ImportPath/ConflictDir reporting for
// packages shadowed by GOROOT or by an earlier GOPATH entry, and the
// corresponding "hidden by" error from go install.
func TestShadowingLogic(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tg := testgo(t)
defer tg.cleanup()
pwd := tg.pwd()
sep := string(filepath.ListSeparator)
tg.setenv("GOPATH", filepath.Join(pwd, "testdata", "shadow", "root1")+sep+filepath.Join(pwd, "testdata", "shadow", "root2"))
// The math in root1 is not "math" because the standard math is.
tg.run("list", "-f", "({{.ImportPath}}) ({{.ConflictDir}})", "./testdata/shadow/root1/src/math")
pwdForwardSlash := strings.ReplaceAll(pwd, string(os.PathSeparator), "/")
if !strings.HasPrefix(pwdForwardSlash, "/") {
pwdForwardSlash = "/" + pwdForwardSlash
}
// The output will have makeImportValid applied, but we only
// bother to deal with characters we might reasonably see.
for _, r := range " :" {
pwdForwardSlash = strings.ReplaceAll(pwdForwardSlash, string(r), "_")
}
want := "(_" + pwdForwardSlash + "/testdata/shadow/root1/src/math) (" + filepath.Join(runtime.GOROOT(), "src", "math") + ")"
if strings.TrimSpace(tg.getStdout()) != want {
t.Error("shadowed math is not shadowed; looking for", want)
}
// The foo in root1 is "foo".
tg.run("list", "-f", "({{.ImportPath}}) ({{.ConflictDir}})", "./testdata/shadow/root1/src/foo")
if strings.TrimSpace(tg.getStdout()) != "(foo) ()" {
t.Error("unshadowed foo is shadowed")
}
// The foo in root2 is not "foo" because the foo in root1 got there first.
tg.run("list", "-f", "({{.ImportPath}}) ({{.ConflictDir}})", "./testdata/shadow/root2/src/foo")
want = "(_" + pwdForwardSlash + "/testdata/shadow/root2/src/foo) (" + filepath.Join(pwd, "testdata", "shadow", "root1", "src", "foo") + ")"
if strings.TrimSpace(tg.getStdout()) != want {
t.Error("shadowed foo is not shadowed; looking for", want)
}
// The error for go install should mention the conflicting directory.
tg.runFail("install", "./testdata/shadow/root2/src/foo")
want = "go install: no install location for " + filepath.Join(pwd, "testdata", "shadow", "root2", "src", "foo") + ": hidden by " + filepath.Join(pwd, "testdata", "shadow", "root1", "src", "foo")
if strings.TrimSpace(tg.getStderr()) != want {
t.Error("wrong shadowed install error; looking for", want)
}
}
// Only succeeds if source order is preserved.
func TestSourceFileNameOrderPreserved(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.run("test", "testdata/example1_test.go", "testdata/example2_test.go")
}
// checkCoverage reports a test error if any coverage percentage in data
// is exactly 0.0%. The exact numbers do not matter, only that they are
// not all zero.
func checkCoverage(tg *testgoData, data string) {
tg.t.Helper()
zeroPercent := regexp.MustCompile(`[^0-9]0\.0%`)
if loc := zeroPercent.FindStringIndex(data); loc != nil {
tg.t.Error("some coverage results are 0.0%")
}
}
// TestCoverageRuns checks that -cover and -coverpkg produce some nonzero
// coverage output for standard packages.
func TestCoverageRuns(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.run("test", "-short", "-coverpkg=strings", "strings", "regexp")
data := tg.getStdout() + tg.getStderr()
tg.run("test", "-short", "-cover", "strings", "math", "regexp")
data += tg.getStdout() + tg.getStderr()
checkCoverage(tg, data)
}
// TestCoverageDotImport checks coverage when the covered package is used
// through a dot import (testdata packages coverdot1/coverdot2).
func TestCoverageDotImport(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.run("test", "-coverpkg=coverdot1,coverdot2", "coverdot2")
data := tg.getStdout() + tg.getStderr()
checkCoverage(tg, data)
}
// TestCoverageSyncAtomicImport checks that -covermode=atomic (which makes
// the instrumentation depend on sync/atomic) builds and runs cleanly.
func TestCoverageSyncAtomicImport(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.run("test", "-short", "-cover", "-covermode=atomic", "-coverpkg=coverdep/p1", "coverdep")
}
// TestCoverageDepLoop checks coverage when the xtest of the covered package
// participates in an import cycle through another package.
func TestCoverageDepLoop(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
// coverdep2/p1's xtest imports coverdep2/p2 which imports coverdep2/p1.
// Make sure that coverage on coverdep2/p2 recompiles coverdep2/p2.
tg.run("test", "-short", "-cover", "coverdep2/p1")
tg.grepStdout("coverage: 100.0% of statements", "expected 100.0% coverage")
}
// TestCoverageNoStatements checks that a package with no executable
// statements reports "[no statements]" rather than a percentage.
func TestCoverageNoStatements(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.run("test", "-cover", "./testdata/testcover/pkg4")
tg.grepStdout("[no statements]", "expected [no statements] for pkg4")
}
// TestCoverageErrorLine checks that compile errors in a covered package are
// reported with the original source positions (coverbad/p.go:4), not with
// positions inside the rewritten files in the temporary build directory,
// and that running with -cover does not change the error output (modulo
// the column dropped by the inserted //line directives).
func TestCoverageErrorLine(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.setenv("GOTMPDIR", tg.tempdir)
tg.runFail("test", "coverbad")
tg.grepStderr(`coverbad[\\/]p\.go:4`, "did not find coverbad/p.go:4")
if canCgo {
tg.grepStderr(`coverbad[\\/]p1\.go:6`, "did not find coverbad/p1.go:6")
}
tg.grepStderrNot(regexp.QuoteMeta(tg.tempdir), "found temporary directory in error")
stderr := tg.getStderr()
tg.runFail("test", "-cover", "coverbad")
stderr2 := tg.getStderr()
// It's OK that stderr2 drops the character position in the error,
// because of the //line directive (see golang.org/issue/22662).
stderr = strings.ReplaceAll(stderr, "p.go:4:2:", "p.go:4:")
if stderr != stderr2 {
t.Logf("test -cover changed error messages:\nbefore:\n%s\n\nafter:\n%s", stderr, stderr2)
// Known flaky difference; skip rather than fail until the issue is
// resolved. (t.Skip stops the test via runtime.Goexit, so the old
// t.FailNow() that followed it here was unreachable and is removed.)
t.Skip("golang.org/issue/22660")
}
}
// TestTestBuildFailureOutput checks that when the package fails to build,
// "go test -x" does not claim to have executed the test binary.
func TestTestBuildFailureOutput(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
// Doesn't build, -x output should not claim to run test.
tg.runFail("test", "-x", "coverbad")
tg.grepStderrNot(`[\\/]coverbad\.test( |$)`, "claimed to run test")
}
// TestCoverageFunc checks "go tool cover -func" output: the Go function g
// is covered, and the assembly function f gets no coverage line at all.
func TestCoverageFunc(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.run("test", "-outputdir="+tg.tempdir, "-coverprofile=cover.out", "coverasm")
tg.run("tool", "cover", "-func="+tg.path("cover.out"))
tg.grepStdout(`\tg\t*100.0%`, "did not find g 100% covered")
tg.grepStdoutNot(`\tf\t*[0-9]`, "reported coverage for assembly function f")
}
// Issue 24588. With -c, the coverage profile is not written, so an
// unwritable -coverprofile path must not prevent creating the binary.
func TestCoverageDashC(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.run("test", "-c", "-o", tg.path("coverdep"), "-coverprofile="+tg.path("no/such/dir/cover.out"), "coverdep")
tg.wantExecutable(tg.path("coverdep"), "go -test -c -coverprofile did not create executable")
}
// TestTestEmpty runs "go test -cover -race" over the empty/* testdata
// packages (various combinations of empty test/xtest files); in -short
// mode only the first combination is exercised.
func TestTestEmpty(t *testing.T) {
if !canRace {
t.Skip("no race detector")
}
wd, _ := os.Getwd()
testdata := filepath.Join(wd, "testdata")
for _, dir := range []string{"pkg", "test", "xtest", "pkgtest", "pkgxtest", "pkgtestxtest", "testxtest"} {
t.Run(dir, func(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.setenv("GOPATH", testdata)
tg.cd(filepath.Join(testdata, "src/empty/"+dir))
tg.run("test", "-cover", "-coverpkg=.", "-race")
})
if testing.Short() {
break
}
}
}
// TestNoGoError checks the specific "no Go files" error message produced
// for directories with only tests, with all files excluded by build
// constraints, or with no Go files at all.
func TestNoGoError(t *testing.T) {
wd, _ := os.Getwd()
testdata := filepath.Join(wd, "testdata")
for _, dir := range []string{"empty/test", "empty/xtest", "empty/testxtest", "exclude", "exclude/ignore", "exclude/empty"} {
t.Run(dir, func(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.setenv("GOPATH", testdata)
tg.cd(filepath.Join(testdata, "src"))
tg.runFail("build", "./"+dir)
var want string
if strings.Contains(dir, "test") {
want = "no non-test Go files in "
} else if dir == "exclude" {
want = "build constraints exclude all Go files in "
} else {
want = "no Go files in "
}
tg.grepStderr(want, "wrong reason for failure")
})
}
}
// TestTestRaceInstall checks that after installing the race-enabled std
// into a -pkgdir, "go test -race -i" finds everything cached and rebuilds
// nothing (stderr must stay empty).
func TestTestRaceInstall(t *testing.T) {
if !canRace {
t.Skip("no race detector")
}
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.tempDir("pkg")
pkgdir := tg.path("pkg")
tg.run("install", "-race", "-pkgdir="+pkgdir, "std")
tg.run("test", "-race", "-pkgdir="+pkgdir, "-i", "-v", "empty/pkg")
if tg.getStderr() != "" {
t.Error("go test -i -race: rebuilds cached packages")
}
}
// TestBuildDryRunWithCgo checks that "go build -n" on a cgo program does
// not stat archive files that the dry run never created.
func TestBuildDryRunWithCgo(t *testing.T) {
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tg := testgo(t)
defer tg.cleanup()
tg.tempFile("foo.go", `package main
/*
#include <limits.h>
*/
import "C"
func main() {
println(C.INT_MAX)
}`)
tg.run("build", "-n", tg.path("foo.go"))
tg.grepStderrNot(`os.Stat .* no such file or directory`, "unexpected stat of archive file")
}
// TestCgoDependsOnSyscall checks that a cgo package builds with -race even
// after the prebuilt $GOROOT/pkg/*_race archives have been deleted.
func TestCgoDependsOnSyscall(t *testing.T) {
if testing.Short() {
t.Skip("skipping test that removes $GOROOT/pkg/*_race in short mode")
}
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
if !canRace {
t.Skip("skipping because race detector not supported")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
// Force a rebuild of the race-enabled standard library.
files, err := filepath.Glob(filepath.Join(runtime.GOROOT(), "pkg", "*_race"))
tg.must(err)
for _, file := range files {
tg.check(robustio.RemoveAll(file))
}
tg.tempFile("src/foo/foo.go", `
package foo
//#include <stdio.h>
import "C"`)
tg.setenv("GOPATH", tg.path("."))
tg.run("build", "-race", "foo")
}
// TestCgoShowsFullPathNames checks that an error in a cgo package is
// reported with the full import path (x/y/dirname), not a shortened one.
func TestCgoShowsFullPathNames(t *testing.T) {
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/x/y/dirname/foo.go", `
package foo
import "C"
func f() {`)
tg.setenv("GOPATH", tg.path("."))
tg.runFail("build", "x/y/dirname")
tg.grepBoth("x/y/dirname", "error did not use full path")
}
// TestCgoHandlesWlORIGIN checks that a literal $ORIGIN in a #cgo LDFLAGS
// rpath directive survives the build without being mangled.
func TestCgoHandlesWlORIGIN(t *testing.T) {
tooSlow(t)
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/origin/origin.go", `package origin
// #cgo !darwin LDFLAGS: -Wl,-rpath,$ORIGIN
// void f(void) {}
import "C"
func f() { C.f() }`)
tg.setenv("GOPATH", tg.path("."))
tg.run("build", "origin")
}
// TestCgoPkgConfig checks that "#cgo pkg-config: foo" picks up Cflags
// (including a backslash-escaped space in a -D value) from a local .pc
// file found via PKG_CONFIG_PATH.
func TestCgoPkgConfig(t *testing.T) {
tooSlow(t)
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.run("env", "PKG_CONFIG")
pkgConfig := strings.TrimSpace(tg.getStdout())
testenv.MustHaveExecPath(t, pkgConfig)
if out, err := exec.Command(pkgConfig, "--atleast-pkgconfig-version", "0.24").CombinedOutput(); err != nil {
t.Skipf("%s --atleast-pkgconfig-version 0.24: %v\n%s", pkgConfig, err, out)
}
// OpenBSD's pkg-config is strict about whitespace and only
// supports backslash-escaped whitespace. It does not support
// quotes, which the normal freedesktop.org pkg-config does
// support. See https://man.openbsd.org/pkg-config.1
tg.tempFile("foo.pc", `
Name: foo
Description: The foo library
Version: 1.0.0
Cflags: -Dhello=10 -Dworld=+32 -DDEFINED_FROM_PKG_CONFIG=hello\ world
`)
tg.tempFile("foo.go", `package main
/*
#cgo pkg-config: foo
int value() {
return DEFINED_FROM_PKG_CONFIG;
}
*/
import "C"
import "os"
func main() {
if C.value() != 42 {
println("value() =", C.value(), "wanted 42")
os.Exit(1)
}
}
`)
tg.setenv("PKG_CONFIG_PATH", tg.path("."))
tg.run("run", tg.path("foo.go"))
}
// "go test -c -test.bench=XXX errors" should not hang.
// "go test -c" should also produce reproducible binaries.
// "go test -c" should also appear to write a new binary every time,
// even if it's really just updating the mtime on an existing up-to-date binary.
func TestIssue6480(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
// TODO: tg.parallel()
tg.makeTempdir()
tg.cd(tg.path("."))
tg.run("test", "-c", "-test.bench=XXX", "errors")
tg.run("test", "-c", "-o", "errors2.test", "errors")
// Reproducibility: two -c builds of the same package must be identical.
data1, err := ioutil.ReadFile("errors.test" + exeSuffix)
tg.must(err)
data2, err := ioutil.ReadFile("errors2.test") // no exeSuffix because -o above doesn't have it
tg.must(err)
if !bytes.Equal(data1, data2) {
t.Fatalf("go test -c errors produced different binaries when run twice")
}
// Freshness: rebuilding an up-to-date binary must not relink it but must
// still bump its mtime (compared at the filesystem's apparent precision).
start := time.Now()
tg.run("test", "-x", "-c", "-test.bench=XXX", "errors")
tg.grepStderrNot(`[\\/]link|gccgo`, "incorrectly relinked up-to-date test binary")
info, err := os.Stat("errors.test" + exeSuffix)
if err != nil {
t.Fatal(err)
}
start = truncateLike(start, info.ModTime())
if info.ModTime().Before(start) {
t.Fatalf("mtime of errors.test predates test -c command (%v < %v)", info.ModTime(), start)
}
start = time.Now()
tg.run("test", "-x", "-c", "-o", "errors2.test", "errors")
tg.grepStderrNot(`[\\/]link|gccgo`, "incorrectly relinked up-to-date test binary")
info, err = os.Stat("errors2.test")
if err != nil {
t.Fatal(err)
}
start = truncateLike(start, info.ModTime())
if info.ModTime().Before(start) {
t.Fatalf("mtime of errors2.test predates test -c command (%v < %v)", info.ModTime(), start)
}
}
// truncateLike returns the result of truncating t to the apparent precision of p.
func truncateLike(t, p time.Time) time.Time {
nano := p.UnixNano()
d := 1 * time.Nanosecond
for nano%int64(d) == 0 && d < 1*time.Second {
d *= 10
}
for nano%int64(d) == 0 && d < 2*time.Second {
d *= 2
}
return t.Truncate(d)
}
// cmd/cgo: undefined reference when linking a C-library using gccgo.
// Verifies (via -n dry run) that "#cgo LDFLAGS" reach the gccgo link step.
func TestIssue7573(t *testing.T) {
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
testenv.MustHaveExecPath(t, "gccgo")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/cgoref/cgoref.go", `
package main
// #cgo LDFLAGS: -L alibpath -lalib
// void f(void) {}
import "C"
func main() { C.f() }`)
tg.setenv("GOPATH", tg.path("."))
tg.run("build", "-n", "-compiler", "gccgo", "cgoref")
tg.grepStderr(`gccgo.*\-L [^ ]*alibpath \-lalib`, `no Go-inline "#cgo LDFLAGS:" ("-L alibpath -lalib") passed to gccgo linking stage`)
}
// TestListTemplateContextFunction checks the {{context.X}} template function
// of go list for each build-context field; fields with an empty "want" are
// only checked to render without error.
func TestListTemplateContextFunction(t *testing.T) {
t.Parallel()
for _, tt := range []struct {
v string
want string
}{
{"GOARCH", runtime.GOARCH},
{"GOOS", runtime.GOOS},
{"GOROOT", filepath.Clean(runtime.GOROOT())},
{"GOPATH", os.Getenv("GOPATH")},
{"CgoEnabled", ""},
{"UseAllFiles", ""},
{"Compiler", ""},
{"BuildTags", ""},
{"ReleaseTags", ""},
{"InstallSuffix", ""},
} {
tt := tt // capture range variable for the parallel subtest
t.Run(tt.v, func(t *testing.T) {
tg := testgo(t)
tg.parallel()
defer tg.cleanup()
tmpl := "{{context." + tt.v + "}}"
tg.run("list", "-f", tmpl)
if tt.want == "" {
return
}
if got := strings.TrimSpace(tg.getStdout()); got != tt.want {
t.Errorf("go list -f %q: got %q; want %q", tmpl, got, tt.want)
}
})
}
}
// TestGoBuildTestOnly checks that test-only packages fail build/install when
// named explicitly but are silently skipped when matched by a wildcard.
func TestGoBuildTestOnly(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.makeTempdir()
tg.setenv("GOPATH", tg.path("."))
tg.tempFile("src/testonly/t_test.go", `package testonly`)
tg.tempFile("src/testonly2/t.go", `package testonly2`)
tg.cd(tg.path("src"))
// Named explicitly, test-only packages should be reported as unbuildable/uninstallable,
// even if there is a wildcard also matching.
tg.runFail("build", "testonly", "testonly...")
tg.grepStderr("no non-test Go files in", "go build ./xtestonly produced unexpected error")
tg.runFail("install", "./testonly")
tg.grepStderr("no non-test Go files in", "go install ./testonly produced unexpected error")
// Named through a wildcards, the test-only packages should be silently ignored.
tg.run("build", "testonly...")
tg.run("install", "./testonly...")
}
// TestGoTestFooTestWorks checks that a standalone _test.go file can be run
// directly by file name.
func TestGoTestFooTestWorks(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.run("test", "testdata/standalone_test.go")
}
// TestGoTestTestMainSeesTestingFlags runs a standalone test whose TestMain
// inspects the testing flags.
func TestGoTestTestMainSeesTestingFlags(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.run("test", "testdata/standalone_testmain_flag_test.go")
}
// Issue 22388. A TestMain with the wrong signature must produce a clear
// compile-time diagnostic rather than being silently treated as a test.
func TestGoTestMainWithWrongSignature(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.runFail("test", "testdata/standalone_main_wrong_test.go")
tg.grepStderr(`wrong signature for TestMain, must be: func TestMain\(m \*testing.M\)`, "detected wrong error message")
}
// TestGoTestMainAsNormalTest checks that a correctly-shaped TestMain in a
// standalone file runs and the suite reports ok.
func TestGoTestMainAsNormalTest(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.run("test", "testdata/standalone_main_normal_test.go")
tg.grepBoth(okPattern, "go test did not say ok")
}
// TestGoTestXtestonlyWorks checks that a package containing only external
// (xtest) test files can be cleaned and tested.
func TestGoTestXtestonlyWorks(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.run("clean", "-i", "xtestonly")
tg.run("test", "xtestonly")
}
// TestGoTestBuildsAnXtestContainingOnlyNonRunnableExamples checks that an
// xtest whose only contents are non-runnable examples is still compiled.
func TestGoTestBuildsAnXtestContainingOnlyNonRunnableExamples(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.run("test", "-v", "./testdata/norunexample")
tg.grepStdout("File with non-runnable example was built.", "file with non-runnable example was not built")
}
// TestGoGenerateHandlesSimpleCommand checks that a plain //go:generate
// command line is executed (uses the Unix echo command).
func TestGoGenerateHandlesSimpleCommand(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("skipping because windows has no echo command")
}
tg := testgo(t)
defer tg.cleanup()
tg.run("generate", "./testdata/generate/test1.go")
tg.grepStdout("Success", "go generate ./testdata/generate/test1.go generated wrong output")
}
// TestGoGenerateHandlesCommandAlias checks //go:generate -command aliases.
func TestGoGenerateHandlesCommandAlias(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("skipping because windows has no echo command")
}
tg := testgo(t)
defer tg.cleanup()
tg.run("generate", "./testdata/generate/test2.go")
tg.grepStdout("Now is the time for all good men", "go generate ./testdata/generate/test2.go generated wrong output")
}
// TestGoGenerateVariableSubstitution checks substitution of $GOARCH, $GOFILE,
// $GOLINE and friends inside a //go:generate line.
func TestGoGenerateVariableSubstitution(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("skipping because windows has no echo command")
}
tg := testgo(t)
defer tg.cleanup()
tg.run("generate", "./testdata/generate/test3.go")
tg.grepStdout(runtime.GOARCH+" test3.go:7 pabc xyzp/test3.go/123", "go generate ./testdata/generate/test3.go generated wrong output")
}
// TestGoGenerateRunFlag checks that -run selects only the matching
// //go:generate directives ("yes") and skips the rest ("no").
func TestGoGenerateRunFlag(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("skipping because windows has no echo command")
}
tg := testgo(t)
defer tg.cleanup()
tg.run("generate", "-run", "y.s", "./testdata/generate/test4.go")
tg.grepStdout("yes", "go generate -run yes ./testdata/generate/test4.go did not select yes")
tg.grepStdoutNot("no", "go generate -run yes ./testdata/generate/test4.go selected no")
}
// TestGoGenerateEnv checks that go generate exports its documented
// environment variables (GOARCH, GOOS, GOFILE, GOLINE, GOPACKAGE, DOLLAR)
// to the generator process.
func TestGoGenerateEnv(t *testing.T) {
switch runtime.GOOS {
case "plan9", "windows":
t.Skipf("skipping because %s does not have the env command", runtime.GOOS)
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("env.go", "package main\n\n//go:generate env")
tg.run("generate", tg.path("env.go"))
for _, v := range []string{"GOARCH", "GOOS", "GOFILE", "GOLINE", "GOPACKAGE", "DOLLAR"} {
tg.grepStdout("^"+v+"=", "go generate environment missing "+v)
}
}
// TestGoGenerateXTestPkgName checks that $GOPACKAGE is the xtest package
// name (main_test) when generating from an external test file.
func TestGoGenerateXTestPkgName(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("skipping because windows has no echo command")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("env_test.go", "package main_test\n\n//go:generate echo $GOPACKAGE")
tg.run("generate", tg.path("env_test.go"))
want := "main_test"
if got := strings.TrimSpace(tg.getStdout()); got != want {
t.Errorf("go generate in XTest file got package name %q; want %q", got, want)
}
}
// TestGoGetCustomDomainWildcard checks that "go get -u custom.domain/..."
// resolves the wildcard and installs the matched commands.
func TestGoGetCustomDomainWildcard(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
tg := testgo(t)
defer tg.cleanup()
tg.makeTempdir()
tg.setenv("GOPATH", tg.path("."))
tg.run("get", "-u", "rsc.io/pdf/...")
tg.wantExecutable(tg.path("bin/pdfpasswd"+exeSuffix), "did not build rsc/io/pdf/pdfpasswd")
}
// TestGoGetInternalWildcard checks that a ... wildcard covering internal
// packages does not make go get fail.
func TestGoGetInternalWildcard(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
tg := testgo(t)
defer tg.cleanup()
tg.makeTempdir()
tg.setenv("GOPATH", tg.path("."))
// used to fail with errors about internal packages
tg.run("get", "github.com/rsc/go-get-issue-11960/...")
}
// TestGoVetWithExternalTests checks that go vet also analyzes external
// (xtest) test files of the package.
func TestGoVetWithExternalTests(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.makeTempdir()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.runFail("vet", "vetpkg")
tg.grepBoth("Printf", "go vet vetpkg did not find missing argument for Printf")
}
// TestGoVetWithTags checks that go vet honors -tags and scans files that
// are only included under the given build tag.
func TestGoVetWithTags(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.makeTempdir()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.runFail("vet", "-tags", "tagtest", "vetpkg")
tg.grepBoth(`c\.go.*Printf`, "go vet vetpkg did not run scan tagged file")
}
// TestGoVetWithFlagsOn checks that enabling a specific analyzer flag
// (-printf) still reports the Printf diagnostic.
func TestGoVetWithFlagsOn(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.makeTempdir()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.runFail("vet", "-printf", "vetpkg")
tg.grepBoth("Printf", "go vet -printf vetpkg did not find missing argument for Printf")
}
// TestGoVetWithFlagsOff checks that disabling the printf analyzer
// (-printf=false) suppresses the diagnostic, so vet succeeds.
func TestGoVetWithFlagsOff(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.makeTempdir()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.run("vet", "-printf=false", "vetpkg")
}
// Issue 23395. go vet must handle a package that contains only test files.
func TestGoVetWithOnlyTestFiles(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/p/p_test.go", "package p; import \"testing\"; func TestMe(*testing.T) {}")
tg.setenv("GOPATH", tg.path("."))
tg.run("vet", "p")
}
// Issue 24193. go vet must handle a package whose only files use cgo.
func TestVetWithOnlyCgoFiles(t *testing.T) {
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/p/p.go", "package p; import \"C\"; func F() {}")
tg.setenv("GOPATH", tg.path("."))
tg.run("vet", "p")
}
// Issue 9767, 19769.
func TestGoGetDotSlashDownload(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
tg := testgo(t)
defer tg.cleanup()
tg.tempDir("src/rsc.io")
tg.setenv("GOPATH", tg.path("."))
tg.cd(tg.path("src/rsc.io"))
tg.run("get", "./pprof_mac_fix")
}
// Test that you cannot use a local import in a package
// accessed by a non-local import (found in a GOPATH/GOROOT).
// See golang.org/issue/17475.
//
// The test walks through every flavor of relative import ("../x", "./x",
// "..", ".") and checks each one in an ordinary file, an internal test,
// and an external (xtest) test. The tempFile calls deliberately overwrite
// earlier fixtures, so statement order matters throughout.
func TestImportLocal(t *testing.T) {
	tooSlow(t)
	tg := testgo(t)
	tg.parallel()
	defer tg.cleanup()
	tg.tempFile("src/dir/x/x.go", `package x
var X int
`)
	tg.setenv("GOPATH", tg.path("."))
	tg.run("build", "dir/x")
	// Ordinary import should work.
	tg.tempFile("src/dir/p0/p.go", `package p0
import "dir/x"
var _ = x.X
`)
	tg.run("build", "dir/p0")
	// Relative import should not.
	tg.tempFile("src/dir/p1/p.go", `package p1
import "../x"
var _ = x.X
`)
	tg.runFail("build", "dir/p1")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
	// ... even in a test.
	tg.tempFile("src/dir/p2/p.go", `package p2
`)
	tg.tempFile("src/dir/p2/p_test.go", `package p2
import "../x"
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "dir/p2")
	tg.runFail("test", "dir/p2")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
	// ... even in an xtest.
	tg.tempFile("src/dir/p2/p_test.go", `package p2_test
import "../x"
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "dir/p2")
	tg.runFail("test", "dir/p2")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
	// Relative import starting with ./ should not work either.
	tg.tempFile("src/dir/d.go", `package dir
import "./x"
var _ = x.X
`)
	tg.runFail("build", "dir")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
	// ... even in a test.
	tg.tempFile("src/dir/d.go", `package dir
`)
	tg.tempFile("src/dir/d_test.go", `package dir
import "./x"
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "dir")
	tg.runFail("test", "dir")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
	// ... even in an xtest.
	tg.tempFile("src/dir/d_test.go", `package dir_test
import "./x"
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "dir")
	tg.runFail("test", "dir")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
	// Relative import plain ".." should not work.
	tg.tempFile("src/dir/x/y/y.go", `package dir
import ".."
var _ = x.X
`)
	tg.runFail("build", "dir/x/y")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
	// ... even in a test.
	tg.tempFile("src/dir/x/y/y.go", `package y
`)
	tg.tempFile("src/dir/x/y/y_test.go", `package y
import ".."
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "dir/x/y")
	tg.runFail("test", "dir/x/y")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
	// ... even in an x test.
	tg.tempFile("src/dir/x/y/y_test.go", `package y_test
import ".."
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "dir/x/y")
	tg.runFail("test", "dir/x/y")
	tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
	// Relative import "." should not work.
	tg.tempFile("src/dir/x/xx.go", `package x
import "."
var _ = x.X
`)
	tg.runFail("build", "dir/x")
	tg.grepStderr("cannot import current directory", "did not diagnose import current directory")
	// ... even in a test.
	tg.tempFile("src/dir/x/xx.go", `package x
`)
	tg.tempFile("src/dir/x/xx_test.go", `package x
import "."
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "dir/x")
	tg.runFail("test", "dir/x")
	tg.grepStderr("cannot import current directory", "did not diagnose import current directory")
	// ... even in an xtest.
	tg.tempFile("src/dir/x/xx.go", `package x
`)
	tg.tempFile("src/dir/x/xx_test.go", `package x_test
import "."
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
	tg.run("build", "dir/x")
	tg.runFail("test", "dir/x")
	tg.grepStderr("cannot import current directory", "did not diagnose import current directory")
}
// TestGoGetInsecure verifies that 'go get' refuses HTTP-only repositories
// unless -insecure is given, in both GOPATH and module mode.
func TestGoGetInsecure(t *testing.T) {
	test := func(t *testing.T, modules bool) {
		testenv.MustHaveExternalNetwork(t)
		testenv.MustHaveExecPath(t, "git")
		tg := testgo(t)
		defer tg.cleanup()
		tg.makeTempdir()
		tg.failSSH()
		if modules {
			tg.setenv("GOPATH", tg.path("gp"))
			tg.tempFile("go.mod", "module m")
			tg.cd(tg.path("."))
			tg.setenv("GO111MODULE", "on")
			tg.setenv("GOPROXY", "")
		} else {
			tg.setenv("GOPATH", tg.path("."))
			tg.setenv("GO111MODULE", "off")
		}
		const repo = "insecure.go-get-issue-15410.appspot.com/pkg/p"
		// Try go get -d of HTTP-only repo (should fail).
		tg.runFail("get", "-d", repo)
		// Try again with -insecure (should succeed).
		tg.run("get", "-d", "-insecure", repo)
		// Try updating without -insecure (should fail).
		tg.runFail("get", "-d", "-u", "-f", repo)
		if modules {
			tg.run("list", "-m", "...")
			tg.grepStdout("insecure.go-get-issue", "should find insecure module")
		}
	}
	t.Run("gopath", func(t *testing.T) { test(t, false) })
	t.Run("modules", func(t *testing.T) { test(t, true) })
}

// TestGoGetUpdateInsecure verifies that updating a repo whose remote is
// HTTP-only requires -insecure.
func TestGoGetUpdateInsecure(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	testenv.MustHaveExecPath(t, "git")
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	const repo = "github.com/golang/example"
	// Clone the repo via HTTP manually.
	cmd := exec.Command("git", "clone", "-q", "http://"+repo, tg.path("src/"+repo))
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("cloning %v repo: %v\n%s", repo, err, out)
	}
	// Update without -insecure should fail.
	// Update with -insecure should succeed.
	// We need -f to ignore import comments.
	const pkg = repo + "/hello"
	tg.runFail("get", "-d", "-u", "-f", pkg)
	tg.run("get", "-d", "-u", "-f", "-insecure", pkg)
}

// TestGoGetUpdateUnknownProtocol verifies that 'go get -u' tolerates a
// repository whose remote uses a scheme unknown to cmd/go, as long as git
// itself can handle it (here via an insteadOf rewrite).
func TestGoGetUpdateUnknownProtocol(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	testenv.MustHaveExecPath(t, "git")
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	const repo = "github.com/golang/example"
	// Clone the repo via HTTPS manually.
	repoDir := tg.path("src/" + repo)
	cmd := exec.Command("git", "clone", "-q", "https://"+repo, repoDir)
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("cloning %v repo: %v\n%s", repo, err, out)
	}
	// Configure the repo to use a protocol unknown to cmd/go
	// that still actually works.
	cmd = exec.Command("git", "remote", "set-url", "origin", "xyz://"+repo)
	cmd.Dir = repoDir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("git remote set-url: %v\n%s", err, out)
	}
	cmd = exec.Command("git", "config", "--local", "url.https://github.com/.insteadOf", "xyz://github.com/")
	cmd.Dir = repoDir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("git config: %v\n%s", err, out)
	}
	// We need -f to ignore import comments.
	tg.run("get", "-d", "-u", "-f", repo+"/hello")
}

// TestGoGetInsecureCustomDomain is the custom-domain variant of the
// -insecure check: HTTP-only fetch fails without the flag, succeeds with it.
func TestGoGetInsecureCustomDomain(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	testenv.MustHaveExecPath(t, "git")
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	const repo = "insecure.go-get-issue-15410.appspot.com/pkg/p"
	tg.runFail("get", "-d", repo)
	tg.run("get", "-d", "-insecure", repo)
}
// TestGoRunDirs verifies that 'go run' rejects named files from different
// directories, in either argument order.
func TestGoRunDirs(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.cd("testdata/rundir")
	tg.runFail("run", "x.go", "sub/sub.go")
	tg.grepStderr("named files must all be in one directory; have ./ and sub/", "wrong output")
	tg.runFail("run", "sub/sub.go", "x.go")
	tg.grepStderr("named files must all be in one directory; have sub/ and ./", "wrong output")
}

// TestGoInstallPkgdir verifies that 'go install -pkgdir' writes archives for
// the named package into the given directory, and that -i additionally
// installs its dependencies there.
func TestGoInstallPkgdir(t *testing.T) {
	skipIfGccgo(t, "gccgo has no standard packages")
	tooSlow(t)
	tg := testgo(t)
	tg.parallel()
	defer tg.cleanup()
	tg.makeTempdir()
	pkg := tg.path(".")
	tg.run("install", "-pkgdir", pkg, "sync")
	tg.mustExist(filepath.Join(pkg, "sync.a"))
	// Without -i, the dependency sync/atomic must not be installed.
	tg.mustNotExist(filepath.Join(pkg, "sync/atomic.a"))
	tg.run("install", "-i", "-pkgdir", pkg, "sync")
	tg.mustExist(filepath.Join(pkg, "sync.a"))
	tg.mustExist(filepath.Join(pkg, "sync/atomic.a"))
}
// TestGoTestRaceInstallCgo verifies that 'go test -race -i' does not
// overwrite the installed cmd/cgo binary with a race-enabled build
// (golang.org/issue/10500). The check compares cmd/cgo's modification time
// before and after the run.
func TestGoTestRaceInstallCgo(t *testing.T) {
	if !canRace {
		t.Skip("skipping because race detector not supported")
	}
	// golang.org/issue/10500.
	// This used to install a race-enabled cgo.
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("tool", "-n", "cgo")
	cgo := strings.TrimSpace(tg.stdout.String())
	old, err := os.Stat(cgo)
	tg.must(err)

	// For this test, we don't actually care whether 'go test -race -i' succeeds.
	// It may fail, for example, if GOROOT was installed from source as root and
	// is now read-only.
	// We only care that — regardless of whether it succeeds — it does not
	// overwrite cmd/cgo.
	runArgs := []string{"test", "-race", "-i", "runtime/race"}
	if status := tg.doRun(runArgs); status != nil {
		tg.t.Logf("go %v failure ignored: %v", runArgs, status)
	}

	// Renamed from `new` to avoid shadowing the builtin new().
	cur, err := os.Stat(cgo)
	tg.must(err)
	if !cur.ModTime().Equal(old.ModTime()) {
		t.Fatalf("go test -i runtime/race reinstalled cmd/cgo")
	}
}
// TestGoGetUpdate verifies that 'go get -u' walks current dependencies,
// not former ones (golang.org/issue/9224). The fixture repo's HEAD~ commit
// has a different dependency graph, so the test rewinds the library repo
// before each update.
func TestGoGetUpdate(t *testing.T) {
	// golang.org/issue/9224.
	// The recursive updating was trying to walk to
	// former dependencies, not current ones.
	testenv.MustHaveExternalNetwork(t)
	testenv.MustHaveExecPath(t, "git")
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	// rewind fetches the command package and resets its library dependency
	// one commit back, recreating the "former dependency" state.
	rewind := func() {
		tg.run("get", "github.com/rsc/go-get-issue-9224-cmd")
		cmd := exec.Command("git", "reset", "--hard", "HEAD~")
		cmd.Dir = tg.path("src/github.com/rsc/go-get-issue-9224-lib")
		out, err := cmd.CombinedOutput()
		if err != nil {
			t.Fatalf("git: %v\n%s", err, out)
		}
	}
	rewind()
	tg.run("get", "-u", "github.com/rsc/go-get-issue-9224-cmd")
	// Again with -d -u.
	rewind()
	tg.run("get", "-d", "-u", "github.com/rsc/go-get-issue-9224-cmd")
}
// Issue #20512.
// 'go get -race' must work when the race detector is available.
func TestGoGetRace(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	testenv.MustHaveExecPath(t, "git")
	if !canRace {
		t.Skip("skipping because race detector not supported")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	tg.run("get", "-race", "github.com/rsc/go-get-issue-9224-cmd")
}

// TestGoGetDomainRoot exercises fetching a package whose import path is a
// bare domain (served via a <meta> tag at the domain root), in every
// combination of fresh get, repeated get, and -u update.
func TestGoGetDomainRoot(t *testing.T) {
	// golang.org/issue/9357.
	// go get foo.io (not foo.io/subdir) was not working consistently.
	testenv.MustHaveExternalNetwork(t)
	testenv.MustHaveExecPath(t, "git")
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	// go-get-issue-9357.appspot.com is running
	// the code at github.com/rsc/go-get-issue-9357,
	// a trivial Go on App Engine app that serves a
	// <meta> tag for the domain root.
	tg.run("get", "-d", "go-get-issue-9357.appspot.com")
	tg.run("get", "go-get-issue-9357.appspot.com")
	tg.run("get", "-u", "go-get-issue-9357.appspot.com")
	tg.must(robustio.RemoveAll(tg.path("src/go-get-issue-9357.appspot.com")))
	tg.run("get", "go-get-issue-9357.appspot.com")
	tg.must(robustio.RemoveAll(tg.path("src/go-get-issue-9357.appspot.com")))
	tg.run("get", "-u", "go-get-issue-9357.appspot.com")
}

// TestGoInstallShadowedGOPATH verifies that 'go install' reports an error
// when the package directory is shadowed by an earlier GOPATH entry.
func TestGoInstallShadowedGOPATH(t *testing.T) {
	// golang.org/issue/3652.
	// go get foo.io (not foo.io/subdir) was not working consistently.
	testenv.MustHaveExternalNetwork(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("gopath1")+string(filepath.ListSeparator)+tg.path("gopath2"))
	tg.tempDir("gopath1/src/test")
	tg.tempDir("gopath2/src/test")
	tg.tempFile("gopath2/src/test/main.go", "package main\nfunc main(){}\n")
	tg.cd(tg.path("gopath2/src/test"))
	tg.runFail("install")
	tg.grepStderr("no install location for.*gopath2.src.test: hidden by .*gopath1.src.test", "missing error")
}
// TestGoBuildGOPATHOrder verifies that -I arguments passed to the compiler
// follow GOPATH order, so a stale archive in a later GOPATH entry cannot
// shadow sources in an earlier one.
func TestGoBuildGOPATHOrder(t *testing.T) {
	// golang.org/issue/14176#issuecomment-179895769
	// golang.org/issue/14192
	// -I arguments to compiler could end up not in GOPATH order,
	// leading to unexpected import resolution in the compiler.
	// This is still not a complete fix (see golang.org/issue/14271 and next test)
	// but it is clearly OK and enough to fix both of the two reported
	// instances of the underlying problem. It will have to do for now.
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("p1")+string(filepath.ListSeparator)+tg.path("p2"))
	tg.tempFile("p1/src/foo/foo.go", "package foo\n")
	tg.tempFile("p2/src/baz/baz.go", "package baz\n")
	// A deliberately corrupt archive in p2; GOPATH order must prevent it
	// from being picked up for the "foo" import.
	tg.tempFile("p2/pkg/"+runtime.GOOS+"_"+runtime.GOARCH+"/foo.a", "bad\n")
	tg.tempFile("p1/src/bar/bar.go", `
package bar
import _ "baz"
import _ "foo"
`)
	tg.run("install", "-x", "bar")
}

// TestGoBuildGOPATHOrderBroken covers the still-unfixed half of the GOPATH
// ordering problem and is skipped unconditionally.
func TestGoBuildGOPATHOrderBroken(t *testing.T) {
	// This test is known not to work.
	// See golang.org/issue/14271.
	t.Skip("golang.org/issue/14271")
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.tempFile("p1/src/foo/foo.go", "package foo\n")
	tg.tempFile("p2/src/baz/baz.go", "package baz\n")
	tg.tempFile("p1/pkg/"+runtime.GOOS+"_"+runtime.GOARCH+"/baz.a", "bad\n")
	tg.tempFile("p2/pkg/"+runtime.GOOS+"_"+runtime.GOARCH+"/foo.a", "bad\n")
	tg.tempFile("p1/src/bar/bar.go", `
package bar
import _ "baz"
import _ "foo"
`)
	colon := string(filepath.ListSeparator)
	tg.setenv("GOPATH", tg.path("p1")+colon+tg.path("p2"))
	tg.run("install", "-x", "bar")
	tg.setenv("GOPATH", tg.path("p2")+colon+tg.path("p1"))
	tg.run("install", "-x", "bar")
}

// TestIssue11709 verifies that 'go run' does not leak environment variables
// (here TERM) into the child process once unset.
func TestIssue11709(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.tempFile("run.go", `
package main
import "os"
func main() {
if os.Getenv("TERM") != "" {
os.Exit(1)
}
}`)
	tg.unsetenv("TERM")
	tg.run("run", tg.path("run.go"))
}

// TestIssue12096 is the 'go test' analogue of TestIssue11709: an unset
// TERM must not reappear in the test binary's environment.
func TestIssue12096(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.tempFile("test_test.go", `
package main
import ("os"; "testing")
func TestEnv(t *testing.T) {
if os.Getenv("TERM") != "" {
t.Fatal("TERM is set")
}
}`)
	tg.unsetenv("TERM")
	tg.run("test", tg.path("test_test.go"))
}
// TestGoBuildOutput checks the output-file naming rules of 'go build':
// executables for main packages (with exeSuffix), nothing for non-main
// packages unless -o is given, archives for -o on library packages, and an
// error when -o is combined with multiple packages.
func TestGoBuildOutput(t *testing.T) {
	skipIfGccgo(t, "gccgo has no standard packages")
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.cd(tg.path("."))
	// nonExeSuffix is the "other" suffix: ".exe" on non-Windows hosts,
	// "" on Windows, used to assert the wrongly-suffixed file is absent.
	nonExeSuffix := ".exe"
	if exeSuffix == ".exe" {
		nonExeSuffix = ""
	}
	tg.tempFile("x.go", "package main\nfunc main(){}\n")
	tg.run("build", "x.go")
	tg.wantExecutable("x"+exeSuffix, "go build x.go did not write x"+exeSuffix)
	tg.must(os.Remove(tg.path("x" + exeSuffix)))
	tg.mustNotExist("x" + nonExeSuffix)
	tg.run("build", "-o", "myprog", "x.go")
	tg.mustNotExist("x")
	tg.mustNotExist("x.exe")
	tg.wantExecutable("myprog", "go build -o myprog x.go did not write myprog")
	tg.mustNotExist("myprog.exe")
	// Building a non-main package writes no output file.
	tg.tempFile("p.go", "package p\n")
	tg.run("build", "p.go")
	tg.mustNotExist("p")
	tg.mustNotExist("p.a")
	tg.mustNotExist("p.o")
	tg.mustNotExist("p.exe")
	tg.run("build", "-o", "p.a", "p.go")
	tg.wantArchive("p.a")
	tg.run("build", "cmd/gofmt")
	tg.wantExecutable("gofmt"+exeSuffix, "go build cmd/gofmt did not write gofmt"+exeSuffix)
	tg.must(os.Remove(tg.path("gofmt" + exeSuffix)))
	tg.mustNotExist("gofmt" + nonExeSuffix)
	tg.run("build", "-o", "mygofmt", "cmd/gofmt")
	tg.wantExecutable("mygofmt", "go build -o mygofmt cmd/gofmt did not write mygofmt")
	tg.mustNotExist("mygofmt.exe")
	tg.mustNotExist("gofmt")
	tg.mustNotExist("gofmt.exe")
	tg.run("build", "sync/atomic")
	tg.mustNotExist("atomic")
	tg.mustNotExist("atomic.exe")
	tg.run("build", "-o", "myatomic.a", "sync/atomic")
	tg.wantArchive("myatomic.a")
	tg.mustNotExist("atomic")
	tg.mustNotExist("atomic.a")
	tg.mustNotExist("atomic.exe")
	tg.runFail("build", "-o", "whatever", "cmd/gofmt", "sync/atomic")
	tg.grepStderr("multiple packages", "did not reject -o with multiple packages")
}

// TestGoBuildARM smoke-tests cross-compilation to linux/arm (GOARM=5).
func TestGoBuildARM(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping cross-compile in short mode")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.cd(tg.path("."))
	tg.setenv("GOARCH", "arm")
	tg.setenv("GOOS", "linux")
	tg.setenv("GOARM", "5")
	tg.tempFile("hello.go", `package main
func main() {}`)
	tg.run("build", "hello.go")
	tg.grepStderrNot("unable to find math.a", "did not build math.a correctly")
}
// For issue 14337.
// TestParallelTest runs four test packages with -p=4 to exercise parallel
// test execution in cmd/go.
func TestParallelTest(t *testing.T) {
	tooSlow(t)
	tg := testgo(t)
	tg.parallel()
	defer tg.cleanup()
	tg.makeTempdir()
	const testSrc = `package package_test
import (
"testing"
)
func TestTest(t *testing.T) {
}`
	// Stamp the same source into four packages with distinct package names.
	tg.tempFile("src/p1/p1_test.go", strings.Replace(testSrc, "package_test", "p1_test", 1))
	tg.tempFile("src/p2/p2_test.go", strings.Replace(testSrc, "package_test", "p2_test", 1))
	tg.tempFile("src/p3/p3_test.go", strings.Replace(testSrc, "package_test", "p3_test", 1))
	tg.tempFile("src/p4/p4_test.go", strings.Replace(testSrc, "package_test", "p4_test", 1))
	tg.setenv("GOPATH", tg.path("."))
	tg.run("test", "-p=4", "p1", "p2", "p3", "p4")
}

// TestCgoConsistentResults builds the same cgo package twice (once with -x)
// and requires bit-identical binaries, i.e. reproducible cgo builds.
func TestCgoConsistentResults(t *testing.T) {
	tooSlow(t)
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	switch runtime.GOOS {
	case "solaris", "illumos":
		testenv.SkipFlaky(t, 13247)
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	exe1 := tg.path("cgotest1" + exeSuffix)
	exe2 := tg.path("cgotest2" + exeSuffix)
	tg.run("build", "-o", exe1, "cgotest")
	tg.run("build", "-x", "-o", exe2, "cgotest")
	b1, err := ioutil.ReadFile(exe1)
	tg.must(err)
	b2, err := ioutil.ReadFile(exe2)
	tg.must(err)
	// Reproducibility depends on path scrubbing; skip if the C compiler
	// lacks -fdebug-prefix-map (checked in the -x output above).
	if !tg.doGrepMatch(`-fdebug-prefix-map=\$WORK`, &tg.stderr) {
		t.Skip("skipping because C compiler does not support -fdebug-prefix-map")
	}
	if !bytes.Equal(b1, b2) {
		t.Error("building cgotest twice did not produce the same output")
	}
}

// Issue 14444: go get -u .../ duplicate loads errors
// The wildcard update must not re-load already-cached packages.
func TestGoGetUpdateAllDoesNotTryToLoadDuplicates(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	tg.run("get", "-u", ".../")
	tg.grepStderrNot("duplicate loads of", "did not remove old packages from cache")
}
// TestBinaryOnlyPackages verifies that //go:binary-only-package is no longer
// supported: such packages are always stale, refuse to build, and are still
// reported correctly by 'go list -f {{.BinaryOnly}}'.
func TestBinaryOnlyPackages(t *testing.T) {
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	tg.tempFile("src/p1/p1.go", `//go:binary-only-package
package p1
`)
	tg.wantStale("p1", "binary-only packages are no longer supported", "p1 is binary-only, and this message should always be printed")
	tg.runFail("install", "p1")
	tg.grepStderr("binary-only packages are no longer supported", "did not report attempt to compile binary-only package")
	// Replace p1 with a real source package and install it.
	tg.tempFile("src/p1/p1.go", `
package p1
import "fmt"
func F(b bool) { fmt.Printf("hello from p1\n"); if b { F(false) } }
`)
	tg.run("install", "p1")
	os.Remove(tg.path("src/p1/p1.go"))
	tg.mustNotExist(tg.path("src/p1/p1.go"))
	// A near-miss directive (wrong suffix) is just a comment; the package
	// then has no Go files at all.
	tg.tempFile("src/p2/p2.go", `//go:binary-only-packages-are-not-great
package p2
import "p1"
func F() { p1.F(true) }
`)
	tg.runFail("install", "p2")
	tg.grepStderr("no Go files", "did not complain about missing sources")
	tg.tempFile("src/p1/missing.go", `//go:binary-only-package
package p1
import _ "fmt"
func G()
`)
	tg.wantStale("p1", "binary-only package", "should NOT want to rebuild p1 (first)")
	tg.runFail("install", "p2")
	tg.grepStderr("p1: binary-only packages are no longer supported", "did not report error for binary-only p1")
	tg.run("list", "-deps", "-f", "{{.ImportPath}}: {{.BinaryOnly}}", "p2")
	tg.grepStdout("p1: true", "p1 not listed as BinaryOnly")
	tg.grepStdout("p2: false", "p2 listed as BinaryOnly")
}

// Issue 16050.
// .syso files must be linked into the package whether or not cgo is enabled.
func TestAlwaysLinkSysoFiles(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src/syso")
	tg.tempFile("src/syso/a.syso", ``)
	tg.tempFile("src/syso/b.go", `package syso`)
	tg.setenv("GOPATH", tg.path("."))
	// We should see the .syso file regardless of the setting of
	// CGO_ENABLED.
	tg.setenv("CGO_ENABLED", "1")
	tg.run("list", "-f", "{{.SysoFiles}}", "syso")
	tg.grepStdout("a.syso", "missing syso file with CGO_ENABLED=1")
	tg.setenv("CGO_ENABLED", "0")
	tg.run("list", "-f", "{{.SysoFiles}}", "syso")
	tg.grepStdout("a.syso", "missing syso file with CGO_ENABLED=0")
}

// Issue 16120.
// 'go generate' must expose the configured GOOS/GOARCH (not the host's)
// to the generator command's environment.
func TestGenerateUsesBuildContext(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("this test won't run under Windows")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempDir("src/gen")
	tg.tempFile("src/gen/gen.go", "package gen\n//go:generate echo $GOOS $GOARCH\n")
	tg.setenv("GOPATH", tg.path("."))
	tg.setenv("GOOS", "linux")
	tg.setenv("GOARCH", "amd64")
	tg.run("generate", "gen")
	tg.grepStdout("linux amd64", "unexpected GOOS/GOARCH combination")
	tg.setenv("GOOS", "darwin")
	tg.setenv("GOARCH", "386")
	tg.run("generate", "gen")
	tg.grepStdout("darwin 386", "unexpected GOOS/GOARCH combination")
}
// Issue 14450: go get -u .../ tried to import not downloaded package
// Even when the wildcard update fails on one missing dependency, the
// sources that could be fetched must still be present on disk.
func TestGoGetUpdateWithWildcard(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)
	testenv.MustHaveExecPath(t, "git")
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	const aPkgImportPath = "github.com/tmwh/go-get-issue-14450/a"
	tg.run("get", aPkgImportPath)
	tg.runFail("get", "-u", ".../")
	tg.grepStderr("cannot find package.*d-dependency/e", "should have detected e missing")
	// Even though get -u failed, the source for others should be downloaded.
	var expectedPkgPaths = []string{
		"src/github.com/tmwh/go-get-issue-14450/b",
		"src/github.com/tmwh/go-get-issue-14450-b-dependency/c",
		"src/github.com/tmwh/go-get-issue-14450-b-dependency/d",
	}
	for _, importPath := range expectedPkgPaths {
		_, err := os.Stat(tg.path(importPath))
		tg.must(err)
	}
	const notExpectedPkgPath = "src/github.com/tmwh/go-get-issue-14450-c-dependency/e"
	tg.mustNotExist(tg.path(notExpectedPkgPath))
}

// TestGoEnv checks that 'go env' reflects both defaults (GCCGO, CGO_CFLAGS)
// and explicit environment overrides (GOARCH, CGO_CFLAGS, CC), and that CC
// arguments flow through to GOGCCFLAGS.
func TestGoEnv(t *testing.T) {
	tg := testgo(t)
	tg.parallel()
	defer tg.cleanup()
	tg.setenv("GOOS", "freebsd") // to avoid invalid pair errors
	tg.setenv("GOARCH", "arm")
	tg.run("env", "GOARCH")
	tg.grepStdout("^arm$", "GOARCH not honored")
	tg.run("env", "GCCGO")
	tg.grepStdout(".", "GCCGO unexpectedly empty")
	tg.run("env", "CGO_CFLAGS")
	tg.grepStdout(".", "default CGO_CFLAGS unexpectedly empty")
	tg.setenv("CGO_CFLAGS", "-foobar")
	tg.run("env", "CGO_CFLAGS")
	tg.grepStdout("^-foobar$", "CGO_CFLAGS not honored")
	tg.setenv("CC", "gcc -fmust -fgo -ffaster")
	tg.run("env", "CC")
	tg.grepStdout("gcc", "CC not found")
	tg.run("env", "GOGCCFLAGS")
	tg.grepStdout("-ffaster", "CC arguments not found")
}

// Regexps matching 'go test' summary output, shared by the TestMatches*
// tests below.
const (
	noMatchesPattern = `(?m)^ok.*\[no tests to run\]`
	okPattern        = `(?m)^ok`
)
// TestMatchesNoTests: -run matching nothing must print [no tests to run].
func TestMatchesNoTests(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.run("test", "-run", "ThisWillNotMatch", "testdata/standalone_test.go")
	tg.grepBoth(noMatchesPattern, "go test did not say [no tests to run]")
}

// TestMatchesNoBenchmarksIsOK: -bench matching nothing is not an error and
// must not print [no tests to run].
func TestMatchesNoBenchmarksIsOK(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.run("test", "-run", "^$", "-bench", "ThisWillNotMatch", "testdata/standalone_benchmark_test.go")
	tg.grepBothNot(noMatchesPattern, "go test did say [no tests to run]")
	tg.grepBoth(okPattern, "go test did not say ok")
}

// TestMatchesOnlyExampleIsOK: matching only an Example counts as running
// something.
func TestMatchesOnlyExampleIsOK(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.run("test", "-run", "Example", "testdata/example1_test.go")
	tg.grepBothNot(noMatchesPattern, "go test did say [no tests to run]")
	tg.grepBoth(okPattern, "go test did not say ok")
}

// TestMatchesOnlyBenchmarkIsOK: matching only benchmarks counts as running
// something.
func TestMatchesOnlyBenchmarkIsOK(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.run("test", "-run", "^$", "-bench", ".", "testdata/standalone_benchmark_test.go")
	tg.grepBothNot(noMatchesPattern, "go test did say [no tests to run]")
	tg.grepBoth(okPattern, "go test did not say ok")
}

// TestBenchmarkLabelsOutsideGOPATH: benchmark runs print goos/goarch labels
// but no pkg: line for file-mode tests outside GOPATH.
func TestBenchmarkLabelsOutsideGOPATH(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.run("test", "-run", "^$", "-bench", ".", "testdata/standalone_benchmark_test.go")
	tg.grepStdout(`(?m)^goos: `+runtime.GOOS, "go test did not print goos")
	tg.grepStdout(`(?m)^goarch: `+runtime.GOARCH, "go test did not print goarch")
	tg.grepBothNot(`(?m)^pkg:`, "go test did say pkg:")
}

// TestMatchesOnlyTestIsOK: a plain matching -run pattern prints ok.
func TestMatchesOnlyTestIsOK(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	// TODO: tg.parallel()
	tg.run("test", "-run", "Test", "testdata/standalone_test.go")
	tg.grepBothNot(noMatchesPattern, "go test did say [no tests to run]")
	tg.grepBoth(okPattern, "go test did not say ok")
}

// TestMatchesNoTestsWithSubtests: no top-level match with subtests present
// still reports [no tests to run].
func TestMatchesNoTestsWithSubtests(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "-run", "ThisWillNotMatch", "testdata/standalone_sub_test.go")
	tg.grepBoth(noMatchesPattern, "go test did not say [no tests to run]")
}

// TestMatchesNoSubtestsMatch: a -run pattern whose subtest part matches
// nothing reports [no tests to run].
func TestMatchesNoSubtestsMatch(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "-run", "Test/ThisWillNotMatch", "testdata/standalone_sub_test.go")
	tg.grepBoth(noMatchesPattern, "go test did not say [no tests to run]")
}

// TestMatchesNoSubtestsDoesNotOverrideFailure: a failing parent test must
// report FAIL, not [no tests to run], even if the subtest pattern matches
// nothing.
func TestMatchesNoSubtestsDoesNotOverrideFailure(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.runFail("test", "-run", "TestThatFails/ThisWillNotMatch", "testdata/standalone_fail_sub_test.go")
	tg.grepBothNot(noMatchesPattern, "go test did say [no tests to run]")
	tg.grepBoth("FAIL", "go test did not say FAIL")
}

// TestMatchesOnlySubtestIsOK: matching a subtest counts as running something.
func TestMatchesOnlySubtestIsOK(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "-run", "Test/Sub", "testdata/standalone_sub_test.go")
	tg.grepBothNot(noMatchesPattern, "go test did say [no tests to run]")
	tg.grepBoth(okPattern, "go test did not say ok")
}

// TestMatchesNoSubtestsParallel: the no-match report also works for
// parallel nested subtests.
func TestMatchesNoSubtestsParallel(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "-run", "Test/Sub/ThisWillNotMatch", "testdata/standalone_parallel_sub_test.go")
	tg.grepBoth(noMatchesPattern, "go test did not say [no tests to run]")
}

// TestMatchesOnlySubtestParallelIsOK: matching a parallel nested subtest
// counts as running something.
func TestMatchesOnlySubtestParallelIsOK(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "-run", "Test/Sub/Nested", "testdata/standalone_parallel_sub_test.go")
	tg.grepBothNot(noMatchesPattern, "go test did say [no tests to run]")
	tg.grepBoth(okPattern, "go test did not say ok")
}

// Issue 18845
// A benchmark must be able to finish within a sub-second -timeout.
func TestBenchTimeout(t *testing.T) {
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.run("test", "-bench", ".", "-timeout", "750ms", "testdata/timeoutbench_test.go")
}
// Issue 19394
// When a test times out, any requested CPU/memory profiles must still be
// written (non-empty) before the process dies.
func TestWriteProfilesOnTimeout(t *testing.T) {
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.tempDir("profiling")
	tg.tempFile("profiling/timeouttest_test.go", `package timeouttest_test
import "testing"
import "time"
func TestSleep(t *testing.T) { time.Sleep(time.Second) }`)
	tg.cd(tg.path("profiling"))
	// The 1ms timeout guarantees the sleeping test is killed.
	tg.runFail(
		"test",
		"-cpuprofile", tg.path("profiling/cpu.pprof"), "-memprofile", tg.path("profiling/mem.pprof"),
		"-timeout", "1ms")
	tg.mustHaveContent(tg.path("profiling/cpu.pprof"))
	tg.mustHaveContent(tg.path("profiling/mem.pprof"))
}

// TestLinkXImportPathEscape builds with -ldflags -X on a variable in an
// import path that needs escaping and runs the binary to confirm the value
// was injected (golang.org/issue/16710).
func TestLinkXImportPathEscape(t *testing.T) {
	// golang.org/issue/16710
	skipIfGccgo(t, "gccgo does not support -ldflags -X")
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	exe := tg.path("linkx" + exeSuffix)
	tg.creatingTemp(exe)
	tg.run("build", "-o", exe, "-ldflags", "-X=my.pkg.Text=linkXworked", "my.pkg/main")
	out, err := exec.Command(exe).CombinedOutput()
	if err != nil {
		tg.t.Fatal(err)
	}
	if string(out) != "linkXworked\n" {
		tg.t.Log(string(out))
		tg.t.Fatal(`incorrect output: expected "linkXworked\n"`)
	}
}

// Issue 18044.
// The go command must work with LD_BIND_NOW=1 in the environment.
func TestLdBindNow(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.setenv("LD_BIND_NOW", "1")
	tg.run("help")
}

// Issue 18225.
// This is really a cmd/asm issue but this is a convenient place to test it.
// Assembling two files that define identically named local symbols must not
// collide when compiled concurrently.
func TestConcurrentAsm(t *testing.T) {
	skipIfGccgo(t, "gccgo does not use cmd/asm")
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	asm := `DATA ·constants<>+0x0(SB)/8,$0
GLOBL ·constants<>(SB),8,$8
`
	tg.tempFile("go/src/p/a.s", asm)
	tg.tempFile("go/src/p/b.s", asm)
	tg.tempFile("go/src/p/p.go", `package p`)
	tg.setenv("GOPATH", tg.path("go"))
	tg.run("build", "p")
}
// Issue 18778.
// The ./... wildcard must work in a directory outside GOPATH, matching both
// the current directory's package and its subdirectories.
func TestDotDotDotOutsideGOPATH(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.tempFile("pkgs/a.go", `package x`)
	tg.tempFile("pkgs/a_test.go", `package x_test
import "testing"
func TestX(t *testing.T) {}`)
	tg.tempFile("pkgs/a/a.go", `package a`)
	tg.tempFile("pkgs/a/a_test.go", `package a_test
import "testing"
func TestA(t *testing.T) {}`)
	tg.cd(tg.path("pkgs"))
	tg.run("build", "./...")
	tg.run("test", "./...")
	tg.run("list", "./...")
	tg.grepStdout("pkgs$", "expected package not listed")
	tg.grepStdout("pkgs/a", "expected package not listed")
}

// Issue 18975.
// #cgo FFLAGS directives must be passed to the Fortran compiler.
func TestFFLAGS(t *testing.T) {
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("p/src/p/main.go", `package main
// #cgo FFLAGS: -no-such-fortran-flag
import "C"
func main() {}
`)
	tg.tempFile("p/src/p/a.f", `! comment`)
	tg.setenv("GOPATH", tg.path("p"))
	// This should normally fail because we are passing an unknown flag,
	// but issue #19080 points to Fortran compilers that succeed anyhow.
	// To work either way we call doRun directly rather than run or runFail.
	tg.doRun([]string{"build", "-x", "p"})
	tg.grepStderr("no-such-fortran-flag", `missing expected "-no-such-fortran-flag"`)
}

// Issue 19198.
// This is really a cmd/link issue but this is a convenient place to test it.
// Two packages defining the same file-local assembly symbol must link into
// one binary without a duplicate-symbol error.
func TestDuplicateGlobalAsmSymbols(t *testing.T) {
	skipIfGccgo(t, "gccgo does not use cmd/asm")
	tooSlow(t)
	if runtime.GOARCH != "386" && runtime.GOARCH != "amd64" {
		t.Skipf("skipping test on %s", runtime.GOARCH)
	}
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	asm := `
#include "textflag.h"

DATA sym<>+0x0(SB)/8,$0
GLOBL sym<>(SB),(NOPTR+RODATA),$8

TEXT ·Data(SB),NOSPLIT,$0
MOVB sym<>(SB), AX
MOVB AX, ret+0(FP)
RET
`
	tg.tempFile("go/src/a/a.s", asm)
	tg.tempFile("go/src/a/a.go", `package a; func Data() uint8`)
	tg.tempFile("go/src/b/b.s", asm)
	tg.tempFile("go/src/b/b.go", `package b; func Data() uint8`)
	tg.tempFile("go/src/p/p.go", `
package main
import "a"
import "b"
import "C"
func main() {
_ = a.Data() + b.Data()
}
`)
	tg.setenv("GOPATH", tg.path("go"))
	exe := tg.path("p.exe")
	tg.creatingTemp(exe)
	tg.run("build", "-o", exe, "p")
}

// TestBuildTagsNoComma verifies that -tags accepts a space-separated list
// and rejects a list containing commas.
func TestBuildTagsNoComma(t *testing.T) {
	skipIfGccgo(t, "gccgo has no standard packages")
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("go"))
	tg.run("build", "-tags", "tag1 tag2", "math")
	tg.runFail("build", "-tags", "tag1,tag2 tag3", "math")
	tg.grepBoth("space-separated list contains comma", "-tags with a comma-separated list didn't error")
}
func copyFile(src, dst string, perm os.FileMode) error {
sf, err := os.Open(src)
if err != nil {
return err
}
defer sf.Close()
df, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
_, err = io.Copy(df, sf)
err2 := df.Close()
if err != nil {
return err
}
return err2
}
// TestExecutableGOROOT verifies that the cmd/go binary itself uses
// os.Executable (when available) to locate GOROOT.
func TestExecutableGOROOT(t *testing.T) {
	skipIfGccgo(t, "gccgo has no GOROOT")

	// Note: Must not call tg methods inside subtests: tg is attached to outer t.
	tg := testgo(t)
	tg.unsetenv("GOROOT")
	defer tg.cleanup()

	// check runs `exe env GOROOT` and compares the symlink-resolved result
	// against want (case-insensitively, for case-preserving filesystems).
	check := func(t *testing.T, exe, want string) {
		cmd := exec.Command(exe, "env", "GOROOT")
		cmd.Env = tg.env
		out, err := cmd.CombinedOutput()
		if err != nil {
			t.Fatalf("%s env GOROOT: %v, %s", exe, err, out)
		}
		goroot, err := filepath.EvalSymlinks(strings.TrimSpace(string(out)))
		if err != nil {
			t.Fatal(err)
		}
		want, err = filepath.EvalSymlinks(want)
		if err != nil {
			t.Fatal(err)
		}
		if !strings.EqualFold(goroot, want) {
			t.Errorf("go env GOROOT:\nhave %s\nwant %s", goroot, want)
		} else {
			t.Logf("go env GOROOT: %s", goroot)
		}
	}

	// Copy the go tool into a bare directory with no surrounding tree.
	tg.makeTempdir()
	tg.tempDir("new/bin")
	newGoTool := tg.path("new/bin/go" + exeSuffix)
	tg.must(copyFile(tg.goTool(), newGoTool, 0775))
	newRoot := tg.path("new")

	t.Run("RelocatedExe", func(t *testing.T) {
		// Should fall back to default location in binary,
		// which is the GOROOT we used when building testgo.exe.
		check(t, newGoTool, testGOROOT)
	})

	// If the binary is sitting in a bin dir next to ../pkg/tool, that counts as a GOROOT,
	// so it should find the new tree.
	tg.tempDir("new/pkg/tool")
	t.Run("RelocatedTree", func(t *testing.T) {
		check(t, newGoTool, newRoot)
	})

	tg.tempDir("other/bin")
	symGoTool := tg.path("other/bin/go" + exeSuffix)

	// Symlink into go tree should still find go tree.
	t.Run("SymlinkedExe", func(t *testing.T) {
		testenv.MustHaveSymlink(t)
		if err := os.Symlink(newGoTool, symGoTool); err != nil {
			t.Fatal(err)
		}
		check(t, symGoTool, newRoot)
	})

	tg.must(robustio.RemoveAll(tg.path("new/pkg")))

	// Binaries built in the new tree should report the
	// new tree when they call runtime.GOROOT.
	t.Run("RuntimeGoroot", func(t *testing.T) {
		// Build a working GOROOT the easy way, with symlinks.
		testenv.MustHaveSymlink(t)
		if err := os.Symlink(filepath.Join(testGOROOT, "src"), tg.path("new/src")); err != nil {
			t.Fatal(err)
		}
		if err := os.Symlink(filepath.Join(testGOROOT, "pkg"), tg.path("new/pkg")); err != nil {
			t.Fatal(err)
		}
		cmd := exec.Command(newGoTool, "run", "testdata/print_goroot.go")
		cmd.Env = tg.env
		cmd.Stderr = os.Stderr
		out, err := cmd.Output()
		if err != nil {
			t.Fatalf("%s run testdata/print_goroot.go: %v, %s", newGoTool, err, out)
		}
		goroot, err := filepath.EvalSymlinks(strings.TrimSpace(string(out)))
		if err != nil {
			t.Fatal(err)
		}
		want, err := filepath.EvalSymlinks(tg.path("new"))
		if err != nil {
			t.Fatal(err)
		}
		if !strings.EqualFold(goroot, want) {
			t.Errorf("go run testdata/print_goroot.go:\nhave %s\nwant %s", goroot, want)
		} else {
			t.Logf("go run testdata/print_goroot.go: %s", goroot)
		}
	})
}
// TestNeedVersion checks that the go command refuses to proceed when the
// compiler reports a version that does not match the go tool's own.
func TestNeedVersion(t *testing.T) {
	skipIfGccgo(t, "gccgo does not use cmd/compile")
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	// Pretend the go tool has a version different from the toolchain.
	tg.setenv("TESTGO_VERSION", "go1.testgo")
	tg.tempFile("goversion.go", `package main; func main() {}`)
	tg.runFail("run", tg.path("goversion.go"))
	tg.grepStderr("compile", "does not match go tool version")
}
// TestCgoFlagContainsSpace checks that cgo CFLAGS/LDFLAGS values
// containing spaces survive the build intact: they must appear quoted in
// the -x command log, exactly once each.
func TestCgoFlagContainsSpace(t *testing.T) {
	tooSlow(t)
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.cd(tg.path("."))
	tg.tempFile("main.go", `package main
// #cgo CFLAGS: -I"c flags"
// #cgo LDFLAGS: -L"ld flags"
import "C"
func main() {}
`)
	tg.run("run", "-x", "main.go")
	tg.grepStderr(`"-I[^"]+c flags"`, "did not find quoted c flags")
	tg.grepStderrNot(`"-I[^"]+c flags".*"-I[^"]+c flags"`, "found too many quoted c flags")
	tg.grepStderr(`"-L[^"]+ld flags"`, "did not find quoted ld flags")
	// Fix: this pattern previously searched for `-L...c flags`, which can
	// never match an -L flag (those carry "ld flags"), so the duplicate
	// check was vacuous. Search for the ld flags value instead.
	tg.grepStderrNot(`"-L[^"]+ld flags".*"-L[^"]+ld flags"`, "found too many quoted ld flags")
}
// Issue 9737: verify that GOARM and GO386 affect the computed build ID.
func TestBuildIDContainsArchModeEnv(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	// tg is shared between the closure below and the subtests; the
	// subtests must run sequentially for this to be safe.
	var tg *testgoData
	// testWith installs mycmd with `before` applied, then applies `after`
	// and asserts that the installed binary is now considered stale.
	testWith := func(before, after func()) func(*testing.T) {
		return func(t *testing.T) {
			tg = testgo(t)
			defer tg.cleanup()
			tg.tempFile("src/mycmd/x.go", `package main
func main() {}`)
			tg.setenv("GOPATH", tg.path("."))
			tg.cd(tg.path("src/mycmd"))
			tg.setenv("GOOS", "linux")
			before()
			tg.run("install", "mycmd")
			after()
			tg.wantStale("mycmd", "stale dependency", "should be stale after environment variable change")
		}
	}
	t.Run("386", testWith(func() {
		tg.setenv("GOARCH", "386")
		tg.setenv("GO386", "387")
	}, func() {
		tg.setenv("GO386", "sse2")
	}))
	t.Run("arm", testWith(func() {
		tg.setenv("GOARCH", "arm")
		tg.setenv("GOARM", "5")
	}, func() {
		tg.setenv("GOARM", "7")
	}))
}
// TestListTests checks that `go test -list=<pattern>` reports the names
// of matching tests, benchmarks, and examples without running them.
func TestListTests(t *testing.T) {
	tooSlow(t)
	var tg *testgoData
	check := func(pattern, want string) func(*testing.T) {
		return func(t *testing.T) {
			tg = testgo(t)
			defer tg.cleanup()
			tg.run("test", "./testdata/src/testlist/...", fmt.Sprintf("-list=%s", pattern))
			tg.grepStdout(want, fmt.Sprintf("-test.list=%s returned %q, expected %s", pattern, tg.getStdout(), want))
		}
	}
	t.Run("Test", check("Test", "TestSimple"))
	t.Run("Bench", check("Benchmark", "BenchmarkSimple"))
	t.Run("Example1", check("Example", "ExampleSimple"))
	t.Run("Example2", check("Example", "ExampleWithEmptyOutput"))
}
// TestBuildmodePIE builds a small program with -buildmode=pie and checks
// both the binary's object-file type and that it still runs correctly.
func TestBuildmodePIE(t *testing.T) {
	if testing.Short() && testenv.Builder() == "" {
		t.Skipf("skipping in -short mode on non-builder")
	}
	platform := fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)
	switch platform {
	case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x",
		"android/amd64", "android/arm", "android/arm64", "android/386",
		"freebsd/amd64":
	case "darwin/amd64":
	default:
		t.Skipf("skipping test because buildmode=pie is not supported on %s", platform)
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("main.go", `package main; func main() { print("hello") }`)
	src := tg.path("main.go")
	obj := tg.path("main")
	tg.run("build", "-buildmode=pie", "-o", obj, src)
	// Inspect the produced binary: a PIE must be a shared object (ELF
	// ET_DYN) on ELF platforms, or carry DyldLink+PIE flags in Mach-O.
	switch runtime.GOOS {
	case "linux", "android", "freebsd":
		f, err := elf.Open(obj)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		if f.Type != elf.ET_DYN {
			t.Errorf("PIE type must be ET_DYN, but %s", f.Type)
		}
	case "darwin":
		f, err := macho.Open(obj)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		if f.Flags&macho.FlagDyldLink == 0 {
			t.Error("PIE must have DyldLink flag, but not")
		}
		if f.Flags&macho.FlagPIE == 0 {
			t.Error("PIE must have PIE flag, but not")
		}
	default:
		// The platform switch above only admits ELF and darwin targets.
		panic("unreachable")
	}
	// The PIE binary must still execute and produce its output.
	out, err := exec.Command(obj).CombinedOutput()
	if err != nil {
		t.Fatal(err)
	}
	if string(out) != "hello" {
		t.Errorf("got %q; want %q", out, "hello")
	}
}
// TestExecBuildX checks that the commands printed by `go build -x` form a
// replayable shell script: capturing the -x output, running it through
// bash, and executing the rebuilt binary must reproduce the original
// program's output.
func TestExecBuildX(t *testing.T) {
	tooSlow(t)
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	testenv.MustHaveExecPath(t, "/usr/bin/env")
	testenv.MustHaveExecPath(t, "bash")
	tg := testgo(t)
	defer tg.cleanup()
	tg.tempDir("cache")
	tg.setenv("GOCACHE", tg.path("cache"))
	// Before building our test main.go, ensure that an up-to-date copy of
	// runtime/cgo is present in the cache. If it isn't, the 'go build' step below
	// will fail with "can't open import". See golang.org/issue/29004.
	tg.run("build", "runtime/cgo")
	tg.tempFile("main.go", `package main; import "C"; func main() { print("hello") }`)
	src := tg.path("main.go")
	obj := tg.path("main")
	tg.run("build", "-x", "-o", obj, src)
	// Save the -x command log as a shell script; `set -e` makes any
	// failing command abort the replay.
	sh := tg.path("test.sh")
	cmds := tg.getStderr()
	err := ioutil.WriteFile(sh, []byte("set -e\n"+cmds), 0666)
	if err != nil {
		t.Fatal(err)
	}
	out, err := exec.Command(obj).CombinedOutput()
	if err != nil {
		t.Fatal(err)
	}
	if string(out) != "hello" {
		t.Fatalf("got %q; want %q", out, "hello")
	}
	// Remove the binary so the replayed script has to rebuild it.
	err = os.Remove(obj)
	if err != nil {
		t.Fatal(err)
	}
	out, err = exec.Command("/usr/bin/env", "bash", "-x", sh).CombinedOutput()
	if err != nil {
		// Fix: the message previously claimed "/bin/sh" even though the
		// script is run via `/usr/bin/env bash -x`.
		t.Fatalf("/usr/bin/env bash -x %s: %v\n%s", sh, err, out)
	}
	t.Logf("shell output:\n%s", out)
	out, err = exec.Command(obj).CombinedOutput()
	if err != nil {
		t.Fatal(err)
	}
	if string(out) != "hello" {
		t.Fatalf("got %q; want %q", out, "hello")
	}
	// Clean up the work directory recorded in the -x output.
	matches := regexp.MustCompile(`^WORK=(.*)\n`).FindStringSubmatch(cmds)
	if len(matches) == 0 {
		t.Fatal("no WORK directory")
	}
	tg.must(robustio.RemoveAll(matches[1]))
}
// TestParallelNumber checks that `go test -parallel N` rejects N < 1.
func TestParallelNumber(t *testing.T) {
	tooSlow(t)
	for _, bad := range []string{"-1", "0"} {
		t.Run(bad, func(t *testing.T) {
			tg := testgo(t)
			defer tg.cleanup()
			tg.runFail("test", "-parallel", bad, "testdata/standalone_parallel_sub_test.go")
			tg.grepBoth("-parallel can only be given", "go test -parallel with N<1 did not error")
		})
	}
}
// TestWrongGOOSErrorBeforeLoadError checks that an invalid GOOS value
// ("windwos" is a deliberate typo) is reported as an unsupported
// GOOS/GOARCH pair rather than as a package-load error.
func TestWrongGOOSErrorBeforeLoadError(t *testing.T) {
	skipIfGccgo(t, "gccgo assumes cross-compilation is always possible")
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.setenv("GOOS", "windwos")
	tg.runFail("build", "exclude")
	tg.grepStderr("unsupported GOOS/GOARCH pair", "GOOS=windwos go build exclude did not report 'unsupported GOOS/GOARCH pair'")
}
// TestUpxCompression checks that a Go binary still runs correctly after
// being compressed with upx (requires upx >= 3.94 on linux/amd64 or 386).
func TestUpxCompression(t *testing.T) {
	if runtime.GOOS != "linux" ||
		(runtime.GOARCH != "amd64" && runtime.GOARCH != "386") {
		t.Skipf("skipping upx test on %s/%s", runtime.GOOS, runtime.GOARCH)
	}
	testenv.MustHaveExecPath(t, "upx")
	out, err := exec.Command("upx", "--version").CombinedOutput()
	if err != nil {
		t.Fatalf("upx --version failed: %v", err)
	}
	// upx --version prints `upx <version>` in the first line of output:
	//   upx 3.94
	//   [...]
	re := regexp.MustCompile(`([[:digit:]]+)\.([[:digit:]]+)`)
	upxVersion := re.FindStringSubmatch(string(out))
	if len(upxVersion) != 3 {
		t.Fatalf("bad upx version string: %s", upxVersion)
	}
	major, err1 := strconv.Atoi(upxVersion[1])
	minor, err2 := strconv.Atoi(upxVersion[2])
	if err1 != nil || err2 != nil {
		t.Fatalf("bad upx version string: %s", upxVersion[0])
	}
	// Anything below 3.94 is known not to work with go binaries
	if (major < 3) || (major == 3 && minor < 94) {
		t.Skipf("skipping because upx version %v.%v is too old", major, minor)
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("main.go", `package main; import "fmt"; func main() { fmt.Print("hello upx") }`)
	src := tg.path("main.go")
	obj := tg.path("main")
	tg.run("build", "-o", obj, src)
	// Compress the binary in place, then run it and check its output.
	out, err = exec.Command("upx", obj).CombinedOutput()
	if err != nil {
		t.Logf("executing upx\n%s\n", out)
		t.Fatalf("upx failed with %v", err)
	}
	out, err = exec.Command(obj).CombinedOutput()
	if err != nil {
		t.Logf("%s", out)
		t.Fatalf("running compressed go binary failed with error %s", err)
	}
	if string(out) != "hello upx" {
		t.Fatalf("bad output from compressed go binary:\ngot %q; want %q", out, "hello upx")
	}
}
// TestCacheListStale checks `go list -f {{.Stale}}` staleness reporting:
// installed packages (p, m) are fresh, while q — a dependency that was
// only built into the cache, never installed — is stale.
func TestCacheListStale(t *testing.T) {
	tooSlow(t)
	if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
		t.Skip("GODEBUG gocacheverify")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOCACHE", tg.path("cache"))
	tg.tempFile("gopath/src/p/p.go", "package p; import _ \"q\"; func F(){}\n")
	tg.tempFile("gopath/src/q/q.go", "package q; func F(){}\n")
	tg.tempFile("gopath/src/m/m.go", "package main; import _ \"q\"; func main(){}\n")
	tg.setenv("GOPATH", tg.path("gopath"))
	tg.run("install", "p", "m")
	tg.run("list", "-f={{.ImportPath}} {{.Stale}}", "m", "q", "p")
	tg.grepStdout("^m false", "m should not be stale")
	tg.grepStdout("^q true", "q should be stale")
	tg.grepStdout("^p false", "p should not be stale")
}
// TestCacheCoverage checks that `go test -cover` works with a fresh
// build cache, including when a covered package (strings) appears in a
// second run alongside another package.
func TestCacheCoverage(t *testing.T) {
	tooSlow(t)
	if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
		t.Skip("GODEBUG gocacheverify")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.makeTempdir()
	tg.setenv("GOCACHE", tg.path("c1"))
	tg.run("test", "-cover", "-short", "strings")
	tg.run("test", "-cover", "-short", "math", "strings")
}
// TestIssue22588 checks that stderr output produced by a -toolexec
// wrapper (/usr/bin/time) does not make packages look stale.
func TestIssue22588(t *testing.T) {
	// Don't get confused by stderr coming from tools.
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	if _, err := os.Stat("/usr/bin/time"); err != nil {
		t.Skip(err)
	}
	tg.run("list", "-f={{.Stale}}", "runtime")
	tg.run("list", "-toolexec=/usr/bin/time", "-f={{.Stale}}", "runtime")
	tg.grepStdout("false", "incorrectly reported runtime as stale")
}
// TestIssue22531 checks that reinstalling a modified main package
// actually relinks the binary: the link action ID must incorporate the
// full build ID so `go install` is not a no-op after a source change.
func TestIssue22531(t *testing.T) {
	tooSlow(t)
	if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
		t.Skip("GODEBUG gocacheverify")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.tempdir)
	tg.setenv("GOCACHE", tg.path("cache"))
	tg.tempFile("src/m/main.go", "package main /* c1 */; func main() {}\n")
	tg.run("install", "-x", "m")
	tg.run("list", "-f", "{{.Stale}}", "m")
	tg.grepStdout("false", "reported m as stale after install")
	tg.run("tool", "buildid", tg.path("bin/m"+exeSuffix))
	// The link action ID did not include the full main build ID,
	// even though the full main build ID is written into the
	// eventual binary. That caused the following install to
	// be a no-op, thinking the gofmt binary was up-to-date,
	// even though .Stale could see it was not.
	tg.tempFile("src/m/main.go", "package main /* c2 */; func main() {}\n")
	tg.run("install", "-x", "m")
	tg.run("list", "-f", "{{.Stale}}", "m")
	tg.grepStdout("false", "reported m as stale after reinstall")
	tg.run("tool", "buildid", tg.path("bin/m"+exeSuffix))
}
// TestIssue22596 checks that an archive installed under one GOPATH is
// detected as stale (build ID mismatch) when copied into a different
// GOPATH, and becomes fresh again after reinstalling there.
func TestIssue22596(t *testing.T) {
	tooSlow(t)
	if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
		t.Skip("GODEBUG gocacheverify")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOCACHE", tg.path("cache"))
	// Identical package source in two separate GOPATH trees.
	tg.tempFile("gopath1/src/p/p.go", "package p; func F(){}\n")
	tg.tempFile("gopath2/src/p/p.go", "package p; func F(){}\n")
	tg.setenv("GOPATH", tg.path("gopath1"))
	tg.run("list", "-f={{.Target}}", "p")
	firstTarget := strings.TrimSpace(tg.getStdout())
	tg.run("install", "p")
	tg.wantNotStale("p", "", "p stale after install")
	tg.setenv("GOPATH", tg.path("gopath2"))
	tg.run("list", "-f={{.Target}}", "p")
	secondTarget := strings.TrimSpace(tg.getStdout())
	tg.must(os.MkdirAll(filepath.Dir(secondTarget), 0777))
	// Copying gopath1's archive into gopath2 must not fool staleness.
	tg.must(copyFile(firstTarget, secondTarget, 0666))
	tg.wantStale("p", "build ID mismatch", "p not stale after copy from gopath1")
	tg.run("install", "p")
	tg.wantNotStale("p", "", "p stale after install2")
}
// TestTestCache exercises the go test result cache: repeated runs must
// report "(cached)", and source edits must invalidate exactly the
// affected compile/link/run steps and nothing more.
func TestTestCache(t *testing.T) {
	tooSlow(t)
	if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
		t.Skip("GODEBUG gocacheverify")
	}
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.tempdir)
	tg.setenv("GOCACHE", tg.path("cache"))
	if runtime.Compiler != "gccgo" {
		// timeout here should not affect result being cached
		// or being retrieved later.
		tg.run("test", "-x", "-timeout=10s", "errors")
		tg.grepStderr(`[\\/]compile|gccgo`, "did not run compiler")
		tg.grepStderr(`[\\/]link|gccgo`, "did not run linker")
		tg.grepStderr(`errors\.test`, "did not run test")
		tg.run("test", "-x", "errors")
		tg.grepStdout(`ok \terrors\t\(cached\)`, "did not report cached result")
		tg.grepStderrNot(`[\\/]compile|gccgo`, "incorrectly ran compiler")
		tg.grepStderrNot(`[\\/]link|gccgo`, "incorrectly ran linker")
		tg.grepStderrNot(`errors\.test`, "incorrectly ran test")
		tg.grepStderrNot("DO NOT USE", "poisoned action status leaked")
		// Even very low timeouts do not disqualify cached entries.
		tg.run("test", "-timeout=1ns", "-x", "errors")
		tg.grepStderrNot(`errors\.test`, "incorrectly ran test")
		tg.run("clean", "-testcache")
		tg.run("test", "-x", "errors")
		tg.grepStderr(`errors\.test`, "did not run test")
	}
	// The -p=1 in the commands below just makes the -x output easier to read.
	t.Log("\n\nINITIAL\n\n")
	// t1 has no imports; t2 and t3 import p1; t4 imports p2 (which imports p1).
	tg.tempFile("src/p1/p1.go", "package p1\nvar X = 1\n")
	tg.tempFile("src/p2/p2.go", "package p2\nimport _ \"p1\"\nvar X = 1\n")
	tg.tempFile("src/t/t1/t1_test.go", "package t\nimport \"testing\"\nfunc Test1(*testing.T) {}\n")
	tg.tempFile("src/t/t2/t2_test.go", "package t\nimport _ \"p1\"\nimport \"testing\"\nfunc Test2(*testing.T) {}\n")
	tg.tempFile("src/t/t3/t3_test.go", "package t\nimport \"p1\"\nimport \"testing\"\nfunc Test3(t *testing.T) {t.Log(p1.X)}\n")
	tg.tempFile("src/t/t4/t4_test.go", "package t\nimport \"p2\"\nimport \"testing\"\nfunc Test4(t *testing.T) {t.Log(p2.X)}")
	tg.run("test", "-x", "-v", "-short", "t/...")
	t.Log("\n\nREPEAT\n\n")
	// Nothing changed: all four test results must come from the cache.
	tg.run("test", "-x", "-v", "-short", "t/...")
	tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t1")
	tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t2")
	tg.grepStdout(`ok \tt/t3\t\(cached\)`, "did not cache t3")
	tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t4")
	tg.grepStderrNot(`[\\/](compile|gccgo) `, "incorrectly ran compiler")
	tg.grepStderrNot(`[\\/](link|gccgo) `, "incorrectly ran linker")
	tg.grepStderrNot(`p[0-9]\.test`, "incorrectly ran test")
	t.Log("\n\nCOMMENT\n\n")
	// Changing the program text without affecting the compiled package
	// should result in the package being rebuilt but nothing more.
	tg.tempFile("src/p1/p1.go", "package p1\nvar X = 01\n")
	tg.run("test", "-p=1", "-x", "-v", "-short", "t/...")
	tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t1")
	tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t2")
	tg.grepStdout(`ok \tt/t3\t\(cached\)`, "did not cache t3")
	tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t4")
	tg.grepStderrNot(`([\\/](compile|gccgo) ).*t[0-9]_test\.go`, "incorrectly ran compiler")
	tg.grepStderrNot(`[\\/](link|gccgo) `, "incorrectly ran linker")
	tg.grepStderrNot(`t[0-9]\.test.*test\.short`, "incorrectly ran test")
	t.Log("\n\nCHANGE\n\n")
	// Changing the actual package should have limited effects.
	tg.tempFile("src/p1/p1.go", "package p1\nvar X = 02\n")
	tg.run("test", "-p=1", "-x", "-v", "-short", "t/...")
	// p2 should have been rebuilt.
	tg.grepStderr(`([\\/]compile|gccgo).*p2.go`, "did not recompile p2")
	// t1 does not import anything, should not have been rebuilt.
	tg.grepStderrNot(`([\\/]compile|gccgo).*t1_test.go`, "incorrectly recompiled t1")
	tg.grepStderrNot(`([\\/]link|gccgo).*t1_test`, "incorrectly relinked t1_test")
	tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t/t1")
	// t2 imports p1 and must be rebuilt and relinked,
	// but the change should not have any effect on the test binary,
	// so the test should not have been rerun.
	tg.grepStderr(`([\\/]compile|gccgo).*t2_test.go`, "did not recompile t2")
	tg.grepStderr(`([\\/]link|gccgo).*t2\.test`, "did not relink t2_test")
	// This check does not currently work with gccgo, as garbage
	// collection of unused variables is not turned on by default.
	if runtime.Compiler != "gccgo" {
		tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t/t2")
	}
	// t3 imports p1, and changing X changes t3's test binary.
	tg.grepStderr(`([\\/]compile|gccgo).*t3_test.go`, "did not recompile t3")
	tg.grepStderr(`([\\/]link|gccgo).*t3\.test`, "did not relink t3_test")
	tg.grepStderr(`t3\.test.*-test.short`, "did not rerun t3_test")
	tg.grepStdoutNot(`ok \tt/t3\t\(cached\)`, "reported cached t3_test result")
	// t4 imports p2, but p2 did not change, so t4 should be relinked, not recompiled,
	// and not rerun.
	tg.grepStderrNot(`([\\/]compile|gccgo).*t4_test.go`, "incorrectly recompiled t4")
	tg.grepStderr(`([\\/]link|gccgo).*t4\.test`, "did not relink t4_test")
	// This check does not currently work with gccgo, as garbage
	// collection of unused variables is not turned on by default.
	if runtime.Compiler != "gccgo" {
		tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t/t4")
	}
}
// TestTestVet checks the vet integration in `go test`: vet diagnostics
// fail the test, -vet=off disables vet, and -vet=<check> selects checks.
func TestTestVet(t *testing.T) {
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("p1_test.go", `
package p
import "testing"
func Test(t *testing.T) {
t.Logf("%d") // oops
}
`)
	tg.runFail("test", tg.path("p1_test.go"))
	tg.grepStderr(`Logf format %d`, "did not diagnose bad Logf")
	tg.run("test", "-vet=off", tg.path("p1_test.go"))
	tg.grepStdout(`^ok`, "did not print test summary")
	tg.tempFile("p1.go", `
package p
import "fmt"
func F() {
fmt.Printf("%d") // oops
}
`)
	tg.runFail("test", tg.path("p1.go"))
	tg.grepStderr(`Printf format %d`, "did not diagnose bad Printf")
	// -vet=shift runs only the shift check, so the Printf mistake passes.
	tg.run("test", "-x", "-vet=shift", tg.path("p1.go"))
	tg.grepStderr(`[\\/]vet.*-shift`, "did not run vet with -shift")
	tg.grepStdout(`\[no test files\]`, "did not print test summary")
	tg.run("test", "-vet=off", tg.path("p1.go"))
	tg.grepStdout(`\[no test files\]`, "did not print test summary")
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.run("test", "vetcycle") // must not fail; #22890
	tg.runFail("test", "vetfail/...")
	tg.grepStderr(`Printf format %d`, "did not diagnose bad Printf")
	tg.grepStdout(`ok\s+vetfail/p2`, "did not run vetfail/p2")
	// Use -a so that we need to recompute the vet-specific export data for
	// vetfail/p1.
	tg.run("test", "-a", "vetfail/p2")
	tg.grepStderrNot(`invalid.*constraint`, "did diagnose bad build constraint in vetxonly mode")
}
// TestTestSkipVetAfterFailedBuild checks that vet is not run (and not
// mentioned in the output) when the test build itself fails to compile.
func TestTestSkipVetAfterFailedBuild(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	// Deliberately uncompilable: f returns a value but has no result type.
	tg.tempFile("x_test.go", `package x
func f() {
return 1
}
`)
	tg.runFail("test", tg.path("x_test.go"))
	tg.grepStderrNot(`vet`, "vet should be skipped after the failed build")
}
// TestTestVetRebuild checks that vet sees the test-augmented version of a
// package (with export_test.go methods) even when that package is also
// reached indirectly through another import.
func TestTestVetRebuild(t *testing.T) {
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	// golang.org/issue/23701.
	// b_test imports b with augmented method from export_test.go.
	// b_test also imports a, which imports b.
	// Must not accidentally see un-augmented b propagate through a to b_test.
	tg.tempFile("src/a/a.go", `package a
import "b"
type Type struct{}
func (*Type) M() b.T {return 0}
`)
	tg.tempFile("src/b/b.go", `package b
type T int
type I interface {M() T}
`)
	tg.tempFile("src/b/export_test.go", `package b
func (*T) Method() *T { return nil }
`)
	tg.tempFile("src/b/b_test.go", `package b_test
import (
"testing"
"a"
. "b"
)
func TestBroken(t *testing.T) {
x := new(T)
x.Method()
_ = new(a.Type)
}
`)
	tg.setenv("GOPATH", tg.path("."))
	tg.run("test", "b")
	tg.run("vet", "b")
}
// TestInstallDeps checks that `go install` installs only the named
// packages, while `go install -i` also installs their dependencies.
func TestInstallDeps(t *testing.T) {
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.tempdir)
	// Dependency chain: main1 -> p2 -> p1.
	tg.tempFile("src/p1/p1.go", "package p1\nvar X = 1\n")
	tg.tempFile("src/p2/p2.go", "package p2\nimport _ \"p1\"\n")
	tg.tempFile("src/main1/main.go", "package main\nimport _ \"p2\"\nfunc main() {}\n")
	tg.run("list", "-f={{.Target}}", "p1")
	p1 := strings.TrimSpace(tg.getStdout())
	tg.run("list", "-f={{.Target}}", "p2")
	p2 := strings.TrimSpace(tg.getStdout())
	tg.run("list", "-f={{.Target}}", "main1")
	main1 := strings.TrimSpace(tg.getStdout())
	tg.run("install", "main1")
	tg.mustExist(main1)
	tg.mustNotExist(p2)
	tg.mustNotExist(p1)
	tg.run("install", "p2")
	tg.mustExist(p2)
	tg.mustNotExist(p1)
	// don't let install -i overwrite runtime
	tg.wantNotStale("runtime", "", "must be non-stale before install -i")
	tg.run("install", "-i", "main1")
	tg.mustExist(p1)
	tg.must(os.Remove(p1))
	tg.run("install", "-i", "p2")
	tg.mustExist(p1)
}
// TestGoTestJSON checks `go test -json` event output (run/output/skip
// actions, cached results) and the standalone `go tool test2json` path.
func TestGoTestJSON(t *testing.T) {
	skipIfGccgo(t, "gccgo does not have standard packages")
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.makeTempdir()
	tg.setenv("GOCACHE", tg.tempdir)
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	// It would be nice to test that the output is interlaced
	// but it seems to be impossible to do that in a short test
	// that isn't also flaky. Just check that we get JSON output.
	tg.run("test", "-json", "-short", "-v", "errors", "empty/pkg", "skipper")
	tg.grepStdout(`"Package":"errors"`, "did not see JSON output")
	tg.grepStdout(`"Action":"run"`, "did not see JSON output")
	tg.grepStdout(`"Action":"output","Package":"empty/pkg","Output":".*no test files`, "did not see no test files print")
	tg.grepStdout(`"Action":"skip","Package":"empty/pkg"`, "did not see skip")
	tg.grepStdout(`"Action":"output","Package":"skipper","Test":"Test","Output":"--- SKIP:`, "did not see SKIP output")
	tg.grepStdout(`"Action":"skip","Package":"skipper","Test":"Test"`, "did not see skip result for Test")
	// Second run: result for errors should come from the cache.
	tg.run("test", "-json", "-short", "-v", "errors")
	tg.grepStdout(`"Action":"output","Package":"errors","Output":".*\(cached\)`, "did not see no cached output")
	tg.run("test", "-json", "-bench=NONE", "-short", "-v", "errors")
	tg.grepStdout(`"Package":"errors"`, "did not see JSON output")
	tg.grepStdout(`"Action":"run"`, "did not see JSON output")
	// Compile the test binary and pipe it through test2json directly.
	tg.run("test", "-o", tg.path("errors.test.exe"), "-c", "errors")
	tg.run("tool", "test2json", "-p", "errors", tg.path("errors.test.exe"), "-test.v", "-test.short")
	tg.grepStdout(`"Package":"errors"`, "did not see JSON output")
	tg.grepStdout(`"Action":"run"`, "did not see JSON output")
	tg.grepStdout(`\{"Action":"pass","Package":"errors"\}`, "did not see final pass")
}
// TestFailFast exercises `go test -failfast`, counting how many "FAIL - "
// lines appear for various mixes of serial, parallel, and subtest
// failures.
func TestFailFast(t *testing.T) {
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	cases := []struct {
		run      string
		failfast bool
		nfail    int
	}{
		{"TestFailingA", true, 1},
		{"TestFailing[AB]", true, 1},
		{"TestFailing[AB]", false, 2},
		// mix with non-failing tests:
		{"TestA|TestFailing[AB]", true, 1},
		{"TestA|TestFailing[AB]", false, 2},
		// mix with parallel tests:
		{"TestFailingB|TestParallelFailingA", true, 2},
		{"TestFailingB|TestParallelFailingA", false, 2},
		{"TestFailingB|TestParallelFailing[AB]", true, 3},
		{"TestFailingB|TestParallelFailing[AB]", false, 3},
		// mix with parallel sub-tests
		{"TestFailingB|TestParallelFailing[AB]|TestParallelFailingSubtestsA", true, 3},
		{"TestFailingB|TestParallelFailing[AB]|TestParallelFailingSubtestsA", false, 5},
		{"TestParallelFailingSubtestsA", true, 1},
		// only parallels:
		{"TestParallelFailing[AB]", false, 2},
		// non-parallel subtests:
		{"TestFailingSubtestsA", true, 1},
		{"TestFailingSubtestsA", false, 2},
		// fatal test
		{"TestFatal[CD]", true, 1},
		{"TestFatal[CD]", false, 2},
	}
	for _, tc := range cases {
		t.Run(tc.run, func(t *testing.T) {
			tg.runFail("test", "./testdata/src/failfast_test.go", "-run="+tc.run, "-failfast="+strconv.FormatBool(tc.failfast))
			got := strings.Count(tg.getStdout(), "FAIL - ")
			if got != tc.nfail {
				t.Errorf("go test -run=%s -failfast=%t printed %d FAILs, want %d", tc.run, tc.failfast, got, tc.nfail)
			}
		})
	}
}
// Issue 22986.
// TestImportPath checks that import paths containing characters such as
// '-' and '.' (here "a/p-1.0") build and test correctly.
func TestImportPath(t *testing.T) {
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("src/a/a.go", `
package main
import (
"log"
p "a/p-1.0"
)
func main() {
if !p.V {
log.Fatal("false")
}
}`)
	tg.tempFile("src/a/a_test.go", `
package main_test
import (
p "a/p-1.0"
"testing"
)
func TestV(t *testing.T) {
if !p.V {
t.Fatal("false")
}
}`)
	tg.tempFile("src/a/p-1.0/p.go", `
package p
var V = true
func init() {}
`)
	tg.setenv("GOPATH", tg.path("."))
	tg.run("build", "-o", tg.path("a.exe"), "a")
	tg.run("test", "a")
}
// TestBadCommandLines checks that file, directory, and import-path names
// beginning with '@' or '-' are rejected, so they cannot be smuggled
// into tool command lines as options.
func TestBadCommandLines(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("src/x/x.go", "package x\n")
	tg.setenv("GOPATH", tg.path("."))
	tg.run("build", "x")
	tg.tempFile("src/x/@y.go", "package x\n")
	tg.runFail("build", "x")
	tg.grepStderr("invalid input file name \"@y.go\"", "did not reject @y.go")
	tg.must(os.Remove(tg.path("src/x/@y.go")))
	tg.tempFile("src/x/-y.go", "package x\n")
	tg.runFail("build", "x")
	tg.grepStderr("invalid input file name \"-y.go\"", "did not reject -y.go")
	tg.must(os.Remove(tg.path("src/x/-y.go")))
	// '@' arguments are also rejected when passed through -gcflags.
	if runtime.Compiler == "gccgo" {
		tg.runFail("build", "-gccgoflags=all=@x", "x")
	} else {
		tg.runFail("build", "-gcflags=all=@x", "x")
	}
	tg.grepStderr("invalid command-line argument @x in command", "did not reject @x during exec")
	tg.tempFile("src/@x/x.go", "package x\n")
	tg.setenv("GOPATH", tg.path("."))
	tg.runFail("build", "@x")
	tg.grepStderr("invalid input directory name \"@x\"|cannot use path@version syntax", "did not reject @x directory")
	tg.tempFile("src/@x/y/y.go", "package y\n")
	tg.setenv("GOPATH", tg.path("."))
	tg.runFail("build", "@x/y")
	tg.grepStderr("invalid import path \"@x/y\"|cannot use path@version syntax", "did not reject @x/y import path")
	tg.tempFile("src/-x/x.go", "package x\n")
	tg.setenv("GOPATH", tg.path("."))
	tg.runFail("build", "--", "-x")
	tg.grepStderr("invalid input directory name \"-x\"", "did not reject -x directory")
	tg.tempFile("src/-x/y/y.go", "package y\n")
	tg.setenv("GOPATH", tg.path("."))
	tg.runFail("build", "--", "-x/y")
	tg.grepStderr("invalid import path \"-x/y\"", "did not reject -x/y import path")
}
// TestBadCgoDirectives checks that dangerous cgo inputs are rejected:
// //go:cgo_ldflag in hand-written code, disallowed #cgo flags such as
// -fplugin, and '@'-prefixed flags or pkg-config package names.
func TestBadCgoDirectives(t *testing.T) {
	if !canCgo {
		t.Skip("no cgo")
	}
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.tempFile("src/x/x.go", "package x\n")
	tg.setenv("GOPATH", tg.path("."))
	if runtime.Compiler == "gc" {
		tg.tempFile("src/x/x.go", `package x
//go:cgo_ldflag "-fplugin=foo.so"
import "C"
`)
		tg.runFail("build", "x")
		tg.grepStderr("//go:cgo_ldflag .* only allowed in cgo-generated code", "did not reject //go:cgo_ldflag directive")
	}
	tg.must(os.Remove(tg.path("src/x/x.go")))
	tg.runFail("build", "x")
	tg.grepStderr("no Go files", "did not report missing source code")
	tg.tempFile("src/x/_cgo_yy.go", `package x
//go:cgo_ldflag "-fplugin=foo.so"
import "C"
`)
	tg.runFail("build", "x")
	tg.grepStderr("no Go files", "did not report missing source code") // _* files are ignored...
	if runtime.Compiler == "gc" {
		tg.runFail("build", tg.path("src/x/_cgo_yy.go")) // ... but if forced, the comment is rejected
		// Actually, today there is a separate issue that _ files named
		// on the command line are ignored. Once that is fixed,
		// we want to see the cgo_ldflag error.
		tg.grepStderr("//go:cgo_ldflag only allowed in cgo-generated code|no Go files", "did not reject //go:cgo_ldflag directive")
	}
	tg.must(os.Remove(tg.path("src/x/_cgo_yy.go")))
	tg.tempFile("src/x/x.go", "package x\n")
	tg.tempFile("src/x/y.go", `package x
// #cgo CFLAGS: -fplugin=foo.so
import "C"
`)
	tg.runFail("build", "x")
	tg.grepStderr("invalid flag in #cgo CFLAGS: -fplugin=foo.so", "did not reject -fplugin")
	tg.tempFile("src/x/y.go", `package x
// #cgo CFLAGS: -Ibar -fplugin=foo.so
import "C"
`)
	tg.runFail("build", "x")
	tg.grepStderr("invalid flag in #cgo CFLAGS: -fplugin=foo.so", "did not reject -fplugin")
	tg.tempFile("src/x/y.go", `package x
// #cgo pkg-config: -foo
import "C"
`)
	tg.runFail("build", "x")
	tg.grepStderr("invalid pkg-config package name: -foo", "did not reject pkg-config: -foo")
	tg.tempFile("src/x/y.go", `package x
// #cgo pkg-config: @foo
import "C"
`)
	tg.runFail("build", "x")
	tg.grepStderr("invalid pkg-config package name: @foo", "did not reject pkg-config: -foo")
	tg.tempFile("src/x/y.go", `package x
// #cgo CFLAGS: @foo
import "C"
`)
	tg.runFail("build", "x")
	tg.grepStderr("invalid flag in #cgo CFLAGS: @foo", "did not reject @foo flag")
	tg.tempFile("src/x/y.go", `package x
// #cgo CFLAGS: -D
import "C"
`)
	tg.runFail("build", "x")
	tg.grepStderr("invalid flag in #cgo CFLAGS: -D without argument", "did not reject trailing -I flag")
	// Note that -I @foo is allowed because we rewrite it into -I /path/to/src/@foo
	// before the check is applied. There's no such rewrite for -D.
	tg.tempFile("src/x/y.go", `package x
// #cgo CFLAGS: -D @foo
import "C"
`)
	tg.runFail("build", "x")
	tg.grepStderr("invalid flag in #cgo CFLAGS: -D @foo", "did not reject -D @foo flag")
	tg.tempFile("src/x/y.go", `package x
// #cgo CFLAGS: -D@foo
import "C"
`)
	tg.runFail("build", "x")
	tg.grepStderr("invalid flag in #cgo CFLAGS: -D@foo", "did not reject -D@foo flag")
	// Flags from the CGO_CFLAGS environment variable are trusted and pass through.
	tg.setenv("CGO_CFLAGS", "-D@foo")
	tg.tempFile("src/x/y.go", `package x
import "C"
`)
	tg.run("build", "-n", "x")
	tg.grepStderr("-D@foo", "did not find -D@foo in commands")
}
// TestTwoPkgConfigs checks how #cgo pkg-config directives from two files
// in the same package are combined, by substituting a shell script for
// pkg-config and inspecting the arguments it received.
func TestTwoPkgConfigs(t *testing.T) {
	if !canCgo {
		t.Skip("no cgo")
	}
	if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
		t.Skipf("no shell scripts on %s", runtime.GOOS)
	}
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("src/x/a.go", `package x
// #cgo pkg-config: --static a
import "C"
`)
	tg.tempFile("src/x/b.go", `package x
// #cgo pkg-config: --static a
import "C"
`)
	// Fake pkg-config: append each invocation's arguments to a log file.
	tg.tempFile("pkg-config.sh", `#!/bin/sh
echo $* >>`+tg.path("pkg-config.out"))
	tg.must(os.Chmod(tg.path("pkg-config.sh"), 0755))
	tg.setenv("GOPATH", tg.path("."))
	tg.setenv("PKG_CONFIG", tg.path("pkg-config.sh"))
	tg.run("build", "x")
	out, err := ioutil.ReadFile(tg.path("pkg-config.out"))
	tg.must(err)
	out = bytes.TrimSpace(out)
	// One --cflags call and one --libs call, with the directives from
	// both files concatenated (duplicates are not removed).
	want := "--cflags --static --static -- a a\n--libs --static --static -- a a"
	if !bytes.Equal(out, []byte(want)) {
		t.Errorf("got %q want %q", out, want)
	}
}
// TestCgoCache checks that changing CGO_LDFLAGS invalidates the build
// cache: a rebuild after the change must rerun the linker (which then
// fails on the bogus library).
func TestCgoCache(t *testing.T) {
	if !canCgo {
		t.Skip("no cgo")
	}
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("src/x/a.go", `package main
// #ifndef VAL
// #define VAL 0
// #endif
// int val = VAL;
import "C"
import "fmt"
func main() { fmt.Println(C.val) }
`)
	tg.setenv("GOPATH", tg.path("."))
	exe := tg.path("x.exe")
	tg.run("build", "-o", exe, "x")
	tg.setenv("CGO_LDFLAGS", "-lnosuchlibraryexists")
	tg.runFail("build", "-o", exe, "x")
	tg.grepStderr(`nosuchlibraryexists`, "did not run linker with changed CGO_LDFLAGS")
}
// Issue 23982
// TestFilepathUnderCwdFormat checks that -x output refers to generated
// cover files with correctly formed paths (no stray ".log.cover.go").
func TestFilepathUnderCwdFormat(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.run("test", "-x", "-cover", "log")
	tg.grepStderrNot(`\.log\.cover\.go`, "-x output should contain correctly formatted filepath under cwd")
}
// Issue 24396.
// TestDontReportRemoveOfEmptyDir checks that reinstalling an up-to-date
// package stays quiet: a second `go install -x` prints at most one line.
func TestDontReportRemoveOfEmptyDir(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("src/a/a.go", `package a`)
	tg.setenv("GOPATH", tg.path("."))
	tg.run("install", "-x", "a")
	tg.run("install", "-x", "a")
	// The second install should have printed only a WORK= line,
	// nothing else.
	if bytes.Count(tg.stdout.Bytes(), []byte{'\n'})+bytes.Count(tg.stderr.Bytes(), []byte{'\n'}) > 1 {
		t.Error("unnecessary output when installing installed package")
	}
}
// Issue 24704.
// TestLinkerTmpDirIsDeleted checks that the temporary directory used by
// the external (host) linker is removed after the build finishes. The
// directory name is recovered by parsing the "host link:" line of
// `go build -ldflags -v` output.
func TestLinkerTmpDirIsDeleted(t *testing.T) {
	skipIfGccgo(t, "gccgo does not use cmd/link")
	if !canCgo {
		t.Skip("skipping because cgo not enabled")
	}
	tooSlow(t)
	tg := testgo(t)
	defer tg.cleanup()
	tg.parallel()
	tg.tempFile("a.go", `package main; import "C"; func main() {}`)
	tg.run("build", "-ldflags", "-v", "-o", os.DevNull, tg.path("a.go"))
	// Find line that has "host link:" in linker output.
	stderr := tg.getStderr()
	var hostLinkLine string
	for _, line := range strings.Split(stderr, "\n") {
		if !strings.Contains(line, "host link:") {
			continue
		}
		hostLinkLine = line
		break
	}
	if hostLinkLine == "" {
		t.Fatal(`fail to find with "host link:" string in linker output`)
	}
	// Find parameter, like "/tmp/go-link-408556474/go.o" inside of
	// "host link:" line, and extract temp directory /tmp/go-link-408556474
	// out of it.
	tmpdir := hostLinkLine
	i := strings.Index(tmpdir, `go.o"`)
	if i == -1 {
		t.Fatalf(`fail to find "go.o" in "host link:" line %q`, hostLinkLine)
	}
	// Drop `go.o"` and the path separator before it, keeping the prefix.
	tmpdir = tmpdir[:i-1]
	// The temp directory starts right after the opening quote.
	i = strings.LastIndex(tmpdir, `"`)
	if i == -1 {
		t.Fatalf(`fail to find " in "host link:" line %q`, hostLinkLine)
	}
	tmpdir = tmpdir[i+1:]
	// Verify that temp directory has been removed.
	_, err := os.Stat(tmpdir)
	if err == nil {
		t.Fatalf("temp directory %q has not been removed", tmpdir)
	}
	if !os.IsNotExist(err) {
		t.Fatalf("Stat(%q) returns unexpected error: %v", tmpdir, err)
	}
}
// testCDAndGOPATHAreDifferent builds my.pkg/main from the directory cd
// while GOPATH is set to gopath (the two may differ in case or slash
// style) and verifies that -ldflags -X=my.pkg.Text=... still takes
// effect in the produced binary.
func testCDAndGOPATHAreDifferent(tg *testgoData, cd, gopath string) {
skipIfGccgo(tg.t, "gccgo does not support -ldflags -X")
tg.setenv("GOPATH", gopath)
tg.tempDir("dir")
exe := tg.path("dir/a.exe")
tg.cd(cd)
tg.run("build", "-o", exe, "-ldflags", "-X=my.pkg.Text=linkXworked")
// Run the built binary; it prints the value injected via -X.
out, err := exec.Command(exe).CombinedOutput()
if err != nil {
tg.t.Fatal(err)
}
if string(out) != "linkXworked\n" {
tg.t.Errorf(`incorrect output with GOPATH=%q and CD=%q: expected "linkXworked\n", but have %q`, gopath, cd, string(out))
}
}
// TestCDAndGOPATHAreDifferent drives testCDAndGOPATHAreDifferent with
// several spellings of GOPATH relative to the working directory.
func TestCDAndGOPATHAreDifferent(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
gopath := filepath.Join(tg.pwd(), "testdata")
cd := filepath.Join(gopath, "src/my.pkg/main")
testCDAndGOPATHAreDifferent(tg, cd, gopath)
// Windows paths are case-insensitive and accept both separators, so
// GOPATH spellings differing only in slashes or case must also work.
if runtime.GOOS == "windows" {
testCDAndGOPATHAreDifferent(tg, cd, strings.ReplaceAll(gopath, `\`, `/`))
testCDAndGOPATHAreDifferent(tg, cd, strings.ToUpper(gopath))
testCDAndGOPATHAreDifferent(tg, cd, strings.ToLower(gopath))
}
}
// Issue 25579.
// `go build -o <dev-null>` must not additionally drop a hello/hello.exe
// binary into the working directory.
func TestGoBuildDashODevNull(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.run("build", "-o", os.DevNull, filepath.Join(tg.pwd(), "testdata", "src", "hello", "hello.go"))
tg.mustNotExist("hello")
tg.mustNotExist("hello.exe")
}
// Issue 25093.
// -coverpkg naming a package that is only imported by the test package
// (not itself under test) must still produce coverage for it, without
// emitting the bogus "no packages being tested depend on matches" note.
func TestCoverpkgTestOnly(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/a/a.go", `package a
func F(i int) int {
return i*i
}`)
tg.tempFile("src/atest/a_test.go", `
package a_test
import ( "a"; "testing" )
func TestF(t *testing.T) { a.F(2) }
`)
tg.setenv("GOPATH", tg.path("."))
tg.run("test", "-coverpkg=a", "atest")
tg.grepStderrNot("no packages being tested depend on matches", "bad match message")
tg.grepStdout("coverage: 100", "no coverage")
}
|
[
"\"GO_GCFLAGS\"",
"\"PATH\"",
"\"GOPATH\"",
"\"TERM\"",
"\"TERM\"",
"\"GODEBUG\"",
"\"GODEBUG\"",
"\"GODEBUG\"",
"\"GODEBUG\"",
"\"GODEBUG\""
] |
[] |
[
"GO_GCFLAGS",
"GOPATH",
"TERM",
"GODEBUG",
"PATH"
] |
[]
|
["GO_GCFLAGS", "GOPATH", "TERM", "GODEBUG", "PATH"]
|
go
| 5 | 0 | |
bbolt/bucket_test.go
|
package bbolt_test
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"log"
"math/rand"
"os"
"strconv"
"strings"
"testing"
"testing/quick"
bolt "github.com/maxymania/go-unstable/bbolt"
)
// Ensure that a bucket that gets a non-existent key returns nil.
func TestBucket_Get_NonExistent(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	err := db.Update(func(tx *bolt.Tx) error {
		bucket, berr := tx.CreateBucket([]byte("widgets"))
		if berr != nil {
			t.Fatal(berr)
		}
		// A key that was never written must come back as a nil value.
		got := bucket.Get([]byte("foo"))
		if got != nil {
			t.Fatal("expected nil value")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// Ensure that a bucket can read a value that is not flushed yet.
func TestBucket_Get_FromNode(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	err := db.Update(func(tx *bolt.Tx) error {
		bucket, berr := tx.CreateBucket([]byte("widgets"))
		if berr != nil {
			t.Fatal(berr)
		}
		if perr := bucket.Put([]byte("foo"), []byte("bar")); perr != nil {
			t.Fatal(perr)
		}
		// The write has not been committed yet; Get must still see it.
		got := bucket.Get([]byte("foo"))
		if !bytes.Equal(got, []byte("bar")) {
			t.Fatalf("unexpected value: %v", got)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// Ensure that a bucket retrieved via Get() returns a nil.
func TestBucket_Get_IncompatibleValue(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	err := db.Update(func(tx *bolt.Tx) error {
		if _, cerr := tx.CreateBucket([]byte("widgets")); cerr != nil {
			t.Fatal(cerr)
		}
		if _, cerr := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); cerr != nil {
			t.Fatal(cerr)
		}
		// "foo" names a nested bucket, so Get must not report a value.
		if got := tx.Bucket([]byte("widgets")).Get([]byte("foo")); got != nil {
			t.Fatal("expected nil value")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// Ensure that a slice returned from a bucket has a capacity equal to its length.
// This also allows slices to be appended to since it will require a realloc by Go.
//
// https://github.com/boltdb/bolt/issues/544
func TestBucket_Get_Capacity(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
// Write key to a bucket.
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("bucket"))
if err != nil {
return err
}
return b.Put([]byte("key"), []byte("val"))
}); err != nil {
t.Fatal(err)
}
// Retrieve value and attempt to append to it.
if err := db.Update(func(tx *bolt.Tx) error {
k, v := tx.Bucket([]byte("bucket")).Cursor().First()
// Verify capacity.
if len(k) != cap(k) {
t.Fatalf("unexpected key slice capacity: %d", cap(k))
} else if len(v) != cap(v) {
t.Fatalf("unexpected value slice capacity: %d", cap(v))
}
// Ensure slice can be appended to without a segfault.
// (Because cap == len, append must reallocate instead of writing
// into the memory backing the returned slice.)
k = append(k, []byte("123")...)
v = append(v, []byte("123")...)
_, _ = k, v // to pass ineffassign
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that a bucket can write a key/value.
func TestBucket_Put(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	err := db.Update(func(tx *bolt.Tx) error {
		bucket, cerr := tx.CreateBucket([]byte("widgets"))
		if cerr != nil {
			t.Fatal(cerr)
		}
		if perr := bucket.Put([]byte("foo"), []byte("bar")); perr != nil {
			t.Fatal(perr)
		}
		// Read back through a fresh bucket lookup on the same tx.
		got := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
		if !bytes.Equal([]byte("bar"), got) {
			t.Fatalf("unexpected value: %v", got)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// Ensure that a bucket can rewrite a key in the same transaction.
func TestBucket_Put_Repeat(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	err := db.Update(func(tx *bolt.Tx) error {
		bucket, cerr := tx.CreateBucket([]byte("widgets"))
		if cerr != nil {
			t.Fatal(cerr)
		}
		if perr := bucket.Put([]byte("foo"), []byte("bar")); perr != nil {
			t.Fatal(perr)
		}
		// The second write to the same key overwrites the first.
		if perr := bucket.Put([]byte("foo"), []byte("baz")); perr != nil {
			t.Fatal(perr)
		}
		got := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
		if !bytes.Equal([]byte("baz"), got) {
			t.Fatalf("unexpected value: %v", got)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// Ensure that a bucket can write a bunch of large values.
func TestBucket_Put_Large(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
// Key length grows (i*factor) while value length shrinks
// ((count-i)*factor), exercising many different page layouts.
count, factor := 100, 200
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
for i := 1; i < count; i++ {
if err := b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))); err != nil {
t.Fatal(err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
// Read everything back in a separate read-only transaction.
if err := db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 1; i < count; i++ {
value := b.Get([]byte(strings.Repeat("0", i*factor)))
if !bytes.Equal(value, []byte(strings.Repeat("X", (count-i)*factor))) {
t.Fatalf("unexpected value: %v", value)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that a database can perform multiple large appends safely.
func TestDB_Put_VeryLarge(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
// 400k keys written across two 200k-key transactions,
// with 8-byte keys and 500-byte values.
n, batchN := 400000, 200000
ksize, vsize := 8, 500
db := MustOpenDB()
defer db.MustClose()
for i := 0; i < n; i += batchN {
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
for j := 0; j < batchN; j++ {
k, v := make([]byte, ksize), make([]byte, vsize)
// NOTE(review): only the first 4 of ksize bytes are set by
// PutUint32; the remaining 4 stay zero.
binary.BigEndian.PutUint32(k, uint32(i+j))
if err := b.Put(k, v); err != nil {
t.Fatal(err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
}
}
// Ensure that a setting a value on a key with a bucket value returns an error.
func TestBucket_Put_IncompatibleValue(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	err := db.Update(func(tx *bolt.Tx) error {
		widgets, cerr := tx.CreateBucket([]byte("widgets"))
		if cerr != nil {
			t.Fatal(cerr)
		}
		if _, cerr := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); cerr != nil {
			t.Fatal(cerr)
		}
		// "foo" now names a nested bucket; a plain Put must be rejected.
		if perr := widgets.Put([]byte("foo"), []byte("bar")); perr != bolt.ErrIncompatibleValue {
			t.Fatalf("unexpected error: %s", perr)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// Ensure that a setting a value while the transaction is closed returns an error.
func TestBucket_Put_Closed(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	tx, err := db.Begin(true)
	if err != nil {
		t.Fatal(err)
	}
	bucket, err := tx.CreateBucket([]byte("widgets"))
	if err != nil {
		t.Fatal(err)
	}
	// Rolling the transaction back leaves the bucket handle stale.
	if err = tx.Rollback(); err != nil {
		t.Fatal(err)
	}
	if err = bucket.Put([]byte("foo"), []byte("bar")); err != bolt.ErrTxClosed {
		t.Fatalf("unexpected error: %s", err)
	}
}
// Ensure that setting a value on a read-only bucket returns an error.
func TestBucket_Put_ReadOnly(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *bolt.Tx) error {
		_, cerr := tx.CreateBucket([]byte("widgets"))
		if cerr != nil {
			t.Fatal(cerr)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	// View transactions are read-only; writes must be refused.
	if err := db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte("widgets"))
		if perr := bucket.Put([]byte("foo"), []byte("bar")); perr != bolt.ErrTxNotWritable {
			t.Fatalf("unexpected error: %s", perr)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
// Ensure that a bucket can delete an existing key.
func TestBucket_Delete(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	err := db.Update(func(tx *bolt.Tx) error {
		bucket, cerr := tx.CreateBucket([]byte("widgets"))
		if cerr != nil {
			t.Fatal(cerr)
		}
		if perr := bucket.Put([]byte("foo"), []byte("bar")); perr != nil {
			t.Fatal(perr)
		}
		if derr := bucket.Delete([]byte("foo")); derr != nil {
			t.Fatal(derr)
		}
		// The key must already be gone within the same transaction.
		if got := bucket.Get([]byte("foo")); got != nil {
			t.Fatalf("unexpected value: %v", got)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// Ensure that deleting a large set of keys will work correctly.
func TestBucket_Delete_Large(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
// Insert 100 keys with 1KB values so the bucket spans many pages.
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
for i := 0; i < 100; i++ {
if err := b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024))); err != nil {
t.Fatal(err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
// Delete every key in a second transaction.
if err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 0; i < 100; i++ {
if err := b.Delete([]byte(strconv.Itoa(i))); err != nil {
t.Fatal(err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
// Verify all keys are gone in a read-only transaction.
if err := db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 0; i < 100; i++ {
if v := b.Get([]byte(strconv.Itoa(i))); v != nil {
t.Fatalf("unexpected value: %v, i=%d", v, i)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Deleting a very large list of keys will cause the freelist to use overflow.
func TestBucket_Delete_FreelistOverflow(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
db := MustOpenDB()
defer db.MustClose()
// Insert 10,000,000 nil-value keys (10000 txs x 1000 keys each) so that
// freeing them later produces more than 0xFFFF freelist entries.
k := make([]byte, 16)
for i := uint64(0); i < 10000; i++ {
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte("0"))
if err != nil {
t.Fatalf("bucket error: %s", err)
}
for j := uint64(0); j < 1000; j++ {
binary.BigEndian.PutUint64(k[:8], i)
binary.BigEndian.PutUint64(k[8:], j)
if err := b.Put(k, nil); err != nil {
t.Fatalf("put error: %s", err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Delete all of them in one large transaction
if err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("0"))
c := b.Cursor()
for k, _ := c.First(); k != nil; k, _ = c.Next() {
if err := c.Delete(); err != nil {
t.Fatal(err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
// Check more than an overflow's worth of pages are freed.
stats := db.Stats()
freePages := stats.FreePageN + stats.PendingPageN
if freePages <= 0xFFFF {
t.Fatalf("expected more than 0xFFFF free pages, got %v", freePages)
}
// Free page count should be preserved on reopen.
if err := db.DB.Close(); err != nil {
t.Fatal(err)
}
db.MustReopen()
if reopenFreePages := db.Stats().FreePageN; freePages != reopenFreePages {
t.Fatalf("expected %d free pages, got %+v", freePages, db.Stats())
}
}
// Ensure that deleting of non-existing key is a no-op.
func TestBucket_Delete_NonExisting(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if _, err = b.CreateBucket([]byte("nested")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
// Deleting a key that was never written must not return an error...
if err := b.Delete([]byte("foo")); err != nil {
t.Fatal(err)
}
// ...and must leave the sibling nested bucket intact.
if b.Bucket([]byte("nested")) == nil {
t.Fatal("nested bucket has been deleted")
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that accessing and updating nested buckets is ok across transactions.
func TestBucket_Nested(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
// Create a widgets bucket.
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
// Create a widgets/foo bucket.
_, err = b.CreateBucket([]byte("foo"))
if err != nil {
t.Fatal(err)
}
// Create a widgets/bar key.
if err := b.Put([]byte("bar"), []byte("0000")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
// Run the DB consistency checker after each mutating transaction.
db.MustCheck()
// Update widgets/bar.
if err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
if err := b.Put([]byte("bar"), []byte("xxxx")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
db.MustCheck()
// Cause a split.
if err := db.Update(func(tx *bolt.Tx) error {
var b = tx.Bucket([]byte("widgets"))
for i := 0; i < 10000; i++ {
if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
t.Fatal(err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
db.MustCheck()
// Insert into widgets/foo/baz.
if err := db.Update(func(tx *bolt.Tx) error {
var b = tx.Bucket([]byte("widgets"))
if err := b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
db.MustCheck()
// Verify.
if err := db.View(func(tx *bolt.Tx) error {
var b = tx.Bucket([]byte("widgets"))
if v := b.Bucket([]byte("foo")).Get([]byte("baz")); !bytes.Equal(v, []byte("yyyy")) {
t.Fatalf("unexpected value: %v", v)
}
if v := b.Get([]byte("bar")); !bytes.Equal(v, []byte("xxxx")) {
t.Fatalf("unexpected value: %v", v)
}
for i := 0; i < 10000; i++ {
if v := b.Get([]byte(strconv.Itoa(i))); !bytes.Equal(v, []byte(strconv.Itoa(i))) {
t.Fatalf("unexpected value: %v", v)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that deleting a bucket using Delete() returns an error.
func TestBucket_Delete_Bucket(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	err := db.Update(func(tx *bolt.Tx) error {
		bucket, cerr := tx.CreateBucket([]byte("widgets"))
		if cerr != nil {
			t.Fatal(cerr)
		}
		if _, cerr := bucket.CreateBucket([]byte("foo")); cerr != nil {
			t.Fatal(cerr)
		}
		// Sub-buckets must be removed with DeleteBucket, not Delete.
		if derr := bucket.Delete([]byte("foo")); derr != bolt.ErrIncompatibleValue {
			t.Fatalf("unexpected error: %s", derr)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// Ensure that deleting a key on a read-only bucket returns an error.
func TestBucket_Delete_ReadOnly(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *bolt.Tx) error {
		_, cerr := tx.CreateBucket([]byte("widgets"))
		if cerr != nil {
			t.Fatal(cerr)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	// Deletion requires a writable transaction.
	if err := db.View(func(tx *bolt.Tx) error {
		if derr := tx.Bucket([]byte("widgets")).Delete([]byte("foo")); derr != bolt.ErrTxNotWritable {
			t.Fatalf("unexpected error: %s", derr)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
// Ensure that a deleting value while the transaction is closed returns an error.
func TestBucket_Delete_Closed(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	tx, err := db.Begin(true)
	if err != nil {
		t.Fatal(err)
	}
	bucket, err := tx.CreateBucket([]byte("widgets"))
	if err != nil {
		t.Fatal(err)
	}
	// Closing the transaction invalidates the bucket handle.
	if err = tx.Rollback(); err != nil {
		t.Fatal(err)
	}
	if err = bucket.Delete([]byte("foo")); err != bolt.ErrTxClosed {
		t.Fatalf("unexpected error: %s", err)
	}
}
// Ensure that deleting a bucket causes nested buckets to be deleted.
func TestBucket_DeleteBucket_Nested(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
foo, err := widgets.CreateBucket([]byte("foo"))
if err != nil {
t.Fatal(err)
}
bar, err := foo.CreateBucket([]byte("bar"))
if err != nil {
t.Fatal(err)
}
if err := bar.Put([]byte("baz"), []byte("bat")); err != nil {
t.Fatal(err)
}
// Deleting "foo" must recursively remove "foo/bar" and its keys,
// all within the same uncommitted transaction.
if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed.
func TestBucket_DeleteBucket_Nested2(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
foo, err := widgets.CreateBucket([]byte("foo"))
if err != nil {
t.Fatal(err)
}
bar, err := foo.CreateBucket([]byte("bar"))
if err != nil {
t.Fatal(err)
}
if err := bar.Put([]byte("baz"), []byte("bat")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
// Re-read the committed hierarchy in a second tx, then delete the root.
if err := db.Update(func(tx *bolt.Tx) error {
widgets := tx.Bucket([]byte("widgets"))
if widgets == nil {
t.Fatal("expected widgets bucket")
}
foo := widgets.Bucket([]byte("foo"))
if foo == nil {
t.Fatal("expected foo bucket")
}
bar := foo.Bucket([]byte("bar"))
if bar == nil {
t.Fatal("expected bar bucket")
}
if v := bar.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) {
t.Fatalf("unexpected value: %v", v)
}
if err := tx.DeleteBucket([]byte("widgets")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
// The whole tree must be gone after the delete commits.
if err := db.View(func(tx *bolt.Tx) error {
if tx.Bucket([]byte("widgets")) != nil {
t.Fatal("expected bucket to be deleted")
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that deleting a child bucket with multiple pages causes all pages to get collected.
// NOTE: Consistency check in bolt_test.DB.Close() will panic if pages not freed properly.
func TestBucket_DeleteBucket_Large(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
foo, err := widgets.CreateBucket([]byte("foo"))
if err != nil {
t.Fatal(err)
}
// 1000 entries with 100-byte values force the child bucket onto
// multiple pages.
for i := 0; i < 1000; i++ {
if err := foo.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i))); err != nil {
t.Fatal(err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.Update(func(tx *bolt.Tx) error {
if err := tx.DeleteBucket([]byte("widgets")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that a simple value retrieved via Bucket() returns a nil.
func TestBucket_Bucket_IncompatibleValue(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	err := db.Update(func(tx *bolt.Tx) error {
		widgets, cerr := tx.CreateBucket([]byte("widgets"))
		if cerr != nil {
			t.Fatal(cerr)
		}
		if perr := widgets.Put([]byte("foo"), []byte("bar")); perr != nil {
			t.Fatal(perr)
		}
		// "foo" holds a plain value; Bucket() must not treat it as a sub-bucket.
		if sub := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")); sub != nil {
			t.Fatal("expected nil bucket")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// Ensure that creating a bucket on an existing non-bucket key returns an error.
func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	err := db.Update(func(tx *bolt.Tx) error {
		widgets, cerr := tx.CreateBucket([]byte("widgets"))
		if cerr != nil {
			t.Fatal(cerr)
		}
		if perr := widgets.Put([]byte("foo"), []byte("bar")); perr != nil {
			t.Fatal(perr)
		}
		// "foo" already holds a plain value; it cannot become a sub-bucket.
		if _, cerr := widgets.CreateBucket([]byte("foo")); cerr != bolt.ErrIncompatibleValue {
			t.Fatalf("unexpected error: %s", cerr)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// Ensure that deleting a bucket on an existing non-bucket key returns an error.
func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	err := db.Update(func(tx *bolt.Tx) error {
		widgets, cerr := tx.CreateBucket([]byte("widgets"))
		if cerr != nil {
			t.Fatal(cerr)
		}
		if perr := widgets.Put([]byte("foo"), []byte("bar")); perr != nil {
			t.Fatal(perr)
		}
		// "foo" holds a plain value; DeleteBucket must refuse to remove it.
		if derr := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); derr != bolt.ErrIncompatibleValue {
			t.Fatalf("unexpected error: %s", derr)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// Ensure bucket can set and update its sequence number.
func TestBucket_Sequence(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
bkt, err := tx.CreateBucket([]byte("0"))
if err != nil {
t.Fatal(err)
}
// Retrieve sequence. (A fresh bucket starts at zero.)
if v := bkt.Sequence(); v != 0 {
t.Fatalf("unexpected sequence: %d", v)
}
// Update sequence.
if err := bkt.SetSequence(1000); err != nil {
t.Fatal(err)
}
// Read sequence again.
if v := bkt.Sequence(); v != 1000 {
t.Fatalf("unexpected sequence: %d", v)
}
return nil
}); err != nil {
t.Fatal(err)
}
// Verify sequence in separate transaction.
// (SetSequence must have been committed along with the transaction.)
if err := db.View(func(tx *bolt.Tx) error {
if v := tx.Bucket([]byte("0")).Sequence(); v != 1000 {
t.Fatalf("unexpected sequence: %d", v)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that a bucket can return an autoincrementing sequence.
//
// Fixes two failure-message defects: the "unexpecte sequence" typo, and
// the woojits check which printed the literal 1 instead of the actual
// sequence value it observed.
func TestBucket_NextSequence(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	if err := db.Update(func(tx *bolt.Tx) error {
		widgets, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}
		woojits, err := tx.CreateBucket([]byte("woojits"))
		if err != nil {
			t.Fatal(err)
		}
		// Make sure sequence increments.
		if seq, err := widgets.NextSequence(); err != nil {
			t.Fatal(err)
		} else if seq != 1 {
			t.Fatalf("unexpected sequence: %d", seq)
		}
		if seq, err := widgets.NextSequence(); err != nil {
			t.Fatal(err)
		} else if seq != 2 {
			t.Fatalf("unexpected sequence: %d", seq)
		}
		// Buckets should be separate: woojits starts at its own 1.
		if seq, err := woojits.NextSequence(); err != nil {
			t.Fatal(err)
		} else if seq != 1 {
			t.Fatalf("unexpected sequence: %d", seq)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
// Ensure that a bucket will persist an autoincrementing sequence even if its
// the only thing updated on the bucket.
// https://github.com/boltdb/bolt/issues/296
func TestBucket_NextSequence_Persist(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
// A transaction whose only change is NextSequence must still commit it.
if err := db.Update(func(tx *bolt.Tx) error {
if _, err := tx.Bucket([]byte("widgets")).NextSequence(); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
// The following transaction must therefore observe sequence 2.
if err := db.Update(func(tx *bolt.Tx) error {
seq, err := tx.Bucket([]byte("widgets")).NextSequence()
if err != nil {
t.Fatalf("unexpected error: %s", err)
} else if seq != 2 {
t.Fatalf("unexpected sequence: %d", seq)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that retrieving the next sequence on a read-only bucket returns an error.
func TestBucket_NextSequence_ReadOnly(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *bolt.Tx) error {
		_, cerr := tx.CreateBucket([]byte("widgets"))
		if cerr != nil {
			t.Fatal(cerr)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	// NextSequence mutates bucket metadata, so it needs a writable tx.
	if err := db.View(func(tx *bolt.Tx) error {
		if _, serr := tx.Bucket([]byte("widgets")).NextSequence(); serr != bolt.ErrTxNotWritable {
			t.Fatalf("unexpected error: %s", serr)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
// Ensure that retrieving the next sequence for a bucket on a closed database return an error.
func TestBucket_NextSequence_Closed(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	tx, err := db.Begin(true)
	if err != nil {
		t.Fatal(err)
	}
	bucket, err := tx.CreateBucket([]byte("widgets"))
	if err != nil {
		t.Fatal(err)
	}
	// Closing the transaction invalidates the bucket handle.
	if err = tx.Rollback(); err != nil {
		t.Fatal(err)
	}
	if _, err = bucket.NextSequence(); err != bolt.ErrTxClosed {
		t.Fatal(err)
	}
}
// Ensure a user can loop over all key/value pairs in a bucket.
func TestBucket_ForEach(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("foo"), []byte("0000")); err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("baz"), []byte("0001")); err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("bar"), []byte("0002")); err != nil {
t.Fatal(err)
}
// Iteration is in sorted key order (bar, baz, foo), not insertion order.
var index int
if err := b.ForEach(func(k, v []byte) error {
switch index {
case 0:
if !bytes.Equal(k, []byte("bar")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte("0002")) {
t.Fatalf("unexpected value: %v", v)
}
case 1:
if !bytes.Equal(k, []byte("baz")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte("0001")) {
t.Fatalf("unexpected value: %v", v)
}
case 2:
if !bytes.Equal(k, []byte("foo")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte("0000")) {
t.Fatalf("unexpected value: %v", v)
}
}
index++
return nil
}); err != nil {
t.Fatal(err)
}
// All three pairs must have been visited exactly once.
if index != 3 {
t.Fatalf("unexpected index: %d", index)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure a database can stop iteration early.
func TestBucket_ForEach_ShortCircuit(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("bar"), []byte("0000")); err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("baz"), []byte("0000")); err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("foo"), []byte("0000")); err != nil {
t.Fatal(err)
}
// Returning an error from the callback aborts iteration and the
// error is propagated back out of ForEach.
var index int
if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
index++
if bytes.Equal(k, []byte("baz")) {
return errors.New("marker")
}
return nil
}); err == nil || err.Error() != "marker" {
t.Fatalf("unexpected error: %s", err)
}
// Only "bar" and "baz" were visited before the abort.
if index != 2 {
t.Fatalf("unexpected index: %d", index)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that looping over a bucket on a closed database returns an error.
func TestBucket_ForEach_Closed(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	tx, err := db.Begin(true)
	if err != nil {
		t.Fatal(err)
	}
	bucket, err := tx.CreateBucket([]byte("widgets"))
	if err != nil {
		t.Fatal(err)
	}
	// Closing the transaction invalidates the bucket handle.
	if err = tx.Rollback(); err != nil {
		t.Fatal(err)
	}
	noop := func(k, v []byte) error { return nil }
	if err = bucket.ForEach(noop); err != bolt.ErrTxClosed {
		t.Fatalf("unexpected error: %s", err)
	}
}
// Ensure that an error is returned when inserting with an empty key.
func TestBucket_Put_EmptyKey(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	err := db.Update(func(tx *bolt.Tx) error {
		bucket, cerr := tx.CreateBucket([]byte("widgets"))
		if cerr != nil {
			t.Fatal(cerr)
		}
		// Both a zero-length key and a nil key must be rejected.
		if perr := bucket.Put([]byte(""), []byte("bar")); perr != bolt.ErrKeyRequired {
			t.Fatalf("unexpected error: %s", perr)
		}
		if perr := bucket.Put(nil, []byte("bar")); perr != bolt.ErrKeyRequired {
			t.Fatalf("unexpected error: %s", perr)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// Ensure that an error is returned when inserting with a key that's too large.
func TestBucket_Put_KeyTooLarge(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	err := db.Update(func(tx *bolt.Tx) error {
		bucket, cerr := tx.CreateBucket([]byte("widgets"))
		if cerr != nil {
			t.Fatal(cerr)
		}
		// A 32769-byte key is one byte over the accepted maximum.
		if perr := bucket.Put(make([]byte, 32769), []byte("bar")); perr != bolt.ErrKeyTooLarge {
			t.Fatalf("unexpected error: %s", perr)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// Ensure that an error is returned when inserting a value that's too large.
func TestBucket_Put_ValueTooLarge(t *testing.T) {
// Skip this test on DroneCI because the machine is resource constrained.
if os.Getenv("DRONE") == "true" {
t.Skip("not enough RAM for test")
}
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
// A value one byte over bolt.MaxValueSize must be rejected.
if err := b.Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1)); err != bolt.ErrValueTooLarge {
t.Fatalf("unexpected error: %s", err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure a bucket can calculate stats.
func TestBucket_Stats(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
// Add bucket with fewer keys but one big value.
bigKey := []byte("really-big-value")
for i := 0; i < 500; i++ {
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte("woojits"))
if err != nil {
t.Fatal(err)
}
if err := b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i))); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
if err := db.Update(func(tx *bolt.Tx) error {
if err := tx.Bucket([]byte("woojits")).Put(bigKey, []byte(strings.Repeat("*", 10000))); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
db.MustCheck()
if err := db.View(func(tx *bolt.Tx) error {
stats := tx.Bucket([]byte("woojits")).Stats()
if stats.BranchPageN != 1 {
t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
} else if stats.BranchOverflowN != 0 {
t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
} else if stats.LeafPageN != 7 {
t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
} else if stats.LeafOverflowN != 2 {
t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
} else if stats.KeyN != 501 {
t.Fatalf("unexpected KeyN: %d", stats.KeyN)
} else if stats.Depth != 2 {
t.Fatalf("unexpected Depth: %d", stats.Depth)
}
branchInuse := 16 // branch page header
branchInuse += 7 * 16 // branch elements
branchInuse += 7 * 3 // branch keys (7 3-byte keys, one per leaf page)
if stats.BranchInuse != branchInuse {
t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
}
leafInuse := 7 * 16 // leaf page headers (one per leaf page)
leafInuse += 501 * 16 // leaf elements
leafInuse += 500*3 + len(bigKey) // leaf keys
leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values (10 1-digit, 90 2-digit, 400 3-digit, plus the big value)
if stats.LeafInuse != leafInuse {
t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
}
// Only check allocations for 4KB pages.
if db.Info().PageSize == 4096 {
if stats.BranchAlloc != 4096 {
t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
} else if stats.LeafAlloc != 36864 {
t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
}
}
if stats.BucketN != 1 {
t.Fatalf("unexpected BucketN: %d", stats.BucketN)
} else if stats.InlineBucketN != 0 {
t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
} else if stats.InlineBucketInuse != 0 {
t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure a bucket with random insertion utilizes fill percentage correctly.
func TestBucket_Stats_RandomFill(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
} else if os.Getpagesize() != 4096 {
t.Skip("invalid page size for test")
}
db := MustOpenDB()
defer db.MustClose()
// Add a set of values in random order. It will be the same random
// order so we can maintain consistency between test runs.
var count int
rand := rand.New(rand.NewSource(42))
for _, i := range rand.Perm(1000) {
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte("woojits"))
if err != nil {
t.Fatal(err)
}
// Use a 90% fill factor instead of the default.
b.FillPercent = 0.9
for _, j := range rand.Perm(100) {
index := (j * 10000) + i
if err := b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000")); err != nil {
t.Fatal(err)
}
count++
}
return nil
}); err != nil {
t.Fatal(err)
}
}
db.MustCheck()
// With the fixed seed and 4KB pages the exact page counts below are
// deterministic across runs.
if err := db.View(func(tx *bolt.Tx) error {
stats := tx.Bucket([]byte("woojits")).Stats()
if stats.KeyN != 100000 {
t.Fatalf("unexpected KeyN: %d", stats.KeyN)
}
if stats.BranchPageN != 98 {
t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
} else if stats.BranchOverflowN != 0 {
t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
} else if stats.BranchInuse != 130984 {
t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
} else if stats.BranchAlloc != 401408 {
t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
}
if stats.LeafPageN != 3412 {
t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
} else if stats.LeafOverflowN != 0 {
t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
} else if stats.LeafInuse != 4742482 {
t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
} else if stats.LeafAlloc != 13975552 {
t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure a bucket can calculate stats.
// This variant uses a bucket small enough to be stored inline in its parent
// leaf page, so every page counter is zero and only the inline-bucket
// counters are populated.
func TestBucket_Stats_Small(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *bolt.Tx) error {
		// Add a bucket that fits on a single root leaf.
		b, err := tx.CreateBucket([]byte("whozawhats"))
		if err != nil {
			t.Fatal(err)
		}
		if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	db.MustCheck()

	if err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("whozawhats"))
		stats := b.Stats()
		// Inline buckets own no pages of their own.
		if stats.BranchPageN != 0 {
			t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
		} else if stats.BranchOverflowN != 0 {
			t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
		} else if stats.LeafPageN != 0 {
			t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
		} else if stats.LeafOverflowN != 0 {
			t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
		} else if stats.KeyN != 1 {
			t.Fatalf("unexpected KeyN: %d", stats.KeyN)
		} else if stats.Depth != 1 {
			t.Fatalf("unexpected Depth: %d", stats.Depth)
		} else if stats.BranchInuse != 0 {
			t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
		} else if stats.LeafInuse != 0 {
			t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
		}
		// Allocation figures are only checked for 4KB pages.
		if db.Info().PageSize == 4096 {
			if stats.BranchAlloc != 0 {
				t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
			} else if stats.LeafAlloc != 0 {
				t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
			}
		}
		if stats.BucketN != 1 {
			t.Fatalf("unexpected BucketN: %d", stats.BucketN)
		} else if stats.InlineBucketN != 1 {
			t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
		} else if stats.InlineBucketInuse != 16+16+6 {
			// 16-byte page header + 16-byte leaf element + len("foo")+len("bar").
			t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
// TestBucket_Stats_EmptyBucket ensures stats are correct for a bucket with
// no keys: it remains inline, and only the 16-byte page header is in use.
func TestBucket_Stats_EmptyBucket(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *bolt.Tx) error {
		// Add a bucket that fits on a single root leaf.
		if _, err := tx.CreateBucket([]byte("whozawhats")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	db.MustCheck()

	if err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("whozawhats"))
		stats := b.Stats()
		// An empty inline bucket owns no pages and holds no keys.
		if stats.BranchPageN != 0 {
			t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
		} else if stats.BranchOverflowN != 0 {
			t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
		} else if stats.LeafPageN != 0 {
			t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
		} else if stats.LeafOverflowN != 0 {
			t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
		} else if stats.KeyN != 0 {
			t.Fatalf("unexpected KeyN: %d", stats.KeyN)
		} else if stats.Depth != 1 {
			t.Fatalf("unexpected Depth: %d", stats.Depth)
		} else if stats.BranchInuse != 0 {
			t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
		} else if stats.LeafInuse != 0 {
			t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
		}
		// Allocation figures are only checked for 4KB pages.
		if db.Info().PageSize == 4096 {
			if stats.BranchAlloc != 0 {
				t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
			} else if stats.LeafAlloc != 0 {
				t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
			}
		}
		if stats.BucketN != 1 {
			t.Fatalf("unexpected BucketN: %d", stats.BucketN)
		} else if stats.InlineBucketN != 1 {
			t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
		} else if stats.InlineBucketInuse != 16 {
			// Just the page header; no elements, keys, or values.
			t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
// Ensure a bucket can calculate stats.
// Builds a three-level hierarchy (foo -> bar -> baz) where the innermost
// bucket is small enough to remain inline, and checks that the aggregated
// stats account for all three levels.
func TestBucket_Stats_Nested(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucket([]byte("foo"))
		if err != nil {
			t.Fatal(err)
		}
		// 100 two-byte keys/values in the top-level bucket.
		for i := 0; i < 100; i++ {
			if err := b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))); err != nil {
				t.Fatal(err)
			}
		}
		bar, err := b.CreateBucket([]byte("bar"))
		if err != nil {
			t.Fatal(err)
		}
		// 10 single-character keys/values in the middle bucket.
		for i := 0; i < 10; i++ {
			if err := bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
				t.Fatal(err)
			}
		}
		baz, err := bar.CreateBucket([]byte("baz"))
		if err != nil {
			t.Fatal(err)
		}
		// 10 single-character keys/values in the innermost (inline) bucket.
		for i := 0; i < 10; i++ {
			if err := baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
				t.Fatal(err)
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	db.MustCheck()

	if err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("foo"))
		stats := b.Stats()
		if stats.BranchPageN != 0 {
			t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
		} else if stats.BranchOverflowN != 0 {
			t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
		} else if stats.LeafPageN != 2 {
			t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
		} else if stats.LeafOverflowN != 0 {
			t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
		} else if stats.KeyN != 122 {
			// 100 (foo) + 10+1 (bar + its sub-bucket key) + 10+1 (baz entries + key).
			t.Fatalf("unexpected KeyN: %d", stats.KeyN)
		} else if stats.Depth != 3 {
			t.Fatalf("unexpected Depth: %d", stats.Depth)
		} else if stats.BranchInuse != 0 {
			t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
		}
		// Byte-for-byte accounting of each bucket's leaf usage.
		foo := 16            // foo (pghdr)
		foo += 101 * 16      // foo leaf elements
		foo += 100*2 + 100*2 // foo leaf key/values
		foo += 3 + 16        // foo -> bar key/value
		bar := 16      // bar (pghdr)
		bar += 11 * 16 // bar leaf elements
		bar += 10 + 10 // bar leaf key/values
		bar += 3 + 16  // bar -> baz key/value
		baz := 16      // baz (inline) (pghdr)
		baz += 10 * 16 // baz leaf elements
		baz += 10 + 10 // baz leaf key/values
		if stats.LeafInuse != foo+bar+baz {
			t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
		}
		// Allocation figures are only checked for 4KB pages.
		if db.Info().PageSize == 4096 {
			if stats.BranchAlloc != 0 {
				t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
			} else if stats.LeafAlloc != 8192 {
				t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
			}
		}
		if stats.BucketN != 3 {
			t.Fatalf("unexpected BucketN: %d", stats.BucketN)
		} else if stats.InlineBucketN != 1 {
			t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
		} else if stats.InlineBucketInuse != baz {
			t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
// Ensure a large bucket can calculate stats.
// Inserts 100,000 sequential keys across 100 transactions and verifies the
// exact resulting page counts; the expected values are specific to this
// workload and to 4KB pages (allocation checks are guarded accordingly).
func TestBucket_Stats_Large(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	db := MustOpenDB()
	defer db.MustClose()

	var index int
	for i := 0; i < 100; i++ {
		// Add bucket with lots of keys.
		if err := db.Update(func(tx *bolt.Tx) error {
			b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
			if err != nil {
				t.Fatal(err)
			}
			for i := 0; i < 1000; i++ {
				if err := b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index))); err != nil {
					t.Fatal(err)
				}
				index++
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}
	}
	db.MustCheck()

	if err := db.View(func(tx *bolt.Tx) error {
		stats := tx.Bucket([]byte("widgets")).Stats()
		if stats.BranchPageN != 13 {
			t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
		} else if stats.BranchOverflowN != 0 {
			t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
		} else if stats.LeafPageN != 1196 {
			t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
		} else if stats.LeafOverflowN != 0 {
			t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
		} else if stats.KeyN != 100000 {
			t.Fatalf("unexpected KeyN: %d", stats.KeyN)
		} else if stats.Depth != 3 {
			t.Fatalf("unexpected Depth: %d", stats.Depth)
		} else if stats.BranchInuse != 25257 {
			t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
		} else if stats.LeafInuse != 2596916 {
			t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
		}
		// Allocation figures are only checked for 4KB pages.
		if db.Info().PageSize == 4096 {
			if stats.BranchAlloc != 53248 {
				t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
			} else if stats.LeafAlloc != 4898816 {
				t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
			}
		}
		if stats.BucketN != 1 {
			t.Fatalf("unexpected BucketN: %d", stats.BucketN)
		} else if stats.InlineBucketN != 0 {
			t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
		} else if stats.InlineBucketInuse != 0 {
			t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
// Ensure that a bucket can write random keys and values across multiple transactions.
// Each generated item is inserted in its own transaction and the complete
// expected map is re-verified after every insert, so a corruption is caught
// at the exact insertion that introduced it.
func TestBucket_Put_Single(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	index := 0 // quick.Check run counter, used in the mismatch log message
	if err := quick.Check(func(items testdata) bool {
		db := MustOpenDB()
		defer db.MustClose()

		// m mirrors what should currently be stored in the bucket.
		m := make(map[string][]byte)
		if err := db.Update(func(tx *bolt.Tx) error {
			if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
				t.Fatal(err)
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}
		for _, item := range items {
			// One transaction per insert.
			if err := db.Update(func(tx *bolt.Tx) error {
				if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil {
					panic("put error: " + err.Error())
				}
				m[string(item.Key)] = item.Value
				return nil
			}); err != nil {
				t.Fatal(err)
			}

			// Verify all key/values so far.
			if err := db.View(func(tx *bolt.Tx) error {
				i := 0
				for k, v := range m {
					value := tx.Bucket([]byte("widgets")).Get([]byte(k))
					if !bytes.Equal(value, v) {
						// Copy the db aside for post-mortem before failing.
						t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v)
						db.CopyTempFile()
						t.FailNow()
					}
					i++
				}
				return nil
			}); err != nil {
				t.Fatal(err)
			}
		}

		index++
		return true
	}, qconfig()); err != nil {
		t.Error(err)
	}
}
// Ensure that a transaction can insert multiple key/value pairs at once.
// quick.Check drives the property with randomly generated key/value sets:
// everything written in one bulk transaction must read back intact.
func TestBucket_Put_Multiple(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	check := func(data testdata) bool {
		db := MustOpenDB()
		defer db.MustClose()

		name := []byte("widgets")

		// Create the bucket in its own transaction.
		if err := db.Update(func(tx *bolt.Tx) error {
			_, err := tx.CreateBucket(name)
			if err != nil {
				t.Fatal(err)
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}

		// Insert every generated pair in a single write transaction.
		if err := db.Update(func(tx *bolt.Tx) error {
			bucket := tx.Bucket(name)
			for i := range data {
				if err := bucket.Put(data[i].Key, data[i].Value); err != nil {
					t.Fatal(err)
				}
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}

		// Read everything back and compare against what was written.
		if err := db.View(func(tx *bolt.Tx) error {
			bucket := tx.Bucket(name)
			for i := range data {
				got := bucket.Get(data[i].Key)
				if !bytes.Equal(data[i].Value, got) {
					db.CopyTempFile()
					t.Fatalf("exp=%x; got=%x", data[i].Value, got)
				}
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}

		return true
	}

	if err := quick.Check(check, qconfig()); err != nil {
		t.Error(err)
	}
}
// Ensure that a transaction can delete all key/value pairs and return to a single leaf page.
// Bulk-inserts a random data set, deletes each key in its own transaction,
// then asserts the bucket is completely empty.
func TestBucket_Delete_Quick(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	if err := quick.Check(func(items testdata) bool {
		db := MustOpenDB()
		defer db.MustClose()

		// Bulk insert all values.
		if err := db.Update(func(tx *bolt.Tx) error {
			if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
				t.Fatal(err)
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}
		if err := db.Update(func(tx *bolt.Tx) error {
			b := tx.Bucket([]byte("widgets"))
			for _, item := range items {
				if err := b.Put(item.Key, item.Value); err != nil {
					t.Fatal(err)
				}
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}

		// Remove items one at a time and check consistency.
		for _, item := range items {
			if err := db.Update(func(tx *bolt.Tx) error {
				return tx.Bucket([]byte("widgets")).Delete(item.Key)
			}); err != nil {
				t.Fatal(err)
			}
		}

		// Anything before our deletion index should be nil.
		// ForEach should never invoke the callback on an empty bucket.
		if err := db.View(func(tx *bolt.Tx) error {
			if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
				t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3))
				return nil
			}); err != nil {
				t.Fatal(err)
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}

		return true
	}, qconfig()); err != nil {
		t.Error(err)
	}
}
// ExampleBucket_Put demonstrates storing a key/value pair and reading it
// back in a separate read-only transaction.
func ExampleBucket_Put() {
	// Open the database.
	db, err := bolt.Open(tempfile(), 0666, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(db.Path())

	// Write "foo" -> "bar" inside a single update transaction.
	write := func(tx *bolt.Tx) error {
		// Create a bucket.
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			return err
		}
		// Set the value "bar" for the key "foo".
		return b.Put([]byte("foo"), []byte("bar"))
	}
	if err := db.Update(write); err != nil {
		log.Fatal(err)
	}

	// Read value back in a different read-only transaction.
	read := func(tx *bolt.Tx) error {
		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
		fmt.Printf("The value of 'foo' is: %s\n", value)
		return nil
	}
	if err := db.View(read); err != nil {
		log.Fatal(err)
	}

	// Close database to release file lock.
	if err := db.Close(); err != nil {
		log.Fatal(err)
	}

	// Output:
	// The value of 'foo' is: bar
}
// ExampleBucket_Delete demonstrates writing a key, deleting it in a later
// transaction, and observing that a subsequent Get returns nil.
func ExampleBucket_Delete() {
	// Open the database.
	db, err := bolt.Open(tempfile(), 0666, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(db.Path())

	// Start a write transaction.
	if err := db.Update(func(tx *bolt.Tx) error {
		// Create a bucket.
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			return err
		}

		// Set the value "bar" for the key "foo".
		if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
			return err
		}

		// Retrieve the key back from the database and verify it.
		value := b.Get([]byte("foo"))
		fmt.Printf("The value of 'foo' was: %s\n", value)

		return nil
	}); err != nil {
		log.Fatal(err)
	}

	// Delete the key in a different write transaction.
	if err := db.Update(func(tx *bolt.Tx) error {
		return tx.Bucket([]byte("widgets")).Delete([]byte("foo"))
	}); err != nil {
		log.Fatal(err)
	}

	// Retrieve the key again; Get returns nil for a missing key.
	if err := db.View(func(tx *bolt.Tx) error {
		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
		if value == nil {
			fmt.Printf("The value of 'foo' is now: nil\n")
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}

	// Close database to release file lock.
	if err := db.Close(); err != nil {
		log.Fatal(err)
	}

	// Output:
	// The value of 'foo' was: bar
	// The value of 'foo' is now: nil
}
// ExampleBucket_ForEach demonstrates iterating over a bucket's key/value
// pairs; ForEach visits keys in sorted order regardless of insertion order.
func ExampleBucket_ForEach() {
	// Open the database.
	db, err := bolt.Open(tempfile(), 0666, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(db.Path())

	// Insert data into a bucket, then walk it within the same transaction.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucket([]byte("animals"))
		if err != nil {
			return err
		}
		pairs := [][2]string{
			{"dog", "fun"},
			{"cat", "lame"},
			{"liger", "awesome"},
		}
		for _, p := range pairs {
			if err := b.Put([]byte(p[0]), []byte(p[1])); err != nil {
				return err
			}
		}

		// Iterate over items in sorted key order.
		return b.ForEach(func(k, v []byte) error {
			fmt.Printf("A %s is %s.\n", k, v)
			return nil
		})
	}); err != nil {
		log.Fatal(err)
	}

	// Close database to release file lock.
	if err := db.Close(); err != nil {
		log.Fatal(err)
	}

	// Output:
	// A cat is lame.
	// A dog is fun.
	// A liger is awesome.
}
|
[
"\"DRONE\""
] |
[] |
[
"DRONE"
] |
[]
|
["DRONE"]
|
go
| 1 | 0 | |
typha/pkg/daemon/daemon.go
|
// Copyright (c) 2017-2018,2020-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daemon
import (
"context"
"fmt"
"net/http"
"os"
"os/signal"
"runtime"
"runtime/debug"
"runtime/pprof"
"strings"
"syscall"
"time"
"github.com/docopt/docopt-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/calico/libcalico-go/lib/seedrng"
"github.com/projectcalico/calico/libcalico-go/lib/apiconfig"
bapi "github.com/projectcalico/calico/libcalico-go/lib/backend/api"
"github.com/projectcalico/calico/libcalico-go/lib/backend/syncersv1/bgpsyncer"
"github.com/projectcalico/calico/libcalico-go/lib/backend/syncersv1/felixsyncer"
"github.com/projectcalico/calico/libcalico-go/lib/backend/syncersv1/nodestatussyncer"
"github.com/projectcalico/calico/libcalico-go/lib/backend/syncersv1/tunnelipsyncer"
"github.com/projectcalico/calico/libcalico-go/lib/clientv3"
"github.com/projectcalico/calico/libcalico-go/lib/health"
"github.com/projectcalico/calico/libcalico-go/lib/upgrade/migrator"
"github.com/projectcalico/calico/libcalico-go/lib/upgrade/migrator/clients"
"github.com/projectcalico/calico/typha/pkg/buildinfo"
"github.com/projectcalico/calico/typha/pkg/calc"
"github.com/projectcalico/calico/typha/pkg/config"
"github.com/projectcalico/calico/typha/pkg/jitter"
"github.com/projectcalico/calico/typha/pkg/k8s"
"github.com/projectcalico/calico/typha/pkg/logutils"
"github.com/projectcalico/calico/typha/pkg/snapcache"
"github.com/projectcalico/calico/typha/pkg/syncproto"
"github.com/projectcalico/calico/typha/pkg/syncserver"
)
// usage is the docopt help text; docopt derives the accepted command-line
// options from this string, so its wording/layout is load-bearing.
// NOTE(review): whitespace inside this literal may have been mangled by the
// export this chunk came from — verify against upstream before relying on it.
const usage = `Typha, Calico's fan-out proxy.
Usage:
  calico-typha [options]
Options:
  -c --config-file=<filename> Config file to load [default: /etc/calico/typha.cfg].
  --version Print the version and exit.
`
// TyphaDaemon handles the lifecycle of the Typha process. The main() function of the Typha executable
// should simply call InitializeAndServeForever() to start the Typha server. The lifecycle is broken out into
// several individual methods for ease of testing.
type TyphaDaemon struct {
	BuildInfoLogCxt *log.Entry     // log context pre-populated with version/build fields
	ConfigFilePath  string         // path to the config file, from --config-file
	DatastoreClient DatastoreClient // connection to the Calico datastore
	ConfigParams    *config.Config  // merged env + file configuration

	// The components of the server, created in CreateServer() below.
	SyncerPipelines    []*syncerPipeline
	CachesBySyncerType map[syncproto.SyncerType]syncserver.BreadcrumbProvider
	Server             *syncserver.Server

	// The functions below default to real library functions but they can be overridden for testing.
	NewClientV3           func(config apiconfig.CalicoAPIConfig) (DatastoreClient, error)
	ConfigureEarlyLogging func()
	ConfigureLogging      func(configParams *config.Config)

	// Health monitoring.
	healthAggregator *health.HealthAggregator

	// Node counting.
	nodeCounter *calc.NodeCounter
}
// syncerPipeline groups one syncer's processing chain:
// datastore syncer -> decoupler -> validation filter -> decoupler -> snapshot cache.
type syncerPipeline struct {
	Type              syncproto.SyncerType
	Syncer            bapi.Syncer                   // feeds updates from the datastore
	SyncerToValidator *calc.SyncerCallbacksDecoupler // async hand-off, syncer -> validator
	Validator         *calc.ValidationFilter
	ValidatorToCache  *calc.SyncerCallbacksDecoupler // async hand-off, validator -> cache
	Cache             *snapcache.Cache               // point-in-time snapshots served to clients
}
// Start launches every stage of the pipeline: the datastore syncer, the two
// decoupler goroutines that shuttle updates between stages, and the snapshot
// cache. The context bounds the cache's background processing.
func (p syncerPipeline) Start(cxt context.Context) {
	logCxt := log.WithField("syncerType", p.Type)
	logCxt.Info("Starting syncer")
	p.Syncer.Start()
	logCxt.Info("Starting syncer-to-validator decoupler")
	go p.SyncerToValidator.SendTo(p.Validator)
	logCxt.Info("Starting validator-to-cache decoupler")
	go p.ValidatorToCache.SendTo(p.Cache)
	logCxt.Info("Starting cache")
	p.Cache.Start(cxt)
	logCxt.Info("Started syncer pipeline")
}
// New constructs a TyphaDaemon wired up with the real (production)
// implementations of its pluggable functions; tests may override them
// before calling the lifecycle methods.
func New() *TyphaDaemon {
	return &TyphaDaemon{
		NewClientV3: func(config apiconfig.CalicoAPIConfig) (DatastoreClient, error) {
			client, err := clientv3.New(config)
			if err != nil {
				return nil, err
			}
			// Wrap the real client so its syncers can be mocked in tests.
			return ClientV3Shim{client.(RealClientV3), config}, nil
		},
		ConfigureEarlyLogging: logutils.ConfigureEarlyLogging,
		ConfigureLogging:      logutils.ConfigureLogging,
		CachesBySyncerType:    map[syncproto.SyncerType]syncserver.BreadcrumbProvider{},
	}
}
// InitializeAndServeForever runs the complete Typha lifecycle: early setup,
// CLI parsing, configuration/datastore loading, server construction, start,
// and then blocks in WaitAndShutDown. It only returns early (with an error)
// if the context is canceled while configuration is still loading.
func (t *TyphaDaemon) InitializeAndServeForever(cxt context.Context) error {
	t.DoEarlyRuntimeSetup()
	t.ParseCommandLineArgs(nil)
	err := t.LoadConfiguration(cxt)
	if err != nil { // Should only happen if context is canceled.
		return err
	}
	t.CreateServer()
	t.Start(cxt)
	t.WaitAndShutDown(cxt)
	return nil
}
// DoEarlyRuntimeSetup does early runtime/logging configuration that needs to happen before we do any work.
func (t *TyphaDaemon) DoEarlyRuntimeSetup() {
	// Go's RNG is not seeded by default.  Do that now.
	seedrng.EnsureSeeded()

	// Special-case handling for environment variable-configured logging:
	// Initialise early so we can trace out config parsing.
	t.ConfigureEarlyLogging()
}
// ParseCommandLineArgs parses the command line args and either exits with a usage warning or stores the parsed
// arguments on fields of the struct.
func (t *TyphaDaemon) ParseCommandLineArgs(argv []string) {
	// Parse command-line args.
	version := "Version:            " + buildinfo.GitVersion + "\n" +
		"Full git commit ID: " + buildinfo.GitRevision + "\n" +
		"Build date:         " + buildinfo.BuildDate + "\n"
	p := &docopt.Parser{OptionsFirst: false, SkipHelpFlags: false}
	arguments, err := p.ParseArgs(usage, argv, version)
	if err != nil {
		// docopt failed to match argv against the usage string; print help and die.
		println(usage)
		log.Fatalf("Failed to parse usage, exiting: %v", err)
	}
	t.ConfigFilePath = arguments["--config-file"].(string)
	// Record the build info on a reusable log context so later messages carry it.
	t.BuildInfoLogCxt = log.WithFields(log.Fields{
		"version":    buildinfo.GitVersion,
		"buildDate":  buildinfo.BuildDate,
		"gitCommit":  buildinfo.GitRevision,
		"GOMAXPROCS": runtime.GOMAXPROCS(0),
	})
	t.BuildInfoLogCxt.Info("Typha starting up")
	log.Infof("Command line arguments: %v", arguments)
}
// LoadConfiguration uses the command-line configuration and environment variables to load our configuration.
// It initializes the datastore connection.
//
// Every stage retries forever on failure (1s back-off) and only gives up if
// ctx is canceled, so callers can assume the datastore is usable on a nil
// return. On the Kubernetes datastore it also runs the v1 -> v3 data
// migration if needed before serving.
func (t *TyphaDaemon) LoadConfiguration(ctx context.Context) error {
	// Log out the kubernetes server details that we use in BPF mode.
	log.WithFields(log.Fields{
		"KUBERNETES_SERVICE_HOST": os.Getenv("KUBERNETES_SERVICE_HOST"),
		"KUBERNETES_SERVICE_PORT": os.Getenv("KUBERNETES_SERVICE_PORT"),
	}).Info("Kubernetes server override env vars.")

	// Load the configuration from all the different sources including the
	// datastore and merge. Keep retrying on failure.  We'll sit in this
	// loop until the datastore is ready.
	log.Infof("Loading configuration...")
	var configParams *config.Config
	var datastoreConfig apiconfig.CalicoAPIConfig
configRetry:
	for {
		if err := ctx.Err(); err != nil {
			log.WithError(err).Warn("Context canceled.")
			return err
		}
		// Load locally-defined config, including the datastore connection
		// parameters. First the environment variables.
		configParams = config.New()
		envConfig := config.LoadConfigFromEnvironment(os.Environ())
		// Then, the config file.
		fileConfig, err := config.LoadConfigFile(t.ConfigFilePath)
		if err != nil {
			log.WithError(err).WithField("configFile", t.ConfigFilePath).Error(
				"Failed to load configuration file")
			time.Sleep(1 * time.Second)
			continue configRetry
		}
		// Parse and merge the local config. Environment variables take
		// precedence over the file (applied first, marked as such).
		_, err = configParams.UpdateFrom(envConfig, config.EnvironmentVariable)
		if err != nil {
			log.WithError(err).WithField("configFile", t.ConfigFilePath).Error(
				"Failed to parse configuration environment variable")
			time.Sleep(1 * time.Second)
			continue configRetry
		}
		_, err = configParams.UpdateFrom(fileConfig, config.ConfigFile)
		if err != nil {
			log.WithError(err).WithField("configFile", t.ConfigFilePath).Error(
				"Failed to parse configuration file")
			time.Sleep(1 * time.Second)
			continue configRetry
		}

		// Validate the config params
		err = configParams.Validate()
		if err != nil {
			log.WithError(err).Error(
				"Failed to parse/validate configuration.")
			time.Sleep(1 * time.Second)
			continue configRetry
		}

		// We should now have enough config to connect to the datastore.
		datastoreConfig = configParams.DatastoreConfig()
		t.DatastoreClient, err = t.NewClientV3(datastoreConfig)
		if err != nil {
			log.WithError(err).Error("Failed to connect to datastore")
			time.Sleep(1 * time.Second)
			continue configRetry
		}

		break configRetry
	}

	// If we get here, we've loaded the configuration successfully.
	// Update log levels before we do anything else.
	t.ConfigureLogging(configParams)
	// Since we may have enabled more logging, log with the build context
	// again.
	t.BuildInfoLogCxt.WithField("config", configParams).Info(
		"Successfully loaded configuration.")

	if datastoreConfig.Spec.DatastoreType == apiconfig.Kubernetes {
		// Special case: for KDD v1 datamodel to v3 datamodel upgrade, we need to ensure that the datastore migration
		// has completed before we start serving requests.  Otherwise, we might serve partially-migrated data to
		// Felix.

		// Get a v1 client, so we can check if there's any data there to migrate.
		log.Info("Using Kubernetes API datastore, checking if we need to migrate v1 -> v3")
		var civ1 clients.V1ClientInterface
		var err error
		for {
			if err := ctx.Err(); err != nil {
				log.WithError(err).Warn("Context canceled.")
				return err
			}
			civ1, err = clients.LoadKDDClientV1FromAPIConfigV3(&datastoreConfig)
			if err != nil {
				log.WithError(err).Error("Failed to connect to Kubernetes datastore (Calico v1 API)")
				time.Sleep(1 * time.Second)
				continue
			}
			break
		}

		// Use the migration helper to determine if need to perform a migration, and if so
		// perform the migration.
		mh := migrator.New(t.DatastoreClient, civ1, nil)
		for {
			if err := ctx.Err(); err != nil {
				log.WithError(err).Warn("Context canceled.")
				return err
			}
			if migrate, err := mh.ShouldMigrate(); err != nil {
				log.WithError(err).Error("Failed to determine migration requirements")
				time.Sleep(1 * time.Second)
				continue
			} else if migrate {
				log.Info("Need to migrate Kubernetes v1 configuration to v3")
				if _, err := mh.Migrate(); err != nil {
					log.WithError(err).Error("Failed to migrate Kubernetes v1 configuration to v3")
					time.Sleep(1 * time.Second)
					continue
				}
				log.Info("Successfully migrated Kubernetes v1 configuration to v3")
				break
			}
			log.Info("Migration not required.")
			break
		}
	}

	// Ensure that, as soon as we are able to connect to the datastore at all, it is initialized.
	// Note: we block further start-up while we do this, which means, if we're stuck here for long enough,
	// the liveness healthcheck will time out and start to fail.  That's fairly reasonable, being stuck here
	// likely means we have some persistent datastore connection issue and restarting Typha may solve that.
	for {
		if err := ctx.Err(); err != nil {
			log.WithError(err).Warn("Context canceled.")
			return err
		}
		var err error
		func() { // Closure to avoid leaking the defer.
			log.Info("Initializing the datastore (if needed).")
			// Each attempt gets its own 30s deadline derived from ctx.
			ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
			defer cancel()
			err = t.DatastoreClient.EnsureInitialized(ctx, "", "typha")
		}()
		if err != nil {
			log.WithError(err).Error("Failed to initialize datastore")
			time.Sleep(1 * time.Second)
			continue
		}
		log.Info("Datastore initialized.")

		break
	}
	t.ConfigParams = configParams
	return nil
}
// addSyncerPipeline builds a complete syncer pipeline (syncer -> validator ->
// snapshot cache) for the given syncer type and registers it on the daemon.
// newSyncer is the factory that creates the datastore syncer for this type.
func (t *TyphaDaemon) addSyncerPipeline(
	syncerType syncproto.SyncerType,
	newSyncer func(callbacks bapi.SyncerCallbacks) bapi.Syncer,
) {
	// Get a Syncer from the datastore, which will feed the validator layer with updates.
	syncerToValidator := calc.NewSyncerCallbacksDecoupler()
	syncer := newSyncer(syncerToValidator)
	log.Debugf("Created Syncer: %#v", syncer)

	toCache := calc.NewSyncerCallbacksDecoupler()
	var validator *calc.ValidationFilter
	if syncerType == syncproto.SyncerTypeFelix {
		// If this is a felix syncer, insert a counter after the validation filter which is used to track
		// the number of nodes in the cluster. We only want to count nodes once, which is why we only do this
		// for the felix syncer and not the BGP syncer as well.
		t.nodeCounter = calc.NewNodeCounter(toCache)
		validator = calc.NewValidationFilter(t.nodeCounter)
	} else {
		// Otherwise, just go from validator to cache directly.
		validator = calc.NewValidationFilter(toCache)
	}

	// Create our snapshot cache, which stores point-in-time copies of the datastore contents.
	cache := snapcache.New(snapcache.Config{
		MaxBatchSize:     t.ConfigParams.SnapshotCacheMaxBatchSize,
		HealthAggregator: t.healthAggregator,
		HealthName:       string(syncerType),
	})

	pipeline := &syncerPipeline{
		Type:              syncerType,
		Syncer:            syncer,
		SyncerToValidator: syncerToValidator,
		Validator:         validator,
		ValidatorToCache:  toCache,
		Cache:             cache,
	}
	t.SyncerPipelines = append(t.SyncerPipelines, pipeline)
	t.CachesBySyncerType[syncerType] = cache
}
// CreateServer creates and configures (but does not start) the server components.
// It builds one syncer pipeline per supported syncer type and the sync server
// that fans their snapshots out to Felix clients.
func (t *TyphaDaemon) CreateServer() {
	// Health monitoring, for liveness and readiness endpoints.
	t.healthAggregator = health.NewHealthAggregator()

	// Now create the Syncer and caching layer (one pipeline for each syncer we support).
	t.addSyncerPipeline(syncproto.SyncerTypeFelix, t.DatastoreClient.FelixSyncerByIface)
	t.addSyncerPipeline(syncproto.SyncerTypeBGP, t.DatastoreClient.BGPSyncerByIface)
	t.addSyncerPipeline(syncproto.SyncerTypeTunnelIPAllocation, t.DatastoreClient.TunnelIPAllocationSyncerByIface)
	t.addSyncerPipeline(syncproto.SyncerTypeNodeStatus, t.DatastoreClient.NodeStatusSyncerByIface)

	// Create the server, which listens for connections from Felix.
	t.Server = syncserver.New(
		t.CachesBySyncerType,
		syncserver.Config{
			MaxMessageSize:                 t.ConfigParams.ServerMaxMessageSize,
			MinBatchingAgeThreshold:        t.ConfigParams.ServerMinBatchingAgeThresholdSecs,
			MaxFallBehind:                  t.ConfigParams.ServerMaxFallBehindSecs,
			NewClientFallBehindGracePeriod: t.ConfigParams.ServerNewClientFallBehindGracePeriod,
			PingInterval:                   t.ConfigParams.ServerPingIntervalSecs,
			PongTimeout:                    t.ConfigParams.ServerPongTimeoutSecs,
			DropInterval:                   t.ConfigParams.ConnectionDropIntervalSecs,
			MaxConns:                       t.ConfigParams.MaxConnectionsUpperLimit,
			Port:                           t.ConfigParams.ServerPort,
			HealthAggregator:               t.healthAggregator,
			KeyFile:                        t.ConfigParams.ServerKeyFile,
			CertFile:                       t.ConfigParams.ServerCertFile,
			CAFile:                         t.ConfigParams.CAFile,
			ClientCN:                       t.ConfigParams.ClientCN,
			ClientURISAN:                   t.ConfigParams.ClientURISAN,
		},
	)
}
// Start starts all the server components in background goroutines.
// Optional subsystems (k8s connection rebalancing, Prometheus metrics, the
// health endpoint) are only started if enabled in configuration.
func (t *TyphaDaemon) Start(cxt context.Context) {
	// Now we've connected everything up, start the background processing threads.
	log.Info("Starting the datastore Syncer/cache layer")
	for _, s := range t.SyncerPipelines {
		s.Start(cxt)
	}
	t.Server.Start(cxt)
	if t.ConfigParams.ConnectionRebalancingMode == "kubernetes" {
		log.Info("Kubernetes connection rebalancing is enabled, starting k8s poll goroutine.")
		k8sAPI := k8s.NewK8sAPI(t.nodeCounter)
		// Jittered ticker (±10% of the interval) so multiple Typhas don't poll in lockstep.
		ticker := jitter.NewTicker(
			t.ConfigParams.K8sServicePollIntervalSecs,
			t.ConfigParams.K8sServicePollIntervalSecs/10)
		go k8s.PollK8sForConnectionLimit(cxt, t.ConfigParams, ticker.C, k8sAPI, t.Server, len(t.CachesBySyncerType))
	}
	log.Info("Started the datastore Syncer/cache layer/server.")

	if t.ConfigParams.PrometheusMetricsEnabled {
		log.Info("Prometheus metrics enabled.  Starting server.")
		go servePrometheusMetrics(t.ConfigParams)
	}

	if t.ConfigParams.HealthEnabled {
		log.WithFields(log.Fields{
			"host": t.ConfigParams.HealthHost,
			"port": t.ConfigParams.HealthPort,
		}).Info("Health enabled.  Starting server.")
		t.healthAggregator.ServeHTTP(t.ConfigParams.HealthEnabled, t.ConfigParams.HealthHost, t.ConfigParams.HealthPort)
	}
}
// WaitAndShutDown waits for OS signals or context.Done() and exits as appropriate.
// SIGTERM terminates the process immediately via log.Fatal; SIGUSR1 dumps a
// heap profile and keeps running; context cancellation returns normally.
func (t *TyphaDaemon) WaitAndShutDown(cxt context.Context) {
	// Hook and process the signals we care about
	usr1SignalChan := make(chan os.Signal, 1)
	signal.Notify(usr1SignalChan, syscall.SIGUSR1)
	termChan := make(chan os.Signal, 1)
	signal.Notify(termChan, syscall.SIGTERM)
	for {
		select {
		case <-termChan:
			// log.Fatal calls os.Exit; no graceful teardown happens here.
			log.Fatal("Received SIGTERM, shutting down")
		case <-usr1SignalChan:
			log.Info("Received SIGUSR1, emitting heap profile")
			dumpHeapMemoryProfile(t.ConfigParams)
		case <-cxt.Done():
			log.Info("Context asked us to stop.")
			return
		}
	}
}
// ClientV3Shim wraps a real client, allowing its syncer to be mocked.
type ClientV3Shim struct {
	RealClientV3
	// config is retained so the shim can pass the API spec to the syncers.
	config apiconfig.CalicoAPIConfig
}
// FelixSyncerByIface returns a Felix syncer wired to this client's backend.
func (s ClientV3Shim) FelixSyncerByIface(callbacks bapi.SyncerCallbacks) bapi.Syncer {
	backendClient := s.Backend()
	return felixsyncer.New(backendClient, s.config.Spec, callbacks, true)
}
// BGPSyncerByIface returns a BGP syncer wired to this client's backend.
func (s ClientV3Shim) BGPSyncerByIface(callbacks bapi.SyncerCallbacks) bapi.Syncer {
	backendClient := s.Backend()
	return bgpsyncer.New(backendClient, callbacks, "", s.config.Spec)
}
// TunnelIPAllocationSyncerByIface returns a tunnel-IP allocation syncer wired
// to this client's backend.
func (s ClientV3Shim) TunnelIPAllocationSyncerByIface(callbacks bapi.SyncerCallbacks) bapi.Syncer {
	backendClient := s.Backend()
	return tunnelipsyncer.New(backendClient, callbacks, "")
}
// NodeStatusSyncerByIface returns a node-status syncer wired to this client's
// backend.
func (s ClientV3Shim) NodeStatusSyncerByIface(callbacks bapi.SyncerCallbacks) bapi.Syncer {
	backendClient := s.Backend()
	return nodestatussyncer.New(backendClient, callbacks)
}
// DatastoreClient is our interface to the datastore, used for mocking in the UTs.
// It extends the v3 client interface with the per-type syncer constructors that
// ClientV3Shim provides.
type DatastoreClient interface {
	clientv3.Interface
	FelixSyncerByIface(callbacks bapi.SyncerCallbacks) bapi.Syncer
	BGPSyncerByIface(callbacks bapi.SyncerCallbacks) bapi.Syncer
	TunnelIPAllocationSyncerByIface(callbacks bapi.SyncerCallbacks) bapi.Syncer
	NodeStatusSyncerByIface(callbacks bapi.SyncerCallbacks) bapi.Syncer
}
// RealClientV3 is the real API of the V3 client, including the semi-private API that we use to get the backend.
type RealClientV3 interface {
	clientv3.Interface
	Backend() bapi.Client
}
// TODO Typha: Share with Felix.
// dumpHeapMemoryProfile writes a pprof heap profile to the file named by
// DebugMemoryProfilePath; it is a no-op when that setting is empty. A literal
// "<timestamp>" in the configured name is replaced with the current time.
func dumpHeapMemoryProfile(configParams *config.Config) {
	memProfFileName := configParams.DebugMemoryProfilePath
	if memProfFileName == "" {
		return
	}
	logCxt := log.WithField("file", memProfFileName)
	logCxt.Info("Asked to create a memory profile.")
	// If the configured file name includes "<timestamp>", replace that with the
	// current time.
	if strings.Contains(memProfFileName, "<timestamp>") {
		timestamp := time.Now().Format("2006-01-02-15:04:05")
		memProfFileName = strings.Replace(memProfFileName, "<timestamp>", timestamp, 1)
		logCxt = log.WithField("file", memProfFileName)
	}
	memProfFile, err := os.Create(memProfFileName)
	if err != nil {
		// Fatal logs and then calls os.Exit(1), so nothing after this line
		// runs.  (The previous version assigned memProfFile = nil here, which
		// was unreachable dead code.)
		logCxt.WithError(err).Fatal("Could not create memory profile file")
		return
	}
	defer func() {
		if err := memProfFile.Close(); err != nil {
			log.WithError(err).Error("Error while closing memory profile file.")
		}
	}()
	logCxt.Info("Writing memory profile...")
	// The initial resync uses a lot of scratch space so now is
	// a good time to force a GC and return any RAM that we can.
	debug.FreeOSMemory()
	if err := pprof.WriteHeapProfile(memProfFile); err != nil {
		logCxt.WithError(err).Error("Could not write memory profile")
	}
	logCxt.Info("Finished writing memory profile")
}
// TODO Typha: Share with Felix.
// servePrometheusMetrics runs the Prometheus metrics HTTP endpoint in a retry
// loop; if the listener fails it logs the error, sleeps one second and starts
// again.  It never returns, so it should be run on its own goroutine.
func servePrometheusMetrics(configParams *config.Config) {
	for {
		log.WithFields(log.Fields{
			"host": configParams.PrometheusMetricsHost,
			"port": configParams.PrometheusMetricsPort,
		}).Info("Starting prometheus metrics endpoint")
		if configParams.PrometheusGoMetricsEnabled && configParams.PrometheusProcessMetricsEnabled {
			log.Info("Including Golang & Process metrics")
		} else {
			if !configParams.PrometheusGoMetricsEnabled {
				log.Info("Discarding Golang metrics")
				prometheus.Unregister(collectors.NewGoCollector())
			}
			if !configParams.PrometheusProcessMetricsEnabled {
				log.Info("Discarding process metrics")
				prometheus.Unregister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))
			}
		}
		http.Handle("/metrics", promhttp.Handler())
		// The host is always bracketed ("[host]:port"); Go's address parser
		// strips brackets for non-IPv6 hosts too, and this form also accepts
		// raw IPv6 addresses in the config.
		err := http.ListenAndServe(fmt.Sprintf("[%v]:%v",
			configParams.PrometheusMetricsHost, configParams.PrometheusMetricsPort), nil)
		log.WithError(err).Error(
			"Prometheus metrics endpoint failed, trying to restart it...")
		time.Sleep(1 * time.Second)
	}
}
|
[
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\""
] |
[] |
[
"KUBERNETES_SERVICE_HOST",
"KUBERNETES_SERVICE_PORT"
] |
[]
|
["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"]
|
go
| 2 | 0 | |
nvmain/Tests/Regressions.py
|
#!/usr/bin/python
from __future__ import print_function
from optparse import OptionParser
import subprocess
import json
import sys
import os
import shutil
import re
parser = OptionParser()
parser.add_option("-c", "--configs", type="string", help="Path to file with list of configurations to test.", default="TestConfigs")
parser.add_option("-n", "--no-gem5", action="store_true", help="Skip gem5 tests.")
parser.add_option("-g", "--gem5-path", type="string", help="Path to gem5 directory.")
parser.add_option("-a", "--arch", type="string", help="gem5 architecture to test.", default="X86")
parser.add_option("-b", "--build", type="string", help="NVMain standalone/gem5 build to test (e.g., *.fast, *.prof, *.debug)", default="fast")
parser.add_option("-t", "--tempfile", type="string", help="Temporary file to write test output.", default=".temp")
parser.add_option("-f", "--max-fuzz", type="float", help="Maximum percentage stat values can be wrong.", default="1.0") # No more than 1% difference
(options, args) = parser.parse_args()
#
# Make sure our nvmain executable is found.
#
nvmainexec = ".." + os.sep + "nvmain." + options.build
if not os.path.isfile(nvmainexec) or not os.access(nvmainexec, os.X_OK):
print("Could not find Nvmain executable: '%s'" % nvmainexec)
print("Exiting...")
sys.exit(1)
#
# Find out if we are testing with gem5 or not.
#
testgem5 = True
gem5path = os.environ['M5_PATH']
if options.gem5_path:
gem5path = options.gem5_path
gem5exec = gem5path + os.sep + "build" + os.sep + options.arch + os.sep + "gem5." + options.build
if options.no_gem5:
testgem5 = False
if not os.path.isfile(gem5exec) or not os.access(gem5exec, os.X_OK):
print("Could not run gem5 executable: '%s'" % gem5exec)
print("Skipping gem5 tests.")
testgem5 = False
#
# Read in the list of config files to test
#
json_data = open('Tests.json')
testdata = json.load(json_data)
#
# Run all tests with each trace
#
for trace in testdata["traces"]:
for idx, test in enumerate(testdata["tests"]):
faillog = testdata["tests"][idx]["name"] + ".out"
# Reset log each time for correct stat comparison
testlog = open(options.tempfile, 'w')
command = [nvmainexec, testdata["tests"][idx]["config"], trace, testdata["tests"][idx]["cycles"]]
command.extend(testdata["tests"][idx]["overrides"].split(" "))
sys.stdout.write("Testing " + testdata["tests"][idx]["name"] + " with " + trace + " ... ")
sys.stdout.flush()
#for arg in command:
# print arg,
#print ""
try:
subprocess.check_call(command, stdout=testlog, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
expectedrc = testdata["tests"][idx]["returncode"]
if e.returncode != expectedrc:
print("[Failed RC=%u]" % e.returncode)
shutil.copyfile(options.tempfile, faillog)
continue
testlog.close()
checkcount = 0
checkcounter = 0
passedchecks = []
for check in testdata["tests"][idx]["checks"]:
checkcount = checkcount + 1
with open(options.tempfile, 'r') as flog:
for line in flog:
for check in testdata["tests"][idx]["checks"]:
if check in line:
checkcounter = checkcounter + 1
passedchecks.append(check)
elif check[0] == 'i': # Skip for general stat checks
# See if the stat is there, but the value is slightly off
checkstat = check.split(' ')[0]
fval = re.compile("[0-9.]")
checkvalue = float(''.join(c for c in check.split(' ')[1] if fval.match(c)))
if checkstat in line:
refvalue = float(''.join(c for c in line.split(' ')[1] if fval.match(c)))
try:
fuzz = max( (1.0 - (checkvalue / refvalue)) * 100.0, (1.0 - (refvalue / checkvalue)) * 100.0)
if fuzz < options.max_fuzz:
checkcounter = checkcounter + 1
passedchecks.append(check)
else:
print("Stat '%s' has value '%s' while reference has '%s'. Fuzz = %f" % (checkstat, checkvalue, refvalue, fuzz))
except ZeroDivisionError:
print("Warning: Stat '%s' has reference value (%s) or check value (%s) of zero." % (checkstat, refvalue, checkvalue))
if checkcounter == checkcount:
print("[Passed %d/%d]" % (checkcounter, checkcount))
shutil.copyfile(options.tempfile, faillog)
else:
print("[Failed %d/%d]" % (checkcounter, checkcount))
shutil.copyfile(options.tempfile, faillog)
for check in testdata["tests"][idx]["checks"]:
if not check in passedchecks:
print("Check %s failed." % check)
|
[] |
[] |
[
"M5_PATH"
] |
[]
|
["M5_PATH"]
|
python
| 1 | 0 | |
web/web.go
|
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package web
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
stdlog "log"
"math"
"net"
"net/http"
"net/http/pprof"
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
"strings"
"sync"
"sync/atomic"
template_text "text/template"
"time"
"github.com/alecthomas/units"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
conntrack "github.com/mwitkow/go-conntrack"
"github.com/opentracing-contrib/go-stdlib/nethttp"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
io_prometheus_client "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"github.com/prometheus/common/route"
"github.com/prometheus/common/server"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/soheilhy/cmux"
"golang.org/x/net/netutil"
"google.golang.org/grpc"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/template"
"github.com/prometheus/prometheus/util/httputil"
api_v1 "github.com/prometheus/prometheus/web/api/v1"
api_v2 "github.com/prometheus/prometheus/web/api/v2"
"github.com/prometheus/prometheus/web/ui"
)
// Paths that are handled by the React / Reach router that should all be served the main React app's index.html.
// Any /new/* request whose sub-path is NOT in this list is treated as a static
// asset instead (see the /new/*filepath handler in New).
var reactRouterPaths = []string{
	"/",
	"/alerts",
	"/config",
	"/flags",
	"/graph",
	"/rules",
	"/service-discovery",
	"/status",
	"/targets",
	"/tsdb-status",
	"/version",
}
// withStackTrace logs the stack trace in case the request panics. The function
// will re-raise the error which will then be handled by the net/http package.
// It is needed because the go-kit log package doesn't manage properly the
// panics from net/http (see https://github.com/go-kit/kit/issues/233).
func withStackTracer(h http.Handler, l log.Logger) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if err := recover(); err != nil {
				// 64 KiB is enough for a useful (truncated) stack trace.
				const size = 64 << 10
				buf := make([]byte, size)
				buf = buf[:runtime.Stack(buf, false)]
				level.Error(l).Log("msg", "panic while serving request", "client", r.RemoteAddr, "url", r.URL, "err", err, "stack", buf)
				// Re-panic so net/http's own recovery closes the connection.
				panic(err)
			}
		}()
		h.ServeHTTP(w, r)
	})
}
// metrics bundles the per-handler HTTP instrumentation collectors.
type metrics struct {
	requestCounter  *prometheus.CounterVec   // requests by handler and status code
	requestDuration *prometheus.HistogramVec // request latency by handler
	responseSize    *prometheus.HistogramVec // response size by handler
}
// newMetrics builds the HTTP instrumentation collectors (request count,
// latency and response size) and registers them with r when r is non-nil.
func newMetrics(r prometheus.Registerer) *metrics {
	requestCounter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "prometheus_http_requests_total",
			Help: "Counter of HTTP requests.",
		},
		[]string{"handler", "code"},
	)
	requestDuration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "prometheus_http_request_duration_seconds",
			Help:    "Histogram of latencies for HTTP requests.",
			Buckets: []float64{.1, .2, .4, 1, 3, 8, 20, 60, 120},
		},
		[]string{"handler"},
	)
	responseSize := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "prometheus_http_response_size_bytes",
			Help:    "Histogram of response size for HTTP requests.",
			Buckets: prometheus.ExponentialBuckets(100, 10, 8),
		},
		[]string{"handler"},
	)
	m := &metrics{
		requestCounter:  requestCounter,
		requestDuration: requestDuration,
		responseSize:    responseSize,
	}
	if r != nil {
		r.MustRegister(m.requestCounter, m.requestDuration, m.responseSize)
	}
	return m
}
// instrumentHandlerWithPrefix returns an instrumentation wrapper that records
// metrics under prefix+handlerName.
func (m *metrics) instrumentHandlerWithPrefix(prefix string) func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
	wrap := func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
		fullName := prefix + handlerName
		return m.instrumentHandler(fullName, handler)
	}
	return wrap
}
// instrumentHandler wraps handler so that request count, duration and
// response size are recorded under the given handler name.
func (m *metrics) instrumentHandler(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
	labels := prometheus.Labels{"handler": handlerName}
	// Build the chain inside-out: size -> duration -> counter.
	sized := promhttp.InstrumentHandlerResponseSize(m.responseSize.MustCurryWith(labels), handler)
	timed := promhttp.InstrumentHandlerDuration(m.requestDuration.MustCurryWith(labels), sized)
	return promhttp.InstrumentHandlerCounter(m.requestCounter.MustCurryWith(labels), timed)
}
// PrometheusVersion contains build information about Prometheus.
// It is an alias of the v1 API type so both packages share one definition.
type PrometheusVersion = api_v1.PrometheusVersion
// Handler serves various HTTP endpoints of the Prometheus server
type Handler struct {
	logger log.Logger

	gatherer prometheus.Gatherer
	metrics  *metrics

	scrapeManager *scrape.Manager
	ruleManager   *rules.Manager
	queryEngine   *promql.Engine
	lookbackDelta time.Duration
	context       context.Context
	tsdb          func() *tsdb.DB
	storage       storage.Storage
	notifier      *notifier.Manager

	apiV1 *api_v1.API

	router      *route.Router
	quitCh      chan struct{}   // closed by the /-/quit handler
	reloadCh    chan chan error // carries reply channels for /-/reload requests
	options     *Options
	config      *config.Config // guarded by mtx
	versionInfo *PrometheusVersion
	birth       time.Time // process start time, shown on the status page
	cwd         string
	flagsMap    map[string]string

	mtx sync.RWMutex // protects config
	now func() model.Time

	ready uint32 // ready is uint32 rather than boolean to be able to use atomic functions.
}
// ApplyConfig updates the config field of the Handler struct.
// It always returns nil; the error result exists so the method fits the
// config-reloader callback shape — presumably, confirm at the call site.
func (handler *Handler) ApplyConfig(conf *config.Config) error {
	handler.mtx.Lock()
	defer handler.mtx.Unlock()
	handler.config = conf
	return nil
}
// Options for the web Handler.
type Options struct {
	Context               context.Context
	TSDB                  func() *tsdb.DB
	TSDBRetentionDuration model.Duration
	TSDBMaxBytes          units.Base2Bytes
	Storage               storage.Storage
	QueryEngine           *promql.Engine
	LookbackDelta         time.Duration
	ScrapeManager         *scrape.Manager
	RuleManager           *rules.Manager
	Notifier              *notifier.Manager
	Version               *PrometheusVersion
	Flags                 map[string]string

	ListenAddress              string
	CORSOrigin                 *regexp.Regexp
	ReadTimeout                time.Duration
	MaxConnections             int
	ExternalURL                *url.URL
	RoutePrefix                string
	UseLocalAssets             bool
	UserAssetsPath             string
	ConsoleTemplatesPath       string
	ConsoleLibrariesPath       string
	EnableLifecycle            bool // enables the /-/quit and /-/reload endpoints
	EnableAdminAPI             bool
	PageTitle                  string
	RemoteReadSampleLimit      int
	RemoteReadConcurrencyLimit int
	RemoteReadBytesInFrame     int

	Gatherer   prometheus.Gatherer
	Registerer prometheus.Registerer
}
// New initializes a new web Handler.
// It wires up the v1 API, all UI page routes, static assets (classic and
// React UIs), and the lifecycle/debug/health endpoints on a single router.
func New(logger log.Logger, o *Options) *Handler {
	if logger == nil {
		logger = log.NewNopLogger()
	}

	m := newMetrics(o.Registerer)
	router := route.New().
		WithInstrumentation(m.instrumentHandler).
		WithInstrumentation(setPathWithPrefix(""))

	cwd, err := os.Getwd()
	if err != nil {
		// Non-fatal: the value is only displayed on the status page.
		cwd = "<error retrieving current working directory>"
	}

	h := &Handler{
		logger:   logger,
		gatherer: o.Gatherer,
		metrics:  m,

		router:      router,
		quitCh:      make(chan struct{}),
		reloadCh:    make(chan chan error),
		options:     o,
		versionInfo: o.Version,
		birth:       time.Now(),
		cwd:         cwd,
		flagsMap:    o.Flags,

		context:       o.Context,
		scrapeManager: o.ScrapeManager,
		ruleManager:   o.RuleManager,
		queryEngine:   o.QueryEngine,
		lookbackDelta: o.LookbackDelta,
		tsdb:          o.TSDB,
		storage:       o.Storage,
		notifier:      o.Notifier,

		now: model.Now,

		ready: 0,
	}

	// The config getter closure takes the read lock so API handlers always see
	// a consistent config snapshot.
	h.apiV1 = api_v1.NewAPI(h.queryEngine, h.storage, h.scrapeManager, h.notifier,
		func() config.Config {
			h.mtx.RLock()
			defer h.mtx.RUnlock()
			return *h.config
		},
		o.Flags,
		api_v1.GlobalURLOptions{
			ListenAddress: o.ListenAddress,
			Host:          o.ExternalURL.Host,
			Scheme:        o.ExternalURL.Scheme,
		},
		h.testReady,
		func() api_v1.TSDBAdmin {
			return h.options.TSDB()
		},
		h.options.EnableAdminAPI,
		logger,
		h.ruleManager,
		h.options.RemoteReadSampleLimit,
		h.options.RemoteReadConcurrencyLimit,
		h.options.RemoteReadBytesInFrame,
		h.options.CORSOrigin,
		h.runtimeInfo,
		h.versionInfo,
	)

	if o.RoutePrefix != "/" {
		// If the prefix is missing for the root path, prepend it.
		router.Get("/", func(w http.ResponseWriter, r *http.Request) {
			http.Redirect(w, r, o.RoutePrefix, http.StatusFound)
		})
		router = router.WithPrefix(o.RoutePrefix)
	}

	readyf := h.testReady

	router.Get("/", func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, path.Join(o.ExternalURL.Path, "/graph"), http.StatusFound)
	})

	// Classic UI pages; all gated on server readiness.
	router.Get("/alerts", readyf(h.alerts))
	router.Get("/graph", readyf(h.graph))
	router.Get("/status", readyf(h.status))
	router.Get("/flags", readyf(h.flags))
	router.Get("/config", readyf(h.serveConfig))
	router.Get("/rules", readyf(h.rules))
	router.Get("/targets", readyf(h.targets))
	router.Get("/version", readyf(h.version))
	router.Get("/service-discovery", readyf(h.serviceDiscovery))

	router.Get("/metrics", promhttp.Handler().ServeHTTP)

	router.Get("/federate", readyf(httputil.CompressionHandler{
		Handler: http.HandlerFunc(h.federation),
	}.ServeHTTP))

	router.Get("/consoles/*filepath", readyf(h.consoles))

	router.Get("/static/*filepath", func(w http.ResponseWriter, r *http.Request) {
		r.URL.Path = path.Join("/static", route.Param(r.Context(), "filepath"))
		fs := server.StaticFileServer(ui.Assets)
		fs.ServeHTTP(w, r)
	})

	// Make sure that "<path-prefix>/new" is redirected to "<path-prefix>/new/" and
	// not just the naked "/new/", which would be the default behavior of the router
	// with the "RedirectTrailingSlash" option (https://godoc.org/github.com/julienschmidt/httprouter#Router.RedirectTrailingSlash),
	// and which breaks users with a --web.route-prefix that deviates from the path derived
	// from the external URL.
	router.Get("/new", func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, path.Join(o.ExternalURL.Path, "new")+"/", http.StatusFound)
	})

	router.Get("/new/*filepath", func(w http.ResponseWriter, r *http.Request) {
		p := route.Param(r.Context(), "filepath")

		// For paths that the React/Reach router handles, we want to serve the
		// index.html, but with replaced path prefix placeholder.
		for _, rp := range reactRouterPaths {
			if p != rp {
				continue
			}
			f, err := ui.Assets.Open("/static/react/index.html")
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				fmt.Fprintf(w, "Error opening React index.html: %v", err)
				return
			}
			idx, err := ioutil.ReadAll(f)
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				fmt.Fprintf(w, "Error reading React index.html: %v", err)
				return
			}
			prefixedIdx := bytes.ReplaceAll(idx, []byte("PATH_PREFIX_PLACEHOLDER"), []byte(o.ExternalURL.Path))
			prefixedIdx = bytes.ReplaceAll(prefixedIdx, []byte("CONSOLES_LINK_PLACEHOLDER"), []byte(h.consolesPath()))
			w.Write(prefixedIdx)
			return
		}

		// For all other paths, serve auxiliary assets.
		r.URL.Path = path.Join("/static/react/", p)
		fs := server.StaticFileServer(ui.Assets)
		fs.ServeHTTP(w, r)
	})

	if o.UserAssetsPath != "" {
		router.Get("/user/*filepath", route.FileServe(o.UserAssetsPath))
	}

	if o.EnableLifecycle {
		router.Post("/-/quit", h.quit)
		router.Put("/-/quit", h.quit)
		router.Post("/-/reload", h.reload)
		router.Put("/-/reload", h.reload)
	} else {
		// Keep the routes registered but answer 403 so clients get a clear
		// "disabled" signal rather than a 404.
		forbiddenAPINotEnabled := func(w http.ResponseWriter, _ *http.Request) {
			w.WriteHeader(http.StatusForbidden)
			w.Write([]byte("Lifecycle API is not enabled."))
		}
		router.Post("/-/quit", forbiddenAPINotEnabled)
		router.Put("/-/quit", forbiddenAPINotEnabled)
		router.Post("/-/reload", forbiddenAPINotEnabled)
		router.Put("/-/reload", forbiddenAPINotEnabled)
	}
	router.Get("/-/quit", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusMethodNotAllowed)
		w.Write([]byte("Only POST or PUT requests allowed"))
	})
	router.Get("/-/reload", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusMethodNotAllowed)
		w.Write([]byte("Only POST or PUT requests allowed"))
	})

	router.Get("/debug/*subpath", serveDebug)
	router.Post("/debug/*subpath", serveDebug)

	router.Get("/-/healthy", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		fmt.Fprintf(w, "Prometheus is Healthy.\n")
	})
	router.Get("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		fmt.Fprintf(w, "Prometheus is Ready.\n")
	}))

	return h
}
// serveDebug dispatches /debug/pprof/* requests to the stdlib pprof handlers.
func serveDebug(w http.ResponseWriter, req *http.Request) {
	ctx := req.Context()
	subpath := route.Param(ctx, "subpath")

	// Canonicalize /debug/pprof to /debug/pprof/ so relative links work.
	if subpath == "/pprof" {
		http.Redirect(w, req, req.URL.Path+"/", http.StatusMovedPermanently)
		return
	}

	const prefix = "/pprof/"
	if !strings.HasPrefix(subpath, prefix) {
		http.NotFound(w, req)
		return
	}

	switch name := subpath[len(prefix):]; name {
	case "cmdline":
		pprof.Cmdline(w, req)
	case "profile":
		pprof.Profile(w, req)
	case "symbol":
		pprof.Symbol(w, req)
	case "trace":
		pprof.Trace(w, req)
	default:
		// pprof.Index expects the full /debug/pprof/ path to pick the profile.
		req.URL.Path = "/debug/pprof/" + name
		pprof.Index(w, req)
	}
}
// Ready sets Handler to be ready.
// Safe to call from any goroutine; readiness is stored atomically.
func (h *Handler) Ready() {
	atomic.StoreUint32(&h.ready, 1)
}
// isReady reports whether the server has been marked ready (see Ready).
func (h *Handler) isReady() bool {
	return atomic.LoadUint32(&h.ready) > 0
}
// testReady wraps f so that it only runs once the server is ready; until then
// requests receive a 503 Service Unavailable.
func (h *Handler) testReady(f http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if !h.isReady() {
			w.WriteHeader(http.StatusServiceUnavailable)
			fmt.Fprintf(w, "Service Unavailable")
			return
		}
		f(w, r)
	}
}
// Checks if server is ready, calls f if it is, returns 503 if it is not.
// Convenience adapter from http.Handler to the http.HandlerFunc-based testReady.
func (h *Handler) testReadyHandler(f http.Handler) http.HandlerFunc {
	return h.testReady(f.ServeHTTP)
}
// Quit returns the receive-only quit channel.
// The channel is closed by the /-/quit handler.
func (h *Handler) Quit() <-chan struct{} {
	return h.quitCh
}
// Reload returns the receive-only channel that signals configuration reload requests.
// Each element is a reply channel on which the reload outcome must be sent.
func (h *Handler) Reload() <-chan chan error {
	return h.reloadCh
}
// Run serves the HTTP endpoints.
// It multiplexes HTTP/1 and gRPC over one listener via cmux and blocks until
// either a server errors out or ctx is cancelled.
func (h *Handler) Run(ctx context.Context) error {
	level.Info(h.logger).Log("msg", "Start listening for connections", "address", h.options.ListenAddress)

	listener, err := net.Listen("tcp", h.options.ListenAddress)
	if err != nil {
		return err
	}
	listener = netutil.LimitListener(listener, h.options.MaxConnections)

	// Monitor incoming connections with conntrack.
	listener = conntrack.NewListener(listener,
		conntrack.TrackWithName("http"),
		conntrack.TrackWithTracing())

	var (
		m = cmux.New(listener)
		// See https://github.com/grpc/grpc-go/issues/2636 for why we need to use MatchWithWriters().
		grpcl   = m.MatchWithWriters(cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"))
		httpl   = m.Match(cmux.HTTP1Fast())
		grpcSrv = grpc.NewServer()
	)
	av2 := api_v2.New(
		h.options.TSDB,
		h.options.EnableAdminAPI,
	)
	av2.RegisterGRPC(grpcSrv)

	hh, err := av2.HTTPHandler(ctx, h.options.ListenAddress)
	if err != nil {
		return err
	}

	hhFunc := h.testReadyHandler(hh)

	operationName := nethttp.OperationNameFunc(func(r *http.Request) string {
		return fmt.Sprintf("%s %s", r.Method, r.URL.Path)
	})
	mux := http.NewServeMux()
	mux.Handle("/", h.router)

	apiPath := "/api"
	if h.options.RoutePrefix != "/" {
		apiPath = h.options.RoutePrefix + apiPath
		level.Info(h.logger).Log("msg", "router prefix", "prefix", h.options.RoutePrefix)
	}

	av1 := route.New().
		WithInstrumentation(h.metrics.instrumentHandlerWithPrefix("/api/v1")).
		WithInstrumentation(setPathWithPrefix(apiPath + "/v1"))
	h.apiV1.Register(av1)

	mux.Handle(apiPath+"/v1/", http.StripPrefix(apiPath+"/v1", av1))

	// The v2 (gRPC-gateway) API handles its own routing below /api/.
	mux.Handle(apiPath+"/", http.StripPrefix(apiPath,
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			httputil.SetCORS(w, h.options.CORSOrigin, r)
			hhFunc(w, r)
		}),
	))

	errlog := stdlog.New(log.NewStdlibAdapter(level.Error(h.logger)), "", 0)

	httpSrv := &http.Server{
		Handler:     withStackTracer(nethttp.Middleware(opentracing.GlobalTracer(), mux, operationName), h.logger),
		ErrorLog:    errlog,
		ReadTimeout: h.options.ReadTimeout,
	}

	errCh := make(chan error)
	go func() {
		errCh <- httpSrv.Serve(httpl)
	}()
	go func() {
		errCh <- grpcSrv.Serve(grpcl)
	}()
	go func() {
		errCh <- m.Serve()
	}()

	select {
	case e := <-errCh:
		return e
	case <-ctx.Done():
		httpSrv.Shutdown(ctx)
		grpcSrv.GracefulStop()
		return nil
	}
}
// alerts renders alerts.html with all rule groups that contain at least one
// alerting rule, plus per-state counts and row CSS classes.
func (h *Handler) alerts(w http.ResponseWriter, r *http.Request) {
	var groups []*rules.Group
	for _, group := range h.ruleManager.RuleGroups() {
		// Groups without alerting rules have nothing to show on this page.
		if group.HasAlertingRules() {
			groups = append(groups, group)
		}
	}

	alertStatus := AlertStatus{
		Groups: groups,
		AlertStateToRowClass: map[rules.AlertState]string{
			rules.StateInactive: "success",
			rules.StatePending:  "warning",
			rules.StateFiring:   "danger",
		},
		Counts: alertCounts(groups),
	}
	h.executeTemplate(w, "alerts.html", alertStatus)
}
// alertCounts tallies the alerting rules in the given groups by state.
func alertCounts(groups []*rules.Group) AlertByStateCount {
	var counts AlertByStateCount
	for _, g := range groups {
		for _, alertingRule := range g.AlertingRules() {
			switch alertingRule.State() {
			case rules.StateInactive:
				counts.Inactive++
			case rules.StatePending:
				counts.Pending++
			case rules.StateFiring:
				counts.Firing++
			}
		}
	}
	return counts
}
// consoles renders a console template from ConsoleTemplatesPath, exposing URL
// parameters, the request path and the configured external labels to the
// template, and expanding it with the console library files.
func (h *Handler) consoles(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	name := route.Param(ctx, "filepath")

	// http.Dir confines the lookup to the console templates directory.
	file, err := http.Dir(h.options.ConsoleTemplatesPath).Open(name)
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}
	defer file.Close()
	text, err := ioutil.ReadAll(file)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	ctx = httputil.ContextFromRequest(ctx, r)

	// Provide URL parameters as a map for easy use. Advanced users may have need for
	// parameters beyond the first, so provide RawParams.
	rawParams, err := url.ParseQuery(r.URL.RawQuery)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	params := map[string]string{}
	for k, v := range rawParams {
		params[k] = v[0]
	}

	externalLabels := map[string]string{}
	h.mtx.RLock()
	els := h.config.GlobalConfig.ExternalLabels
	h.mtx.RUnlock()
	for _, el := range els {
		externalLabels[el.Name] = el.Value
	}

	// Inject some convenience variables that are easier to remember for users
	// who are not used to Go's templating system.
	defs := []string{
		"{{$rawParams := .RawParams }}",
		"{{$params := .Params}}",
		"{{$path := .Path}}",
		"{{$externalLabels := .ExternalLabels}}",
	}

	data := struct {
		RawParams      url.Values
		Params         map[string]string
		Path           string
		ExternalLabels map[string]string
	}{
		RawParams:      rawParams,
		Params:         params,
		Path:           strings.TrimLeft(name, "/"),
		ExternalLabels: externalLabels,
	}

	tmpl := template.NewTemplateExpander(
		ctx,
		strings.Join(append(defs, string(text)), ""),
		"__console_"+name,
		data,
		h.now(),
		template.QueryFunc(rules.EngineQueryFunc(h.queryEngine, h.storage)),
		h.options.ExternalURL,
	)
	filenames, err := filepath.Glob(h.options.ConsoleLibrariesPath + "/*.lib")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	result, err := tmpl.ExpandHTML(filenames)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	io.WriteString(w, result)
}
// graph renders the expression browser page (graph.html); the page itself
// fetches data via the HTTP API.
func (h *Handler) graph(w http.ResponseWriter, r *http.Request) {
	h.executeTemplate(w, "graph.html", nil)
}
// status renders status.html with runtime information: build/version data,
// storage retention settings, selected self-scraped metrics, and TSDB head
// statistics.
func (h *Handler) status(w http.ResponseWriter, r *http.Request) {
	status := struct {
		Birth               time.Time
		CWD                 string
		Version             *PrometheusVersion
		Alertmanagers       []*url.URL
		GoroutineCount      int
		GOMAXPROCS          int
		GOGC                string
		GODEBUG             string
		CorruptionCount     int64
		ChunkCount          int64
		TimeSeriesCount     int64
		LastConfigTime      time.Time
		ReloadConfigSuccess bool
		StorageRetention    string
		NumSeries           uint64
		MaxTime             int64
		MinTime             int64
		Stats               *index.PostingsStats
		Duration            string
	}{
		Birth:          h.birth,
		CWD:            h.cwd,
		Version:        h.versionInfo,
		Alertmanagers:  h.notifier.Alertmanagers(),
		GoroutineCount: runtime.NumGoroutine(),
		GOMAXPROCS:     runtime.GOMAXPROCS(0),
		GOGC:           os.Getenv("GOGC"),
		GODEBUG:        os.Getenv("GODEBUG"),
	}

	if h.options.TSDBRetentionDuration != 0 {
		status.StorageRetention = h.options.TSDBRetentionDuration.String()
	}
	if h.options.TSDBMaxBytes != 0 {
		if status.StorageRetention != "" {
			status.StorageRetention = status.StorageRetention + " or "
		}
		status.StorageRetention = status.StorageRetention + h.options.TSDBMaxBytes.String()
	}

	metrics, err := h.gatherer.Gather()
	if err != nil {
		http.Error(w, fmt.Sprintf("error gathering runtime status: %s", err), http.StatusInternalServerError)
		return
	}
	for _, mF := range metrics {
		switch *mF.Name {
		case "prometheus_tsdb_head_chunks":
			status.ChunkCount = int64(toFloat64(mF))
		case "prometheus_tsdb_head_series":
			status.TimeSeriesCount = int64(toFloat64(mF))
		case "prometheus_tsdb_wal_corruptions_total":
			status.CorruptionCount = int64(toFloat64(mF))
		case "prometheus_config_last_reload_successful":
			status.ReloadConfigSuccess = toFloat64(mF) != 0
		case "prometheus_config_last_reload_success_timestamp_seconds":
			status.LastConfigTime = time.Unix(int64(toFloat64(mF)), 0)
		}
	}

	db := h.tsdb()
	startTime := time.Now().UnixNano()
	status.Stats = db.Head().PostingsCardinalityStats("__name__")
	status.Duration = fmt.Sprintf("%.3f", float64(time.Now().UnixNano()-startTime)/float64(1e9))
	status.NumSeries = db.Head().NumSeries()
	status.MaxTime = db.Head().MaxTime()
	// BUG FIX: MinTime was previously populated from MaxTime(), so the status
	// page always showed an empty head time range.
	status.MinTime = db.Head().MinTime()

	h.executeTemplate(w, "status.html", status)
}
// runtimeInfo collects the runtime status served by the v1 API: process
// details, configured retention, and selected self-scraped metrics.
func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) {
	status := api_v1.RuntimeInfo{
		StartTime:      h.birth,
		CWD:            h.cwd,
		GoroutineCount: runtime.NumGoroutine(),
		GOMAXPROCS:     runtime.GOMAXPROCS(0),
		GOGC:           os.Getenv("GOGC"),
		GODEBUG:        os.Getenv("GODEBUG"),
	}

	if h.options.TSDBRetentionDuration != 0 {
		status.StorageRetention = h.options.TSDBRetentionDuration.String()
	}
	if h.options.TSDBMaxBytes != 0 {
		// Both time- and size-based retention may be configured; show both.
		if status.StorageRetention != "" {
			status.StorageRetention = status.StorageRetention + " or "
		}
		status.StorageRetention = status.StorageRetention + h.options.TSDBMaxBytes.String()
	}

	metrics, err := h.gatherer.Gather()
	if err != nil {
		return status, errors.Errorf("error gathering runtime status: %s", err)
	}
	for _, mF := range metrics {
		switch *mF.Name {
		case "prometheus_tsdb_head_chunks":
			status.ChunkCount = int64(toFloat64(mF))
		case "prometheus_tsdb_head_series":
			status.TimeSeriesCount = int64(toFloat64(mF))
		case "prometheus_tsdb_wal_corruptions_total":
			status.CorruptionCount = int64(toFloat64(mF))
		case "prometheus_config_last_reload_successful":
			status.ReloadConfigSuccess = toFloat64(mF) != 0
		case "prometheus_config_last_reload_success_timestamp_seconds":
			status.LastConfigTime = time.Unix(int64(toFloat64(mF)), 0)
		}
	}
	return status, nil
}
// toFloat64 extracts the value of the first metric in the family, supporting
// gauge, counter and untyped metrics; any other type yields NaN.
func toFloat64(f *io_prometheus_client.MetricFamily) float64 {
	m := *f.Metric[0]
	switch {
	case m.Gauge != nil:
		return m.Gauge.GetValue()
	case m.Counter != nil:
		return m.Counter.GetValue()
	case m.Untyped != nil:
		return m.Untyped.GetValue()
	default:
		return math.NaN()
	}
}
// flags renders flags.html with the command-line flag values captured at startup.
func (h *Handler) flags(w http.ResponseWriter, r *http.Request) {
	h.executeTemplate(w, "flags.html", h.flagsMap)
}
// serveConfig renders config.html with the currently loaded configuration.
// The read lock guards against a concurrent ApplyConfig.
func (h *Handler) serveConfig(w http.ResponseWriter, r *http.Request) {
	h.mtx.RLock()
	defer h.mtx.RUnlock()

	h.executeTemplate(w, "config.html", h.config.String())
}
// rules renders rules.html with the rule manager's current state.
func (h *Handler) rules(w http.ResponseWriter, r *http.Request) {
	h.executeTemplate(w, "rules.html", h.ruleManager)
}
// serviceDiscovery renders service-discovery.html with all discovered targets
// grouped by job, counting active vs. dropped targets per job.
func (h *Handler) serviceDiscovery(w http.ResponseWriter, r *http.Request) {
	var index []string
	targets := h.scrapeManager.TargetsAll()
	for job := range targets {
		index = append(index, job)
	}
	sort.Strings(index)
	scrapeConfigData := struct {
		Index   []string
		Targets map[string][]*scrape.Target
		Active  []int
		Dropped []int
		Total   []int
	}{
		Index:   index,
		Targets: make(map[string][]*scrape.Target),
		Active:  make([]int, len(index)),
		Dropped: make([]int, len(index)),
		Total:   make([]int, len(index)),
	}
	for i, job := range scrapeConfigData.Index {
		scrapeConfigData.Targets[job] = make([]*scrape.Target, 0, len(targets[job]))
		scrapeConfigData.Total[i] = len(targets[job])
		for _, target := range targets[job] {
			// Do not display more than 100 dropped targets per job to avoid
			// returning too much data to the clients.
			// A target with no labels left after relabelling counts as dropped;
			// it is still counted above the 100 cap, just not listed.
			if target.Labels().Len() == 0 {
				scrapeConfigData.Dropped[i]++
				if scrapeConfigData.Dropped[i] > 100 {
					continue
				}
			} else {
				scrapeConfigData.Active[i]++
			}
			scrapeConfigData.Targets[job] = append(scrapeConfigData.Targets[job], target)
		}
	}

	h.executeTemplate(w, "service-discovery.html", scrapeConfigData)
}
// targets serves the targets page with each scrape pool's targets sorted
// first by job label and then by instance label.
func (h *Handler) targets(w http.ResponseWriter, r *http.Request) {
	tps := h.scrapeManager.TargetsActive()
	for _, pool := range tps {
		sort.Slice(pool, func(a, b int) bool {
			la, lb := pool[a].Labels(), pool[b].Labels()
			if ja, jb := la.Get(model.JobLabel), lb.Get(model.JobLabel); ja != jb {
				return ja < jb
			}
			// Same job: fall back to ordering by instance.
			return la.Get(model.InstanceLabel) < lb.Get(model.InstanceLabel)
		})
	}
	h.executeTemplate(w, "targets.html", struct {
		TargetPools map[string][]*scrape.Target
	}{
		TargetPools: tps,
	})
}
// version writes the server's build/version information as JSON.
func (h *Handler) version(w http.ResponseWriter, r *http.Request) {
	enc := json.NewEncoder(w)
	err := enc.Encode(h.versionInfo)
	if err != nil {
		http.Error(w, fmt.Sprintf("error encoding JSON: %s", err), http.StatusInternalServerError)
	}
}
// quit handles the termination endpoint: it acknowledges the request and
// closes quitCh to signal the main goroutine to shut down.
//
// NOTE(review): a second request to this handler would close an already
// closed channel and panic; guarding the close with a sync.Once on the
// Handler would fix that, but requires a struct change outside this
// function.
func (h *Handler) quit(w http.ResponseWriter, r *http.Request) {
	// Fprint, not Fprintf: the message is a constant with no format verbs.
	fmt.Fprint(w, "Requesting termination... Goodbye!")
	close(h.quitCh)
}
// reload triggers a configuration reload by handing a fresh result channel
// to the main run loop via reloadCh, then blocks until the reload attempt
// finishes, reporting any failure as HTTP 500.
func (h *Handler) reload(w http.ResponseWriter, r *http.Request) {
	rc := make(chan error)
	h.reloadCh <- rc
	if err := <-rc; err != nil {
		http.Error(w, fmt.Sprintf("failed to reload config: %s", err), http.StatusInternalServerError)
	}
}
// consolesPath returns the web path of the console index page, preferring
// the configured console templates directory over user assets. The empty
// string is returned when neither location provides an index.html.
func (h *Handler) consolesPath() string {
	if _, err := os.Stat(h.options.ConsoleTemplatesPath + "/index.html"); !os.IsNotExist(err) {
		return h.options.ExternalURL.Path + "/consoles/index.html"
	}
	if h.options.UserAssetsPath != "" {
		if _, err := os.Stat(h.options.UserAssetsPath + "/index.html"); !os.IsNotExist(err) {
			return h.options.ExternalURL.Path + "/user/index.html"
		}
	}
	return ""
}
// tmplFuncs returns the helper functions made available to the web UI
// templates. consolesPath and opts are captured by the returned closures.
func tmplFuncs(consolesPath string, opts *Options) template_text.FuncMap {
	return template_text.FuncMap{
		// Elapsed time since t, truncated to millisecond precision.
		"since": func(t time.Time) time.Duration {
			return time.Since(t) / time.Millisecond * time.Millisecond
		},
		"consolesPath": func() string { return consolesPath },
		"pathPrefix":   func() string { return opts.ExternalURL.Path },
		"pageTitle":    func() string { return opts.PageTitle },
		"buildVersion": func() string { return opts.Version.Revision },
		// globalURL rewrites a target URL that points at this very process
		// via localhost into one reachable from outside; unparseable hosts
		// are returned unchanged.
		"globalURL": func(u *url.URL) *url.URL {
			host, port, err := net.SplitHostPort(u.Host)
			if err != nil {
				return u
			}
			for _, lhr := range api_v1.LocalhostRepresentations {
				if host == lhr {
					_, ownPort, err := net.SplitHostPort(opts.ListenAddress)
					if err != nil {
						return u
					}
					if port == ownPort {
						// Only in the case where the target is on localhost and its port is
						// the same as the one we're listening on, we know for sure that
						// we're monitoring our own process and that we need to change the
						// scheme, hostname, and port to the externally reachable ones as
						// well. We shouldn't need to touch the path at all, since if a
						// path prefix is defined, the path under which we scrape ourselves
						// should already contain the prefix.
						u.Scheme = opts.ExternalURL.Scheme
						u.Host = opts.ExternalURL.Host
					} else {
						// Otherwise, we only know that localhost is not reachable
						// externally, so we replace only the hostname by the one in the
						// external URL. It could be the wrong hostname for the service on
						// this port, but it's still the best possible guess.
						host, _, err := net.SplitHostPort(opts.ExternalURL.Host)
						if err != nil {
							return u
						}
						u.Host = host + ":" + port
					}
					break
				}
			}
			return u
		},
		// Number of targets in the pool whose last scrape was healthy.
		"numHealthy": func(pool []*scrape.Target) int {
			alive := len(pool)
			for _, p := range pool {
				if p.Health() != scrape.HealthGood {
					alive--
				}
			}
			return alive
		},
		// The following three helpers map states to Bootstrap CSS classes.
		"targetHealthToClass": func(th scrape.TargetHealth) string {
			switch th {
			case scrape.HealthUnknown:
				return "warning"
			case scrape.HealthGood:
				return "success"
			default:
				return "danger"
			}
		},
		"ruleHealthToClass": func(rh rules.RuleHealth) string {
			switch rh {
			case rules.HealthUnknown:
				return "warning"
			case rules.HealthGood:
				return "success"
			default:
				return "danger"
			}
		},
		"alertStateToClass": func(as rules.AlertState) string {
			switch as {
			case rules.StateInactive:
				return "success"
			case rules.StatePending:
				return "warning"
			case rules.StateFiring:
				return "danger"
			default:
				// Unreachable unless a new alert state is added upstream.
				panic("unknown alert state")
			}
		},
	}
}
// getTemplate returns the base template concatenated with the named page
// template, both read from the embedded UI assets.
func (h *Handler) getTemplate(name string) (string, error) {
	// read loads a single template file from the embedded assets.
	read := func(fname string) (string, error) {
		f, err := ui.Assets.Open(path.Join("/templates", fname))
		if err != nil {
			return "", err
		}
		defer f.Close()
		b, err := ioutil.ReadAll(f)
		if err != nil {
			return "", err
		}
		return string(b), nil
	}
	base, err := read("_base.html")
	if err != nil {
		return "", errors.Wrap(err, "error reading base template")
	}
	page, err := read(name)
	if err != nil {
		return "", errors.Wrapf(err, "error reading page template %s", name)
	}
	return base + page, nil
}
// executeTemplate renders the named page template with the given data and
// writes the result to w. Any template loading or expansion failure is
// reported to the client as HTTP 500.
func (h *Handler) executeTemplate(w http.ResponseWriter, name string, data interface{}) {
	text, err := h.getTemplate(name)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		// BUG FIX: previously execution fell through here, expanding an
		// empty template and writing a second body after the error response.
		return
	}
	tmpl := template.NewTemplateExpander(
		h.context,
		text,
		name,
		data,
		h.now(),
		template.QueryFunc(rules.EngineQueryFunc(h.queryEngine, h.storage)),
		h.options.ExternalURL,
	)
	tmpl.Funcs(tmplFuncs(h.consolesPath(), h.options))
	result, err := tmpl.ExpandHTML(nil)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	io.WriteString(w, result)
}
// AlertStatus bundles alerting rules and the mapping of alert states to row classes.
type AlertStatus struct {
	Groups               []*rules.Group              // rule groups to display on the alerts page
	AlertStateToRowClass map[rules.AlertState]string // CSS row class per alert state
	Counts               AlertByStateCount           // totals of alerts per state
}

// AlertByStateCount holds the number of alerts in each alert state.
type AlertByStateCount struct {
	Inactive int32
	Pending  int32
	Firing   int32
}
// setPathWithPrefix wraps a handler so that the request context carries the
// full, prefix-qualified request path.
func setPathWithPrefix(prefix string) func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
	return func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			ctx := httputil.ContextWithPath(r.Context(), prefix+r.URL.Path)
			handler(w, r.WithContext(ctx))
		}
	}
}
|
[
"\"GOGC\"",
"\"GODEBUG\"",
"\"GOGC\"",
"\"GODEBUG\""
] |
[] |
[
"GOGC",
"GODEBUG"
] |
[]
|
["GOGC", "GODEBUG"]
|
go
| 2 | 0 | |
cmd/kube-scheduler/app/options/options_test.go
|
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"time"
apimachineryconfig "k8s.io/apimachinery/pkg/apis/config"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/diff"
apiserverconfig "k8s.io/apiserver/pkg/apis/config"
kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
)
// TestSchedulerOptions exercises Options.Config() end to end: it spins up
// TLS and plain-HTTP test API servers that record the Basic-Auth username
// of each request, writes config/kubeconfig fixtures to a temp dir, and
// verifies for each case which credentials the built client uses, the
// resulting component config, and the expected errors.
func TestSchedulerOptions(t *testing.T) {
	// temp dir
	tmpDir, err := ioutil.TempDir("", "scheduler-options")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	// record the username requests were made with
	username := ""
	// https server
	server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		username, _, _ = req.BasicAuth()
		if username == "" {
			username = "none, tls"
		}
		w.WriteHeader(200)
		w.Write([]byte(`ok`))
	}))
	defer server.Close()
	// http server
	insecureserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		username, _, _ = req.BasicAuth()
		if username == "" {
			username = "none, http"
		}
		w.WriteHeader(200)
		w.Write([]byte(`ok`))
	}))
	defer insecureserver.Close()

	// config file and kubeconfig
	configFile := filepath.Join(tmpDir, "scheduler.yaml")
	configKubeconfig := filepath.Join(tmpDir, "config.kubeconfig")
	if err := ioutil.WriteFile(configFile, []byte(fmt.Sprintf(`
apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: "%s"
leaderElection:
  leaderElect: true`, configKubeconfig)), os.FileMode(0600)); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile(configKubeconfig, []byte(fmt.Sprintf(`
apiVersion: v1
kind: Config
clusters:
- cluster:
    insecure-skip-tls-verify: true
    server: %s
  name: default
contexts:
- context:
    cluster: default
    user: default
  name: default
current-context: default
users:
- name: default
  user:
    username: config
`, server.URL)), os.FileMode(0600)); err != nil {
		t.Fatal(err)
	}

	// legacy config fixture, still accepted via special-case coercion
	oldconfigFile := filepath.Join(tmpDir, "scheduler_old.yaml")
	if err := ioutil.WriteFile(oldconfigFile, []byte(fmt.Sprintf(`
apiVersion: componentconfig/v1alpha1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: "%s"
leaderElection:
  leaderElect: true`, configKubeconfig)), os.FileMode(0600)); err != nil {
		t.Fatal(err)
	}

	// fixture with an unregistered apiVersion; must fail to load
	invalidconfigFile := filepath.Join(tmpDir, "scheduler_invalid.yaml")
	if err := ioutil.WriteFile(invalidconfigFile, []byte(fmt.Sprintf(`
apiVersion: componentconfig/v1alpha2
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: "%s"
leaderElection:
  leaderElect: true`, configKubeconfig)), os.FileMode(0600)); err != nil {
		t.Fatal(err)
	}

	// flag-specified kubeconfig
	flagKubeconfig := filepath.Join(tmpDir, "flag.kubeconfig")
	if err := ioutil.WriteFile(flagKubeconfig, []byte(fmt.Sprintf(`
apiVersion: v1
kind: Config
clusters:
- cluster:
    insecure-skip-tls-verify: true
    server: %s
  name: default
contexts:
- context:
    cluster: default
    user: default
  name: default
current-context: default
users:
- name: default
  user:
    username: flag
`, server.URL)), os.FileMode(0600)); err != nil {
		t.Fatal(err)
	}

	// Insulate this test from picking up in-cluster config when run inside a pod
	// We can't assume we have permissions to write to /var/run/secrets/... from a unit test to mock in-cluster config for testing
	originalHost := os.Getenv("KUBERNETES_SERVICE_HOST")
	if len(originalHost) > 0 {
		os.Setenv("KUBERNETES_SERVICE_HOST", "")
		defer os.Setenv("KUBERNETES_SERVICE_HOST", originalHost)
	}

	defaultSource := "DefaultProvider"
	defaultBindTimeoutSeconds := int64(600)

	testcases := []struct {
		name             string
		options          *Options
		expectedUsername string
		expectedError    string
		expectedConfig   kubeschedulerconfig.KubeSchedulerConfiguration
	}{
		{
			name: "config file",
			options: &Options{
				ConfigFile: configFile,
				ComponentConfig: func() kubeschedulerconfig.KubeSchedulerConfiguration {
					cfg, err := newDefaultComponentConfig()
					if err != nil {
						t.Fatal(err)
					}
					return *cfg
				}(),
			},
			expectedUsername: "config",
			expectedConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
				SchedulerName:                  "default-scheduler",
				AlgorithmSource:                kubeschedulerconfig.SchedulerAlgorithmSource{Provider: &defaultSource},
				HardPodAffinitySymmetricWeight: 1,
				HealthzBindAddress:             "0.0.0.0:10251",
				MetricsBindAddress:             "0.0.0.0:10251",
				FailureDomains:                 "kubernetes.io/hostname,failure-domain.beta.kubernetes.io/zone,failure-domain.beta.kubernetes.io/region",
				LeaderElection: kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration{
					LeaderElectionConfiguration: apiserverconfig.LeaderElectionConfiguration{
						LeaderElect:   true,
						LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
						RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
						RetryPeriod:   metav1.Duration{Duration: 2 * time.Second},
						ResourceLock:  "endpoints",
					},
					LockObjectNamespace: "kube-system",
					LockObjectName:      "kube-scheduler",
				},
				ClientConnection: apimachineryconfig.ClientConnectionConfiguration{
					Kubeconfig:  configKubeconfig,
					QPS:         50,
					Burst:       100,
					ContentType: "application/vnd.kubernetes.protobuf",
				},
				PercentageOfNodesToScore: 50,
				BindTimeoutSeconds:       &defaultBindTimeoutSeconds,
			},
		},
		{
			name: "config file in componentconfig/v1alpha1",
			options: &Options{
				ConfigFile: oldconfigFile,
				ComponentConfig: func() kubeschedulerconfig.KubeSchedulerConfiguration {
					cfg, err := newDefaultComponentConfig()
					if err != nil {
						t.Fatal(err)
					}
					return *cfg
				}(),
			},
			// TODO: switch this to expect an error in 1.13 when the special-case coercion is removed from loadConfig
			// expectedError: "no kind \"KubeSchedulerConfiguration\" is registered for version \"componentconfig/v1alpha1\"",
			expectedUsername: "config",
			expectedConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
				SchedulerName:                  "default-scheduler",
				AlgorithmSource:                kubeschedulerconfig.SchedulerAlgorithmSource{Provider: &defaultSource},
				HardPodAffinitySymmetricWeight: 1,
				HealthzBindAddress:             "0.0.0.0:10251",
				MetricsBindAddress:             "0.0.0.0:10251",
				FailureDomains:                 "kubernetes.io/hostname,failure-domain.beta.kubernetes.io/zone,failure-domain.beta.kubernetes.io/region",
				LeaderElection: kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration{
					LeaderElectionConfiguration: apiserverconfig.LeaderElectionConfiguration{
						LeaderElect:   true,
						LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
						RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
						RetryPeriod:   metav1.Duration{Duration: 2 * time.Second},
						ResourceLock:  "endpoints",
					},
					LockObjectNamespace: "kube-system",
					LockObjectName:      "kube-scheduler",
				},
				ClientConnection: apimachineryconfig.ClientConnectionConfiguration{
					Kubeconfig:  configKubeconfig,
					QPS:         50,
					Burst:       100,
					ContentType: "application/vnd.kubernetes.protobuf",
				},
				PercentageOfNodesToScore: 50,
				BindTimeoutSeconds:       &defaultBindTimeoutSeconds,
			},
		},
		{
			name:          "invalid config file in componentconfig/v1alpha2",
			options:       &Options{ConfigFile: invalidconfigFile},
			expectedError: "no kind \"KubeSchedulerConfiguration\" is registered for version \"componentconfig/v1alpha2\"",
		},
		{
			name: "kubeconfig flag",
			options: &Options{
				ComponentConfig: func() kubeschedulerconfig.KubeSchedulerConfiguration {
					cfg, _ := newDefaultComponentConfig()
					cfg.ClientConnection.Kubeconfig = flagKubeconfig
					return *cfg
				}(),
			},
			expectedUsername: "flag",
			expectedConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
				SchedulerName:                  "default-scheduler",
				AlgorithmSource:                kubeschedulerconfig.SchedulerAlgorithmSource{Provider: &defaultSource},
				HardPodAffinitySymmetricWeight: 1,
				HealthzBindAddress:             "", // defaults empty when not running from config file
				MetricsBindAddress:             "", // defaults empty when not running from config file
				FailureDomains:                 "kubernetes.io/hostname,failure-domain.beta.kubernetes.io/zone,failure-domain.beta.kubernetes.io/region",
				LeaderElection: kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration{
					LeaderElectionConfiguration: apiserverconfig.LeaderElectionConfiguration{
						LeaderElect:   true,
						LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
						RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
						RetryPeriod:   metav1.Duration{Duration: 2 * time.Second},
						ResourceLock:  "endpoints",
					},
					LockObjectNamespace: "kube-system",
					LockObjectName:      "kube-scheduler",
				},
				ClientConnection: apimachineryconfig.ClientConnectionConfiguration{
					Kubeconfig:  flagKubeconfig,
					QPS:         50,
					Burst:       100,
					ContentType: "application/vnd.kubernetes.protobuf",
				},
				PercentageOfNodesToScore: 50,
				BindTimeoutSeconds:       &defaultBindTimeoutSeconds,
			},
		},
		{
			name:             "overridden master",
			options:          &Options{Master: insecureserver.URL},
			expectedUsername: "none, http",
		},
		{
			name:          "no config",
			options:       &Options{},
			expectedError: "no configuration has been provided",
		},
	}

	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			// create the config
			config, err := tc.options.Config()

			// handle errors
			if err != nil {
				if tc.expectedError == "" {
					t.Error(err)
				} else if !strings.Contains(err.Error(), tc.expectedError) {
					t.Errorf("expected %q, got %q", tc.expectedError, err.Error())
				}
				return
			}

			if !reflect.DeepEqual(config.ComponentConfig, tc.expectedConfig) {
				t.Errorf("config.diff:\n%s", diff.ObjectReflectDiff(tc.expectedConfig, config.ComponentConfig))
			}

			// ensure we have a client
			if config.Client == nil {
				t.Error("unexpected nil client")
				return
			}

			// test the client talks to the endpoint we expect with the credentials we expect
			username = ""
			_, err = config.Client.Discovery().RESTClient().Get().AbsPath("/").DoRaw()
			if err != nil {
				t.Error(err)
				return
			}
			if username != tc.expectedUsername {
				t.Errorf("expected server call with user %s, got %s", tc.expectedUsername, username)
			}
		})
	}
}
|
[
"\"KUBERNETES_SERVICE_HOST\""
] |
[] |
[
"KUBERNETES_SERVICE_HOST"
] |
[]
|
["KUBERNETES_SERVICE_HOST"]
|
go
| 1 | 0 | |
src/config.py
|
import configparser
import numpy as np
import os
class Config:
    """Aggregated, typed access to the bot's INI configuration files.

    Values are read from config/params.ini, config/game.ini,
    config/pickit.ini and config/shop.ini, with config/custom.ini taking
    precedence for any (section, key) it defines (except under RUN_ENV=test).
    """

    def _select_val(self, section: str, key: str = None):
        """Return the value for (section, key), preferring custom.ini.

        Lookup falls through custom -> params -> pickit -> shop and finally
        game.ini. NOTE(review): the final branch does not check membership,
        so an unknown section/key raises KeyError from the game parser.
        """
        if section in self._custom and key in self._custom[section]:
            return self._custom[section][key]
        elif section in self._config:
            return self._config[section][key]
        elif section in self._pickit_config:
            return self._pickit_config[section][key]
        elif section in self._shop_config:
            return self._shop_config[section][key]
        else:
            return self._game_config[section][key]

    def __init__(self, print_warnings: bool = False):
        """Load all INI files and expose their values as typed attributes.

        print_warnings: when True, warn about pickit items without a
        matching template image in assets/items.
        """
        # print_warnings, what a hack... here it is, not making the effort
        # passing a single config instance through bites me in the ass
        self._print_warnings = print_warnings
        self._config = configparser.ConfigParser()
        self._config.read('config/params.ini')
        self._game_config = configparser.ConfigParser()
        self._game_config.read('config/game.ini')
        self._pickit_config = configparser.ConfigParser()
        self._pickit_config.read('config/pickit.ini')
        self._shop_config = configparser.ConfigParser()
        self._shop_config.read('config/shop.ini')
        self._custom = configparser.ConfigParser()
        # Tests must not pick up local overrides, hence the RUN_ENV guard.
        if os.environ.get('RUN_ENV') != "test" and os.path.exists('config/custom.ini'):
            self._custom.read('config/custom.ini')

        # General bot behavior. INI values are strings; booleans are stored
        # as "0"/"1" and therefore parsed via bool(int(...)).
        self.general = {
            "saved_games_folder": self._select_val("general", "saved_games_folder"),
            "name": self._select_val("general", "name"),
            "monitor": int(self._select_val("general", "monitor")),
            "max_game_length_s": float(self._select_val("general", "max_game_length_s")),
            "exit_key": self._select_val("general", "exit_key"),
            "resume_key": self._select_val("general", "resume_key"),
            "auto_settings_key": self._select_val("general", "auto_settings_key"),
            "graphic_debugger_key": self._select_val("general", "graphic_debugger_key"),
            "logg_lvl": self._select_val("general", "logg_lvl"),
            "randomize_runs": bool(int(self._select_val("general", "randomize_runs"))),
            "difficulty": self._select_val("general", "difficulty"),
            "custom_message_hook": self._select_val("general", "custom_message_hook"),
            "discord_status_count": False if not self._select_val("general", "discord_status_count") else int(self._select_val("general", "discord_status_count")),
            "info_screenshots": bool(int(self._select_val("general", "info_screenshots"))),
            "loot_screenshots": bool(int(self._select_val("general", "loot_screenshots"))),
        }

        # Added for dclone ip hunting
        self.dclone = {
            "region_ips": self._select_val("dclone", "region_ips"),
            "dclone_hotip": self._select_val("dclone", "dclone_hotip"),
        }

        # Which runs are enabled; keys come from the [routes] section.
        self.routes = {}
        for key in self._config["routes"]:
            self.routes[key] = bool(int(self._select_val("routes", key)))

        # Character behavior, hotkeys, belt layout and attack timings.
        self.char = {
            "type": self._select_val("char", "type"),
            "show_items": self._select_val("char", "show_items"),
            "inventory_screen": self._select_val("char", "inventory_screen"),
            "stand_still": self._select_val("char", "stand_still"),
            "force_move": self._select_val("char", "force_move"),
            "num_loot_columns": int(self._select_val("char", "num_loot_columns")),
            "take_health_potion": float(self._select_val("char", "take_health_potion")),
            "take_mana_potion": float(self._select_val("char", "take_mana_potion")),
            "take_rejuv_potion_health": float(self._select_val("char", "take_rejuv_potion_health")),
            "take_rejuv_potion_mana": float(self._select_val("char", "take_rejuv_potion_mana")),
            "heal_merc": float(self._select_val("char", "heal_merc")),
            "heal_rejuv_merc": float(self._select_val("char", "heal_rejuv_merc")),
            "chicken": float(self._select_val("char", "chicken")),
            "merc_chicken": float(self._select_val("char", "merc_chicken")),
            "tp": self._select_val("char", "tp"),
            "belt_rows": int(self._select_val("char", "belt_rows")),
            "show_belt": self._select_val("char", "show_belt"),
            "potion1": self._select_val("char", "potion1"),
            "potion2": self._select_val("char", "potion2"),
            "potion3": self._select_val("char", "potion3"),
            "potion4": self._select_val("char", "potion4"),
            "belt_rejuv_columns": int(self._select_val("char", "belt_rejuv_columns")),
            "belt_hp_columns": int(self._select_val("char", "belt_hp_columns")),
            "belt_mp_columns": int(self._select_val("char", "belt_mp_columns")),
            "stash_gold": bool(int(self._select_val("char", "stash_gold"))),
            "gold_trav_only": bool(int(self._select_val("char", "gold_trav_only"))),
            "use_merc": bool(int(self._select_val("char", "use_merc"))),
            "pre_buff_every_run": bool(int(self._select_val("char", "pre_buff_every_run"))),
            "cta_available": bool(int(self._select_val("char", "cta_available"))),
            "weapon_switch": self._select_val("char", "weapon_switch"),
            "battle_orders": self._select_val("char", "battle_orders"),
            "battle_command": self._select_val("char", "battle_command"),
            "casting_frames": int(self._select_val("char", "casting_frames")),
            "atk_len_trav": float(self._select_val("char", "atk_len_trav")),
            "atk_len_pindle": float(self._select_val("char", "atk_len_pindle")),
            "atk_len_eldritch": float(self._select_val("char", "atk_len_eldritch")),
            "atk_len_shenk": float(self._select_val("char", "atk_len_shenk")),
            "atk_len_nihlatak": float(self._select_val("char", "atk_len_nihlatak")),
        }

        # Per-build sections; custom.ini entries override base values.
        # NOTE(review): sorceress is converted to a plain dict while
        # hammerdin/trapsin stay configparser SectionProxy objects —
        # inconsistent, but callers may depend on either behavior.
        self.sorceress = dict(self._config["sorceress"])
        if "sorceress" in self._custom:
            self.sorceress.update(dict(self._custom["sorceress"]))
        self.hammerdin = self._config["hammerdin"]
        if "hammerdin" in self._custom:
            self.hammerdin.update(self._custom["hammerdin"])
        self.trapsin = self._config["trapsin"]
        if "trapsin" in self._custom:
            self.trapsin.update(self._custom["trapsin"])

        self.advanced_options = {
            # Clamped to [1, 10].
            "pathing_delay_factor": min(max(int(self._select_val("advanced_options", "pathing_delay_factor")), 1), 10),
            "message_headers": self._select_val("advanced_options", "message_headers"),
            "message_body_template": self._select_val("advanced_options", "message_body_template"),
            "message_highlight": bool(int(self._select_val("advanced_options", "message_highlight"))),
        }

        # Pickit priorities; warn when an enabled item lacks a template image.
        self.items = {}
        for key in self._pickit_config["items"]:
            self.items[key] = int(self._select_val("items", key))
            if self.items[key] and not os.path.exists(f"./assets/items/{key}.png") and self._print_warnings:
                print(f"Warning: You activated {key} in pickit, but there is no img available in assets/items")

        # Color ranges: "a,b,c,d,e,f" is split into two numpy arrays (lower
        # and upper bound).
        self.colors = {}
        for key in self._game_config["colors"]:
            self.colors[key] = np.split(np.array([int(x) for x in self._select_val("colors", key).split(",")]), 2)

        self.ui_pos = {}
        for key in self._game_config["ui_pos"]:
            self.ui_pos[key] = int(self._select_val("ui_pos", key))

        self.ui_roi = {}
        for key in self._game_config["ui_roi"]:
            self.ui_roi[key] = np.array([int(x) for x in self._select_val("ui_roi", key).split(",")])

        # Paths are flat comma-separated coordinate lists reshaped to (N, 2).
        self.path = {}
        for key in self._game_config["path"]:
            self.path[key] = np.reshape(np.array([int(x) for x in self._select_val("path", key).split(",")]), (-1, 2))

        self.shop = {
            "shop_trap_claws": bool(int(self._select_val("claws", "shop_trap_claws"))),
            "shop_melee_claws": bool(int(self._select_val("claws", "shop_melee_claws"))),
            "shop_3_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_3_skills_ias_gloves"))),
            "shop_2_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_2_skills_ias_gloves"))),
            "trap_min_score": int(self._select_val("claws", "trap_min_score")),
            "melee_min_score": int(self._select_val("claws", "melee_min_score")),
        }
if __name__ == "__main__":
    # Sanity-check that pickit config entries and template images in
    # assets/items stay in sync (in both directions).
    config = Config(print_warnings=True)
    # Check if any added items miss templates
    for k in config.items:
        if not os.path.exists(f"./assets/items/{k}.png"):
            print(f"Template not found: {k}")
    # Check if any item templates miss a config
    # (plain string literal: the previous f-string had no placeholders)
    for filename in os.listdir("assets/items"):
        filename = filename.lower()
        if filename.endswith(".png"):
            item_name = filename[:-4]
            # "bl__"-prefixed templates are blacklist entries, not pickit items.
            blacklist_item = item_name.startswith("bl__")
            if item_name not in config.items and not blacklist_item:
                print(f"Config not found for: {filename}")
|
[] |
[] |
[
"RUN_ENV"
] |
[]
|
["RUN_ENV"]
|
python
| 1 | 0 | |
tensorflow/tools/ci_build/sizetrack_helper.py
|
#!/usr/bin/env python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Used for Google-internal artifact size tracking.
See go/tf-devinfra/sizetrack.
INVOCATION: The following flags are required:
sizetrack_helper.py \
--artifact=ARTIFACT, or --manual_bytes=MANUAL_BYTES
--artifact_id=ARTIFACT_ID \
--team=TEAM \
... other optional args ...
On Windows you might need something like:
C:\Python38\python.exe C:\path\to\sizetrack_helper.py ...
PREREQUISITES:
1. Your current activated GCP user must have access scopes and IAM permissions
to do the following:
1. Query and load data into BigQuery
2. Upload files to GCS
2. Your environment must match the following criteria:
1. Current directory is a git repository
2. CL-based commits have a PiperOrigin-RevId trailer. This is the case
for any use of Copybara Single-source-of-truth, e.g. TensorFlow.
Only these commits are considered when running commands.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import datetime
import os
import os.path
import platform
import subprocess
parser = argparse.ArgumentParser(
usage=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--project",
type=str,
default="tensorflow-testing",
help="GCP project you can access.")
parser.add_argument(
"--dataset",
type=str,
default="sizetracker",
help="BigQuery dataset containing --table")
parser.add_argument(
"--table", type=str, default="tensorflow_devinfra", help="BigQuery table.")
parser.add_argument(
"--upload",
action="store_true",
help="Upload the artifact to --bucket for analysis.")
parser.add_argument(
"--bucket",
type=str,
default="gs://tf-sizetracker-artifacts",
help="GCS bucket for artifacts.")
parser.add_argument(
"--team",
type=str,
help="For grouping in the dashboard and buckets; e.g. tf-lite-team.")
parser.add_argument(
"--artifact_id",
type=str,
help="Unique ID for your artifact, used for sorting dashboards.")
parser.add_argument(
"-n",
"--dry_run",
action="store_true",
help="Dry run: do not load to BigQuery or upload to GCS.")
parser.add_argument(
"--job",
type=str,
help="Name of job calling this script. Default: $KOKORO_JOB_NAME.")
parser.add_argument(
"--print_schema",
action="store_true",
help="Print the table schema and don't do anything else.")
size = parser.add_mutually_exclusive_group()
size.add_argument(
"--artifact",
type=argparse.FileType("r"),
help="Local file you are measuring.")
size.add_argument(
"--manual_bytes",
type=int,
help="Manually set the recorded size instead of providing an artifact.")
FLAGS = parser.parse_args()
TABLE_NAME = "{}.{}".format(FLAGS.dataset, FLAGS.table)
PROJECT_LEVEL_TABLE_NAME = "{}:{}".format(FLAGS.project, TABLE_NAME)
CL_TRAILER = "PiperOrigin-RevId"
PRETTY_COMMIT_DATE = "%cI"
PRETTY_CL = "%(trailers:key={},valueonly)".format(CL_TRAILER)
PRETTY_HEAD_INFO = "%h\t{cl}\t%s\t%ae\t%aI\t%ce\t%cI".format(cl=PRETTY_CL)
PRETTY_EARLY = "{cl}\t%aI\t%cI".format(cl=PRETTY_CL)
PRETTY_COMMIT = "%h"
# This is a BigQuery table schema defined as CSV
# See https://cloud.google.com/bigquery/docs/schemas
SCHEMA = ",".join([
"id:string",
"filename:string",
# These 6 lines are from git's format=pretty
# %h $CL_PRETTY %s %ae %aI %ce %cI
"commit:string",
"cl:int64",
"description:string",
"author:string",
"author_date:timestamp",
"committer:string",
"commit_date:timestamp",
# Done with format=pretty
"earliest_commit:string",
"earliest_cl:int64",
"earliest_author_date:timestamp",
"earliest_commit_date:timestamp",
"all_commits:string",
"all_cls:string",
"bytes:int64",
"team:string",
"logged_date:timestamp",
"uploaded_to:string",
"job:string",
])
# Select the earliest recorded commit in the same table for the same artifact
# and team. Used to determine the full range of tested commits for each
# invocation. Returns empty string if there are no earlier records.
BQ_GET_EARLIEST_INCLUDED_COMMIT = """
SELECT
commit
FROM {table} WHERE
commit_date < '{earlier_than_this_date}'
AND id = '{artifact_id}'
AND team = '{team}'
ORDER BY commit_date DESC LIMIT 1
"""
def git_pretty(commit_range, pretty_format, n=None):
    r"""Run git log and return the cleaned results.

    Git is assumed to be available in the PATH.

    The PiperOrigin-RevId trailer always picks up an extra newline, so this
    splits entries on a null byte (\0, or %x00 for git log) and removes
    newlines.

    Args:
      commit_range: Standard range given to git log, e.g. HEAD~1..HEAD
      pretty_format: See https://git-scm.com/docs/pretty-formats
      n: Number of commits to get. By default, get all within commit_range.

    Returns:
      List of strings of whatever the format string was.

    Raises:
      subprocess.CalledProcessError: if git exits non-zero; stderr/stdout
        are printed first to aid debugging.
    """
    # BUG FIX: the commit count was hardcoded to "1" no matter what n was
    # passed; honor the caller's value. (All current callers pass n=1, so
    # behavior is unchanged for them.)
    count_args = [] if n is None else ["-n", str(n)]
    try:
        ret = subprocess.run([
            "git", "log", *count_args, "--date", "iso", "--grep", CL_TRAILER,
            commit_range, "--pretty=format:" + pretty_format + "%x00"
        ],
                             check=True,
                             universal_newlines=True,
                             stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE)
    except subprocess.CalledProcessError as e:
        print(e.stderr)
        print(e.stdout)
        raise e
    out = ret.stdout.replace("\n", "")
    # Split by \0 and make list of text, extra whitespace and empty lines removed
    return list(filter(None, map(str.strip, out.split("\0"))))
def gcloud(tool, args, stdin=None):
    r"""Run a Google cloud utility.

    On Linux and MacOS, utilities are assumed to be in the PATH.
    On Windows, utilities are assumed to be available as
    C:\Program Files (x86)\Google\Cloud SDK\google-cloud-sdk\bin\{tool}.cmd

    Args:
      tool: CLI tool, e.g. bq, gcloud, gsutil
      args: List of arguments, same format as subprocess.run
      stdin: String to send to stdin

    Returns:
      String, the stdout of the tool

    Raises:
      subprocess.CalledProcessError: re-raised after printing the tool's
        stderr/stdout, since check=True is set below.
    """
    if platform.system() == "Windows":
        tool = (r"C:\Program Files (x86)\Google\Cloud "
                r"SDK\google-cloud-sdk\bin\{}.cmd").format(tool)
    try:
        ret = subprocess.run([tool, *args],
                             check=True,
                             universal_newlines=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             input=stdin)
    except subprocess.CalledProcessError as e:
        print(e.stderr)
        print(e.stdout)
        raise e
    return ret.stdout.strip()
def get_all_tested_commits():
    """Get details about the full commit range tested by this invocation."""
    head_info = git_pretty("HEAD", PRETTY_HEAD_INFO, n=1)
    # PRETTY_HEAD_INFO yields seven tab-separated fields; only the commit
    # date (the last field) is needed for the BigQuery lookup.
    _, _, _, _, _, _, current_commit_date = head_info[0].split("\t")
    query = BQ_GET_EARLIEST_INCLUDED_COMMIT.format(
        table=TABLE_NAME,
        earlier_than_this_date=current_commit_date,
        artifact_id=FLAGS.artifact_id,
        team=FLAGS.team)
    # --format=csv returns an empty string if no results, or else two lines:
    #   commit
    #   COMMIT_HASH
    bq_args = [
        "--project_id", FLAGS.project, "--headless", "-q", "query",
        "--format", "csv", "--nouse_legacy_sql"
    ]
    earliest_commit = gcloud("bq", bq_args, stdin=query)
    if not earliest_commit:
        # The artifact has never been tracked before this commit.
        # Empty cells in CSV loads are loaded as NULL values.
        return [""] * 6
    # Compute the commit/CL range since the last test.
    earliest_commit = earliest_commit.splitlines()[-1]  # Ignore CSV header
    early_cl, early_author_date, early_commit_date = git_pretty(
        earliest_commit, PRETTY_EARLY, n=1)[0].split("\t")
    all_range = "{commit}..HEAD".format(commit=earliest_commit)
    return [
        earliest_commit,
        early_cl,
        early_author_date,
        early_commit_date,
        ",".join(git_pretty(all_range, PRETTY_COMMIT)),
        ",".join(git_pretty(all_range, PRETTY_CL)),
    ]
def get_upload_path():
    """Generate URL for 'gsutil cp'."""
    if not (FLAGS.upload and FLAGS.artifact):
        return ""
    artifact_filename = os.path.basename(FLAGS.artifact.name)
    timestamp = datetime.datetime.now(
        datetime.timezone.utc).replace(microsecond=0).isoformat()
    # note: not os.path.join here, because gsutil is always linux-style
    # Using a timestamp prevents duplicate entries
    return "{bucket}/{team}/{artifact_id}/{now}.{artifact_filename}".format(
        bucket=FLAGS.bucket,
        team=FLAGS.team,
        artifact_id=FLAGS.artifact_id,
        now=timestamp,
        artifact_filename=artifact_filename)
def build_row():
    """Assemble one row of data about this artifact."""
    (earliest_commit, early_cl, early_author_date, early_commit_date,
     all_commits, all_changelists) = get_all_tested_commits()
    # Use UTC to make sure machines in different timezones load consistent data
    current_time = datetime.datetime.now(datetime.timezone.utc).isoformat()
    if FLAGS.artifact:
        artifact_filename = os.path.basename(FLAGS.artifact.name)
    else:
        artifact_filename = "NO_FILE"
    size_bytes = FLAGS.manual_bytes or os.path.getsize(FLAGS.artifact.name)
    head_info = git_pretty("HEAD", PRETTY_HEAD_INFO, n=1)
    row = [FLAGS.artifact_id, artifact_filename]
    # Seven tab-separated fields describing HEAD, then commit-range data.
    row.extend(head_info[0].split("\t"))
    row.extend([
        earliest_commit,
        early_cl,
        early_author_date,
        early_commit_date,
        all_commits,
        all_changelists,
        size_bytes,
        FLAGS.team,
        current_time,
        get_upload_path(),
        FLAGS.job,
    ])
    return row
def main():
    """Validate flags, optionally upload the artifact, and load one row into BigQuery."""
    # Validate flags.
    if FLAGS.print_schema:
        print(SCHEMA)
        exit(0)
    flags_ok = FLAGS.team and FLAGS.artifact_id and (FLAGS.artifact or
                                                     FLAGS.manual_bytes)
    if not flags_ok:
        print(
            "--team and --artifact_id are required if --print_schema is not "
            "specified.\nYou must also specify one of --artifact or --manual_bytes."
            "\nPass -h or --help for usage.")
        exit(1)
    if not FLAGS.job:
        FLAGS.job = os.environ.get("KOKORO_JOB_NAME", "NO_JOB")
    # Generate data about this artifact into a Tab Separated Value file
    next_tsv_row = build_row()
    # Upload artifact into GCS if it exists
    if FLAGS.upload and FLAGS.artifact:
        upload_path = get_upload_path()
        if FLAGS.dry_run:
            print("DRY RUN: Would gsutil cp to:\n{}".format(upload_path))
        else:
            gcloud("gsutil", ["cp", FLAGS.artifact.name, upload_path])
    # Load into BigQuery
    if FLAGS.dry_run:
        print("DRY RUN: Generated this TSV row:")
        print("\t".join(map(str, next_tsv_row)))
        return
    with open("data.tsv", "w") as tsvfile:
        writer = csv.writer(tsvfile, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
        writer.writerow(next_tsv_row)
    gcloud("bq", [
        "--project_id", FLAGS.project, "--headless", "-q", "load",
        "--source_format", "CSV", "--field_delimiter", "tab",
        PROJECT_LEVEL_TABLE_NAME, "data.tsv", SCHEMA
    ])


if __name__ == "__main__":
    main()
|
[] |
[] |
[
"KOKORO_JOB_NAME"
] |
[]
|
["KOKORO_JOB_NAME"]
|
python
| 1 | 0 | |
libml/data.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input data for image models.
"""
import functools
import itertools
import os
import numpy as np
import tensorflow.compat.v1 as tf
from absl import app
from absl import flags
from tqdm import tqdm
from libml import augment as augment_module
from libml import utils
from libml.augment import AugmentPair, NOAUGMENT
import pickle
# Data directory. Value is initialized in _data_setup
#
# Note that if you need to use DATA_DIR outside of this module then
# you should do following:
# from libml import data as libml_data
# ...
# dir = libml_data.DATA_DIR
#
# If you directly import DATA_DIR:
# from libml.data import DATA_DIR
# then None will be imported.
DATA_DIR = None
_DATA_CACHE = None
SAMPLES_PER_CLASS = [1, 2, 3, 4, 5, 10, 25, 100, 400]
flags.DEFINE_string('dataset', 'cifar10.1@4000-5000', 'Data to train on.')
flags.DEFINE_integer('para_parse', 1, 'Parallel parsing.')
flags.DEFINE_integer('para_augment', 5, 'Parallel augmentation.')
flags.DEFINE_integer('shuffle', 8192, 'Size of dataset shuffling.')
flags.DEFINE_string('p_unlabeled', '', 'Probability distribution of unlabeled.')
flags.DEFINE_bool('whiten', False, 'Whether to normalize images.')
flags.DEFINE_string('data_dir', None,
'Data directory. '
'If None then environment variable ML_DATA '
'will be used as a data directory.')
FLAGS = flags.FLAGS
def _data_setup():
    """Resolve the dataset directory once absl flags have been parsed.

    Priority: the --data_dir flag, then the ML_DATA environment variable,
    then a local ./datasets directory (the previous hard-coded debug value).
    """
    # set up data directory
    global DATA_DIR
    # Bug fix: DATA_DIR was hard-coded to './datasets' (a debug leftover),
    # silently ignoring both --data_dir and the ML_DATA environment variable.
    DATA_DIR = FLAGS.data_dir or os.environ.get('ML_DATA', './datasets')


app.call_after_init(_data_setup)
def record_parse_mnist(serialized_example, image_shape=None):
    """Decode one MNIST-style tf.Example into dict(image=..., label=...).

    The image is zero-padded by 2 pixels on each spatial side (e.g.
    28x28 -> 32x32) and rescaled from uint8 [0, 255] to float [-1, 1].
    """
    parsed = tf.parse_single_example(
        serialized_example,
        features={'image': tf.FixedLenFeature([], tf.string),
                  'label': tf.FixedLenFeature([], tf.int64)})
    image = tf.image.decode_image(parsed['image'])
    if image_shape:
        image.set_shape(image_shape)
    # Pad spatial dims only; leave the channel dim untouched.
    image = tf.pad(image, [[2] * 2, [2] * 2, [0] * 2])
    image = tf.cast(image, tf.float32) * (2.0 / 255) - 1.0
    return {'image': image, 'label': parsed['label']}
def record_parse(serialized_example, image_shape=None):
    """Decode one tf.Example into dict(image=..., label=...).

    When --soft_labels is set, the label feature is read as a raw string
    (presumably a serialized distribution); otherwise it is an int64 class
    id. NOTE(review): FLAGS.soft_labels is not defined in this module —
    it must be registered elsewhere; confirm before standalone use.
    """
    label_dtype = tf.string if FLAGS.soft_labels else tf.int64
    parsed = tf.parse_single_example(
        serialized_example,
        features={'image': tf.FixedLenFeature([], tf.string),
                  'label': tf.FixedLenFeature([], label_dtype)})
    image = tf.image.decode_image(parsed['image'])
    if image_shape:
        image.set_shape(image_shape)
    # Rescale uint8 [0, 255] to float [-1, 1].
    image = tf.cast(image, tf.float32) * (2.0 / 255) - 1.0
    return {'image': image, 'label': parsed['label']}
# (Removed dead code: a stale duplicate of record_parse that was kept here
# as a never-executed module-level string literal.)
def compute_mean_std(data: tf.data.Dataset):
    """Compute per-channel mean and std over a dataset of images.

    Streams the dataset through a TF1 session in batches of 1024 and
    accumulates batch-weighted first and second moments.
    """
    batched = data.map(lambda x: x['image']).batch(1024).prefetch(1)
    next_batch = batched.make_one_shot_iterator().get_next()
    total_weight = 0
    moments = []
    with tf.Session(config=utils.get_config()) as sess:

        def batches():
            # Drain the one-shot iterator until the dataset is exhausted.
            while True:
                try:
                    yield sess.run(next_batch)
                except tf.errors.OutOfRangeError:
                    break

        for batch in tqdm(batches(), unit='kimg',
                          desc='Computing dataset mean and std'):
            # Weight partial final batches by their relative size.
            weight = batch.shape[0] / 1024.
            total_weight += weight
            moments.append((batch.mean((0, 1, 2)) * weight,
                            (batch ** 2).mean((0, 1, 2)) * weight))
    mean = sum(m[0] for m in moments) / total_weight
    variance = sum(m[1] for m in moments) / total_weight - mean ** 2
    std = np.sqrt(variance)
    print('Mean %s Std: %s' % (mean, std))
    return mean, std
class DataSet:
    """Wrapper for tf.data.Dataset to permit extensions.

    Unknown attribute lookups are proxied to the wrapped dataset via
    __getattr__; any proxied call that returns a tf.data.Dataset is
    re-wrapped so chained calls (map, batch, skip, ...) stay DataSets.
    """

    def __init__(self, data: tf.data.Dataset, augment_fn: AugmentPair, parse_fn=record_parse, image_shape=None):
        # data: the wrapped tf.data.Dataset.
        # augment_fn: AugmentPair applied by augment(); may be None.
        # parse_fn: maps a serialized record to dict(image=..., label=...);
        #   None means records are already parsed.
        # image_shape: static [h, w, c] to set on decoded images, or None.
        self.data = data
        self.parse_fn = parse_fn
        self.augment_fn = augment_fn
        self.image_shape = image_shape

    @classmethod
    def from_files(cls, filenames: list, augment_fn: AugmentPair, parse_fn=record_parse, image_shape=None):
        """Build a DataSet from TFRecord file patterns (globs are expanded)."""
        filenames_in = filenames
        filenames = sorted(sum([tf.gfile.Glob(x) for x in filenames], []))
        if not filenames:
            raise ValueError('Empty dataset, did you mount gcsfuse bucket?', filenames_in)
        if len(filenames) > 4:
            # Many shards: read them in parallel with a sloppy interleave.
            def fetch_dataset(filename):
                buffer_size = 8 * 1024 * 1024  # 8 MiB per file
                dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size)
                return dataset

            # Read the data from disk in parallel
            dataset = tf.data.Dataset.from_tensor_slices(filenames)
            # dataset.apply(pickle.loads())  # NOTE(review): dead experiment, never wired up
            dataset = dataset.apply(
                tf.data.experimental.parallel_interleave(
                    fetch_dataset,
                    cycle_length=min(16, len(filenames)),
                    sloppy=True))
        else:
            dataset = tf.data.TFRecordDataset(filenames)
        return cls(dataset,
                   augment_fn=augment_fn,
                   parse_fn=parse_fn,
                   image_shape=image_shape)

    @classmethod
    def empty_data(cls, image_shape, augment_fn: AugmentPair = None):
        """A placeholder dataset of FLAGS.batch all-zero images with label 0."""
        def _get_null_input(_):
            return dict(image=tf.zeros(image_shape, tf.float32),
                        label=tf.constant(0, tf.int64))

        return cls(tf.data.Dataset.range(FLAGS.batch).map(_get_null_input),
                   parse_fn=None,
                   augment_fn=augment_fn,
                   image_shape=image_shape)

    def __getattr__(self, item):
        # Only reached when normal attribute lookup fails; consult __dict__
        # directly to avoid recursing back into __getattr__.
        if item in self.__dict__:
            return self.__dict__[item]

        def call_and_update(*args, **kwargs):
            # Forward the call to the wrapped dataset; re-wrap Dataset
            # results so method chaining preserves DataSet semantics.
            v = getattr(self.__dict__['data'], item)(*args, **kwargs)
            if isinstance(v, tf.data.Dataset):
                return self.__class__(v,
                                      parse_fn=self.parse_fn,
                                      augment_fn=self.augment_fn,
                                      image_shape=self.image_shape)
            return v

        return call_and_update

    def parse(self):
        """Map parse_fn over records; parallelism scales with GPU count."""
        if self.parse_fn:
            para = 4 * max(1, len(utils.get_available_gpus())) * FLAGS.para_parse
            if self.image_shape:
                return self.map(lambda x: self.parse_fn(x, self.image_shape), para)
            else:
                return self.map(self.parse_fn, para)
        return self

    def numpy_augment(self, *args, **kwargs):
        # Numpy-side half of the augmentation pair.
        return self.augment_fn.numpy(*args, **kwargs)

    def augment(self):
        """Apply the TF-side augmentation, if an augment_fn was provided."""
        if self.augment_fn:
            para = max(1, len(utils.get_available_gpus())) * FLAGS.para_augment
            return self.map(self.augment_fn.tf, para)
        return self

    def memoize(self):
        """Call before parsing, since it calls for parse inside.

        Materializes the parsed dataset into host-memory numpy arrays and
        returns a new DataSet serving records from the cache via tf.py_func.
        """
        data = []
        with tf.Session(config=utils.get_config()) as session:
            it = self.parse().prefetch(16).make_one_shot_iterator().get_next()
            try:
                while 1:
                    data.append(session.run(it))
            except tf.errors.OutOfRangeError:
                pass
        images = np.stack([x['image'] for x in data])
        labels = np.stack([x['label'] for x in data])

        def tf_get(index, image_shape):
            # Serves one cached record by index through a py_func bridge.
            def get(index):
                return images[index], labels[index]

            image, label = tf.py_func(get, [index], [tf.float32, tf.int64])
            return dict(image=tf.reshape(image, image_shape), label=label, index=index)

        return self.__class__(tf.data.Dataset.range(len(data)),
                              parse_fn=tf_get,
                              augment_fn=self.augment_fn,
                              image_shape=self.image_shape)
class DataSets:
    """Container for the labeled/unlabeled/valid/test splits of a dataset,
    together with its geometry, class count and normalization statistics."""

    def __init__(self, name, train_labeled: DataSet, train_unlabeled: DataSet, test: DataSet, valid: DataSet,
                 height=32, width=32, colors=3, nclass=10, mean=0, std=1, p_labeled=None, p_unlabeled=None):
        # Identity and data splits.
        self.name = name
        self.train_labeled = train_labeled
        self.train_unlabeled = train_unlabeled
        self.valid = valid
        self.test = test
        # Image geometry and label space.
        self.height = height
        self.width = width
        self.colors = colors
        self.nclass = nclass
        # Normalization statistics and class distributions.
        self.mean = mean
        self.std = std
        self.p_labeled = p_labeled
        self.p_unlabeled = p_unlabeled

    @classmethod
    def creator(cls, name, seed, label, valid, augment, parse_fn=record_parse, do_memoize=False,
                nclass=10, colors=3, height=32, width=32):
        """Return a (registry_key, factory) pair for one dataset variant.

        The factory is lazy so flags and DATA_DIR are only consulted when
        the dataset is actually instantiated.
        """
        if not isinstance(augment, list):
            augment = augment(name)
        fullname = '.%d@%d' % (seed, label)
        root = os.path.join(DATA_DIR, 'SSL2', name)

        def create():
            p_labeled = p_unlabeled = None
            if FLAGS.p_unlabeled:
                # Comma-separated per-class weights, normalized by their max.
                weights = FLAGS.p_unlabeled.split(',')
                p_unlabeled = np.array(list(map(float, weights)), dtype=np.float32)
                p_unlabeled /= np.max(p_unlabeled)
            image_shape = [height, width, colors]
            train_labeled = DataSet.from_files(
                [root + fullname + '-label.tfrecord'], augment[0], parse_fn, image_shape)
            train_unlabeled = DataSet.from_files(
                [root + '-unlabel.tfrecord'], augment[1], parse_fn, image_shape)
            if do_memoize:
                train_labeled = train_labeled.memoize()
                train_unlabeled = train_unlabeled.memoize()
            # Whitening statistics are computed over both training splits.
            mean, std = ((0, 1) if not FLAGS.whiten else
                         compute_mean_std(train_labeled.concatenate(train_unlabeled)))
            test_data = DataSet.from_files(
                [os.path.join(DATA_DIR, '%s-test.tfrecord' % name)], NOAUGMENT, parse_fn, image_shape=image_shape)
            suffix = '/' + FLAGS.p_unlabeled if FLAGS.p_unlabeled else ''
            display = '%s.%s%s-%s%s' % (name, FLAGS.augment, fullname, valid, suffix)
            return cls(display,
                       train_labeled=train_labeled,
                       train_unlabeled=train_unlabeled.skip(valid),
                       valid=train_unlabeled.take(valid),
                       test=test_data,
                       nclass=nclass, p_labeled=p_labeled, p_unlabeled=p_unlabeled,
                       height=height, width=width, colors=colors, mean=mean, std=std)

        return '%s%s-%s' % (name, fullname, valid), create
def create_datasets(augment_fn):
    """Build the registry mapping dataset spec names to factory callables.

    Deduplicates what was previously ~30 near-identical DataSets.creator
    invocations; the resulting dict is identical to the original.

    Args:
      augment_fn: callable (or list) forwarded to DataSets.creator that
        selects the augmentation strategy for each dataset.

    Returns:
      Dict {'<name>.<seed>@<label>-<valid>': create_callable}.
    """
    d = {}
    default_labels = [10 * x for x in SAMPLES_PER_CLASS]

    def add(names, valids, labels=default_labels, **kwargs):
        # Register every (name, seed, label, valid) combination.
        for name in names:
            d.update([DataSets.creator(name, seed, label, valid, augment_fn, **kwargs)
                      for seed, label, valid in itertools.product(range(6), labels, valids)])

    add(['cifar10'], [1, 5000])
    add(['cifar10voters%d' % i for i in range(1, 5)], [1, 1000])
    add(['cifar10h', 'cifar10hf'], [1, 1000])
    add(['cifar10hf%d' % i for i in range(1, 8)], [1, 1000])
    add(['cifar10hfs%d' % i for i in range(1, 8)], [1, 1000])
    add(['plankton', 'plankton_hard'] +
        ['plankton_votes%d' % i for i in range(1, 5)],
        [1, 1000], colors=1, width=64, height=64)
    add(['cifar100'], [1, 5000], labels=[400, 1000, 2500, 10000], nclass=100)
    add(['fashion_mnist'], [1, 5000], height=32, width=32, colors=1,
        parse_fn=record_parse_mnist)
    add(['stl10'], [1, 500], labels=[1000, 5000], height=96, width=96)
    add(['svhn', 'svhn_noextra'], [1, 5000])
    return d
# Registry factories, one per augmentation policy; call e.g. DATASETS()
# after flag parsing to materialize the {spec_name: create} dict.
DATASETS = functools.partial(create_datasets, augment_module.augment_function)
PAIR_DATASETS = functools.partial(create_datasets, augment_module.pair_augment_function)
MANY_DATASETS = functools.partial(create_datasets, augment_module.many_augment_function)
QUAD_DATASETS = functools.partial(create_datasets, augment_module.quad_augment_function)
|
[] |
[] |
[
"ML_DATA"
] |
[]
|
["ML_DATA"]
|
python
| 1 | 0 | |
oneflow/python/test/ops/test_copy_comm_net_pass_empty.py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
import oneflow.typing as oft
import unittest
import os
def ccrelu(x, name):
    """Apply the custom 'ccrelu' user op to blob `x` under op name `name`."""
    builder = flow.user_op_builder(name)
    builder = builder.Op("ccrelu").Input("in", [x]).Output("out")
    op = builder.Build()
    return op.InferAndTryRun().RemoteBlobList()[0]
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.num_nodes_required(2)
def test_multi_node_comm_net(test_case):
    """Chain the custom ccrelu op across two GPU nodes; check relu semantics."""
    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.consistent_view())
    func_config.default_data_type(flow.float)
    flow.config.gpu_device_num(1)

    @flow.global_function(function_config=func_config)
    def ReluJob(x: oft.Numpy.Placeholder((10, 2))):
        with flow.scope.placement("gpu", "0:0"):
            out0 = ccrelu(x, "my_op_0_0")
        with flow.scope.placement("gpu", "1:0"):
            out1 = ccrelu(out0, "my_op_1_0")
        with flow.scope.placement("gpu", "0:0"):
            out2 = ccrelu(out1, "my_op_print")
        return out2

    index = [-2, -1, 0, 1, 2]
    data = [np.ones((10, 2,), dtype=np.float32) * i for i in index]
    for value, batch in zip(index, data):
        ret = ReluJob(batch).get().numpy()
        print(ret)
        # Positive inputs pass through; non-positive are clamped to zero.
        if value > 0:
            expected = np.ones((10, 2,), dtype=np.float32) * value
        else:
            expected = np.zeros((10, 2,), dtype=np.float32)
        test_case.assertTrue(np.array_equal(ret, expected))
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.num_nodes_required(2)
def test_multi_node_comm_net_dynamic(test_case):
    """Chain flow.math.relu across two GPU nodes with mirrored (dynamic) input."""
    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.mirrored_view())
    func_config.default_placement_scope(flow.scope.placement("gpu", "0:0"))
    func_config.default_data_type(flow.float)
    flow.config.machine_num(2)
    flow.config.gpu_device_num(1)

    @flow.global_function(function_config=func_config)
    def ReluJob(x: oft.ListNumpy.Placeholder((10, 2))):
        with flow.scope.placement("gpu", "0:0"):
            out0 = flow.math.relu(x)
        with flow.scope.placement("gpu", "1:0"):
            out1 = flow.math.relu(out0)
        with flow.scope.placement("gpu", "0:0"):
            out2 = flow.math.relu(out1)
        return out2

    index = [-2, -1, 0, 1, 2]
    # Actual batches are (5, 2) — smaller than the (10, 2) placeholder,
    # exercising the dynamic-shape path.
    data = [np.ones((5, 2,), dtype=np.float32) * i for i in index]
    for value, batch in zip(index, data):
        ret = ReluJob([batch]).get().numpy_list()[0]
        print(ret)
        if value > 0:
            expected = np.ones((5, 2,), dtype=np.float32) * value
        else:
            expected = np.zeros((5, 2,), dtype=np.float32)
        test_case.assertTrue(np.array_equal(ret, expected))
# Consistency fix: this test also calls flow.config.gpu_device_num(1), yet it
# was the only test in this file without the CPU-only skip guard its two
# siblings carry.
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.num_nodes_required(2)
def test_multi_node_comm_net_dynamic_empty(test_case):
    """Relu chain across two CPU nodes with empty (0, 0) mirrored tensors."""
    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.mirrored_view())
    func_config.default_placement_scope(flow.scope.placement("cpu", "0:0"))
    func_config.default_data_type(flow.float)
    flow.config.machine_num(2)
    flow.config.gpu_device_num(1)

    @flow.global_function(function_config=func_config)
    def ReluJob(x: oft.ListNumpy.Placeholder((10, 2))):
        with flow.scope.placement("cpu", "0:0"):
            out0 = flow.math.relu(x)
        with flow.scope.placement("cpu", "1:0"):
            out1 = flow.math.relu(out0)
        with flow.scope.placement("cpu", "0:0"):
            out2 = flow.math.relu(out1)
        return out2

    index = [-2, -1, 0, 1, 2]
    data = []
    for i in index:
        # Zero-element tensors: the comm-net pass must handle empty payloads.
        data.append(np.ones((0, 0,), dtype=np.float32) * i)
    for i in range(5):
        ret = ReluJob([data[i]]).get().numpy_list()[0]
        print(ret)
        if index[i] > 0:
            test_case.assertTrue(
                np.array_equal(ret, np.ones((0, 0,), dtype=np.float32) * index[i])
            )
        else:
            test_case.assertTrue(
                np.array_equal(ret, np.zeros((0, 0,), dtype=np.float32))
            )
|
[] |
[] |
[
"ONEFLOW_TEST_CPU_ONLY"
] |
[]
|
["ONEFLOW_TEST_CPU_ONLY"]
|
python
| 1 | 0 | |
pkg/config/config.go
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2018 Datadog, Inc.
package config
import (
"bytes"
"fmt"
"net/url"
"os"
"path/filepath"
"strings"
"time"
yaml "gopkg.in/yaml.v2"
"github.com/DataDog/datadog-agent/pkg/util/log"
"github.com/DataDog/datadog-agent/pkg/secrets"
"github.com/DataDog/datadog-agent/pkg/version"
)
// DefaultForwarderRecoveryInterval is the default recovery interval, also used if
// the user-provided value is invalid.
const DefaultForwarderRecoveryInterval = 2

// DefaultSite is the default site the Agent sends data to.
const DefaultSite = "datadoghq.com"

// infraURLPrefix is prepended to the site to build the infrastructure
// endpoint URL (e.g. https://app.datadoghq.com).
const infraURLPrefix = "https://app."
// Datadog is the global configuration object
var (
	Datadog Config
	// proxies caches the proxy settings resolved from configuration.
	proxies *Proxy
)

// MetadataProviders helps unmarshalling `metadata_providers` config param
type MetadataProviders struct {
	Name     string        `mapstructure:"name"`
	Interval time.Duration `mapstructure:"interval"`
}

// ConfigurationProviders helps unmarshalling `config_providers` config param
type ConfigurationProviders struct {
	Name             string `mapstructure:"name"`
	Polling          bool   `mapstructure:"polling"`
	PollInterval     string `mapstructure:"poll_interval"`
	TemplateURL      string `mapstructure:"template_url"`
	TemplateDir      string `mapstructure:"template_dir"`
	Username         string `mapstructure:"username"`
	Password         string `mapstructure:"password"`
	CAFile           string `mapstructure:"ca_file"`
	CAPath           string `mapstructure:"ca_path"`
	CertFile         string `mapstructure:"cert_file"`
	KeyFile          string `mapstructure:"key_file"`
	Token            string `mapstructure:"token"`
	GraceTimeSeconds int    `mapstructure:"grace_time_seconds"`
}

// Listeners helps unmarshalling `listeners` config param
type Listeners struct {
	Name string `mapstructure:"name"`
}

// Proxy represents the configuration for proxies in the agent
type Proxy struct {
	HTTP    string   `mapstructure:"http"`
	HTTPS   string   `mapstructure:"https"`
	NoProxy []string `mapstructure:"no_proxy"`
}
// init wires up the package-global Datadog configuration object: env vars
// are read with the "DD" prefix and dots in config keys map to underscores,
// then all defaults are installed via initConfig.
func init() {
	// Configure Datadog global configuration
	Datadog = NewConfig("datadog", "DD", strings.NewReplacer(".", "_"))
	// Configuration defaults
	initConfig(Datadog)
}
// initConfig initializes the config defaults on a config
func initConfig(config Config) {
// Agent
// Don't set a default on 'site' to allow detecting with viper whether it's set in config
config.BindEnv("site")
config.BindEnv("dd_url")
config.BindEnvAndSetDefault("app_key", "")
config.SetDefault("proxy", nil)
config.BindEnvAndSetDefault("skip_ssl_validation", false)
config.BindEnvAndSetDefault("hostname", "")
config.BindEnvAndSetDefault("tags", []string{})
config.BindEnvAndSetDefault("tag_value_split_separator", map[string]string{})
config.BindEnvAndSetDefault("conf_path", ".")
config.BindEnvAndSetDefault("confd_path", defaultConfdPath)
config.BindEnvAndSetDefault("additional_checksd", defaultAdditionalChecksPath)
config.BindEnvAndSetDefault("log_payloads", false)
config.BindEnvAndSetDefault("log_file", "")
config.BindEnvAndSetDefault("log_level", "info")
config.BindEnvAndSetDefault("log_to_syslog", false)
config.BindEnvAndSetDefault("log_to_console", true)
config.BindEnvAndSetDefault("logging_frequency", int64(20))
config.BindEnvAndSetDefault("disable_file_logging", false)
config.BindEnvAndSetDefault("syslog_uri", "")
config.BindEnvAndSetDefault("syslog_rfc", false)
config.BindEnvAndSetDefault("syslog_pem", "")
config.BindEnvAndSetDefault("syslog_key", "")
config.BindEnvAndSetDefault("syslog_tls_verify", true)
config.BindEnvAndSetDefault("cmd_host", "localhost")
config.BindEnvAndSetDefault("cmd_port", 5001)
config.BindEnvAndSetDefault("cluster_agent.cmd_port", 5005)
config.BindEnvAndSetDefault("default_integration_http_timeout", 9)
config.BindEnvAndSetDefault("enable_metadata_collection", true)
config.BindEnvAndSetDefault("enable_gohai", true)
config.BindEnvAndSetDefault("check_runners", int64(4))
config.BindEnvAndSetDefault("auth_token_file_path", "")
config.BindEnvAndSetDefault("bind_host", "localhost")
config.BindEnvAndSetDefault("health_port", int64(0))
// if/when the default is changed to true, make the default platform
// dependent; default should remain false on Windows to maintain backward
// compatibility with Agent5 behavior/win
config.BindEnvAndSetDefault("hostname_fqdn", false)
config.BindEnvAndSetDefault("cluster_name", "")
// secrets backend
config.BindEnv("secret_backend_command")
config.BindEnv("secret_backend_arguments")
config.BindEnvAndSetDefault("secret_backend_output_max_size", 1024)
config.BindEnvAndSetDefault("secret_backend_timeout", 5)
// Retry settings
config.BindEnvAndSetDefault("forwarder_backoff_factor", 2)
config.BindEnvAndSetDefault("forwarder_backoff_base", 2)
config.BindEnvAndSetDefault("forwarder_backoff_max", 64)
config.BindEnvAndSetDefault("forwarder_recovery_interval", DefaultForwarderRecoveryInterval)
config.BindEnvAndSetDefault("forwarder_recovery_reset", false)
// Use to output logs in JSON format
config.BindEnvAndSetDefault("log_format_json", false)
// IPC API server timeout
config.BindEnvAndSetDefault("server_timeout", 15)
// Use to force client side TLS version to 1.2
config.BindEnvAndSetDefault("force_tls_12", false)
// Agent GUI access port
config.BindEnvAndSetDefault("GUI_port", defaultGuiPort)
if IsContainerized() {
config.SetDefault("procfs_path", "/host/proc")
config.SetDefault("container_proc_root", "/host/proc")
config.SetDefault("container_cgroup_root", "/host/sys/fs/cgroup/")
} else {
config.SetDefault("container_proc_root", "/proc")
// for amazon linux the cgroup directory on host is /cgroup/
// we pick memory.stat to make sure it exists and not empty
if _, err := os.Stat("/cgroup/memory/memory.stat"); !os.IsNotExist(err) {
config.SetDefault("container_cgroup_root", "/cgroup/")
} else {
config.SetDefault("container_cgroup_root", "/sys/fs/cgroup/")
}
}
config.BindEnv("procfs_path")
config.BindEnv("container_proc_root")
config.BindEnv("container_cgroup_root")
config.BindEnvAndSetDefault("proc_root", "/proc")
config.BindEnvAndSetDefault("histogram_aggregates", []string{"max", "median", "avg", "count"})
config.BindEnvAndSetDefault("histogram_percentiles", []string{"0.95"})
// Serializer
config.BindEnvAndSetDefault("use_v2_api.series", false)
config.BindEnvAndSetDefault("use_v2_api.events", false)
config.BindEnvAndSetDefault("use_v2_api.service_checks", false)
// Serializer: allow user to blacklist any kind of payload to be sent
config.BindEnvAndSetDefault("enable_payloads.events", true)
config.BindEnvAndSetDefault("enable_payloads.series", true)
config.BindEnvAndSetDefault("enable_payloads.service_checks", true)
config.BindEnvAndSetDefault("enable_payloads.sketches", true)
config.BindEnvAndSetDefault("enable_payloads.json_to_v1_intake", true)
// Forwarder
config.BindEnvAndSetDefault("forwarder_timeout", 20)
config.BindEnvAndSetDefault("forwarder_retry_queue_max_size", 30)
config.BindEnvAndSetDefault("forwarder_num_workers", 1)
// Dogstatsd
config.BindEnvAndSetDefault("use_dogstatsd", true)
config.BindEnvAndSetDefault("dogstatsd_port", 8125) // Notice: 0 means UDP port closed
config.BindEnvAndSetDefault("dogstatsd_buffer_size", 1024*8) // 8KB buffer
config.BindEnvAndSetDefault("dogstatsd_non_local_traffic", false)
config.BindEnvAndSetDefault("dogstatsd_socket", "") // Notice: empty means feature disabled
config.BindEnvAndSetDefault("dogstatsd_stats_port", 5000)
config.BindEnvAndSetDefault("dogstatsd_stats_enable", false)
config.BindEnvAndSetDefault("dogstatsd_stats_buffer", 10)
config.BindEnvAndSetDefault("dogstatsd_expiry_seconds", 300)
config.BindEnvAndSetDefault("dogstatsd_origin_detection", false) // Only supported for socket traffic
config.BindEnvAndSetDefault("dogstatsd_so_rcvbuf", 0)
config.BindEnvAndSetDefault("dogstatsd_tags", []string{})
config.BindEnvAndSetDefault("statsd_forward_host", "")
config.BindEnvAndSetDefault("statsd_forward_port", 0)
config.BindEnvAndSetDefault("statsd_metric_namespace", "")
// Autoconfig
config.BindEnvAndSetDefault("autoconf_template_dir", "/datadog/check_configs")
config.BindEnvAndSetDefault("exclude_pause_container", true)
config.BindEnvAndSetDefault("ac_include", []string{})
config.BindEnvAndSetDefault("ac_exclude", []string{})
config.BindEnvAndSetDefault("ad_config_poll_interval", int64(10)) // in seconds
config.BindEnvAndSetDefault("extra_listeners", []string{})
config.BindEnvAndSetDefault("extra_config_providers", []string{})
// Docker
config.BindEnvAndSetDefault("docker_query_timeout", int64(5))
config.BindEnvAndSetDefault("docker_labels_as_tags", map[string]string{})
config.BindEnvAndSetDefault("docker_env_as_tags", map[string]string{})
config.BindEnvAndSetDefault("kubernetes_pod_labels_as_tags", map[string]string{})
config.BindEnvAndSetDefault("kubernetes_pod_annotations_as_tags", map[string]string{})
config.BindEnvAndSetDefault("kubernetes_node_labels_as_tags", map[string]string{})
config.BindEnvAndSetDefault("container_cgroup_prefix", "")
// CRI
config.BindEnvAndSetDefault("cri_socket_path", "") // empty is disabled
config.BindEnvAndSetDefault("cri_connection_timeout", int64(1)) // in seconds
config.BindEnvAndSetDefault("cri_query_timeout", int64(5)) // in seconds
// Kubernetes
config.BindEnvAndSetDefault("kubernetes_kubelet_host", "")
config.BindEnvAndSetDefault("kubernetes_http_kubelet_port", 10255)
config.BindEnvAndSetDefault("kubernetes_https_kubelet_port", 10250)
config.BindEnvAndSetDefault("kubelet_tls_verify", true)
config.BindEnvAndSetDefault("collect_kubernetes_events", false)
config.BindEnvAndSetDefault("kubelet_client_ca", "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt")
config.BindEnvAndSetDefault("kubelet_auth_token_path", "")
config.BindEnvAndSetDefault("kubelet_client_crt", "")
config.BindEnvAndSetDefault("kubelet_client_key", "")
config.BindEnvAndSetDefault("kubelet_wait_on_missing_container", 0)
config.BindEnvAndSetDefault("kubernetes_collect_metadata_tags", true)
config.BindEnvAndSetDefault("kubernetes_metadata_tag_update_freq", 60) // Polling frequency of the Agent to the DCA in seconds (gets the local cache if the DCA is disabled)
config.BindEnvAndSetDefault("kubernetes_apiserver_client_timeout", 10)
config.BindEnvAndSetDefault("kubernetes_map_services_on_ip", false) // temporary opt-out of the new mapping logic
config.BindEnvAndSetDefault("kubernetes_apiserver_use_protobuf", false)
// Kube ApiServer
config.BindEnvAndSetDefault("kubernetes_kubeconfig_path", "")
config.BindEnvAndSetDefault("leader_lease_duration", "60")
config.BindEnvAndSetDefault("leader_election", false)
config.BindEnvAndSetDefault("kube_resources_namespace", "")
// Datadog cluster agent
config.BindEnvAndSetDefault("cluster_agent.enabled", false)
config.BindEnvAndSetDefault("cluster_agent.auth_token", "")
config.BindEnvAndSetDefault("cluster_agent.url", "")
config.BindEnvAndSetDefault("cluster_agent.kubernetes_service_name", "datadog-cluster-agent")
config.BindEnvAndSetDefault("metrics_port", "5000")
// ECS
config.BindEnvAndSetDefault("ecs_agent_url", "") // Will be autodetected
config.BindEnvAndSetDefault("ecs_agent_container_name", "ecs-agent")
config.BindEnvAndSetDefault("collect_ec2_tags", false)
// GCE
config.BindEnvAndSetDefault("collect_gce_tags", true)
// Cloud Foundry
config.BindEnvAndSetDefault("cloud_foundry", false)
config.BindEnvAndSetDefault("bosh_id", "")
// JMXFetch
config.BindEnvAndSetDefault("jmx_custom_jars", []string{})
config.BindEnvAndSetDefault("jmx_use_cgroup_memory_limit", false)
// Go_expvar server port
config.BindEnvAndSetDefault("expvar_port", "5000")
// Trace agent
config.BindEnvAndSetDefault("apm_config.enabled", true)
// Process agent
config.BindEnv("process_config.process_dd_url", "")
// Logs Agent
// External Use: modify those parameters to configure the logs-agent.
// enable the logs-agent:
config.BindEnvAndSetDefault("logs_enabled", false)
config.BindEnvAndSetDefault("log_enabled", false) // deprecated, use logs_enabled instead
// collect all logs from all containers:
config.BindEnvAndSetDefault("logs_config.container_collect_all", false)
// add a socks5 proxy:
config.BindEnvAndSetDefault("logs_config.socks5_proxy_address", "")
// send the logs to a proxy:
config.BindEnvAndSetDefault("logs_config.logs_dd_url", "") // must respect format '<HOST>:<PORT>' and '<PORT>' to be an integer
config.BindEnvAndSetDefault("logs_config.logs_no_ssl", false)
// send the logs to the port 443 of the logs-backend via TCP:
config.BindEnvAndSetDefault("logs_config.use_port_443", false)
// increase the read buffer size of the UDP sockets:
config.BindEnvAndSetDefault("logs_config.frame_size", 9000)
// increase the number of files that can be tailed in parallel:
config.BindEnvAndSetDefault("logs_config.open_files_limit", 100)
// Internal Use Only: avoid modifying those configuration parameters, this could lead to unexpected results.
config.BindEnvAndSetDefault("logset", "")
config.BindEnvAndSetDefault("logs_config.run_path", defaultRunPath)
config.BindEnvAndSetDefault("logs_config.dd_url", "agent-intake.logs.datadoghq.com")
config.BindEnvAndSetDefault("logs_config.dd_port", 10516)
config.BindEnvAndSetDefault("logs_config.dev_mode_use_proto", true)
config.BindEnvAndSetDefault("logs_config.dd_url_443", "agent-443-intake.logs.datadoghq.com")
config.BindEnvAndSetDefault("logs_config.stop_grace_period", 30)
// Tagger full cardinality mode
// Undocumented opt-in feature for now
config.BindEnvAndSetDefault("full_cardinality_tagging", false)
config.BindEnvAndSetDefault("histogram_copy_to_distribution", false)
config.BindEnvAndSetDefault("histogram_copy_to_distribution_prefix", "")
config.BindEnv("api_key")
config.BindEnvAndSetDefault("hpa_watcher_polling_freq", 10)
config.BindEnvAndSetDefault("hpa_watcher_gc_period", 60*5) // 5 minutes
config.BindEnvAndSetDefault("external_metrics_provider.enabled", false)
config.BindEnvAndSetDefault("external_metrics_provider.port", 443)
config.BindEnvAndSetDefault("hpa_configmap_name", "datadog-custom-metrics")
config.BindEnvAndSetDefault("external_metrics_provider.refresh_period", 30) // value in seconds. Frequency of batch calls to the ConfigMap persistent store (GlobalStore) by the Leader.
config.BindEnvAndSetDefault("external_metrics_provider.batch_window", 10) // value in seconds. Batch the events from the Autoscalers informer to push updates to the ConfigMap (GlobalStore)
config.BindEnvAndSetDefault("external_metrics_provider.max_age", 120) // value in seconds. 4 cycles from the HPA controller (up to Kubernetes 1.11) is enough to consider a metric stale
config.BindEnvAndSetDefault("external_metrics.aggregator", "avg") // aggregator used for the external metrics. Choose from [avg,sum,max,min]
config.BindEnvAndSetDefault("external_metrics_provider.bucket_size", 60*5) // Window to query to get the metric from Datadog.
config.BindEnvAndSetDefault("external_metrics_provider.rollup", 30) // Bucket size to circumvent time aggregation side effects.
config.BindEnvAndSetDefault("kubernetes_informers_resync_period", 60*5) // value in seconds. Default to 5 minutes
config.BindEnvAndSetDefault("kubernetes_informers_restclient_timeout", 60) // value in seconds
config.BindEnvAndSetDefault("external_metrics_provider.local_copy_refresh_rate", 30) // value in seconds
// Cluster check Autodiscovery
config.BindEnvAndSetDefault("cluster_checks.enabled", false)
config.BindEnvAndSetDefault("cluster_checks.node_expiration_timeout", 30) // value in seconds
setAssetFs(config)
}
var (
ddURLs = map[string]interface{}{
"app.datadoghq.com": nil,
"app.datadoghq.eu": nil,
"app.datad0g.com": nil,
"app.datad0g.eu": nil,
}
)
// GetProxies returns the proxy settings from the configuration
func GetProxies() *Proxy {
return proxies
}
// loadProxyFromEnv overrides the proxy settings with environment variables
func loadProxyFromEnv(config Config) {
// Viper doesn't handle mixing nested variables from files and set
// manually. If we manually set one of the sub value for "proxy" all
// other values from the conf file will be shadowed when using
// 'config.Get("proxy")'. For that reason we first get the value from
// the conf files, overwrite them with the env variables and reset
// everything.
lookupEnvCaseInsensitive := func(key string) (string, bool) {
value, found := os.LookupEnv(key)
if !found {
value, found = os.LookupEnv(strings.ToLower(key))
}
if found {
log.Infof("Found '%v' env var, using it for the Agent proxy settings", key)
}
return value, found
}
lookupEnv := func(key string) (string, bool) {
value, found := os.LookupEnv(key)
if found {
log.Infof("Found '%v' env var, using it for the Agent proxy settings", key)
}
return value, found
}
var isSet bool
p := &Proxy{}
if isSet = config.IsSet("proxy"); isSet {
if err := config.UnmarshalKey("proxy", p); err != nil {
isSet = false
log.Errorf("Could not load proxy setting from the configuration (ignoring): %s", err)
}
}
if HTTP, found := lookupEnv("DD_PROXY_HTTP"); found {
isSet = true
p.HTTP = HTTP
} else if HTTP, found := lookupEnvCaseInsensitive("HTTP_PROXY"); found {
isSet = true
p.HTTP = HTTP
}
if HTTPS, found := lookupEnv("DD_PROXY_HTTPS"); found {
isSet = true
p.HTTPS = HTTPS
} else if HTTPS, found := lookupEnvCaseInsensitive("HTTPS_PROXY"); found {
isSet = true
p.HTTPS = HTTPS
}
if noProxy, found := lookupEnv("DD_PROXY_NO_PROXY"); found {
isSet = true
p.NoProxy = strings.Split(noProxy, " ") // space-separated list, consistent with viper
} else if noProxy, found := lookupEnvCaseInsensitive("NO_PROXY"); found {
isSet = true
p.NoProxy = strings.Split(noProxy, ",") // comma-separated list, consistent with other tools that use the NO_PROXY env var
}
// We have to set each value individually so both config.Get("proxy")
// and config.Get("proxy.http") work
if isSet {
config.Set("proxy.http", p.HTTP)
config.Set("proxy.https", p.HTTPS)
config.Set("proxy.no_proxy", p.NoProxy)
proxies = p
}
}
// Load reads configs files and initializes the config module
func Load() error {
log.Infof("config.Load()")
if err := Datadog.ReadInConfig(); err != nil {
log.Warnf("config.load() error %v", err)
return err
}
log.Infof("config.load succeeded")
// We have to init the secrets package before we can use it to decrypt
// anything.
secrets.Init(
Datadog.GetString("secret_backend_command"),
Datadog.GetStringSlice("secret_backend_arguments"),
Datadog.GetInt("secret_backend_timeout"),
Datadog.GetInt("secret_backend_output_max_size"),
)
if Datadog.IsSet("secret_backend_command") {
// Viper doesn't expose the final location of the file it
// loads. Since we are searching for 'datadog.yaml' in multiple
// localtions we let viper determine the one to use before
// updating it.
conf, err := yaml.Marshal(Datadog.AllSettings())
if err != nil {
return fmt.Errorf("unable to marshal configuration to YAML to decrypt secrets: %v", err)
}
finalConfig, err := secrets.Decrypt(conf)
if err != nil {
return fmt.Errorf("unable to decrypt secret from datadog.yaml: %v", err)
}
r := bytes.NewReader(finalConfig)
if err = Datadog.MergeConfig(r); err != nil {
return fmt.Errorf("could not update main configuration after decrypting secrets: %v", err)
}
}
loadProxyFromEnv(Datadog)
sanitizeAPIKey(Datadog)
return nil
}
// Avoid log ingestion breaking because of a newline in the API key
func sanitizeAPIKey(config Config) {
config.Set("api_key", strings.TrimSpace(config.GetString("api_key")))
}
// GetMainInfraEndpoint returns the main DD Infra URL defined in the config, based on the value of `site` and `dd_url`
func GetMainInfraEndpoint() string {
return getMainInfraEndpointWithConfig(Datadog)
}
// GetMainEndpoint returns the main DD URL defined in the config, based on `site` and the prefix, or ddURLKey
func GetMainEndpoint(prefix string, ddURLKey string) string {
return GetMainEndpointWithConfig(Datadog, prefix, ddURLKey)
}
// GetMultipleEndpoints returns the api keys per domain specified in the main agent config
func GetMultipleEndpoints() (map[string][]string, error) {
return getMultipleEndpointsWithConfig(Datadog)
}
// getDomainPrefix provides the right prefix for agent X.Y.Z
func getDomainPrefix(app string) string {
v, _ := version.New(version.AgentVersion, version.Commit)
return fmt.Sprintf("%d-%d-%d-%s.agent", v.Major, v.Minor, v.Patch, app)
}
// AddAgentVersionToDomain prefixes the domain with the agent version: X-Y-Z.domain
func AddAgentVersionToDomain(DDURL string, app string) (string, error) {
u, err := url.Parse(DDURL)
if err != nil {
return "", err
}
// we don't udpdate unknown URL (ie: proxy or custom StatsD server)
if _, found := ddURLs[u.Host]; !found {
return DDURL, nil
}
subdomain := strings.Split(u.Host, ".")[0]
newSubdomain := getDomainPrefix(app)
u.Host = strings.Replace(u.Host, subdomain, newSubdomain, 1)
return u.String(), nil
}
func getMainInfraEndpointWithConfig(config Config) string {
return GetMainEndpointWithConfig(config, infraURLPrefix, "dd_url")
}
// GetMainEndpointWithConfig implements the logic to extract the DD URL from a config, based on `site` and ddURLKey
func GetMainEndpointWithConfig(config Config, prefix string, ddURLKey string) (resolvedDDURL string) {
if config.IsSet(ddURLKey) && config.GetString(ddURLKey) != "" {
// value under ddURLKey takes precedence over 'site'
resolvedDDURL = config.GetString(ddURLKey)
if config.IsSet("site") {
log.Infof("'site' and '%s' are both set in config: setting main endpoint to '%s': \"%s\"", ddURLKey, ddURLKey, config.GetString(ddURLKey))
}
} else if config.GetString("site") != "" {
resolvedDDURL = prefix + strings.TrimSpace(config.GetString("site"))
} else {
resolvedDDURL = prefix + DefaultSite
}
return
}
// getMultipleEndpointsWithConfig implements the logic to extract the api keys per domain from an agent config
func getMultipleEndpointsWithConfig(config Config) (map[string][]string, error) {
// Validating domain
ddURL := getMainInfraEndpointWithConfig(config)
_, err := url.Parse(ddURL)
if err != nil {
return nil, fmt.Errorf("could not parse main endpoint: %s", err)
}
keysPerDomain := map[string][]string{
ddURL: {
config.GetString("api_key"),
},
}
var additionalEndpoints map[string][]string
err = config.UnmarshalKey("additional_endpoints", &additionalEndpoints)
if err != nil {
return keysPerDomain, err
}
// merge additional endpoints into keysPerDomain
for domain, apiKeys := range additionalEndpoints {
// Validating domain
_, err := url.Parse(domain)
if err != nil {
return nil, fmt.Errorf("could not parse url from 'additional_endpoints' %s: %s", domain, err)
}
if _, ok := keysPerDomain[domain]; ok {
for _, apiKey := range apiKeys {
keysPerDomain[domain] = append(keysPerDomain[domain], apiKey)
}
} else {
keysPerDomain[domain] = apiKeys
}
}
// dedupe api keys and remove domains with no api keys (or empty ones)
for domain, apiKeys := range keysPerDomain {
dedupedAPIKeys := make([]string, 0, len(apiKeys))
seen := make(map[string]bool)
for _, apiKey := range apiKeys {
trimmedAPIKey := strings.TrimSpace(apiKey)
if _, ok := seen[trimmedAPIKey]; !ok && trimmedAPIKey != "" {
seen[trimmedAPIKey] = true
dedupedAPIKeys = append(dedupedAPIKeys, trimmedAPIKey)
}
}
if len(dedupedAPIKeys) > 0 {
keysPerDomain[domain] = dedupedAPIKeys
} else {
log.Infof("No API key provided for domain \"%s\", removing domain from endpoints", domain)
delete(keysPerDomain, domain)
}
}
return keysPerDomain, nil
}
// IsContainerized returns whether the Agent is running on a Docker container
func IsContainerized() bool {
return os.Getenv("DOCKER_DD_AGENT") != ""
}
// FileUsedDir returns the absolute path to the folder containing the config
// file used to populate the registry
func FileUsedDir() string {
return filepath.Dir(Datadog.ConfigFileUsed())
}
// IsKubernetes returns whether the Agent is running on a kubernetes cluster
func IsKubernetes() bool {
// Injected by Kubernetes itself
if os.Getenv("KUBERNETES_SERVICE_PORT") != "" {
return true
}
// support of Datadog environment variable for Kubernetes
if os.Getenv("KUBERNETES") != "" {
return true
}
return false
}
|
[
"\"DOCKER_DD_AGENT\"",
"\"KUBERNETES_SERVICE_PORT\"",
"\"KUBERNETES\""
] |
[] |
[
"DOCKER_DD_AGENT",
"KUBERNETES",
"KUBERNETES_SERVICE_PORT"
] |
[]
|
["DOCKER_DD_AGENT", "KUBERNETES", "KUBERNETES_SERVICE_PORT"]
|
go
| 3 | 0 | |
models/process.go
|
package models
import (
"fmt"
"log"
"os"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/joho/godotenv"
"github.com/rapando/hash-engine/utils"
"github.com/streadway/amqp"
)
var (
q *amqp.Connection
qChan *amqp.Channel
)
func Process() {
start := time.Now()
defer func() {
fmt.Printf("\nDONE : %s\n", time.Since(start))
}()
var err error
err = godotenv.Load()
if err != nil {
log.Printf("unable to load dotenv because %v", err)
return
}
q, err = utils.QConnect(os.Getenv("Q_URI"))
if err != nil {
log.Printf("unable to connect to rabbitmq. exit")
os.Exit(3)
}
qChan, err = q.Channel()
if err != nil {
log.Printf("unable to create a rabbitmq channel because %v", err)
os.Exit(3)
}
var chars = "`1234567890-=\\][poiuytrewqasdfghjkl;'/.," +
"mnbvcxz~!@#$%^&*()_+|}{POIUYTREWQASDFGHJKL:\"?><MNBVCXZ "
chars = "randomcharacters"
var n = int64(len(chars))
combinations := GetNoOfCombinations(n)
log.Printf("a string with %d characters has %d combinations", n, combinations.Int64())
log.Println("getting combinations and pusblishing them to redis")
GetCombinations(chars, combinations, qChan)
}
|
[
"\"Q_URI\""
] |
[] |
[
"Q_URI"
] |
[]
|
["Q_URI"]
|
go
| 1 | 0 | |
pkg/scalers/azure_eventhub_scaler_test.go
|
package scalers
import (
"context"
"fmt"
"net/url"
"os"
"testing"
"github.com/kedacore/keda/pkg/scalers/azure"
eventhub "github.com/Azure/azure-event-hubs-go"
"github.com/Azure/azure-storage-blob-go/azblob"
)
const (
eventHubConsumerGroup = "testEventHubConsumerGroup"
eventHubConnectionSetting = "testEventHubConnectionSetting"
storageConnectionSetting = "testStorageConnectionSetting"
testEventHubNamespace = "kedatesteventhub"
testEventHubName = "eventhub1"
checkpointFormat = "{\"SequenceNumber\":%d,\"PartitionId\":\"%s\"}"
testContainerName = "azure-webjobs-eventhub"
)
type parseEventHubMetadataTestData struct {
metadata map[string]string
isError bool
}
type eventHubMetricIdentifier struct {
metadataTestData *parseEventHubMetadataTestData
name string
}
var sampleEventHubResolvedEnv = map[string]string{eventHubConnectionSetting: "none", storageConnectionSetting: "none"}
var parseEventHubMetadataDataset = []parseEventHubMetadataTestData{
{map[string]string{}, true},
// properly formed event hub metadata
{map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "consumerGroup": eventHubConsumerGroup, "connectionFromEnv": eventHubConnectionSetting, "unprocessedEventThreshold": "15"}, false},
// missing event hub connection setting
{map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "consumerGroup": eventHubConsumerGroup, "unprocessedEventThreshold": "15"}, true},
// missing storage connection setting
{map[string]string{"consumerGroup": eventHubConsumerGroup, "connectionFromEnv": eventHubConnectionSetting, "unprocessedEventThreshold": "15"}, true},
// missing event hub consumer group - should replace with default
{map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "connectionFromEnv": eventHubConnectionSetting, "unprocessedEventThreshold": "15"}, false},
// missing unprocessed event threshold - should replace with default
{map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "consumerGroup": eventHubConsumerGroup, "connectionFromEnv": eventHubConnectionSetting}, false},
// added blob container details
{map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "consumerGroup": eventHubConsumerGroup, "connectionFromEnv": eventHubConnectionSetting, "blobContainer": testContainerName}, false},
}
var eventHubMetricIdentifiers = []eventHubMetricIdentifier{
{&parseEventHubMetadataDataset[1], "azure-eventhub-none-testEventHubConsumerGroup"},
}
var testEventHubScaler = azureEventHubScaler{
metadata: &eventHubMetadata{
eventHubInfo: azure.EventHubInfo{
EventHubConnection: "none",
StorageConnection: "none",
},
},
}
func TestParseEventHubMetadata(t *testing.T) {
// Test first with valid resolved environment
for _, testData := range parseEventHubMetadataDataset {
_, err := parseAzureEventHubMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: sampleEventHubResolvedEnv, AuthParams: map[string]string{}})
if err != nil && !testData.isError {
t.Errorf("Expected success but got error: %s", err)
}
if testData.isError && err == nil {
t.Error("Expected error and got success")
}
}
}
func TestGetUnprocessedEventCountInPartition(t *testing.T) {
t.Log("This test will use the environment variable EVENTHUB_CONNECTION_STRING and STORAGE_CONNECTION_STRING if it is set.")
t.Log("If set, it will connect to the storage account and event hub to determine how many messages are in the event hub.")
t.Logf("EventHub has 1 message in partition 0 and 0 messages in partition 1")
eventHubKey := os.Getenv("AZURE_EVENTHUB_KEY")
storageConnectionString := os.Getenv("TEST_STORAGE_CONNECTION_STRING")
if eventHubKey != "" && storageConnectionString != "" {
eventHubConnectionString := fmt.Sprintf("Endpoint=sb://%s.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=%s;EntityPath=%s", testEventHubNamespace, eventHubKey, testEventHubName)
storageCredentials, endpoint, err := azure.ParseAzureStorageBlobConnection("none", storageConnectionString, "")
if err != nil {
t.Error(err)
t.FailNow()
}
t.Log("Creating event hub client...")
hubOption := eventhub.HubWithPartitionedSender("0")
client, err := eventhub.NewHubFromConnectionString(eventHubConnectionString, hubOption)
if err != nil {
t.Fatalf("Expected to create event hub client but got error: %s", err)
}
if eventHubConnectionString == "" {
t.Fatal("Event hub connection string needed for test")
}
if storageConnectionString == "" {
t.Fatal("Storage connection string needed for test")
}
// Can actually test that numbers return
testEventHubScaler.metadata.eventHubInfo.EventHubConnection = eventHubConnectionString
testEventHubScaler.metadata.eventHubInfo.StorageConnection = storageConnectionString
testEventHubScaler.client = client
testEventHubScaler.metadata.eventHubInfo.EventHubConsumerGroup = "$Default"
// Send 1 message to event hub first
t.Log("Sending message to event hub")
err = SendMessageToEventHub(client)
if err != nil {
t.Error(err)
}
// Create fake checkpoint with path azure-webjobs-eventhub/<eventhub-namespace-name>.servicebus.windows.net/<eventhub-name>/$Default
t.Log("Creating container..")
ctx, err := CreateNewCheckpointInStorage(endpoint, storageCredentials, client)
if err != nil {
t.Errorf("err creating container: %s", err)
}
partitionInfo0, err := testEventHubScaler.client.GetPartitionInformation(ctx, "0")
if err != nil {
t.Errorf("unable to get partitionRuntimeInfo for partition 0: %s", err)
}
partitionInfo1, err := testEventHubScaler.client.GetPartitionInformation(ctx, "0")
if err != nil {
t.Errorf("unable to get partitionRuntimeInfo for partition 1: %s", err)
}
unprocessedEventCountInPartition0, _, err0 := testEventHubScaler.GetUnprocessedEventCountInPartition(ctx, partitionInfo0)
unprocessedEventCountInPartition1, _, err1 := testEventHubScaler.GetUnprocessedEventCountInPartition(ctx, partitionInfo1)
if err0 != nil {
t.Errorf("Expected success but got error: %s", err0)
}
if err1 != nil {
t.Errorf("Expected success but got error: %s", err1)
}
if unprocessedEventCountInPartition0 != 1 {
t.Errorf("Expected 1 message in partition 0, got %d", unprocessedEventCountInPartition0)
}
if unprocessedEventCountInPartition1 != 0 {
t.Errorf("Expected 0 messages in partition 1, got %d", unprocessedEventCountInPartition1)
}
// Delete container - this will also delete checkpoint
t.Log("Deleting container...")
err = DeleteContainerInStorage(ctx, endpoint, storageCredentials)
if err != nil {
t.Error(err)
}
}
}
func TestGetUnprocessedEventCountIfNoCheckpointExists(t *testing.T) {
t.Log("This test will use the environment variable EVENTHUB_CONNECTION_STRING and STORAGE_CONNECTION_STRING if it is set.")
t.Log("If set, it will connect to the storage account and event hub to determine how many messages are in the event hub.")
t.Logf("EventHub has 1 message in partition 0 and 0 messages in partition 1")
eventHubKey := os.Getenv("AZURE_EVENTHUB_KEY")
storageConnectionString := os.Getenv("TEST_STORAGE_CONNECTION_STRING")
if eventHubKey != "" && storageConnectionString != "" {
eventHubConnectionString := fmt.Sprintf("Endpoint=sb://%s.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=%s;EntityPath=%s", testEventHubNamespace, eventHubKey, testEventHubName)
t.Log("Creating event hub client...")
hubOption := eventhub.HubWithPartitionedSender("0")
client, err := eventhub.NewHubFromConnectionString(eventHubConnectionString, hubOption)
if err != nil {
t.Errorf("Expected to create event hub client but got error: %s", err)
}
if eventHubConnectionString == "" {
t.Fatal("Event hub connection string needed for test")
}
if storageConnectionString == "" {
t.Fatal("Storage connection string needed for test")
}
// Can actually test that numbers return
testEventHubScaler.metadata.eventHubInfo.EventHubConnection = eventHubConnectionString
testEventHubScaler.metadata.eventHubInfo.StorageConnection = storageConnectionString
testEventHubScaler.client = client
testEventHubScaler.metadata.eventHubInfo.EventHubConsumerGroup = "$Default"
// Send 1 message to event hub first
t.Log("Sending message to event hub")
err = SendMessageToEventHub(client)
if err != nil {
t.Error(err)
}
ctx := context.Background()
partitionInfo0, err := testEventHubScaler.client.GetPartitionInformation(ctx, "0")
if err != nil {
t.Errorf("unable to get partitionRuntimeInfo for partition 0: %s", err)
}
partitionInfo1, err := testEventHubScaler.client.GetPartitionInformation(ctx, "1")
if err != nil {
t.Errorf("unable to get partitionRuntimeInfo for partition 1: %s", err)
}
unprocessedEventCountInPartition0, _, err0 := testEventHubScaler.GetUnprocessedEventCountInPartition(ctx, partitionInfo0)
unprocessedEventCountInPartition1, _, err1 := testEventHubScaler.GetUnprocessedEventCountInPartition(ctx, partitionInfo1)
if err0 != nil {
t.Errorf("Expected success but got error: %s", err0)
}
if err1 != nil {
t.Errorf("Expected success but got error: %s", err1)
}
if unprocessedEventCountInPartition0 != 1 {
t.Errorf("Expected 1 message in partition 0, got %d", unprocessedEventCountInPartition0)
}
if unprocessedEventCountInPartition1 != 0 {
t.Errorf("Expected 0 messages in partition 1, got %d", unprocessedEventCountInPartition1)
}
}
}
func TestGetUnprocessedEventCountWithoutCheckpointReturning1Message(t *testing.T) {
//After the first message the lastsequencenumber init to 0
partitionInfo := eventhub.HubPartitionRuntimeInformation{
PartitionID: "0",
LastSequenceNumber: 0,
BeginningSequenceNumber: 0,
}
unprocessedEventCountInPartition0 := GetUnprocessedEventCountWithoutCheckpoint(&partitionInfo)
if unprocessedEventCountInPartition0 != 1 {
t.Errorf("Expected 1 messages in partition 0, got %d", unprocessedEventCountInPartition0)
}
}
func TestGetUnprocessedEventCountWithoutCheckpointReturning0Message(t *testing.T) {
//An empty partition starts with an equal value on last-/beginning-sequencenumber other than 0
partitionInfo := eventhub.HubPartitionRuntimeInformation{
PartitionID: "0",
LastSequenceNumber: 255,
BeginningSequenceNumber: 255,
}
unprocessedEventCountInPartition0 := GetUnprocessedEventCountWithoutCheckpoint(&partitionInfo)
if unprocessedEventCountInPartition0 != 0 {
t.Errorf("Expected 0 messages in partition 0, got %d", unprocessedEventCountInPartition0)
}
}
func TestGetUnprocessedEventCountWithoutCheckpointReturning2Messages(t *testing.T) {
partitionInfo := eventhub.HubPartitionRuntimeInformation{
PartitionID: "0",
LastSequenceNumber: 1,
BeginningSequenceNumber: 0,
}
unprocessedEventCountInPartition0 := GetUnprocessedEventCountWithoutCheckpoint(&partitionInfo)
if unprocessedEventCountInPartition0 != 2 {
t.Errorf("Expected 0 messages in partition 0, got %d", unprocessedEventCountInPartition0)
}
}
func TestGetATotalLagOf20For2PartitionsOn100UnprocessedEvents(t *testing.T) {
lag := getTotalLagRelatedToPartitionAmount(100, 2, 10)
if lag != 20 {
t.Errorf("Expected a lag of 20 for 2 partitions, got %d", lag)
}
}
func TestGetATotalLagOf100For20PartitionsOn100UnprocessedEvents(t *testing.T) {
lag := getTotalLagRelatedToPartitionAmount(100, 20, 10)
if lag != 100 {
t.Errorf("Expected a lag of 100 for 20 partitions, got %d", lag)
}
}
func CreateNewCheckpointInStorage(endpoint *url.URL, credential azblob.Credential, client *eventhub.Hub) (context.Context, error) {
urlPath := fmt.Sprintf("%s.servicebus.windows.net/%s/$Default/", testEventHubNamespace, testEventHubName)
// Create container
ctx := context.Background()
path, _ := url.Parse(testContainerName)
url := endpoint.ResolveReference(path)
containerURL := azblob.NewContainerURL(*url, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
_, err := containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
if err != nil {
return ctx, fmt.Errorf("failed to create container: %s", err)
}
// Create directory checkpoints will be in
err = os.MkdirAll(urlPath, 0777)
if err != nil {
return ctx, fmt.Errorf("Unable to create directory: %s", err)
}
defer os.RemoveAll(urlPath)
file, err := os.Create(fmt.Sprintf("%s/file", urlPath))
if err != nil {
return ctx, fmt.Errorf("Unable to create folder: %s", err)
}
defer file.Close()
blobFolderURL := containerURL.NewBlockBlobURL(urlPath)
// Upload file
_, err = azblob.UploadFileToBlockBlob(ctx, file, blobFolderURL, azblob.UploadToBlockBlobOptions{
BlockSize: 4 * 1024 * 1024,
Parallelism: 16})
if err != nil {
return ctx, fmt.Errorf("Err uploading file to blob: %s", err)
}
// Make checkpoint blob files
if err := CreatePartitionFile(ctx, urlPath, "0", containerURL, client); err != nil {
return ctx, fmt.Errorf("failed to create partitionID 0 file: %s", err)
}
if err := CreatePartitionFile(ctx, urlPath, "1", containerURL, client); err != nil {
return ctx, fmt.Errorf("failed to create partitionID 1 file: %s", err)
}
return ctx, nil
}
func CreatePartitionFile(ctx context.Context, urlPathToPartition string, partitionID string, containerURL azblob.ContainerURL, client *eventhub.Hub) error {
// Create folder structure
filePath := urlPathToPartition + partitionID
partitionInfo, err := client.GetPartitionInformation(ctx, partitionID)
if err != nil {
return fmt.Errorf("unable to get partition info: %s", err)
}
f, err := os.Create(partitionID)
if err != nil {
return fmt.Errorf("unable to create file: %s", err)
}
if partitionID == "0" {
_, err = f.WriteString(fmt.Sprintf(checkpointFormat, partitionInfo.LastSequenceNumber-1, partitionID))
if err != nil {
return fmt.Errorf("unable to write to file: %s", err)
}
} else {
_, err = f.WriteString(fmt.Sprintf(checkpointFormat, partitionInfo.LastSequenceNumber, partitionID))
if err != nil {
return fmt.Errorf("unable to write to file: %s", err)
}
}
// Write checkpoints to file
file, err := os.Open(partitionID)
if err != nil {
return fmt.Errorf("Unable to create file: %s", err)
}
defer file.Close()
blobFileURL := containerURL.NewBlockBlobURL(filePath)
// Upload folder
_, err = azblob.UploadFileToBlockBlob(ctx, file, blobFileURL, azblob.UploadToBlockBlobOptions{
BlockSize: 4 * 1024 * 1024,
Parallelism: 16})
if err != nil {
return fmt.Errorf("Err uploading file to blob: %s", err)
}
return nil
}
func SendMessageToEventHub(client *eventhub.Hub) error {
ctx := context.Background()
err := client.Send(ctx, eventhub.NewEventFromString("1"))
if err != nil {
return fmt.Errorf("Error sending msg: %s", err)
}
return nil
}
// DeleteContainerInStorage removes the test checkpoint container from blob
// storage after a scaler test run.
func DeleteContainerInStorage(ctx context.Context, endpoint *url.URL, credential azblob.Credential) error {
	// NOTE: the original shadowed the net/url package with a local named
	// "url"; renamed so the package stays usable in this scope.
	containerPath, _ := url.Parse(testContainerName)
	containerAddress := endpoint.ResolveReference(containerPath)
	containerURL := azblob.NewContainerURL(*containerAddress, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
	_, err := containerURL.Delete(ctx, azblob.ContainerAccessConditions{
		ModifiedAccessConditions: azblob.ModifiedAccessConditions{},
	})
	if err != nil {
		return fmt.Errorf("failed to delete container in blob storage: %s", err)
	}
	return nil
}
// TestEventHubGetMetricSpecForScaling verifies that the external metric name
// exposed by the Event Hub scaler matches the expected name for each test case.
func TestEventHubGetMetricSpecForScaling(t *testing.T) {
	for _, testData := range eventHubMetricIdentifiers {
		// Parse the trigger metadata using the sample resolved environment.
		meta, err := parseAzureEventHubMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: sampleEventHubResolvedEnv, AuthParams: map[string]string{}})
		if err != nil {
			t.Fatal("Could not parse metadata:", err)
		}
		// Build a scaler with a nil client; GetMetricSpecForScaling only
		// reads the parsed metadata.
		mockEventHubScaler := azureEventHubScaler{meta, nil}
		metricSpec := mockEventHubScaler.GetMetricSpecForScaling()
		metricName := metricSpec[0].External.Metric.Name
		if metricName != testData.name {
			t.Error("Wrong External metric source name:", metricName)
		}
	}
}
|
[
"\"AZURE_EVENTHUB_KEY\"",
"\"TEST_STORAGE_CONNECTION_STRING\"",
"\"AZURE_EVENTHUB_KEY\"",
"\"TEST_STORAGE_CONNECTION_STRING\""
] |
[] |
[
"AZURE_EVENTHUB_KEY",
"TEST_STORAGE_CONNECTION_STRING"
] |
[]
|
["AZURE_EVENTHUB_KEY", "TEST_STORAGE_CONNECTION_STRING"]
|
go
| 2 | 0 | |
helpers/utils.py
|
import os
def get_wolf_app_id():
    """Return the Wolfram|Alpha application id.

    Reads the ``WOLF_APP_ID`` environment variable, falling back to a
    placeholder key when the variable is unset.
    """
    configured = os.environ.get('WOLF_APP_ID')
    return configured if configured is not None else 'YOUR_WOLF_API_KEY_HERE'
# Per-backend response templates (MathJax/LaTeX-style markup with %s slots).
# Each backend has a SUCCESS template (expression, computed value, plaintext —
# BING omits the computed slot) and a FAILURE template (the failed expression).
# NOTE(review): sequences like '\[' and '\c' are not recognized string escapes
# (DeprecationWarning on modern Python); converting to raw strings would change
# the value of '\\textbf', so the literals are left exactly as-is.
TEMPLATES = {
    'WOLF': {
        'SUCCESS': '\[\color{blue}{\\textbf{Expression:}}\]%s' \
                   '\[\color{green}{\\textbf{Computed:}}\]%s' \
                   '\[\\textbf{Plaintext:}\]%s',
        'FAILURE': '\[\color{red}{\\textbf{Unable to compute:}}\]%s'
    },
    'GCAL': {
        'SUCCESS': '\[\color{blue}{\\textbf{Expression:}}\]%s' \
                   '\[\color{green}{\\textbf{Computed:}}\]%s' \
                   '\[\\textbf{Plaintext:}\]%s',
        'FAILURE': '\[\color{red}{\\textbf{Unable to compute:}}\]%s'
    },
    'BING': {
        'SUCCESS': '\[\color{blue}{\\textbf{Expression:}}\]%s' \
                   '\[\\textbf{Plaintext:}\]%s',
        'FAILURE': '\[\color{red}{\\textbf{Unable to compute:}}\]%s'
    },
}
|
[] |
[] |
[
"WOLF_APP_ID"
] |
[]
|
["WOLF_APP_ID"]
|
python
| 1 | 0 | |
tools/test.py
|
import argparse
import os, sys
sys.path.append(os.getcwd())
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from tools.fuse_conv_bn import fuse_module
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.core import wrap_fp16_model
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
from PoseDet import assigners
from PoseDet import backbones
from PoseDet import datasets
from PoseDet import detectors
from PoseDet import losses
from PoseDet import pipelines
from PoseDet import roi_heads
def parse_args():
    """Build and parse the command-line arguments for model testing/eval.

    Also propagates ``--local_rank`` into the ``LOCAL_RANK`` environment
    variable for distributed launchers that expect it.
    """
    cli = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')
    cli.add_argument('--checkpoint', help='checkpoint file')
    cli.add_argument('--config', default=None, help='test config file path')
    cli.add_argument('--out', help='output result file in pickle format')
    cli.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    cli.add_argument(
        '--format-only',
        action='store_true',
        help='Format the output results without perform evaluation. It is'
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
    cli.add_argument(
        '--eval',
        type=str,
        default='bbox',
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
        ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    cli.add_argument('--show', action='store_true', help='show results')
    cli.add_argument('--show-pose', action='store_true', help='show results')
    cli.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    cli.add_argument(
        '--show-score-thr',
        type=float,
        default=0.3,
        help='score threshold (default: 0.3)')
    cli.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results.')
    cli.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu-collect is not specified')
    cli.add_argument(
        '--options', nargs='+', action=DictAction, help='arguments in dict')
    cli.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    cli.add_argument('--local_rank', type=int, default=0)
    parsed = cli.parse_args()
    # Mirror --local_rank into the environment if the launcher did not set it.
    os.environ.setdefault('LOCAL_RANK', str(parsed.local_rank))
    return parsed
def main():
    """Entry point: build dataset and model from config, run single- or
    multi-GPU testing, then optionally dump, format and evaluate results."""
    # Default config used when --config is not supplied on the command line.
    config_file = './PoseDet/configs/PoseDet.py'
    args = parse_args()
    if args.config:
        config_file = args.config
    else:
        args.config = config_file
    # assert args.out or args.eval or args.format_only or args.show \
    #     or args.show_dir, \
    #     ('Please specify at least one operation (save/eval/format/show the '
    #      'results / save the results) with the argument "--out", "--eval"'
    #      ', "--format-only", "--show" or "--show-dir"')
    # if args.eval and args.format_only:
    #     raise ValueError('--eval and --format_only cannot be both specified')
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Test-time: no pretrained weights, dataset in test mode.
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)
    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)
    # old versions did not save class info in checkpoints, this walkaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    if not distributed:
        # Single-GPU path (also used when launcher == 'none').
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
                                  args.show_score_thr, SHOW_POSE=args.show_pose)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)
    # Only rank 0 writes/evaluates results.
    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.options is None else args.options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, **kwargs)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES",
"LOCAL_RANK"
] |
[]
|
["CUDA_VISIBLE_DEVICES", "LOCAL_RANK"]
|
python
| 2 | 0 | |
sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/config.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2010, 2012-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2008 [email protected] <[email protected]>
# Copyright (c) 2010 Julien Jehannet <[email protected]>
# Copyright (c) 2013 Google, Inc.
# Copyright (c) 2013 John McGehee <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014-2016 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 John Kirkham <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2015 Aru Sahni <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""utilities for Pylint configuration :
* pylintrc
* pylint.d (PYLINTHOME)
"""
from __future__ import print_function
# TODO(cpopa): this module contains the logic for the
# configuration parser and for the command line parser,
# but it's really coupled to optparse's internals.
# The code was copied almost verbatim from logilab.common,
# in order to not depend on it anymore and it will definitely
# need a cleanup. It could be completely reengineered as well.
import contextlib
import collections
import copy
import io
import optparse
import os
import pickle
import re
import sys
import time
try:
import configparser
except ImportError:
from six.moves import configparser
from six.moves import range
from pylint import utils
# Resolve the directory used to store persistent run data (PYLINT_HOME):
# an explicit PYLINTHOME env var wins; otherwise ~/.pylint.d, falling back
# to a relative .pylint.d when the user home cannot be expanded.
USER_HOME = os.path.expanduser('~')
if 'PYLINTHOME' in os.environ:
    PYLINT_HOME = os.environ['PYLINTHOME']
    if USER_HOME == '~':
        # expanduser failed (no HOME); derive the user home from PYLINTHOME.
        USER_HOME = os.path.dirname(PYLINT_HOME)
elif USER_HOME == '~':
    PYLINT_HOME = ".pylint.d"
else:
    PYLINT_HOME = os.path.join(USER_HOME, '.pylint.d')
def _get_pdata_path(base_name, recurs):
    """Return the path of the persisted-stats file for *base_name*."""
    # Path separators in the module name become underscores in the file name.
    sanitized = base_name.replace(os.sep, '_')
    return os.path.join(PYLINT_HOME, '{}{}.stats'.format(sanitized, recurs))
def load_results(base):
    """Unpickle previously saved stats for *base*.

    Returns an empty dict on any failure (missing file, corrupt pickle...);
    persisted stats are best-effort.
    """
    stats_path = _get_pdata_path(base, 1)
    try:
        with open(stats_path, _PICK_LOAD) as stream:
            return pickle.load(stream)
    except Exception:  # pylint: disable=broad-except
        return {}
# Pickle file modes: text mode on Python 2, binary mode on Python 3.
if sys.version_info < (3, 0):
    _PICK_DUMP, _PICK_LOAD = 'w', 'r'
else:
    _PICK_DUMP, _PICK_LOAD = 'wb', 'rb'
def save_results(results, base):
    """Pickle *results* into the stats file for *base*.

    Creates PYLINT_HOME on demand; failures are reported on stderr but never
    raised (stats persistence is best-effort).
    """
    if not os.path.exists(PYLINT_HOME):
        try:
            os.mkdir(PYLINT_HOME)
        except OSError:
            print('Unable to create directory %s' % PYLINT_HOME, file=sys.stderr)
    target = _get_pdata_path(base, 1)
    try:
        with open(target, _PICK_DUMP) as out:
            pickle.dump(results, out)
    except (IOError, OSError) as ex:
        print('Unable to create file %s: %s' % (target, ex), file=sys.stderr)
def find_pylintrc():
    """search the pylint rc file and return its path if it find it, else None

    Search order: current directory ('pylintrc' then '.pylintrc'), then — if
    inside a package — the topmost package ancestor, then the PYLINTRC env
    var, then the user home (~/.pylintrc, ~/.config/pylintrc), finally
    /etc/pylintrc.
    """
    # is there a pylint rc file in the current directory ?
    if os.path.exists('pylintrc'):
        return os.path.abspath('pylintrc')
    if os.path.exists('.pylintrc'):
        return os.path.abspath('.pylintrc')
    if os.path.isfile('__init__.py'):
        # We are inside a package: walk up to the package root and look for
        # an rc file next to it.
        curdir = os.path.abspath(os.getcwd())
        while os.path.isfile(os.path.join(curdir, '__init__.py')):
            curdir = os.path.abspath(os.path.join(curdir, '..'))
            if os.path.isfile(os.path.join(curdir, 'pylintrc')):
                return os.path.join(curdir, 'pylintrc')
            if os.path.isfile(os.path.join(curdir, '.pylintrc')):
                return os.path.join(curdir, '.pylintrc')
    if 'PYLINTRC' in os.environ and os.path.exists(os.environ['PYLINTRC']):
        pylintrc = os.environ['PYLINTRC']
    else:
        user_home = os.path.expanduser('~')
        if user_home == '~' or user_home == '/root':
            # No usable home directory: fall back to a relative path.
            pylintrc = ".pylintrc"
        else:
            pylintrc = os.path.join(user_home, '.pylintrc')
            if not os.path.isfile(pylintrc):
                pylintrc = os.path.join(user_home, '.config', 'pylintrc')
    if not os.path.isfile(pylintrc):
        if os.path.isfile('/etc/pylintrc'):
            pylintrc = '/etc/pylintrc'
        else:
            pylintrc = None
    return pylintrc
# Locate the configuration file once at import time.
PYLINTRC = find_pylintrc()

# Help text about the environment variables honored above; '% globals()' is a
# no-op formatting pass (the text contains no conversion specifiers).
ENV_HELP = '''
The following environment variables are used:
* PYLINTHOME
Path to the directory where the persistent for the run will be stored. If
not found, it defaults to ~/.pylint.d/ or .pylint.d (in the current working
directory).
* PYLINTRC
Path to the configuration file. See the documentation for the method used
to search for configuration file.
''' % globals()
class UnsupportedAction(Exception):
    """raised by set_option when it doesn't know what to do for an action"""
def _multiple_choice_validator(choices, name, value):
    """Validate a csv option value whose every element must be in *choices*.

    Returns the parsed list; raises optparse.OptionValueError on the first
    element not found in *choices*.
    """
    selected = utils._check_csv(value)
    for item in selected:
        if item not in choices:
            raise optparse.OptionValueError(
                "option %s: invalid value: %r, should be in %s"
                % (name, item, choices))
    return selected
def _choice_validator(choices, name, value):
if value not in choices:
msg = "option %s: invalid value: %r, should be in %s"
raise optparse.OptionValueError(msg % (name, value, choices))
return value
# pylint: disable=unused-argument
def _csv_validator(_, name, value):
    """Parse a comma-separated option value into a list via pylint's csv helper."""
    return utils._check_csv(value)
# pylint: disable=unused-argument
def _regexp_validator(_, name, value):
if hasattr(value, 'pattern'):
return value
return re.compile(value)
# pylint: disable=unused-argument
def _regexp_csv_validator(_, name, value):
    """Parse a comma-separated value into a list of compiled regexps."""
    return [_regexp_validator(_, name, val) for val in _csv_validator(_, name, value)]
def _yn_validator(opt, _, value):
if isinstance(value, int):
return bool(value)
if value in ('y', 'yes'):
return True
if value in ('n', 'no'):
return False
msg = "option %s: invalid yn value %r, should be in (y, yes, n, no)"
raise optparse.OptionValueError(msg % (opt, value))
# Map option "type" names to callables converting/validating raw option
# values; used by _call_validator below.
VALIDATORS = {
    'string': utils._unquote,
    'int': int,
    'regexp': re.compile,
    'regexp_csv': _regexp_csv_validator,
    'csv': _csv_validator,
    'yn': _yn_validator,
    'choice': lambda opt, name, value: _choice_validator(opt['choices'], name, value),
    'multiple_choice': lambda opt, name, value: _multiple_choice_validator(opt['choices'],
                                                                           name, value),
}
def _call_validator(opttype, optdict, option, value):
    """Dispatch validation of *value* to the VALIDATORS entry for *opttype*.

    Validators come in two shapes: (optdict, name, value) and plain
    single-argument converters; try the richer signature first.
    """
    if opttype not in VALIDATORS:
        raise Exception('Unsupported type "%s"' % opttype)
    validator = VALIDATORS[opttype]
    try:
        return validator(optdict, option, value)
    except TypeError:
        try:
            return validator(value)
        except Exception:
            raise optparse.OptionValueError(
                '%s value (%r) should be of type %s' % (option, value, opttype))
def _validate(value, optdict, name=''):
    """return a validated value for an option according to its type

    optional argument name is only used for error message formatting
    """
    # FIXME: untyped options are passed through unchanged
    if 'type' not in optdict:
        return value
    return _call_validator(optdict['type'], optdict, name, value)
def _level_options(group, outputlevel):
return [option for option in group.option_list
if (getattr(option, 'level', 0) or 0) <= outputlevel
and option.help is not optparse.SUPPRESS_HELP]
def _expand_default(self, option):
    """Patch OptionParser.expand_default with custom behaviour

    This will handle defaults to avoid overriding values in the
    configuration file.
    """
    if self.parser is None or not self.default_tag:
        return option.help
    # Strip the leading '--' from the first long option string.
    optname = option._long_opts[0][2:]
    try:
        provider = self.parser.options_manager._all_options[optname]
    except KeyError:
        value = None
    else:
        # Read the current configured value (not optparse's static default).
        optdict = provider.get_option_def(optname)
        optname = provider.option_attrname(optname, optdict)
        value = getattr(provider.config, optname, optdict)
        value = utils._format_option_value(optdict, value)
    if value is optparse.NO_DEFAULT or not value:
        value = self.NO_DEFAULT_VALUE
    return option.help.replace(self.default_tag, str(value))
@contextlib.contextmanager
def _patch_optparse():
    """Temporarily install our expand_default on optparse.HelpFormatter."""
    # BUG FIX: the original saved `optparse.HelpFormatter` (the class itself)
    # and then "restored" expand_default to that class object, clobbering the
    # real method for the rest of the process. Save the method instead.
    orig_expand_default = optparse.HelpFormatter.expand_default
    try:
        optparse.HelpFormatter.expand_default = _expand_default
        yield
    finally:
        optparse.HelpFormatter.expand_default = orig_expand_default
def _multiple_choices_validating_option(opt, name, value):
    """optparse TYPE_CHECKER adapter around _multiple_choice_validator."""
    return _multiple_choice_validator(opt.choices, name, value)
class Option(optparse.Option):
    """optparse.Option subclass adding pylint-specific option types.

    Extra types: regexp, regexp_csv, csv, yn, multiple_choice,
    non_empty_string; extra attributes: hide, level.
    """
    TYPES = optparse.Option.TYPES + ('regexp', 'regexp_csv', 'csv', 'yn',
                                     'multiple_choice',
                                     'non_empty_string')
    ATTRS = optparse.Option.ATTRS + ['hide', 'level']
    TYPE_CHECKER = copy.copy(optparse.Option.TYPE_CHECKER)
    TYPE_CHECKER['regexp'] = _regexp_validator
    TYPE_CHECKER['regexp_csv'] = _regexp_csv_validator
    TYPE_CHECKER['csv'] = _csv_validator
    TYPE_CHECKER['yn'] = _yn_validator
    TYPE_CHECKER['multiple_choice'] = _multiple_choices_validating_option

    def __init__(self, *opts, **attrs):
        optparse.Option.__init__(self, *opts, **attrs)
        # Hidden options stay functional but are removed from --help output.
        if hasattr(self, "hide") and self.hide:
            self.help = optparse.SUPPRESS_HELP

    def _check_choice(self):
        # Validate that 'choices' is supplied exactly when the type needs it.
        if self.type in ("choice", "multiple_choice"):
            if self.choices is None:
                raise optparse.OptionError(
                    "must supply a list of choices for type 'choice'", self)
            elif not isinstance(self.choices, (tuple, list)):
                raise optparse.OptionError(
                    "choices must be a list of strings ('%s' supplied)"
                    % str(type(self.choices)).split("'")[1], self)
        elif self.choices is not None:
            raise optparse.OptionError(
                "must not supply choices for type %r" % self.type, self)
    # NOTE(review): this patches the *base class'* check list globally, so
    # every optparse.Option instance uses the extended choice check.
    optparse.Option.CHECK_METHODS[2] = _check_choice

    def process(self, opt, value, values, parser):
        # First, convert the value(s) to the right type. Howl if any
        # value(s) are bogus.
        value = self.convert_value(opt, value)
        # 'named' options merge into an existing mapping instead of replacing.
        if self.type == 'named':
            existant = getattr(values, self.dest)
            if existant:
                existant.update(value)
                value = existant
        # And then take whatever action is expected of us.
        # This is a separate method to make life easier for
        # subclasses to add new actions.
        return self.take_action(
            self.action, self.dest, opt, value, values, parser)
class OptionParser(optparse.OptionParser):
    """optparse.OptionParser using our Option class and level-aware help."""

    def __init__(self, option_class=Option, *args, **kwargs):
        optparse.OptionParser.__init__(self, option_class=Option, *args, **kwargs)

    def format_option_help(self, formatter=None):
        """Render help, filtering option groups by the formatter's output level."""
        if formatter is None:
            formatter = self.formatter
        outputlevel = getattr(formatter, 'output_level', 0)
        formatter.store_option_strings(self)
        result = []
        result.append(formatter.format_heading("Options"))
        formatter.indent()
        if self.option_list:
            result.append(optparse.OptionContainer.format_option_help(self, formatter))
            result.append("\n")
        for group in self.option_groups:
            # A group is shown only when its level is visible AND it has a
            # description or at least one visible option.
            if group.level <= outputlevel and (
                    group.description or _level_options(group, outputlevel)):
                result.append(group.format_help(formatter))
                result.append("\n")
        formatter.dedent()
        # Drop the last "\n", or the header if no options or option groups:
        return "".join(result[:-1])

    def _match_long_opt(self, opt):
        """Disable abbreviations."""
        if opt not in self._long_opt:
            raise optparse.BadOptionError(opt)
        return opt
# pylint: disable=abstract-method; by design?
class _ManHelpFormatter(optparse.HelpFormatter):
    """optparse help formatter emitting troff/man-page markup."""

    def __init__(self, indent_increment=0, max_help_position=24,
                 width=79, short_first=0):
        optparse.HelpFormatter.__init__(
            self, indent_increment, max_help_position, width, short_first)

    def format_heading(self, heading):
        # Man section header.
        return '.SH %s\n' % heading.upper()

    def format_description(self, description):
        return description

    def format_option(self, option):
        """Format one option as an indented-paragraph (.IP) entry."""
        try:
            optstring = option.option_strings
        except AttributeError:
            optstring = self.format_option_strings(option)
        if option.help:
            help_text = self.expand_default(option)
            help = ' '.join([l.strip() for l in help_text.splitlines()])
        else:
            help = ''
        return '''.IP "%s"
%s
''' % (optstring, help)

    def format_head(self, optparser, pkginfo, section=1):
        """Build the man-page header (title, NAME, SYNOPSIS, DESCRIPTION)."""
        long_desc = ""
        try:
            pgm = optparser._get_prog_name()
        except AttributeError:
            # py >= 2.4.X (dunno which X exactly, at least 2)
            pgm = optparser.get_prog_name()
        short_desc = self.format_short_description(pgm, pkginfo.description)
        if hasattr(pkginfo, "long_desc"):
            long_desc = self.format_long_description(pgm, pkginfo.long_desc)
        return '%s\n%s\n%s\n%s' % (self.format_title(pgm, section),
                                   short_desc, self.format_synopsis(pgm),
                                   long_desc)

    @staticmethod
    def format_title(pgm, section):
        # .TH line: program, section number, date, source.
        date = '-'.join(str(num) for num in time.localtime()[:3])
        return '.TH %s %s "%s" %s' % (pgm, section, date, pgm)

    @staticmethod
    def format_short_description(pgm, short_desc):
        return '''.SH NAME
.B %s
\- %s
''' % (pgm, short_desc.strip())

    @staticmethod
    def format_synopsis(pgm):
        return '''.SH SYNOPSIS
.B %s
[
.I OPTIONS
] [
.I <arguments>
]
''' % pgm

    @staticmethod
    def format_long_description(pgm, long_desc):
        long_desc = '\n'.join(line.lstrip()
                              for line in long_desc.splitlines())
        long_desc = long_desc.replace('\n.\n', '\n\n')
        if long_desc.lower().startswith(pgm):
            long_desc = long_desc[len(pgm):]
        return '''.SH DESCRIPTION
.B %s
%s
''' % (pgm, long_desc.strip())

    @staticmethod
    def format_tail(pkginfo):
        """Build the trailing SEE ALSO / BUGS / AUTHOR / COPYRIGHT sections."""
        tail = '''.SH SEE ALSO
/usr/share/doc/pythonX.Y-%s/
.SH BUGS
Please report bugs on the project\'s mailing list:
%s
.SH AUTHOR
%s <%s>
''' % (getattr(pkginfo, 'debian_name', pkginfo.modname),
       pkginfo.mailinglist, pkginfo.author, pkginfo.author_email)
        if hasattr(pkginfo, "copyright"):
            tail += '''
.SH COPYRIGHT
%s
''' % pkginfo.copyright
        return tail
class OptionsManagerMixIn(object):
    """Handle configuration from both a configuration file and command line options"""

    def __init__(self, usage, config_file=None, version=None, quiet=0):
        self.config_file = config_file
        self.reset_parsers(usage, version=version)
        # list of registered options providers
        self.options_providers = []
        # dictionary associating option name to checker
        self._all_options = collections.OrderedDict()
        self._short_options = {}
        self._nocallback_options = {}
        self._mygroups = {}
        # verbosity
        self.quiet = quiet
        self._maxlevel = 0

    def reset_parsers(self, usage='', version=None):
        """(Re)create the config-file and command-line parsers."""
        # configuration file parser
        self.cfgfile_parser = configparser.ConfigParser()
        # command line parser
        self.cmdline_parser = OptionParser(usage=usage, version=version)
        self.cmdline_parser.options_manager = self
        self._optik_option_attrs = set(self.cmdline_parser.option_class.ATTRS)

    def register_options_provider(self, provider, own_group=True):
        """register an options provider"""
        assert provider.priority <= 0, "provider's priority can't be >= 0"
        # Keep providers sorted by descending priority.
        for i in range(len(self.options_providers)):
            if provider.priority > self.options_providers[i].priority:
                self.options_providers.insert(i, provider)
                break
        else:
            self.options_providers.append(provider)
        non_group_spec_options = [option for option in provider.options
                                  if 'group' not in option[1]]
        groups = getattr(provider, 'option_groups', ())
        if own_group and non_group_spec_options:
            # Ungrouped options get a group named after the provider.
            self.add_option_group(provider.name.upper(), provider.__doc__,
                                  non_group_spec_options, provider)
        else:
            for opt, optdict in non_group_spec_options:
                self.add_optik_option(provider, self.cmdline_parser, opt, optdict)
        for gname, gdoc in groups:
            gname = gname.upper()
            goptions = [option for option in provider.options
                        if option[1].get('group', '').upper() == gname]
            self.add_option_group(gname, gdoc, goptions, provider)

    def add_option_group(self, group_name, _, options, provider):
        """Register *options* under *group_name* on both parsers."""
        # add option group to the command line parser
        if group_name in self._mygroups:
            group = self._mygroups[group_name]
        else:
            group = optparse.OptionGroup(self.cmdline_parser,
                                         title=group_name.capitalize())
            self.cmdline_parser.add_option_group(group)
            group.level = provider.level
            self._mygroups[group_name] = group
            # add section to the config file
            if group_name != "DEFAULT":
                self.cfgfile_parser.add_section(group_name)
        # add provider's specific options
        for opt, optdict in options:
            self.add_optik_option(provider, group, opt, optdict)

    def add_optik_option(self, provider, optikcontainer, opt, optdict):
        """Add one option to an optparse container and index its provider."""
        args, optdict = self.optik_option(provider, opt, optdict)
        option = optikcontainer.add_option(*args, **optdict)
        self._all_options[opt] = provider
        self._maxlevel = max(self._maxlevel, option.level or 0)

    def optik_option(self, provider, opt, optdict):
        """get our personal option definition and return a suitable form for
        use with optik/optparse
        """
        optdict = copy.copy(optdict)
        if 'action' in optdict:
            self._nocallback_options[provider] = opt
        else:
            optdict['action'] = 'callback'
            optdict['callback'] = self.cb_set_provider_option
        # default is handled here and *must not* be given to optik if you
        # want the whole machinery to work
        if 'default' in optdict:
            if ('help' in optdict
                    and optdict.get('default') is not None
                    and optdict['action'] not in ('store_true', 'store_false')):
                optdict['help'] += ' [current: %default]'
            del optdict['default']
        args = ['--' + str(opt)]
        if 'short' in optdict:
            self._short_options[optdict['short']] = opt
            args.append('-' + optdict['short'])
            del optdict['short']
        # cleanup option definition dict before giving it to optik
        for key in list(optdict.keys()):
            if key not in self._optik_option_attrs:
                optdict.pop(key)
        return args, optdict

    def cb_set_provider_option(self, option, opt, value, parser):
        """optik callback for option setting"""
        if opt.startswith('--'):
            # remove -- on long option
            opt = opt[2:]
        else:
            # short option, get its long equivalent
            opt = self._short_options[opt[1:]]
        # trick since we can't set action='store_true' on options
        if value is None:
            value = 1
        self.global_set_option(opt, value)

    def global_set_option(self, opt, value):
        """set option on the correct option provider"""
        self._all_options[opt].set_option(opt, value)

    def generate_config(self, stream=None, skipsections=(), encoding=None):
        """write a configuration file according to the current configuration
        into the given stream or stdout
        """
        options_by_section = {}
        sections = []
        for provider in self.options_providers:
            for section, options in provider.options_by_section():
                if section is None:
                    section = provider.name
                if section in skipsections:
                    continue
                # Keep only typed, non-deprecated options.
                options = [(n, d, v) for (n, d, v) in options
                           if d.get('type') is not None
                           and not d.get('deprecated')]
                if not options:
                    continue
                if section not in sections:
                    sections.append(section)
                alloptions = options_by_section.setdefault(section, [])
                alloptions += options
        stream = stream or sys.stdout
        encoding = utils._get_encoding(encoding, stream)
        printed = False
        for section in sections:
            if printed:
                print('\n', file=stream)
            utils.format_section(stream, section.upper(),
                                 options_by_section[section],
                                 encoding)
            printed = True

    def generate_manpage(self, pkginfo, section=1, stream=None):
        """Write a man page for the command line parser to *stream*."""
        with _patch_optparse():
            _generate_manpage(self.cmdline_parser, pkginfo,
                              section, stream=stream or sys.stdout,
                              level=self._maxlevel)

    def load_provider_defaults(self):
        """initialize configuration using default values"""
        for provider in self.options_providers:
            provider.load_defaults()

    def read_config_file(self, config_file=None):
        """read the configuration file but do not load it (i.e. dispatching
        values to each options provider)
        """
        # Register --long-help, --long-long-help, ... up to the max level.
        helplevel = 1
        while helplevel <= self._maxlevel:
            opt = '-'.join(['long'] * helplevel) + '-help'
            if opt in self._all_options:
                break  # already processed
            # pylint: disable=unused-argument
            def helpfunc(option, opt, val, p, level=helplevel):
                print(self.help(level))
                sys.exit(0)
            helpmsg = '%s verbose help.' % ' '.join(['more'] * helplevel)
            optdict = {'action': 'callback', 'callback': helpfunc,
                       'help': helpmsg}
            provider = self.options_providers[0]
            self.add_optik_option(provider, self.cmdline_parser, opt, optdict)
            provider.options += ((opt, optdict),)
            helplevel += 1
        if config_file is None:
            config_file = self.config_file
        if config_file is not None:
            config_file = os.path.expanduser(config_file)
        if config_file and os.path.exists(config_file):
            parser = self.cfgfile_parser
            # Use this encoding in order to strip the BOM marker, if any.
            with io.open(config_file, 'r', encoding='utf_8_sig') as fp:
                # pylint: disable=deprecated-method
                parser.readfp(fp)
            # normalize sections'title
            for sect, values in list(parser._sections.items()):
                if not sect.isupper() and values:
                    parser._sections[sect.upper()] = values
        elif not self.quiet:
            msg = 'No config file found, using default configuration'
            print(msg, file=sys.stderr)
            return

    def load_config_file(self):
        """dispatch values previously read from a configuration file to each
        options provider)
        """
        parser = self.cfgfile_parser
        for section in parser.sections():
            for option, value in parser.items(section):
                try:
                    self.global_set_option(option, value)
                except (KeyError, optparse.OptionError):
                    # TODO handle here undeclared options appearing in the config file
                    continue

    def load_configuration(self, **kwargs):
        """override configuration according to given parameters"""
        return self.load_configuration_from_config(kwargs)

    def load_configuration_from_config(self, config):
        """Apply a mapping of option name -> value onto the providers."""
        for opt, opt_value in config.items():
            opt = opt.replace('_', '-')
            provider = self._all_options[opt]
            provider.set_option(opt, opt_value)

    def load_command_line_configuration(self, args=None):
        """Override configuration according to command line parameters

        return additional arguments
        """
        with _patch_optparse():
            if args is None:
                args = sys.argv[1:]
            else:
                args = list(args)
            (options, args) = self.cmdline_parser.parse_args(args=args)
            # Copy non-callback option values onto each provider's config.
            for provider in self._nocallback_options:
                config = provider.config
                for attr in config.__dict__.keys():
                    value = getattr(options, attr, None)
                    if value is None:
                        continue
                    setattr(config, attr, value)
            return args

    def add_help_section(self, title, description, level=0):
        """add a dummy option section for help purpose """
        group = optparse.OptionGroup(self.cmdline_parser,
                                     title=title.capitalize(),
                                     description=description)
        group.level = level
        self._maxlevel = max(self._maxlevel, level)
        self.cmdline_parser.add_option_group(group)

    def help(self, level=0):
        """return the usage string for available options """
        self.cmdline_parser.formatter.output_level = level
        with _patch_optparse():
            return self.cmdline_parser.format_help()
class OptionsProviderMixIn(object):
    """Mixin to provide options to an OptionsManager"""
    # those attributes should be overridden
    priority = -1
    name = 'default'
    options = ()
    level = 0

    def __init__(self):
        self.config = optparse.Values()
        self.load_defaults()

    def load_defaults(self):
        """initialize the provider using default values"""
        for opt, optdict in self.options:
            action = optdict.get('action')
            if action != 'callback':
                # callback action have no default
                if optdict is None:
                    optdict = self.get_option_def(opt)
                default = optdict.get('default')
                self.set_option(opt, default, action, optdict)

    def option_attrname(self, opt, optdict=None):
        """get the config attribute corresponding to opt"""
        if optdict is None:
            optdict = self.get_option_def(opt)
        return optdict.get('dest', opt.replace('-', '_'))

    def option_value(self, opt):
        """get the current value for the given option"""
        return getattr(self.config, self.option_attrname(opt), None)

    def set_option(self, opt, value, action=None, optdict=None):
        """method called to set an option (registered in the options list)"""
        if optdict is None:
            optdict = self.get_option_def(opt)
        if value is not None:
            value = _validate(value, optdict, opt)
        if action is None:
            action = optdict.get('action', 'store')
        if action == 'store':
            setattr(self.config, self.option_attrname(opt, optdict), value)
        elif action in ('store_true', 'count'):
            setattr(self.config, self.option_attrname(opt, optdict), 0)
        elif action == 'store_false':
            setattr(self.config, self.option_attrname(opt, optdict), 1)
        elif action == 'append':
            opt = self.option_attrname(opt, optdict)
            _list = getattr(self.config, opt, None)
            if _list is None:
                # First append: seed the list (or adopt a given sequence).
                if isinstance(value, (list, tuple)):
                    _list = value
                elif value is not None:
                    _list = []
                    _list.append(value)
                setattr(self.config, opt, _list)
            elif isinstance(_list, tuple):
                setattr(self.config, opt, _list + (value,))
            else:
                _list.append(value)
        elif action == 'callback':
            optdict['callback'](None, opt, value, None)
        else:
            raise UnsupportedAction(action)

    def get_option_def(self, opt):
        """return the dictionary defining an option given its name"""
        assert self.options
        for option in self.options:
            if option[0] == opt:
                return option[1]
        raise optparse.OptionError('no such option %s in section %r'
                                   % (opt, self.name), opt)

    def options_by_section(self):
        """return an iterator on options grouped by section

        (section, [list of (optname, optdict, optvalue)])
        """
        sections = {}
        for optname, optdict in self.options:
            sections.setdefault(optdict.get('group'), []).append(
                (optname, optdict, self.option_value(optname)))
        # Ungrouped options (section None) come first.
        if None in sections:
            yield None, sections.pop(None)
        for section, options in sorted(sections.items()):
            yield section.upper(), options

    def options_and_values(self, options=None):
        """Iterate (name, definition, current value) over *options*."""
        if options is None:
            options = self.options
        for optname, optdict in options:
            yield (optname, optdict, self.option_value(optname))
class ConfigurationMixIn(OptionsManagerMixIn, OptionsProviderMixIn):
    """basic mixin for simple configurations which don't need the
    manager / providers model
    """

    def __init__(self, *args, **kwargs):
        if not args:
            kwargs.setdefault('usage', '')
        kwargs.setdefault('quiet', 1)
        OptionsManagerMixIn.__init__(self, *args, **kwargs)
        OptionsProviderMixIn.__init__(self)
        # Derive option groups from the option definitions when not given.
        if not getattr(self, 'option_groups', None):
            self.option_groups = []
            for _, optdict in self.options:
                try:
                    gdef = (optdict['group'].upper(), '')
                except KeyError:
                    continue
                if gdef not in self.option_groups:
                    self.option_groups.append(gdef)
        # This object is both the manager and its single provider.
        self.register_options_provider(self, own_group=False)
def _generate_manpage(optparser, pkginfo, section=1,
stream=sys.stdout, level=0):
formatter = _ManHelpFormatter()
formatter.output_level = level
formatter.parser = optparser
print(formatter.format_head(optparser, pkginfo, section), file=stream)
print(optparser.format_option_help(formatter), file=stream)
print(formatter.format_tail(pkginfo), file=stream)
|
[] |
[] |
[
"PYLINTHOME",
"PYLINTRC"
] |
[]
|
["PYLINTHOME", "PYLINTRC"]
|
python
| 2 | 0 | |
bsp/wch/arm/ch32f203r-evt/rtconfig.py
|
import os
# toolchains options
ARCH='arm'
CPU='cortex-m3'
CROSS_TOOL='keil'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iccarm'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.3'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M3 '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rt-thread.map'
LFLAGS += r' --strict --scatter "board\linker_scripts\link.sct" '
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iccarm':
# toolchains
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M3'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M3'
AFLAGS += ' --fpu None'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
def dist_handle(BSP_ROOT, dist_dir):
import sys
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT, dist_dir)
|
[] |
[] |
[
"RTT_CC",
"RTT_ROOT",
"RTT_EXEC_PATH"
] |
[]
|
["RTT_CC", "RTT_ROOT", "RTT_EXEC_PATH"]
|
python
| 3 | 0 | |
base/java/src/main/java/compute/twosum.java
|
package compute;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
public class Compute {
void compute(){
// how many times twosun is called
int repeat = Integer.parseInt(System.getenv("REPEAT"));
int[] nums = new int[]{572, 815, 387, 418, 434, 530, 376, 190, 196, 74, 830, 561, 973, 771, 640, 37, 539, 369, 327, 51, 623, 575, 988, 44, 659, 48, 22, 776, 487, 873, 486, 169, 499, 82, 128, 31, 386, 691, 553, 848, 968, 874, 692, 404, 463, 285, 745, 631, 304, 271, 40, 921, 733, 56, 883, 517, 99, 580, 55, 81, 232, 971, 561, 683, 806, 994, 823, 219, 315, 564, 997, 976, 158, 208, 851, 206, 101, 989, 542, 985, 940, 116, 153, 47, 806, 944, 337, 903, 712, 138, 236, 777, 630, 912, 22, 140, 525, 270, 997, 763, 812, 597, 806, 423, 869, 926, 344, 494, 858, 519, 389, 627, 517, 964, 74, 432, 730, 843, 673, 985, 819, 397, 607, 34, 948, 648, 43, 212, 950, 235, 995, 76, 439, 614, 203, 313, 180, 760, 210, 813, 920, 229, 615, 730, 359, 863, 678, 43, 293, 978, 305, 106, 797, 769, 3, 700, 945, 135, 430, 965, 762, 479, 152, 121, 935, 809, 101, 271, 428, 608, 8, 983, 758, 662, 755, 190, 632, 792, 789, 174, 869, 622, 885, 626, 310, 128, 233, 82, 223, 339, 771, 741, 227, 131, 85, 51, 361, 343, 641, 568, 922, 145, 256, 177, 329, 959, 991, 293, 850, 858, 76, 291, 134, 254, 956, 971, 718, 391, 336, 899, 206, 642, 254, 851, 274, 239, 538, 418, 21, 232, 706, 275, 615, 568, 714, 234, 567, 994, 368, 54, 744, 498, 380, 594, 415, 286, 260, 582, 522, 795, 261, 437, 292, 887, 405, 293, 946, 678, 686, 682, 501, 238, 245, 380, 218, 591, 722, 519, 770, 359, 340, 215, 151, 368, 356, 795, 91, 250, 413, 970, 37, 941, 356, 648, 594, 513, 484, 364, 484, 909, 292, 501, 59, 982, 686, 827, 461, 60, 557, 178, 952, 218, 634, 785, 251, 290, 156, 300, 711, 322, 570, 820, 191, 755, 429, 950, 18, 917, 905, 905, 126, 790, 638, 94, 857, 235, 889, 611, 605, 203, 859, 749, 874, 530, 727, 764, 197, 537, 951, 919, 24, 341, 334, 505, 796, 619, 492, 295, 380, 128, 533, 600, 160, 51, 249, 5, 837, 905, 747, 505, 82, 158, 687, 507, 339, 575, 206, 28, 29, 91, 459, 118, 284, 995, 544, 3, 154, 89, 840, 364, 682, 700, 143, 173, 216, 290, 733, 525, 399, 574, 693, 500, 189, 590, 529, 972, 378, 299, 461, 866, 326, 43, 711, 460, 426, 
947, 391, 536, 26, 579, 304, 852, 158, 621, 683, 901, 237, 22, 225, 59, 52, 798, 262, 754, 649, 504, 861, 472, 480, 570, 347, 891, 956, 347, 31, 784, 581, 668, 127, 628, 962, 698, 191, 313, 714, 893};
int target = 101;
for (int i=0; i<repeat;i++) {
twoSum(nums, target);
}
}
int[] twoSum(int[] nums, int target) {
Map<Integer, Integer> map = new HashMap<>();
int[] ret = new int[2];
for (int i = 0; i< nums.length; i++) {
if (map.containsKey(target-nums[i])) {
ret[0] = map.get(target-nums[i]);
ret[1] = i;
break;
}
map.put(nums[i], i);
}
return ret;
}
}
|
[
"\"REPEAT\""
] |
[] |
[
"REPEAT"
] |
[]
|
["REPEAT"]
|
java
| 1 | 0 | |
linux/benji.py
|
import tkinter as tk
from tkinter import ttk
import re
import os
import wikipedia
import time
import webbrowser
import json
import requests
import ctypes
import youtube_dl
import random
import urllib
import ssl
from bs4 import BeautifulSoup
from urllib.request import urlopen
import speech_recognition as sr
import requests
import pyttsx3
import sys
import threading
from datetime import datetime
import errno
import subprocess
requests.packages.urllib3.disable_warnings()
try:
_create_unverified_https_context=ssl._create_unverified_context
except 'AttributeError':
pass
else:
ssl._create_default_https_context=_create_unverified_https_context
headers = {'''user-agent':'Chrome/53.0.2785.143'''}
#speak=wicl.Dispatch("SAPI.SpVoice")
#reminder settings
reminder_mode = 0
reminder_dirloc = '/home/arib/'
reminder_filedir = reminder_dirloc+'.B.E.N.J.I.'
reminder_filename = reminder_filedir + '/reminders.txt'
reminder = str()
# Creating the graphical user interface
speak = pyttsx3.init()
def events(frame, put,link):
identity_keywords = ["who are you", "who r u", "what is your name"]
youtube_keywords = ["play ", "stream ", "queue "]
launch_keywords = ["open ", "launch "]
search_keywords = ["search ",]
wikipedia_keywords = ["wikipedia ", "wiki "]
download_music=["download","download music"]
reminder_keywords = ["set a reminder"]
calculator_keywords=["calculator","calc"]
youtube = ("play","stream","queue")
download = ("download","download music")
global reminder_mode
if reminder_mode or any(word in put for word in reminder_keywords) :
try :
if reminder_mode == 0 :
try :
os.makedirs(reminder_filedir)
os.chmod(reminder_dirloc, 0o777)
except OSError as e :
if e.errno != errno.EEXIST :
raise
speak.say("Reminder of what?")
speak.runAndWait()
reminder_mode = 1
elif reminder_mode == 1 :
subject = ' '.join(link)
global reminder
reminder = subject + '\t'
speak.say("When to remind you?")
speak.runAndWait()
reminder_mode = 2
elif reminder_mode == 2 :
reminder_mode = 0
date_as_string = ' '.join(link)
date = datetime.strptime(date_as_string, '%d %b %Y %I %M %p')
# global reminder
reminder = reminder + date_as_string
file_hand = open(reminder_filename, 'a')
file_hand.write(reminder)
file_hand.write('\n')
file_hand.close()
speak.say("Reminder Added")
speak.runAndWait()
except :
frame.displayText("Cannot set reminder")
#Play song on Youtube
elif put.startswith(youtube):
try:
link = '+'.join(link[1:])
# print(link)
say = link.replace('+', ' ')
url = 'https://www.youtube.com/results?search_query='+link
# webbrowser.open('https://www.youtube.com'+link)
fhand=urllib.request.urlopen(url).read()
soup = BeautifulSoup(fhand, "html.parser")
songs = soup.findAll('div', {'class': 'yt-lockup-video'})
hit = songs[0].find('a')['href']
# print(hit)
speak.say("playing "+say)
speak.runAndWait()
webbrowser.open('https://www.youtube.com'+hit)
except:
frame.displayText('Sorry Ethan. Looks like its not working!')
elif put.startswith(download):
link = '+'.join(link[1:])
# print(link)
say = link.replace('+', ' ')
url = 'https://www.youtube.com/results?search_query='+link
# webbrowser.open('https://www.youtube.com'+link)
fhand=urllib.request.urlopen(url).read()
soup = BeautifulSoup(fhand, "html.parser")
songs = soup.findAll('div', {'class': 'yt-lockup-video'})
hit = songs[0].find('a')['href']
# print(hit)
speak.say("downloading "+say)
speak.runAndWait()
ydl_opts = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
'quiet': True,
'restrictfilenames': True,
'outtmpl': os.environ['HOME']+'/Desktop/%(title)s.%(ext)s'
}
ydl = youtube_dl.YoutubeDL(ydl_opts)
ydl.download(['https://www.youtube.com'+hit])
speak.say("download completed.Check your desktop for the song")
speak.runAndWait()
#Calculator
elif any(word in put for word in calculator_keywords):
try:
speak.say("Opening Calaculator")
subprocess.run("gnome-calculator",shell=True,check=True)
speak.runAndWait()
except:
frame.displayText('Care to try again?')
#BENJI Intro
elif any(word in put for word in identity_keywords):
try:
speak.say("I am BENJI, a digital assistant declassified for civilian use. Previously I was used by the Impossible Missions Force")
speak.runAndWait()
except:
frame.displayText('Error. Try reading the ReadMe to know about me!')
#Open a webpage
elif any(word in put for word in launch_keywords):
try:
link = '+'.join(link[1:])
speak.say("opening "+link)
speak.runAndWait()
webbrowser.open('http://www.'+ link)
except:
frame.displayText('Sorry Ethan,unable to access it. Cannot hack either-IMF protocol!')
#Google search
elif any(word in put for word in search_keywords):
try:
link='+'.join(link[1:])
say=link.replace('+',' ')
speak.say("searching google for "+say)
speak.runAndWait()
webbrowser.open('https://www.google.com/search?q='+link)
except:
print('Nope, this is not working.')
#Google Images
elif put.startswith("images of "):
try:
link='+'.join(link[2:])
say=link.replace('+',' ')
speak.say("searching images of " + say)
speak.runAndWait()
webbrowser.open('https://www.google.co.in/search?q=' + link + '&source=lnms&tbm=isch')
except:
print('Could not search for images!')
#Gmail
elif put.startswith("gmail"):
try:
speak.say("Opening Gmail!")
speak.runAndWait()
webbrowser.open('https://www.google.com/gmail')
except:
print("Could not open Gmail!")
#Google Cloud Print
elif put.startswith("google cloud print"):
try:
speak.say("Opening google cloud print!")
speak.runAndWait()
webbrowser.open('https://www.google.com/cloudprint')
except:
print("Could not open Google Cloud Print!")
#Google Others
elif put.startswith("google "):
try:
say = link[1]
speak.say("Opening google " + say)
speak.runAndWait()
webbrowser.open('https://'+ say +'.google.com')
except:
print("Could not open Google " + say.capitalize() + "!")
#Blogger
elif put.startswith("blogger"):
try:
speak.say("Opening blogger!")
speak.runAndWait()
webbrowser.open('https://www.blogger.com')
except:
print("Could not open Blogger!")
#Wikipedia
elif any(word in put for word in wikipedia_keywords):
try:
link = '+'.join(link[1:])
say = link.replace('+', ' ')
wikisearch = wikipedia.page(say)
speak.say("Opening wikipedia page for" + say)
speak.runAndWait()
webbrowser.open(wikisearch.url)
except:
frame.displayText('Wikipedia could not either find the article or your Third-world connection is unstable')
#Lock the device
elif put.startswith('secure'):
try:
speak.say("locking the device")
speak.runAndWait()
subprocess.run("xdg-screensaver lock",shell=True,check=True)
except :
frame.displayText('Cannot lock device')
#News of various press agencies
elif put.startswith('al jazeera '):
try:
aljazeeraurl = ('https://newsapi.org/v1/articles?source=al-jazeera-english&sortBy=latest&apiKey=571863193daf421082a8666fe4b666f3')
newsresponce = requests.get(aljazeeraurl)
newsjson = newsresponce.json()
speak.say('Our agents from Al-Jazeera report this')
speak.runAndWait()
frame.displayText(' =====Al Jazeera===== \n')
i = 1
for item in newsjson['articles']:
frame.displayText(str(i) + '. ' + item['title'] + '\n')
frame.displayText(item['description'] + '\n')
i += 1
except:
frame.displayText('Qatari agents have refused to share this intel, Ethan')
elif put.startswith('bbc '):
try:
bbcurl = ('https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=571863193daf421082a8666fe4b666f3')
newsresponce = requests.get(bbcurl)
newsjson = newsresponce.json()
speak.say('Our agents from BBC report this')
speak.runAndWait()
frame.displayText(' =====BBC===== \n')
i = 1
for item in newsjson['articles']:
frame.displayText(str(i) + '. ' + item['title'] + '\n')
frame.displayText(item['description'] + '\n')
i += 1
except:
frame.displayText('MI6 is going crazy! Not allowing this!')
elif put.startswith('cricket '):
try:
cricketurl = ('https://newsapi.org/v1/articles?source=espn-cric-info&sortBy=latest&apiKey=571863193daf421082a8666fe4b666f3')
newsresponce = requests.get(cricketurl)
newsjson = newsresponce.json()
speak.say('Our agents from ESPN Cricket report this')
speak.runAndWait()
frame.displayText(' =====CRICKET NEWS===== \n')
i = 1
for item in newsjson['articles']:
frame.displayText(str(i) + '. ' + item['title'] + '\n')
frame.displayText(item['description'] + '\n')
i += 1
except:
frame.displayText('Connection not secure')
elif put.startswith('hindus '):
try:
hindusurl = ('https://newsapi.org/v1/articles?source=the-hindu&sortBy=latest&apiKey=571863193daf421082a8666fe4b666f3')
newsresponce = requests.get(hindusurl)
newsjson = newsresponce.json()
speak.say('Our agents from Hindu News report this')
speak.runAndWait()
frame.displayText(' =====HINDU NEWS===== \n')
i = 1
for item in newsjson['articles']:
frame.displayText(str(i) + '. ' + item['title'] + '\n')
frame.displayText(item['description'] + '\n')
i += 1
except:
frame.displayText('R&A W is blocking our reports, Ethan. Sorry! ')
# Finding files in pc
elif put.startswith('lookfor '):
try:
link1=put.split()
name=link1[1]
rex=regex.compile(name)
filepath=link1[2]
for root,dirs,files in os.walk(os.path.normpath(filepath)):
for f in files:
result = rex.search(f)
if result:
print (os.path.join(root, f))
except:
print("Error")
#A customized thread class for tracking reminders
class reminderThread(threading.Thread):
def __init__(self, frame):
threading.Thread.__init__(self)
self.event = threading.Event()
self.reminder_given_flag = False
self.frame = frame
def run(self):
while not self.event.is_set() :
upcoming_reminders = list()
self.removePastReminders()
try :
#reading the reminders from reminders.txt
file_hand = open(reminder_filename, 'r')
reminder_list = file_hand.readlines()
file_hand.close()
for line in reminder_list :
vals = line.split('\t')
date_time = datetime.strptime(vals[1].replace('\n',''), '%d %b %Y %I %M %p')
time_now = datetime.now()
#getting diff between time now and the reminder
time_diff = date_time - time_now
time_diff_hour = time_diff.days * 24 + time_diff.seconds // 3600
#if time diff less than 1 hour, add it to upcoming lists
if time_diff_hour < 1 :
upcoming_reminders.append(vals)
except :
pass
if not self.reminder_given_flag and len(upcoming_reminders) > 0 :
speak.say("You have " + str(len(upcoming_reminders))+" upcoming reminders")
speak.runAndWait()
for reminder in upcoming_reminders :
#wx.CallAfter(self.frame.displayText, reminder[0]+'\t\t'+reminder[1])
self.frame.displayText(reminder[0]+'\t\t'+reminder[1])
self.reminder_given_flag = True
time.sleep(1)
def removePastReminders(self):
try :
file_hand = open(reminder_filename, 'r')
reminder_list = file_hand.readlines()
file_hand.close()
new_list = list()
for reminder in reminder_list :
date_time = datetime.strptime(reminder.split('\t')[1].replace('\n',''), '%d %b %Y %I %M %p')
time_diff = date_time - datetime.now()
if time_diff.seconds >= 0 and time_diff.days >= 0 :
new_list.append(reminder)
file_hand = open(reminder_filename, 'w')
for line in new_list :
file_hand.write(line)
file_hand.close()
except FileNotFoundError :
pass
except :
self.frame.displayText("Error occured")
i=0
#A stdout class to redirect output to tkinter window
class StdRedirector(object):
def __init__(self, text_window):
self.text_window = text_window
def write(self, output):
self.text_window.insert(tk.END, output)
class MyFrame(tk.Frame):
def __init__(self,*args,**kwargs):
#new Thread to track reminders
global reminder_thread
reminder_thread = reminderThread(self)
tk.Frame.__init__(self,*args,**kwargs)
self.textBox = tk.Text(root,
height=1,width=30,
font=("Times", 16),
bg="#666", fg="#0f0",
spacing1=6, spacing3=6,
insertbackground="#0f0"
)
self.textBox.insert("1.0", "$>")
self.textBox.grid(row=1,column=1, padx=10, pady=10)
root.bind('<Return>', self.OnEnter)
root.bind('<Destroy>', self.onClose)
self.textBox.focus_set()
speak.say('''Hi Agent! BENJI at your service''')
speak.runAndWait()
self.photo1 = tk.PhotoImage(file="mic_icon.png")
self.btn = ttk.Button(root,command=self.OnClicked,
image=self.photo1, style="C.TButton")
self.btn.grid(row=1,column=2, padx=10, pady=20)
'''
self.output_window = tk.Toplevel()
output_text_window = tk.Text(self.output_window)
self.stddirec = StdRedirector(output_text_window)
sys.stdout = self.stddirec
output_text_window.pack()
self.output_window.withdraw()
'''
reminder_thread.start()
def OnEnter(self,event):
put=self.textBox.get("1.2","end-1c")
print(put)
self.textBox.delete('1.2',tk.END)
put=put.lower()
put = put.strip()
#put = re.sub(r'[?|$|.|!]', r'', put)
link=put.split()
events(self, put,link)
if put=='':
self.displayText('Reenter')
def OnClicked(self):
r = sr.Recognizer()
with sr.Microphone() as source:
speak.say('Hey I am Listening ')
speak.runAndWait()
audio = r.listen(source)
try:
put=r.recognize_google(audio)
self.displayText(put)
self.textBox.insert('1.2',put)
put=put.lower()
put = put.strip()
#put = re.sub(r'[?|$|.|!]', r'', put)
link=put.split()
events(self,put,link)
except sr.UnknownValueError:
self.displayText("Could not understand audio")
except sr.RequestError as e:
self.displayText("Could not request results; {0}".format(e))
def onClose(self, event):
global reminder_thread
reminder_thread.event.set()
#root.destroy()
def displayText(self, text):
try :
if not self.output_window.winfo_viewable() :
self.output_window.update()
self.output_window.deiconify()
except :
self.createOutputWindow()
print(text)
def createOutputWindow(self):
self.output_window = tk.Toplevel()
output_text_window = tk.Text(self.output_window)
self.stddirec = StdRedirector(output_text_window)
sys.stdout = self.stddirec
output_text_window.pack()
#Trigger the GUI. Light the fuse!
if __name__=="__main__":
root = tk.Tk()
view = MyFrame(root)
style = ttk.Style()
style.configure('C.TButton',
background='#555',
highlightthickness='0'
)
style.map("C.TButton",
background=[('pressed', '!disabled', '#333'), ('active', '#666')]
)
# root.geometry('{}x{}'.format(400, 100))
# view.pack(side="top",fill="both",expand=False)
root.iconphoto(True, tk.PhotoImage(file=os.path.join(sys.path[0],'benji_final.gif')))
root.title('B.E.N.J.I.')
root.configure(background="#444")
root.resizable(0,0)
root.mainloop()
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
gatekeeper_test.go
|
package gatekeeper
import (
"encoding/json"
"os"
"testing"
"time"
"github.com/nemosupremo/vault-gatekeeper/policy"
"github.com/nemosupremo/vault-gatekeeper/scheduler"
"github.com/nemosupremo/vault-gatekeeper/scheduler/mock"
"github.com/nemosupremo/vault-gatekeeper/vault"
"github.com/nemosupremo/vault-gatekeeper/vault/unsealer"
"github.com/franela/goreq"
"github.com/segmentio/ksuid"
"github.com/spf13/viper"
)
const rootPolicy = `{
"*":{
"roles":["wildcard"],
"num_uses": 1
},
"x":{
"roles":["invalid"],
"num_uses": 1
}
}`
const subPolicy = `{
"foo":{
"roles":["bar"],
"num_uses": 2
},
"x":{
"roles":["valid"],
"num_uses": 1
}
}`
var vaultToken = os.Getenv("VAULT_TOKEN")
var vaultAddr = os.Getenv("VAULT_ADDR")
func init() {
if vaultAddr == "" {
vaultAddr = "http://localhost:8200"
}
viper.SetDefault("vault-addr", vaultAddr)
}
func TestLoadPolicyV2(t *testing.T) {
secretPath := ksuid.New().String()
r, err := vault.Request{
goreq.Request{
Uri: vault.Path("/v1/sys/mounts/" + secretPath),
Method: "POST",
Body: struct {
Type string `json:"type"`
Options map[string]string `json:"options"`
}{"kv", map[string]string{"version": "2"}},
MaxRedirects: 10,
RedirectHeaders: true,
}.WithHeader("X-Vault-Token", vaultToken),
}.Do()
if err != nil || (r.StatusCode != 200 && r.StatusCode != 204) {
t.Fatalf("failed to mount v2 secret backend.")
}
pathKey := ksuid.New().String()
for i, policy := range []string{rootPolicy, subPolicy} {
var path string
switch i {
case 0:
path = "/v1/" + secretPath + "/data/" + pathKey
case 1:
path = "/v1/" + secretPath + "/data/" + pathKey + "/foo/bar"
default:
t.Fatalf("misconfigured test.")
}
if err := installPolicy(path, policy); err != nil {
if verr, ok := err.(vault.Error); ok {
t.Fatalf("Could not upload policy to vault: %v", verr)
} else {
t.Fatalf("Failed to upload policy to vault: %v", err)
}
}
}
g := &Gatekeeper{}
g.config.PolicyPath = "/v1/" + secretPath + "/data/" + pathKey
g.config.Vault.KvVersion = "2"
g.Token = vaultToken
if policies, err := g.loadPolicies(); err == nil {
mustGet := func(p *policy.Policy, ok bool) *policy.Policy {
if ok {
return p
}
t.Fatalf("Did not find a matching policy")
return nil
}
if !mustGet(policies.Get("default")).Has("wildcard") {
t.Fatalf("Expected default role to have wildcard.")
}
if !mustGet(policies.Get("foo")).Has("bar") {
t.Fatalf("Expected foo policy to have bar role.")
}
if mustGet(policies.Get("x")).Has("invalid") {
t.Fatalf("Expected x policy to not have invalid role.")
}
if !mustGet(policies.Get("x")).Has("valid") {
t.Fatalf("Expected x policy to have valid role.")
}
} else {
t.Fatalf("Loading policies failed: %v", err)
}
}
func TestLoadPolicyV1(t *testing.T) {
secretPath := ksuid.New().String()
r, err := vault.Request{
goreq.Request{
Uri: vault.Path("/v1/sys/mounts/" + secretPath),
Method: "POST",
Body: struct {
Type string `json:"type"`
Options map[string]string `json:"options"`
}{"kv", map[string]string{"version": "1"}},
MaxRedirects: 10,
RedirectHeaders: true,
}.WithHeader("X-Vault-Token", vaultToken),
}.Do()
if err != nil || (r.StatusCode != 200 && r.StatusCode != 204) {
t.Fatalf("failed to mount v1 secret backend.")
}
pathKey := ksuid.New().String()
for i, policy := range []string{rootPolicy, subPolicy} {
var path string
switch i {
case 0:
path = "/v1/" + secretPath + "/" + pathKey
case 1:
path = "/v1/" + secretPath + "/" + pathKey + "/foo/bar"
default:
t.Fatalf("misconfigured test.")
}
if err := installPolicy(path, policy); err != nil {
if verr, ok := err.(vault.Error); ok {
t.Fatalf("Could not upload policy to vault: %v", verr)
} else {
t.Fatalf("Failed to upload policy to vault: %v", err)
}
}
}
g := &Gatekeeper{}
g.config.PolicyPath = "/v1/" + secretPath + "/" + pathKey
g.config.Vault.KvVersion = "1"
g.Token = vaultToken
if policies, err := g.loadPolicies(); err == nil {
mustGet := func(p *policy.Policy, ok bool) *policy.Policy {
if ok {
return p
}
t.Fatalf("Did not find a matching policy")
return nil
}
if !mustGet(policies.Get("default")).Has("wildcard") {
t.Fatalf("Expected default role to have wildcard.")
}
if !mustGet(policies.Get("foo")).Has("bar") {
t.Fatalf("Expected foo policy to have bar role.")
}
if mustGet(policies.Get("x")).Has("invalid") {
t.Fatalf("Expected x policy to not have invalid role.")
}
if !mustGet(policies.Get("x")).Has("valid") {
t.Fatalf("Expected x policy to have valid role.")
}
} else {
t.Fatalf("Loading policies failed: %v", err)
}
}
func installPolicy(path string, policy string) error {
r, err := vault.Request{
goreq.Request{
Uri: vault.Path(path),
MaxRedirects: 10,
RedirectHeaders: true,
Body: struct {
Data json.RawMessage `json:"data"`
}{json.RawMessage(policy)},
ContentType: "application/json",
Method: "POST",
}.WithHeader("X-Vault-Token", vaultToken),
}.Do()
if err == nil {
defer r.Body.Close()
switch r.StatusCode {
case 200, 204:
return nil
default:
var e vault.Error
e.Code = r.StatusCode
if err := r.Body.FromJsonTo(&e); err == nil {
return e
} else {
return err
}
}
} else {
return err
}
}
func createAuthEndpoint(authType string) (string, error) {
authPath := ksuid.New().String()
r, err := vault.Request{
goreq.Request{
Uri: vault.Path("/v1/sys/auth/" + authPath),
Method: "POST",
Body: struct {
Type string `json:"type"`
}{authType},
MaxRedirects: 10,
RedirectHeaders: true,
}.WithHeader("X-Vault-Token", vaultToken),
}.Do()
if err == nil {
defer r.Body.Close()
if r.StatusCode == 200 || r.StatusCode == 204 {
return authPath, nil
} else {
var e vault.Error
e.Code = r.StatusCode
if err := r.Body.FromJsonTo(&e); err == nil {
return "", e
} else {
return "", err
}
}
} else {
return "", err
}
}
const mockPolicy = `{
"mock:*":{
"roles":["test_role"],
"num_uses":1
},
"mock:special":{
"roles":["test_role", "{{name}}"],
"num_uses":1
}
}`
func TestRequestToken(t *testing.T) {
mock.ValidTaskId = ksuid.New().String()
var authPath string
if ap, err := createAuthEndpoint("approle"); err == nil {
authPath = ap
} else {
t.Fatalf("Failed to initialize approle endpoint: %v", err)
}
policyPath := "v1/secret/data/" + ksuid.New().String()
for _, appRoleName := range []string{"mock", "test_role", "special"} {
r, err := vault.Request{goreq.Request{
Uri: vault.Path("/v1/auth/" + authPath + "/role/" + appRoleName),
MaxRedirects: 10,
RedirectHeaders: true,
Body: struct {
Policies string `json:"policies"`
}{"unseal"},
ContentType: "application/json",
Method: "POST",
}.WithHeader("X-Vault-Token", vaultToken)}.Do()
if err != nil || (r.StatusCode != 200 && r.StatusCode != 204) {
t.Fatalf("failed to create app role for testing")
}
}
if err := installPolicy(policyPath, mockPolicy); err != nil {
if verr, ok := err.(vault.Error); ok {
t.Fatalf("Could not upload policy to vault: %v", verr)
} else {
t.Fatalf("Failed to upload policy to vault: %v", err)
}
}
conf := Config{
Schedulers: []string{"mock"},
Store: "memory",
PolicyPath: policyPath,
MaxTaskLife: 1 * time.Minute,
Unsealer: unsealer.TokenUnsealer{vaultToken},
}
conf.Vault.Address = vaultAddr
conf.Vault.KvVersion = "2"
conf.Vault.AppRoleMount = authPath
if g, err := NewGatekeeper(conf); err == nil && g.IsUnsealed() {
if token, _, err := g.RequestToken("mock", mock.ValidTaskId, "", ""); err == nil {
if _, err := (unsealer.WrappedTokenUnsealer{token}).Token(); err != nil {
t.Fatalf("Wrapped token requested from gatekeeper could not be unwrapped: %v", err)
}
} else {
t.Fatalf("Failed to request token: %v", err)
}
if _, _, err := g.RequestToken("mock", mock.ValidTaskId, "", ""); err != ErrMaxTokensGiven {
t.Fatalf("Token request should have failed with ErrMaxTokensGiven: %v", err)
}
mock.ValidTaskId = ksuid.New().String()
if _, _, err := g.RequestToken("mock", mock.ValidTaskId, "super-role", ""); err != ErrRoleMismatch {
t.Fatalf("Token request should have failed with ErrRoleMismatch: %v", err)
}
mock.ValidTaskId = ksuid.New().String()
if _, _, err := g.RequestToken("mock", mock.ValidTaskId, "{{name}}", ""); err != ErrRoleMismatch {
t.Fatalf("Token request should have failed with ErrRoleMismatch: %v", err)
}
mock.ValidTaskId = "special"
if _, _, err := g.RequestToken("mock", mock.ValidTaskId, "{{name}}", ""); err != nil {
t.Fatalf("Token request should have succeeded with {{name}}: %v", err)
}
mock.ValidTaskId = "localhost"
g.config.HostCheck = true
if _, _, err := g.RequestToken("mock", mock.ValidTaskId, "", "localhost"); err != nil {
t.Fatalf("Token request should have succeeded: %v", err)
}
if _, _, err := g.RequestToken("mock", mock.ValidTaskId, "", "172.217.9.78"); err != ErrHostMismatch {
t.Fatalf("Token request should have failed with ErrHostMismatch: %v", err)
}
g.config.HostCheck = false
if _, _, err := g.RequestToken("mock", ksuid.New().String(), "", ""); err != scheduler.ErrTaskNotFound {
t.Fatalf("Unknown task should have failed: %v", err)
}
if _, _, err := g.RequestToken("mock", "expired", "", ""); err != ErrTaskNotFresh {
t.Fatalf("Expired task should have returned task not fresh: %v", err)
}
} else if err == nil {
t.Fatalf("Failed to create gatekeeper instance: could not unseal.")
} else {
t.Fatalf("Failed to create gatekeeper instance: %v", err)
}
}
|
[
"\"VAULT_TOKEN\"",
"\"VAULT_ADDR\""
] |
[] |
[
"VAULT_ADDR",
"VAULT_TOKEN"
] |
[]
|
["VAULT_ADDR", "VAULT_TOKEN"]
|
go
| 2 | 0 | |
cpython/Lib/test/test_gdb.py
|
# Verify that gdb can pretty-print the various PyObject* types
#
# The code for testing gdb was adapted from similar work in Unladen Swallow's
# Lib/test/test_jit_gdb.py
import os
import re
import pprint
import subprocess
import sys
import sysconfig
import unittest
import locale
# Is this Python configured to support threads?
try:
    import _thread
except ImportError:
    _thread = None
from test import support
from test.support import run_unittest, findfile, python_is_optimized
if not hasattr(subprocess, 'Popen'):
    raise unittest.SkipTest('test needs subprocess.Popen()')
try:
    gdb_version, _ = subprocess.Popen(["gdb", "-nx", "--version"],
                                      stdout=subprocess.PIPE).communicate()
except OSError:
    # This is what "no gdb" looks like. There may, however, be other
    # errors that manifest this way too.
    raise unittest.SkipTest("Couldn't find gdb on the path")
# Use a raw bytes literal: "\d" inside a non-raw bytes literal is an invalid
# escape sequence (DeprecationWarning on modern Pythons); the matched bytes
# are identical either way.
gdb_version_number = re.search(rb"^GNU gdb [^\d]*(\d+)\.(\d)", gdb_version)
gdb_major_version = int(gdb_version_number.group(1))
gdb_minor_version = int(gdb_version_number.group(2))
if gdb_major_version < 7:
    raise unittest.SkipTest("gdb versions before 7.0 didn't support python embedding"
                            " Saw:\n" + gdb_version.decode('ascii', 'replace'))
if not sysconfig.is_python_build():
    raise unittest.SkipTest("test_gdb only works on source builds at the moment.")
# Location of custom hooks file in a repository checkout.
checkout_hook_path = os.path.join(os.path.dirname(sys.executable),
                                  'python-gdb.py')
# Fixed hash seed so gdb output (e.g. dict/set ordering) is reproducible.
PYTHONHASHSEED = '123'
def run_gdb(*args, **env_vars):
    """Runs gdb in --batch mode with the additional arguments given by *args.

    Returns its (stdout, stderr) decoded from utf-8 using the replace handler.
    Extra keyword arguments, if any, are added to the inherited environment.
    """
    if env_vars:
        env = os.environ.copy()
        env.update(env_vars)
    else:
        env = None
    # -nx: Do not execute commands from any .gdbinit initialization files
    # (issue #22188)
    base_cmd = ('gdb', '--batch', '-nx')
    if (gdb_major_version, gdb_minor_version) >= (7, 4):
        base_cmd += ('-iex', 'add-auto-load-safe-path ' + checkout_hook_path)
    out, err = subprocess.Popen(base_cmd + args,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env,
        ).communicate()
    return out.decode('utf-8', 'replace'), err.decode('utf-8', 'replace')
# Verify that "gdb" was built with the embedded python support enabled:
gdbpy_version, _ = run_gdb("--eval-command=python import sys; print(sys.version_info)")
if not gdbpy_version:
    raise unittest.SkipTest("gdb not built with embedded python support")
# Verify that "gdb" can load our custom hooks, as OS security settings may
# disallow this without a customised .gdbinit.
_, gdbpy_errors = run_gdb('--args', sys.executable)
if "auto-loading has been declined" in gdbpy_errors:
    msg = "gdb security settings prevent use of custom hooks: "
    raise unittest.SkipTest(msg + gdbpy_errors.rstrip())
def gdb_has_frame_select():
    # Does this build of gdb have gdb.Frame.select ?
    stdout, _ = run_gdb("--eval-command=python print(dir(gdb.Frame))")
    m = re.match(r'.*\[(.*)\].*', stdout)
    if not m:
        raise unittest.SkipTest("Unable to parse output from gdb.Frame.select test")
    gdb_frame_dir = m.group(1).split(', ')
    return "'select'" in gdb_frame_dir
HAS_PYUP_PYDOWN = gdb_has_frame_select()
BREAKPOINT_FN='builtin_id'
class DebuggerTests(unittest.TestCase):
    """Test that the debugger can debug Python.

    Base class: provides helpers that run an inferior Python under gdb,
    stop at a breakpoint, and scrape gdb's textual output.
    """
    def get_stack_trace(self, source=None, script=None,
                        breakpoint=BREAKPOINT_FN,
                        cmds_after_breakpoint=None,
                        import_site=False):
        '''
        Run 'python -c SOURCE' under gdb with a breakpoint.
        Support injecting commands after the breakpoint is reached
        Returns the stdout from gdb
        cmds_after_breakpoint: if provided, a list of strings: gdb commands
        '''
        # We use "set breakpoint pending yes" to avoid blocking with a:
        # Function "foo" not defined.
        # Make breakpoint pending on future shared library load? (y or [n])
        # error, which typically happens python is dynamically linked (the
        # breakpoints of interest are to be found in the shared library)
        # When this happens, we still get:
        # Function "textiowrapper_write" not defined.
        # emitted to stderr each time, alas.
        # Initially I had "--eval-command=continue" here, but removed it to
        # avoid repeated print breakpoints when traversing hierarchical data
        # structures
        # Generate a list of commands in gdb's language:
        commands = ['set breakpoint pending yes',
                    'break %s' % breakpoint,
                    # The tests assume that the first frame of printed
                    #  backtrace will not contain program counter,
                    #  that is however not guaranteed by gdb
                    #  therefore we need to use 'set print address off' to
                    #  make sure the counter is not there. For example:
                    # #0 in PyObject_Print ...
                    #  is assumed, but sometimes this can be e.g.
                    # #0 0x00003fffb7dd1798 in PyObject_Print ...
                    'set print address off',
                    'run']
        # GDB as of 7.4 onwards can distinguish between the
        # value of a variable at entry vs current value:
        #   http://sourceware.org/gdb/onlinedocs/gdb/Variables.html
        # which leads to the selftests failing with errors like this:
        #   AssertionError: 'v@entry=()' != '()'
        # Disable this:
        if (gdb_major_version, gdb_minor_version) >= (7, 4):
            commands += ['set print entry-values no']
        if cmds_after_breakpoint:
            commands += cmds_after_breakpoint
        else:
            commands += ['backtrace']
        # print commands
        # Use "commands" to generate the arguments with which to invoke "gdb":
        args = ["gdb", "--batch", "-nx"]
        args += ['--eval-command=%s' % cmd for cmd in commands]
        args += ["--args",
                 sys.executable]
        if not import_site:
            # -S suppresses the default 'import site'
            args += ["-S"]
        if source:
            args += ["-c", source]
        elif script:
            args += [script]
        # print args
        # print (' '.join(args))
        # Use "args" to invoke gdb, capturing stdout, stderr:
        out, err = run_gdb(*args, PYTHONHASHSEED=PYTHONHASHSEED)
        errlines = err.splitlines()
        unexpected_errlines = []
        # Ignore some benign messages on stderr.
        ignore_patterns = (
            'Function "%s" not defined.' % breakpoint,
            "warning: no loadable sections found in added symbol-file"
            " system-supplied DSO",
            "warning: Unable to find libthread_db matching"
            " inferior's thread library, thread debugging will"
            " not be available.",
            "warning: Cannot initialize thread debugging"
            " library: Debugger service failed",
            'warning: Could not load shared library symbols for '
            'linux-vdso.so',
            'warning: Could not load shared library symbols for '
            'linux-gate.so',
            'warning: Could not load shared library symbols for '
            'linux-vdso64.so',
            'Do you need "set solib-search-path" or '
            '"set sysroot"?',
            'warning: Source file is more recent than executable.',
            # Issue #19753: missing symbols on System Z
            'Missing separate debuginfo for ',
            'Try: zypper install -C ',
            )
        for line in errlines:
            if not line.startswith(ignore_patterns):
                unexpected_errlines.append(line)
        # Ensure no unexpected error messages:
        self.assertEqual(unexpected_errlines, [])
        return out
    def get_gdb_repr(self, source,
                     cmds_after_breakpoint=None,
                     import_site=False):
        '''Return (gdb_repr, gdb_output): gdb's rendering of the "v" argument
        to builtin_id, plus the full gdb transcript it was scraped from.'''
        # Given an input python source representation of data,
        # run "python -c'id(DATA)'" under gdb with a breakpoint on
        # builtin_id and scrape out gdb's representation of the "op"
        # parameter, and verify that the gdb displays the same string
        #
        # Verify that the gdb displays the expected string
        #
        # For a nested structure, the first time we hit the breakpoint will
        # give us the top-level structure
        # NOTE: avoid decoding too much of the traceback as some
        # undecodable characters may lurk there in optimized mode
        # (issue #19743).
        cmds_after_breakpoint = cmds_after_breakpoint or ["backtrace 1"]
        gdb_output = self.get_stack_trace(source, breakpoint=BREAKPOINT_FN,
                                          cmds_after_breakpoint=cmds_after_breakpoint,
                                          import_site=import_site)
        # gdb can insert additional '\n' and space characters in various places
        # in its output, depending on the width of the terminal it's connected
        # to (using its "wrap_here" function)
        # NOTE(review): non-raw pattern relies on "\s" etc. surviving as
        # literal escapes; modern Pythons warn about these escape sequences.
        m = re.match('.*#0\s+builtin_id\s+\(self\=.*,\s+v=\s*(.*?)\)\s+at\s+\S*Python/bltinmodule.c.*',
                     gdb_output, re.DOTALL)
        if not m:
            self.fail('Unexpected gdb output: %r\n%s' % (gdb_output, gdb_output))
        return m.group(1), gdb_output
    def assertEndsWith(self, actual, exp_end):
        '''Ensure that the given "actual" string ends with "exp_end"'''
        self.assertTrue(actual.endswith(exp_end),
                        msg='%r did not end with %r' % (actual, exp_end))
    def assertMultilineMatches(self, actual, pattern):
        '''Ensure that "actual" matches the (typically multiline) regex
        "pattern", with "." also matching newlines (re.DOTALL).'''
        m = re.match(pattern, actual, re.DOTALL)
        if not m:
            self.fail(msg='%r did not match %r' % (actual, pattern))
    def get_sample_script(self):
        '''Locate the sample script debugged by the py-* command tests.'''
        return findfile('gdb_sample.py')
class PrettyPrintTests(DebuggerTests):
    """Verify gdb's pretty-printing of the various PyObject* types."""
    def test_getting_backtrace(self):
        gdb_output = self.get_stack_trace('id(42)')
        self.assertTrue(BREAKPOINT_FN in gdb_output)
    def assertGdbRepr(self, val, exp_repr=None):
        # Ensure that gdb's rendering of the value in a debugged process
        # matches repr(value) in this process:
        gdb_repr, gdb_output = self.get_gdb_repr('id(' + ascii(val) + ')')
        if not exp_repr:
            exp_repr = repr(val)
        self.assertEqual(gdb_repr, exp_repr,
                         ('%r did not equal expected %r; full output was:\n%s'
                          % (gdb_repr, exp_repr, gdb_output)))
    def test_int(self):
        'Verify the pretty-printing of various int values'
        self.assertGdbRepr(42)
        self.assertGdbRepr(0)
        self.assertGdbRepr(-7)
        self.assertGdbRepr(1000000000000)
        self.assertGdbRepr(-1000000000000000)
    def test_singletons(self):
        'Verify the pretty-printing of True, False and None'
        self.assertGdbRepr(True)
        self.assertGdbRepr(False)
        self.assertGdbRepr(None)
    def test_dicts(self):
        'Verify the pretty-printing of dictionaries'
        self.assertGdbRepr({})
        self.assertGdbRepr({'foo': 'bar'}, "{'foo': 'bar'}")
        self.assertGdbRepr({'foo': 'bar', 'douglas': 42}, "{'douglas': 42, 'foo': 'bar'}")
    def test_lists(self):
        'Verify the pretty-printing of lists'
        self.assertGdbRepr([])
        self.assertGdbRepr(list(range(5)))
    def test_bytes(self):
        'Verify the pretty-printing of bytes'
        self.assertGdbRepr(b'')
        self.assertGdbRepr(b'And now for something hopefully the same')
        self.assertGdbRepr(b'string with embedded NUL here \0 and then some more text')
        self.assertGdbRepr(b'this is a tab:\t'
                           b' this is a slash-N:\n'
                           b' this is a slash-R:\r'
                           )
        self.assertGdbRepr(b'this is byte 255:\xff and byte 128:\x80')
        # bytes 0..254 inclusive; bytes(range(255)) is the idiomatic
        # equivalent of the original list-comprehension form.
        self.assertGdbRepr(bytes(range(255)))
    def test_strings(self):
        'Verify the pretty-printing of unicode strings'
        encoding = locale.getpreferredencoding()
        def check_repr(text):
            try:
                text.encode(encoding)
            except UnicodeEncodeError:
                # Not encodable in the locale encoding: gdb is expected to
                # fall back to the escaped (ascii()) form.
                self.assertGdbRepr(text, ascii(text))
            else:
                self.assertGdbRepr(text)
        self.assertGdbRepr('')
        self.assertGdbRepr('And now for something hopefully the same')
        self.assertGdbRepr('string with embedded NUL here \0 and then some more text')
        # Test printing a single character:
        #    U+2620 SKULL AND CROSSBONES
        check_repr('\u2620')
        # Test printing a Japanese unicode string
        # (I believe this reads "mojibake", using 3 characters from the CJK
        # Unified Ideographs area, followed by U+3051 HIRAGANA LETTER KE)
        check_repr('\u6587\u5b57\u5316\u3051')
        # Test a character outside the BMP:
        #    U+1D121 MUSICAL SYMBOL C CLEF
        # This is:
        # UTF-8: 0xF0 0x9D 0x84 0xA1
        # UTF-16: 0xD834 0xDD21
        check_repr(chr(0x1D121))
    def test_tuples(self):
        'Verify the pretty-printing of tuples'
        self.assertGdbRepr(tuple(), '()')
        self.assertGdbRepr((1,), '(1,)')
        self.assertGdbRepr(('foo', 'bar', 'baz'))
    def test_sets(self):
        'Verify the pretty-printing of sets'
        if (gdb_major_version, gdb_minor_version) < (7, 3):
            self.skipTest("pretty-printing of sets needs gdb 7.3 or later")
        self.assertGdbRepr(set(), 'set()')
        self.assertGdbRepr(set(['a', 'b']), "{'a', 'b'}")
        self.assertGdbRepr(set([4, 5, 6]), "{4, 5, 6}")
        # Ensure that we handle sets containing the "dummy" key value,
        # which happens on deletion:
        gdb_repr, gdb_output = self.get_gdb_repr('''s = set(['a','b'])
s.remove('a')
id(s)''')
        self.assertEqual(gdb_repr, "{'b'}")
    def test_frozensets(self):
        'Verify the pretty-printing of frozensets'
        if (gdb_major_version, gdb_minor_version) < (7, 3):
            self.skipTest("pretty-printing of frozensets needs gdb 7.3 or later")
        self.assertGdbRepr(frozenset(), 'frozenset()')
        self.assertGdbRepr(frozenset(['a', 'b']), "frozenset({'a', 'b'})")
        self.assertGdbRepr(frozenset([4, 5, 6]), "frozenset({4, 5, 6})")
    def test_exceptions(self):
        # Test a RuntimeError
        gdb_repr, gdb_output = self.get_gdb_repr('''
try:
    raise RuntimeError("I am an error")
except RuntimeError as e:
    id(e)
''')
        self.assertEqual(gdb_repr,
                         "RuntimeError('I am an error',)")
        # Test division by zero:
        gdb_repr, gdb_output = self.get_gdb_repr('''
try:
    a = 1 / 0
except ZeroDivisionError as e:
    id(e)
''')
        self.assertEqual(gdb_repr,
                         "ZeroDivisionError('division by zero',)")
    def test_modern_class(self):
        'Verify the pretty-printing of new-style class instances'
        gdb_repr, gdb_output = self.get_gdb_repr('''
class Foo:
    pass
foo = Foo()
foo.an_int = 42
id(foo)''')
        m = re.match(r'<Foo\(an_int=42\) at remote 0x-?[0-9a-f]+>', gdb_repr)
        self.assertTrue(m,
                        msg='Unexpected new-style class rendering %r' % gdb_repr)
    def test_subclassing_list(self):
        'Verify the pretty-printing of an instance of a list subclass'
        gdb_repr, gdb_output = self.get_gdb_repr('''
class Foo(list):
    pass
foo = Foo()
foo += [1, 2, 3]
foo.an_int = 42
id(foo)''')
        m = re.match(r'<Foo\(an_int=42\) at remote 0x-?[0-9a-f]+>', gdb_repr)
        self.assertTrue(m,
                        msg='Unexpected new-style class rendering %r' % gdb_repr)
    def test_subclassing_tuple(self):
        'Verify the pretty-printing of an instance of a tuple subclass'
        # This should exercise the negative tp_dictoffset code in the
        # new-style class support
        gdb_repr, gdb_output = self.get_gdb_repr('''
class Foo(tuple):
    pass
foo = Foo((1, 2, 3))
foo.an_int = 42
id(foo)''')
        m = re.match(r'<Foo\(an_int=42\) at remote 0x-?[0-9a-f]+>', gdb_repr)
        self.assertTrue(m,
                        msg='Unexpected new-style class rendering %r' % gdb_repr)
    def assertSane(self, source, corruption, exprepr=None):
        '''Run Python under gdb, corrupting variables in the inferior process
        immediately before taking a backtrace.
        Verify that the variable's representation is the expected failsafe
        representation'''
        if corruption:
            cmds_after_breakpoint=[corruption, 'backtrace']
        else:
            cmds_after_breakpoint=['backtrace']
        gdb_repr, gdb_output = \
            self.get_gdb_repr(source,
                              cmds_after_breakpoint=cmds_after_breakpoint)
        if exprepr:
            if gdb_repr == exprepr:
                # gdb managed to print the value in spite of the corruption;
                # this is good (see http://bugs.python.org/issue8330)
                return
        # Match anything for the type name; 0xDEADBEEF could point to
        # something arbitrary (see  http://bugs.python.org/issue8330)
        pattern = '<.* at remote 0x-?[0-9a-f]+>'
        m = re.match(pattern, gdb_repr)
        if not m:
            self.fail('Unexpected gdb representation: %r\n%s' % \
                          (gdb_repr, gdb_output))
    def test_NULL_ptr(self):
        'Ensure that a NULL PyObject* is handled gracefully'
        gdb_repr, gdb_output = (
            self.get_gdb_repr('id(42)',
                              cmds_after_breakpoint=['set variable v=0',
                                                     'backtrace'])
            )
        self.assertEqual(gdb_repr, '0x0')
    def test_NULL_ob_type(self):
        'Ensure that a PyObject* with NULL ob_type is handled gracefully'
        self.assertSane('id(42)',
                        'set v->ob_type=0')
    def test_corrupt_ob_type(self):
        'Ensure that a PyObject* with a corrupt ob_type is handled gracefully'
        self.assertSane('id(42)',
                        'set v->ob_type=0xDEADBEEF',
                        exprepr='42')
    def test_corrupt_tp_flags(self):
        'Ensure that a PyObject* with a type with corrupt tp_flags is handled'
        self.assertSane('id(42)',
                        'set v->ob_type->tp_flags=0x0',
                        exprepr='42')
    def test_corrupt_tp_name(self):
        'Ensure that a PyObject* with a type with corrupt tp_name is handled'
        self.assertSane('id(42)',
                        'set v->ob_type->tp_name=0xDEADBEEF',
                        exprepr='42')
    def test_builtins_help(self):
        'Ensure that the new-style class _Helper in site.py can be handled'
        # (this was the issue causing tracebacks in
        #  http://bugs.python.org/issue8032#msg100537 )
        gdb_repr, gdb_output = self.get_gdb_repr('id(__builtins__.help)', import_site=True)
        m = re.match(r'<_Helper at remote 0x-?[0-9a-f]+>', gdb_repr)
        self.assertTrue(m,
                        msg='Unexpected rendering %r' % gdb_repr)
    def test_selfreferential_list(self):
        '''Ensure that a reference loop involving a list doesn't lead proxyval
        into an infinite loop:'''
        gdb_repr, gdb_output = \
            self.get_gdb_repr("a = [3, 4, 5] ; a.append(a) ; id(a)")
        self.assertEqual(gdb_repr, '[3, 4, 5, [...]]')
        gdb_repr, gdb_output = \
            self.get_gdb_repr("a = [3, 4, 5] ; b = [a] ; a.append(b) ; id(a)")
        self.assertEqual(gdb_repr, '[3, 4, 5, [[...]]]')
    def test_selfreferential_dict(self):
        '''Ensure that a reference loop involving a dict doesn't lead proxyval
        into an infinite loop:'''
        gdb_repr, gdb_output = \
            self.get_gdb_repr("a = {} ; b = {'bar':a} ; a['foo'] = b ; id(a)")
        self.assertEqual(gdb_repr, "{'foo': {'bar': {...}}}")
    def test_selfreferential_old_style_instance(self):
        gdb_repr, gdb_output = \
            self.get_gdb_repr('''
class Foo:
    pass
foo = Foo()
foo.an_attr = foo
id(foo)''')
        # Raw string: the original non-raw literal's "\(", "\." are invalid
        # escape sequences (DeprecationWarning); the pattern is unchanged.
        self.assertTrue(re.match(r'<Foo\(an_attr=<\.\.\.>\) at remote 0x-?[0-9a-f]+>',
                                 gdb_repr),
                        'Unexpected gdb representation: %r\n%s' % \
                            (gdb_repr, gdb_output))
    def test_selfreferential_new_style_instance(self):
        gdb_repr, gdb_output = \
            self.get_gdb_repr('''
class Foo(object):
    pass
foo = Foo()
foo.an_attr = foo
id(foo)''')
        self.assertTrue(re.match(r'<Foo\(an_attr=<\.\.\.>\) at remote 0x-?[0-9a-f]+>',
                                 gdb_repr),
                        'Unexpected gdb representation: %r\n%s' % \
                            (gdb_repr, gdb_output))
        gdb_repr, gdb_output = \
            self.get_gdb_repr('''
class Foo(object):
    pass
a = Foo()
b = Foo()
a.an_attr = b
b.an_attr = a
id(a)''')
        self.assertTrue(re.match(r'<Foo\(an_attr=<Foo\(an_attr=<\.\.\.>\) at remote 0x-?[0-9a-f]+>\) at remote 0x-?[0-9a-f]+>',
                                 gdb_repr),
                        'Unexpected gdb representation: %r\n%s' % \
                            (gdb_repr, gdb_output))
    def test_truncation(self):
        'Verify that very long output is truncated'
        gdb_repr, gdb_output = self.get_gdb_repr('id(list(range(1000)))')
        self.assertEqual(gdb_repr,
                         "[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, "
                         "14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, "
                         "27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, "
                         "40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, "
                         "53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, "
                         "66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, "
                         "79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, "
                         "92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, "
                         "104, 105, 106, 107, 108, 109, 110, 111, 112, 113, "
                         "114, 115, 116, 117, 118, 119, 120, 121, 122, 123, "
                         "124, 125, 126, 127, 128, 129, 130, 131, 132, 133, "
                         "134, 135, 136, 137, 138, 139, 140, 141, 142, 143, "
                         "144, 145, 146, 147, 148, 149, 150, 151, 152, 153, "
                         "154, 155, 156, 157, 158, 159, 160, 161, 162, 163, "
                         "164, 165, 166, 167, 168, 169, 170, 171, 172, 173, "
                         "174, 175, 176, 177, 178, 179, 180, 181, 182, 183, "
                         "184, 185, 186, 187, 188, 189, 190, 191, 192, 193, "
                         "194, 195, 196, 197, 198, 199, 200, 201, 202, 203, "
                         "204, 205, 206, 207, 208, 209, 210, 211, 212, 213, "
                         "214, 215, 216, 217, 218, 219, 220, 221, 222, 223, "
                         "224, 225, 226...(truncated)")
        self.assertEqual(len(gdb_repr),
                         1024 + len('...(truncated)'))
    def test_builtin_method(self):
        gdb_repr, gdb_output = self.get_gdb_repr('import sys; id(sys.stdout.readlines)')
        self.assertTrue(re.match('<built-in method readlines of _io.TextIOWrapper object at remote 0x-?[0-9a-f]+>',
                                 gdb_repr),
                        'Unexpected gdb representation: %r\n%s' % \
                            (gdb_repr, gdb_output))
    def test_frames(self):
        gdb_output = self.get_stack_trace('''
def foo(a, b, c):
    pass
foo(3, 4, 5)
id(foo.__code__)''',
                                          breakpoint='builtin_id',
                                          cmds_after_breakpoint=['print (PyFrameObject*)(((PyCodeObject*)v)->co_zombieframe)']
                                          )
        self.assertTrue(re.match(r'.*\s+\$1 =\s+Frame 0x-?[0-9a-f]+, for file <string>, line 3, in foo \(\)\s+.*',
                                 gdb_output,
                                 re.DOTALL),
                        'Unexpected gdb representation: %r\n%s' % (gdb_output, gdb_output))
@unittest.skipIf(python_is_optimized(),
                 "Python was compiled with optimizations")
class PyListTests(DebuggerTests):
    """Tests of the "py-list" gdb command (source-listing of the inferior)."""
    def assertListing(self, expected, actual):
        # The listing appears at the end of gdb's output.
        self.assertEndsWith(actual, expected)
    def test_basic_command(self):
        'Verify that the "py-list" command works'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-list'])
        self.assertListing(' 5 \n'
                           ' 6 def bar(a, b, c):\n'
                           ' 7 baz(a, b, c)\n'
                           ' 8 \n'
                           ' 9 def baz(*args):\n'
                           ' >10 id(42)\n'
                           ' 11 \n'
                           ' 12 foo(1, 2, 3)\n',
                           bt)
    def test_one_abs_arg(self):
        'Verify the "py-list" command with one absolute argument'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-list 9'])
        self.assertListing(' 9 def baz(*args):\n'
                           ' >10 id(42)\n'
                           ' 11 \n'
                           ' 12 foo(1, 2, 3)\n',
                           bt)
    def test_two_abs_args(self):
        'Verify the "py-list" command with two absolute arguments'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-list 1,3'])
        self.assertListing(' 1 # Sample script for use by test_gdb.py\n'
                           ' 2 \n'
                           ' 3 def foo(a, b, c):\n',
                           bt)
class StackNavigationTests(DebuggerTests):
    """Tests of the "py-up" and "py-down" gdb commands."""
    @unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_pyup_command(self):
        'Verify that the "py-up" command works'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-up'])
        self.assertMultilineMatches(bt,
                                    r'''^.*
#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 7, in bar \(a=1, b=2, c=3\)
    baz\(a, b, c\)
$''')
    @unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
    def test_down_at_bottom(self):
        'Verify handling of "py-down" at the bottom of the stack'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-down'])
        self.assertEndsWith(bt,
                            'Unable to find a newer python frame\n')
    @unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
    def test_up_at_top(self):
        'Verify handling of "py-up" at the top of the stack'
        # Four "py-up"s is more frames than the sample script has.
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-up'] * 4)
        self.assertEndsWith(bt,
                            'Unable to find an older python frame\n')
    @unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_up_then_down(self):
        'Verify "py-up" followed by "py-down"'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-up', 'py-down'])
        self.assertMultilineMatches(bt,
                                    r'''^.*
#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 7, in bar \(a=1, b=2, c=3\)
    baz\(a, b, c\)
#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 10, in baz \(args=\(1, 2, 3\)\)
    id\(42\)
$''')
class PyBtTests(DebuggerTests):
    """Tests of the "py-bt" and "py-bt-full" gdb commands."""
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_bt(self):
        'Verify that the "py-bt" command works'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-bt'])
        self.assertMultilineMatches(bt,
                                    r'''^.*
Traceback \(most recent call first\):
  File ".*gdb_sample.py", line 10, in baz
    id\(42\)
  File ".*gdb_sample.py", line 7, in bar
    baz\(a, b, c\)
  File ".*gdb_sample.py", line 4, in foo
    bar\(a, b, c\)
  File ".*gdb_sample.py", line 12, in <module>
    foo\(1, 2, 3\)
''')
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_bt_full(self):
        'Verify that the "py-bt-full" command works'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-bt-full'])
        self.assertMultilineMatches(bt,
                                    r'''^.*
#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 7, in bar \(a=1, b=2, c=3\)
    baz\(a, b, c\)
#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 4, in foo \(a=1, b=2, c=3\)
    bar\(a, b, c\)
#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 12, in <module> \(\)
    foo\(1, 2, 3\)
''')
    @unittest.skipUnless(_thread,
                         "Python was compiled without thread support")
    def test_threads(self):
        'Verify that "py-bt" indicates threads that are waiting for the GIL'
        cmd = '''
from threading import Thread
class TestThread(Thread):
    # These threads would run forever, but we'll interrupt things with the
    # debugger
    def run(self):
        i = 0
        while 1:
             i += 1
t = {}
for i in range(4):
   t[i] = TestThread()
   t[i].start()
# Trigger a breakpoint on the main thread
id(42)
'''
        # Verify with "py-bt":
        gdb_output = self.get_stack_trace(cmd,
                                          cmds_after_breakpoint=['thread apply all py-bt'])
        self.assertIn('Waiting for the GIL', gdb_output)
        # Verify with "py-bt-full":
        gdb_output = self.get_stack_trace(cmd,
                                          cmds_after_breakpoint=['thread apply all py-bt-full'])
        self.assertIn('Waiting for the GIL', gdb_output)
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    # Some older versions of gdb will fail with
    #  "Cannot find new threads: generic error"
    # unless we add LD_PRELOAD=PATH-TO-libpthread.so.1 as a workaround
    @unittest.skipUnless(_thread,
                         "Python was compiled without thread support")
    def test_gc(self):
        'Verify that "py-bt" indicates if a thread is garbage-collecting'
        cmd = ('from gc import collect\n'
               'id(42)\n'
               'def foo():\n'
               '    collect()\n'
               'def bar():\n'
               '    foo()\n'
               'bar()\n')
        # Verify with "py-bt":
        gdb_output = self.get_stack_trace(cmd,
                                          cmds_after_breakpoint=['break update_refs', 'continue', 'py-bt'],
                                          )
        self.assertIn('Garbage-collecting', gdb_output)
        # Verify with "py-bt-full":
        gdb_output = self.get_stack_trace(cmd,
                                          cmds_after_breakpoint=['break update_refs', 'continue', 'py-bt-full'],
                                          )
        self.assertIn('Garbage-collecting', gdb_output)
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    # Some older versions of gdb will fail with
    #  "Cannot find new threads: generic error"
    # unless we add LD_PRELOAD=PATH-TO-libpthread.so.1 as a workaround
    @unittest.skipUnless(_thread,
                         "Python was compiled without thread support")
    def test_pycfunction(self):
        'Verify that "py-bt" displays invocations of PyCFunction instances'
        # Tested function must not be defined with METH_NOARGS or METH_O,
        # otherwise call_function() doesn't call PyCFunction_Call()
        cmd = ('from time import gmtime\n'
               'def foo():\n'
               '    gmtime(1)\n'
               'def bar():\n'
               '    foo()\n'
               'bar()\n')
        # Verify with "py-bt":
        gdb_output = self.get_stack_trace(cmd,
                                          breakpoint='time_gmtime',
                                          cmds_after_breakpoint=['bt', 'py-bt'],
                                          )
        self.assertIn('<built-in method gmtime', gdb_output)
        # Verify with "py-bt-full":
        gdb_output = self.get_stack_trace(cmd,
                                          breakpoint='time_gmtime',
                                          cmds_after_breakpoint=['py-bt-full'],
                                          )
        self.assertIn('#0 <built-in method gmtime', gdb_output)
class PyPrintTests(DebuggerTests):
    """Tests of the "py-print" gdb command (printing inferior variables)."""
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_basic_command(self):
        'Verify that the "py-print" command works'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-print args'])
        self.assertMultilineMatches(bt,
                                    r".*\nlocal 'args' = \(1, 2, 3\)\n.*")
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    @unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
    def test_print_after_up(self):
        'Verify "py-print" of locals in an outer frame after "py-up"'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-up', 'py-print c', 'py-print b', 'py-print a'])
        self.assertMultilineMatches(bt,
                                    r".*\nlocal 'c' = 3\nlocal 'b' = 2\nlocal 'a' = 1\n.*")
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_printing_global(self):
        'Verify "py-print" falls back to the frame globals'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-print __name__'])
        self.assertMultilineMatches(bt,
                                    r".*\nglobal '__name__' = '__main__'\n.*")
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_printing_builtin(self):
        'Verify "py-print" falls back to the builtins'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-print len'])
        self.assertMultilineMatches(bt,
                                    r".*\nbuiltin 'len' = <built-in method len of module object at remote 0x-?[0-9a-f]+>\n.*")
class PyLocalsTests(DebuggerTests):
    """Tests of the "py-locals" gdb command."""
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_basic_command(self):
        'Verify "py-locals" lists the current frame locals'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-locals'])
        self.assertMultilineMatches(bt,
                                    r".*\nargs = \(1, 2, 3\)\n.*")
    @unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_locals_after_up(self):
        'Verify "py-locals" lists the selected outer frame after "py-up"'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-up', 'py-locals'])
        self.assertMultilineMatches(bt,
                                    r".*\na = 1\nb = 2\nc = 3\n.*")
def test_main():
    """Report the gdb version under test (in verbose mode), then run every
    test class in this module."""
    if support.verbose:
        print("GDB version:")
        version_text = os.fsdecode(gdb_version)
        for version_line in version_text.splitlines():
            print("    " + version_line)
    suites = (PrettyPrintTests,
              PyListTests,
              StackNavigationTests,
              PyBtTests,
              PyPrintTests,
              PyLocalsTests)
    run_unittest(*suites)
if __name__ == "__main__":
    test_main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
"""
Sphinx documentation builder
"""
import os
# Set env flag so that we can doc functions that may otherwise not be loaded
# see for example interactive visualizations in qiskit.visualization.
os.environ['QISKIT_DOCS'] = 'TRUE'
# -- Project information -----------------------------------------------------
project = 'Qiskit'
copyright = '2019, Qiskit Development Team' # pylint: disable=redefined-builtin
author = 'Qiskit Development Team'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.12.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.napoleon',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks',
'sphinx_tabs.tabs',
'jupyter_sphinx.execute',
'sphinx_autodoc_typehints',
'reno.sphinxext',
]
html_static_path = ['_static']
templates_path = ['_templates']
html_css_files = ['style.css', 'custom.css']
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
autosummary_generate = True
# -----------------------------------------------------------------------------
# Autodoc
# -----------------------------------------------------------------------------
autodoc_default_options = {
'inherited-members': None,
}
# If true, figures, tables and code-blocks are automatically numbered if they
# have a caption.
numfig = True
# A dictionary mapping 'figure', 'table', 'code-block' and 'section' to
# strings that are used for format of figure numbers. As a special character,
# %s will be replaced to figure number.
numfig_format = {
'table': 'Table %s'
}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'colorful'
# A boolean that decides whether module names are prepended to all object names
# (for object types where a “module” of some kind is defined), e.g. for
# py:function directives.
add_module_names = False
# A list of prefixes that are ignored for sorting the Python module index
# (e.g., if this is set to ['foo.'], then foo.bar is shown under B, not F).
# This can be handy if you document a project that consists of a single
# package. Works only for the HTML builder currently.
modindex_common_prefix = ['qiskit.']
# -- Configuration for extlinks extension ------------------------------------
# Refer to https://www.sphinx-doc.org/en/master/usage/extensions/extlinks.html
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme' # use the theme in subdir 'theme'
html_logo = 'images/logo.png'
#html_sidebars = {'**': ['globaltoc.html']}
html_last_updated_fmt = '%Y/%m/%d'
html_theme_options = {
'logo_only': True,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': True,
'style_nav_header_background': '#212121',
}
autoclass_content = 'both'
|
[] |
[] |
[
"QISKIT_DOCS"
] |
[]
|
["QISKIT_DOCS"]
|
python
| 1 | 0 | |
src/internal/godebug/godebug.go
|
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package godebug parses the GODEBUG environment variable.
package godebug
import "os"
// Get returns the value for the provided GODEBUG key.
func Get(key string) string {
return get(os.Getenv("GODEBUG"), key)
}
// get returns the value part of key=value in s (a GODEBUG value).
func get(s, key string) string {
for i := 0; i < len(s)-len(key)-1; i++ {
if i > 0 && s[i-1] != ',' {
continue
}
afterKey := s[i+len(key):]
if afterKey[0] != '=' || s[i:i+len(key)] != key {
continue
}
val := afterKey[1:]
for i, b := range val {
if b == ',' {
return val[:i]
}
}
return val
}
return ""
}
|
[
"\"GODEBUG\""
] |
[] |
[
"GODEBUG"
] |
[]
|
["GODEBUG"]
|
go
| 1 | 0 | |
echo_server_golang.go
|
package main
import (
"net/http"
"os"
"log"
)
// https://github.com/aautar/go-http-echo
// DefaultPort is the default port to use if once is not specified by the SERVER_PORT environment variable
const DefaultPort = "7893";
func getServerPort() (string) {
port := os.Getenv("SERVER_PORT");
if port != "" {
return port;
}
return DefaultPort;
}
// EchoHandler echos back the request as a response
func EchoHandler(writer http.ResponseWriter, request *http.Request) {
log.Println("Echoing back request made to " + request.URL.Path + " to client (" + request.RemoteAddr + ")")
writer.Header().Set("Access-Control-Allow-Origin", "*")
// allow pre-flight headers
writer.Header().Set("Access-Control-Allow-Headers", "Content-Range, Content-Disposition, Content-Type, ETag")
request.Write(writer)
}
func main() {
log.Println("starting server, listening on port " + getServerPort())
http.HandleFunc("/", EchoHandler)
http.ListenAndServe(":" + getServerPort(), nil)
}
|
[
"\"SERVER_PORT\""
] |
[] |
[
"SERVER_PORT"
] |
[]
|
["SERVER_PORT"]
|
go
| 1 | 0 | |
tests/integration/util.py
|
'''Shared objects for integration testing.'''
import os
from plaid import Client
def create_client():
'''Create a new client for testing.'''
return Client(os.environ['CLIENT_ID'],
os.environ['SECRET'],
os.environ['PUBLIC_KEY'],
'sandbox',
api_version="2019-05-29",
client_app="plaid-python-unit-tests")
SANDBOX_INSTITUTION = 'ins_109508'
SANDBOX_INSTITUTION_NAME = 'First Platypus Bank'
SANDBOX_INSTITUTIONS = [
'ins_109508',
'ins_109509',
'ins_109510',
'ins_109511',
'ins_109512',
]
|
[] |
[] |
[
"SECRET",
"CLIENT_ID",
"PUBLIC_KEY"
] |
[]
|
["SECRET", "CLIENT_ID", "PUBLIC_KEY"]
|
python
| 3 | 0 | |
recipes/m4/all/conanfile.py
|
from conans import ConanFile, tools, AutoToolsBuildEnvironment
from contextlib import contextmanager
import os
class M4Conan(ConanFile):
name = "m4"
description = "GNU M4 is an implementation of the traditional Unix macro processor"
topics = ("conan", "m4", "macro", "macro processor")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://www.gnu.org/software/m4/"
license = "GPL-3.0-only"
exports_sources = ["patches/*.patch"]
settings = "os", "arch", "compiler"
_autotools = None
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
@property
def _is_msvc(self):
return self.settings.compiler == "Visual Studio"
@property
def _is_clang(self):
return str(self.settings.compiler).endswith("clang")
def build_requirements(self):
if tools.os_info.is_windows and "CONAN_BASH_PATH" not in os.environ and \
tools.os_info.detect_windows_subsystem() != "msys2":
self.build_requires("msys2/20190524")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("m4-" + self.version, self._source_subfolder)
def _configure_autotools(self):
if self._autotools:
return self._autotools
conf_args = []
self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
build_canonical_name = None
host_canonical_name = None
if self.settings.compiler == "Visual Studio":
# The somewhat older configure script of m4 does not understand the canonical names of Visual Studio
build_canonical_name = False
host_canonical_name = False
self._autotools.configure(args=conf_args, configure_dir=self._source_subfolder, build=build_canonical_name, host=host_canonical_name)
return self._autotools
@contextmanager
def _build_context(self):
env = {}
if self.settings.compiler == "Visual Studio":
with tools.vcvars(self.settings):
env.update({
"AR": "{}/build-aux/ar-lib lib".format(tools.unix_path(self._source_subfolder)),
"CC": "cl -nologo",
"CXX": "cl -nologo",
"LD": "link",
"NM": "dumpbin -symbols",
"OBJDUMP": ":",
"RANLIB": ":",
"STRIP": ":",
})
with tools.environment_append(env):
yield
else:
if self._is_clang:
env["CFLAGS"] = "-rtlib=compiler-rt"
with tools.environment_append(env):
yield
def _patch_sources(self):
for patch in self.conan_data["patches"][self.version]:
tools.patch(**patch)
def build(self):
self._patch_sources()
with self._build_context():
autotools = self._configure_autotools()
autotools.make()
if bool(os.environ.get("CONAN_RUN_TESTS", "")):
self.output.info("Running m4 checks...")
with tools.chdir("tests"):
autotools.make(target="check")
def package(self):
self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
with self._build_context():
autotools = self._configure_autotools()
autotools.install()
tools.rmdir(os.path.join(self.package_folder, "share"))
def package_id(self):
self.info.include_build_settings()
def package_info(self):
bin_path = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH environment variable: {}".format(bin_path))
self.env_info.PATH.append(bin_path)
bin_ext = ".exe" if self.settings.os == "Windows" else ""
m4_bin = os.path.join(self.package_folder, "bin", "m4{}".format(bin_ext)).replace("\\", "/")
# M4 environment variable is used by a lot of scripts as a way to override a hard-coded embedded m4 path
self.output.info("Setting M4 environment variable: {}".format(m4_bin))
self.env_info.M4 = m4_bin
|
[] |
[] |
[
"CONAN_RUN_TESTS"
] |
[]
|
["CONAN_RUN_TESTS"]
|
python
| 1 | 0 | |
pkg/utils/k8sutil.go
|
package utils
import (
"os"
"github.com/Sirupsen/logrus"
apps_v1 "k8s.io/api/apps/v1"
batch_v1 "k8s.io/api/batch/v1"
api_v1 "k8s.io/api/core/v1"
ext_v1beta1 "k8s.io/api/extensions/v1beta1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
// GetClient returns a k8s clientset to the request from inside of cluster
func GetClient() kubernetes.Interface {
config, err := rest.InClusterConfig()
if err != nil {
logrus.Fatalf("Can not get kubernetes config: %v", err)
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
logrus.Fatalf("Can not create kubernetes client: %v", err)
}
return clientset
}
func buildOutOfClusterConfig() (*rest.Config, error) {
kubeconfigPath := os.Getenv("KUBECONFIG")
if kubeconfigPath == "" {
kubeconfigPath = os.Getenv("HOME") + "/.kube/config"
}
return clientcmd.BuildConfigFromFlags("", kubeconfigPath)
}
// GetClientOutOfCluster returns a k8s clientset to the request from outside of cluster
func GetClientOutOfCluster() kubernetes.Interface {
config, err := buildOutOfClusterConfig()
if err != nil {
logrus.Fatalf("Can not get kubernetes config: %v", err)
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
logrus.Fatalf("Can not get kubernetes config: %v", err)
}
return clientset
}
// GetObjectMetaData returns metadata of a given k8s object
func GetObjectMetaData(obj interface{}) meta_v1.ObjectMeta {
var objectMeta meta_v1.ObjectMeta
switch object := obj.(type) {
case *apps_v1.Deployment:
objectMeta = object.ObjectMeta
case *api_v1.ReplicationController:
objectMeta = object.ObjectMeta
case *apps_v1.ReplicaSet:
objectMeta = object.ObjectMeta
case *apps_v1.DaemonSet:
objectMeta = object.ObjectMeta
case *api_v1.Service:
objectMeta = object.ObjectMeta
case *api_v1.Pod:
objectMeta = object.ObjectMeta
case *batch_v1.Job:
objectMeta = object.ObjectMeta
case *api_v1.PersistentVolume:
objectMeta = object.ObjectMeta
case *api_v1.Namespace:
objectMeta = object.ObjectMeta
case *api_v1.Secret:
objectMeta = object.ObjectMeta
case *ext_v1beta1.Ingress:
objectMeta = object.ObjectMeta
case *ext_v1beta1.PodSecurityPolicy:
objectMeta = object.ObjectMeta
}
return objectMeta
}
|
[
"\"KUBECONFIG\"",
"\"HOME\""
] |
[] |
[
"HOME",
"KUBECONFIG"
] |
[]
|
["HOME", "KUBECONFIG"]
|
go
| 2 | 0 | |
scripts/buildExtension.py
|
#!/usr/bin/env python3
# $Copyright (c) 2019-2020 Software AG, Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA, and/or its subsidiaries and/or its affiliates and/or their licensors.$
# Use, reproduction, transfer, publication or disclosure is prohibited except as specifically provided for in your License Agreement with Software AG
import shutil, json, os, subprocess, urllib
import blockMetadataGenerator, buildVersions
from pathlib import Path
import ssl, urllib.parse, urllib.request, base64, sys
ENCODING = 'UTF8'
BLOCK_METADATA_EVENT = 'apama.analyticsbuilder.BlockMetadata'
BLOCK_MESSAGES_EVENT = 'apama.analyticsbuilder.BlockMessages'
PAS_EXT_TYPE = 'pas_extension' # Type of the ManagedObject containing information about extension zip.
PAS_EXT_ID_FIELD = 'pas_extension_binary_id' # The field of the ManagedObject with id of the extension zip binary object.
BLOCK_REGISTRY_CHANNEL = 'analyticsbuilder.metadata.requests'
def add_arguments(parser):
""" Add parser arguments. """
parser.add_argument('--input', metavar='DIR', type=str, required=False, help='the input directory containing extension files - required when not deleting an extension')
parser.add_argument('--cdp', action='store_true', default=False, required=False, help='package all EPL files into a single CDP file')
parser.add_argument('--priority', metavar='N', type=int, required=False, help='the priority of the extension')
local = parser.add_argument_group('local save (requires at least the following arguments: --input, and --output)')
local.add_argument('--output', metavar='ZIP_FILE', type=str, required=False, help='the output zip file (requires the --input argument)')
remote = parser.add_argument_group('remote upload or delete (requires at least the following arguments: --cumulocity_url, --username, --password, and --name)')
remote.add_argument('--cumulocity_url', metavar='URL', help='the base Cumulocity URL')
remote.add_argument('--username', help='the Cumulocity tenant identifier and the username in the <tenantId>/<username> format')
remote.add_argument('--password', help='the Cumulocity password')
remote.add_argument('--name', help='the extension name in the inventory')
remote.add_argument('--delete', action='store_true', default=False, help='delete the extension from the inventory')
remote.add_argument('--restart', action='store_true', default=False, help='restart the apama-ctrl after upload or delete operation')
remote.add_argument('--ignoreVersion', action='store_true', default=False, required=False,
help='ignore the analytics builder script version check')
def write_evt_file(ext_files_dir, name, event):
"""
Append event into the evt file of the extension.
:param ext_files_dir: The 'files' directory of the extension.
:param name: Name of the evt file
:param event: The event to append.
:return:
"""
events_dir = ext_files_dir / 'events'
events_dir.mkdir(parents=True, exist_ok=True)
with open(events_dir / name, mode='w+', encoding='UTF8') as f:
return f.writelines([event])
def embeddable_json_str(json_str):
"""Return JSON string which could be included in a string literal of an event string."""
s = json.dumps(json.loads(json_str, encoding=ENCODING), separators=(',', ':'))
return json.dumps(s)
def gen_messages_evt_file(name, input, ext_files_dir, messages_from_metadata):
"""
Generate evt file containing event string for sending message JSON.
:param name: Extension name.
:param input: The input directory containing messages JSON files.
:param ext_files_dir: The 'files' directory of the extension.
:param messages_from_metadata: Extra messages to include extracted from blocks' metadata.
:return: None
"""
all_msgs = messages_from_metadata.copy()
msg_to_files = {}
msg_files = list(input.rglob('messages.json')) + list(input.rglob('*-messages.json'))
for f in msg_files:
try:
data = json.loads(f.read_text(encoding=ENCODING), encoding=ENCODING)
if not isinstance(data, dict):
print(f'Skipping JSON file with invalid messages format: {str(f)}')
continue
for (k, v) in data.items():
if k in all_msgs:
print(f'Message {k} defined multiple times in "{msg_to_files[k]}" and "{f}".')
else:
all_msgs[k] = v
msg_to_files[k] = f
except:
print(f'Skipping invalid JSON file: {str(f)}')
write_evt_file(ext_files_dir, f'{name}_messages.evt',
f'"{BLOCK_REGISTRY_CHANNEL}",{BLOCK_MESSAGES_EVENT}("{name}", "EN", {embeddable_json_str(json.dumps(all_msgs))})')
def createCDP(name, mons, ext_files_dir):
"""
Package mon files into a CDP file.
:param name: The name of the CDP file.
:param mons: The mon files to include in the CDP file.
:param ext_files_dir: Output directory for the CDP file.
:return: None
"""
cmd = [
os.path.join(os.getenv('APAMA_HOME'), 'bin', 'engine_package'),
'-u',
'-o', os.path.join(ext_files_dir, name+'.cdp'),
] + [str(f) for f in mons]
subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE).check_returncode()
def build_extension(input, output, tmpDir, cdp=False, priority=None, printMsg=False):
"""
Build an extension from specified input directory.
:param input: The input directory containing artifacts for the extension.
:param output: The output zip file for the extension.
:param tmpDir: The temporary directory.
:param cdp: Package all monitors into a CDP file.
:param priority: The priority of the package.
:param printMsg: Print success message with location of the extension zip.
:return:
"""
input = Path(input).resolve()
output = Path(output).resolve()
tmpDir = Path(tmpDir).resolve()
if not input.exists():
raise Exception(f'Input directory does not exist: {input.absolute()}')
name = output.name # catalog name
if name.endswith('.zip'):
name = name[:-4]
output = output.with_name(name)
ext_dir = tmpDir / name # '/' operator on Path object joins them
ext_files_dir = ext_dir / 'files'
ext_files_dir.mkdir(parents=True, exist_ok=True)
# Define priority of the extension if specified
if priority is not None:
ext_dir.joinpath('priority.txt').write_text(str(priority), encoding=ENCODING)
files_to_copy = list(input.rglob('*.evt'))
# Create CPD or copy mon files to extension directory while maintaining structure
mons = list(input.rglob('*.mon'))
if cdp:
createCDP(name, mons, ext_files_dir)
else:
files_to_copy.extend(mons)
files_to_copy.extend(list(input.rglob('*.so*')))
files_to_copy.extend(list(input.rglob('*.yaml')))
files_to_copy.extend(list(input.rglob('*.jar')))
for p in files_to_copy:
target_file = ext_files_dir / p.relative_to(input)
target_file.parent.mkdir(parents=True, exist_ok=True)
shutil.copy2(p, target_file)
# Generate block metadata
metadata_tmp_dir = tmpDir / 'metadata'
(metadata_json_file, messages) = blockMetadataGenerator.run_metadata_generator(input, str(metadata_tmp_dir / name), str(metadata_tmp_dir))
if metadata_json_file:
# Write evt file for metadata events
metadata = Path(metadata_json_file).read_text(encoding=ENCODING)
write_evt_file(ext_files_dir, f'{name}_metadata.evt', f'"{BLOCK_REGISTRY_CHANNEL}",{BLOCK_METADATA_EVENT}("{name}", "EN", {embeddable_json_str(metadata)})')
# Collate all the messages from the messages.json and *-messages.json
gen_messages_evt_file(name, input, ext_files_dir, messages)
# Create zip of extension
shutil.make_archive(output, format='zip', root_dir=ext_dir)
if printMsg:
print(f'Created {output}.zip')
return output.absolute().with_suffix('.zip')
class C8yConnection(object):
"""
Simple object to create connection to Cumulocity and perform REST requests.
"""
def __init__(self, url, username, password):
if not (url.startswith('http://') or url.startswith('https://')):
url = 'https://' + url
auth_handler = urllib.request.HTTPBasicAuthHandler()
auth_handler.add_password(realm='Name of Your Realm',
uri=url,
user=username,
passwd=password)
auth_handler.add_password(realm='Cumulocity',
uri=url,
user=username,
passwd=password)
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
self.urlopener = urllib.request.build_opener(urllib.request.HTTPSHandler(context=ctx, check_hostname=False), auth_handler)
self.base_url = url
self.auth_header = "Basic " + base64.b64encode(bytes("%s:%s" % (username, password), "utf8")).decode()
def request(self, method, path, body=None, headers=None):
"""
Perform an HTTP request. In case of POST request, return the id of the created resource.
:param method: The method.
:param path: The path of the resource.
:param body: The body for the request.
:param headers: The headers for the request.
:return: Body of the response. In case of POST request, id of the resource specified by the Location header.
"""
headers = headers or {}
headers['Authorization'] = self.auth_header
if isinstance(body, str):
body = bytes(body, encoding=ENCODING)
url = self.base_url[:-1] if self.base_url.endswith('/') else self.base_url
req = urllib.request.Request(url + path, data=body, headers=headers, method=method)
resp = self.urlopener.open(req)
if resp.getheader('Content-Type', '') == 'text/html': # we never ask for HTML, if we got it, this is probably the wrong URL (or we're very confused)
raise urllib.error.HTTPError(url, 404, resp.getheaders(), f'Failed to perform REST request for resource {path} on url {self.base_url}. Verify that the base Cumulocity URL is correct.', None)
# return the object id if POST
if method == 'POST':
loc = resp.getheader('Location', None)
if loc.endswith('/'): loc = loc[:-1]
return loc.split('/')[-1]
return resp.read()
def do_get(self, path, params=None, headers=None, jsonResp=True):
"""
Perform GET request.
:param path: The path to the resource.
:param params: The query params.
:param headers: The headers.
:param jsonResp: Response is JSON.
:return: The body of the response. If JSON output is expected then parse the JSON string to python object.
"""
if params:
path = f'{path}?{urllib.parse.urlencode(params)}'
body = self.request('GET', path, None, headers)
if body and jsonResp:
body = json.loads(body)
return body
def do_request_json(self, method, path, body, headers=None):
"""
Perform REST request (POST/GET mainly) with JSON body.
:param method: The REST method.
:param path: The path to resource.
:param body: The JSON body.
:param headers: The headers.
:return: Response body string.
"""
headers = headers or {}
headers['Content-Type'] = 'application/json'
body = json.dumps(body)
return self.request(method, path, body, headers)
def upload_new_extension(connection, f, extension_name):
"""
Create multi-form payload and header for REST request to upload the specified file.
:param connection: Object to perform REST requests.
:param f: The file to upload.
:param extension_name: Name of the extension to create.
:return: None
"""
formBoundary = '----PASExtension3XtDFfhJ8XLIrkPw'
headers = {
'Accept': '*/*',
'Content-Type': f'multipart/form-data; boundary={formBoundary}',
'Content': 'multipart/form-data'
}
file_content = Path(f).read_bytes()
formBoundary = '--' + formBoundary
filename = extension_name + '.zip'
body = bytearray('%s\r\nContent-Disposition: form-data; name="object"\r\n\r\n{"name":"%s","type":"application/zip","pas_extension":"%s"}\r\n' % (formBoundary, filename, extension_name), encoding=ENCODING)
body += bytearray('%s\r\nContent-Disposition: form-data; name="filesize"\r\n\r\n%s\r\n' % (formBoundary, len(file_content)), encoding=ENCODING)
body += bytearray('%s\r\nContent-Disposition: form-data; name="file"; filename="%s"\r\nContent-Type: application/zip\r\n\r\n' % (formBoundary, filename), encoding=ENCODING)
body += file_content
body += bytearray(f'\r\n{formBoundary}--', encoding=ENCODING)
try:
connection.request('POST', '/inventory/binaries', body, headers)
except Exception as ex:
raise Exception(f'Unable to upload extension using POST on /inventory/binaries: {ex}')
def replace_extension_content(connection, f, moId):
"""
Replace content of existing extension.
:param connection: Object to perform REST requests.
:param f: The zip file.
:param moId: The id of the extension object.
:return: None
"""
file_content = Path(f).read_bytes()
headers = {
'Accept': '*/*',
'Content-Type': f'application/zip',
}
try:
connection.request('PUT', f'/inventory/binaries/{moId}', file_content, headers)
except Exception as ex:
raise Exception(f'Unable to replace extension content using PUT on /inventory/binaries/{moId}: {ex}')
def upload_or_delete_extension(extension_zip, url, username, password, name, delete=False, restart=False, ignoreVersion=False, printMsg=False):
"""
Upload the extension to the Cumulocity inventory or delete the extension from the inventory.
:param extension_zip: The extension zip to upload.
:param url: The Cumulocity URL.
:param username: The username.
:param password: The password.
:param name: The name of the extension.
:param delete: Delete the extension instead of uploading it.
:param restart: Restart the apama-ctrl after uploading the extension.
:param ignoreVersion: Ignores block sdk version.
:param printMsg: Print the success message.
:return:
"""
connection = C8yConnection(url, username, password)
# checks Analytics builder version with Apama-ctrl version
checkVersions(connection, ignoreVersion)
checkIfStarter(connection, ignoreVersion)
# Get existing ManagedObject for PAS extension.
try:
extension_mos = connection.do_get('/inventory/managedObjects', {'query': f"pas_extension eq '{name}'"})
except urllib.error.HTTPError as err:
if err.code == 404:
raise Exception(
f'Failed to perform REST request for resource /inventory/managedObjects on url {connection.base_url}. Verify that the base Cumulocity URL is correct.')
raise err
extension_mo = None
if extension_mos:
extension_mos = extension_mos.get('managedObjects', [])
extension_mo = extension_mos[0] if len(extension_mos) == 1 else None
if len(extension_mos) > 1:
raise Exception(f'Multiple Managed Objects found with pas_extension={name}. Delete them to upload a new extension with the same name.')
if extension_mo:
moId = extension_mo["id"]
if delete:
try:
connection.request('DELETE', f'/inventory/binaries/{moId}')
except Exception as ex:
raise Exception(f'Unable to delete extension using DELETE on /inventory/binaries/{moId}: {ex}')
if printMsg: print(f'Deleted extension {name}')
else:
replace_extension_content(connection, extension_zip, moId)
if printMsg: print(f'Uploaded extension {name}')
else:
if delete:
print('Extension already deleted')
else:
upload_new_extension(connection, extension_zip, name)
if printMsg: print(f'Uploaded extension {name}')
if restart:
try:
connection.request('PUT', f'/service/cep/restart')
if printMsg: print('Restart requested')
except (urllib.error.HTTPError, urllib.error.URLError) as ex:
statuscode = int(ex.code)
if statuscode // 10 == 50:
if printMsg: print('Restart requested')
else:
raise Exception(f'Failed to restart Apama-ctrl: {ex}')
except Exception as ex:
raise Exception(f'Failed to restart Apama-ctrl: {ex}')
def isAllRemoteOptions(args, remote):
for k, v in remote.items():
if v and getattr(args, k, None) is None:
raise Exception(f'Argument --{k} is required for the remote operation.')
def checkIfStarter(connection,ignoreVersion):
is_starter = None
try:
resp = connection.request('GET',f'/service/cep/diagnostics/apamaCtrlStatus')
is_starter = (json.loads(resp).get('is_starter_mode'))
except urllib.error.HTTPError as err:
print(f'Could not identify Apama-ctrl : {err}')
if is_starter == True:
if ignoreVersion:
print(f'WARNING: Uploaded extensions are not supported in Apama Starter so they will not be available in the model editor.')
else:
print(f'FAILED: Extensions are not supported in Apama Starter. Ignore the check using --ignoreVersion')
exit()
def checkVersions(connection, ignoreVersion):
apamactrl_version = None
git_url = 'https://github.com/SoftwareAG/apama-analytics-builder-block-sdk/releases'
try:
resp = connection.request('GET', f'/service/cep/diagnostics/componentVersion')
apamactrl_version = json.loads(resp).get('releaseTrainVersion')
except urllib.error.HTTPError as err:
if err.code == 404:
if ignoreVersion:
print(f'WARNING: It is recommended to use the Analytics Builder script only against Apama-ctrl with the same version.', file=sys.stderr)
else:
raise Exception(f'Failed to perform REST request for resource /diagnostics/componentVersion on url {connection.base_url}. A user using a Cumulocity tenant version ({apamactrl_version}) has to checkout the latest and compatible version of the branch, for example if using the cloned github repository, switch to the 10.5.0.x branch using git checkout rel/10.5.0.x. Else download the latest release of 10.5.0.x from {git_url}.')
else:
if err.code >= 400:
if not ignoreVersion:
ignoreVersion= True
print(f'WARNING: apama-ctrl may not be running, skipping version check.', file=sys.stderr)
apamactrl_version = 'Unknown'
else:
raise err
sdk_version = buildVersions.RELEASE_TRAIN_VERSION
if apamactrl_version is not None and apamactrl_version != sdk_version:
if ignoreVersion:
print(f'WARNING: It is recommended to use the Analytics Builder script only against Apama-ctrl with the same version. The version of the Analytics Builder script is {sdk_version} but the version of Apama-ctrl is {apamactrl_version}.')
else:
raise Exception(f'The apama analytics builder block sdk version has to be compatible with the apama-ctrl microservice version. Please download the latest block sdk release for v{apamactrl_version} from https://github.com/SoftwareAG/apama-analytics-builder-block-sdk/releases. If you have cloned the git repository then checkout/switch to the branch that\'s compatible with the version of the apama-ctrl microservice. For example, if the apama-ctrl microservice release train version is {apamactrl_version} switch to {apamactrl_version}.x branch using \'git checkout rel/{apamactrl_version}.x\'. You can also provide the --ignoreVersion command line option if you want to ignore the version compatibility check.')
def run(args):
# Support remote operations and whether they are mandatory.
remote = {'cumulocity_url':True, 'username':True, 'password':True, 'name':True, 'delete':False, 'restart':False}
# Check if any remote option is provided
is_remote = False
for k in remote.keys():
if getattr(args, k, None):
is_remote = True
break
# check all mandatory remote options are provided
if is_remote:
isAllRemoteOptions(args, remote)
# check either output or mandatory remote options are provided.
if not (is_remote or args.output):
raise Exception(f'Provide arguments to save the extension locally, perform remote operations or both.')
if not args.input and not args.delete:
raise Exception(f'Argument --input is required when not deleting an extension')
zip_path = Path(args.tmpDir, args.name).with_suffix('.zip') if is_remote else args.output # Use the <name>.zip for the zip name which gets uploaded.
if not args.delete:
zip_path = build_extension(args.input, zip_path, args.tmpDir, args.cdp, args.priority, printMsg=bool(args.output))
if is_remote:
if args.output and not args.delete:
output = args.output + ('' if args.output.endswith('.zip') else '.zip')
shutil.copy2(zip_path, output)
return upload_or_delete_extension(zip_path, args.cumulocity_url, args.username,
args.password, args.name, args.delete, args.restart, args.ignoreVersion, printMsg=True)
|
[] |
[] |
[
"APAMA_HOME"
] |
[]
|
["APAMA_HOME"]
|
python
| 1 | 0 | |
test/e2e-apiserver-test/cluster_test.go
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e_apiserver_test
import (
"fmt"
"io/ioutil"
"os"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
v1 "github.com/oam-dev/kubevela/pkg/apiserver/rest/apis/v1"
"github.com/oam-dev/kubevela/pkg/multicluster"
util "github.com/oam-dev/kubevela/pkg/utils"
)
const (
WorkerClusterName = "cluster-worker"
WorkerClusterKubeConfigPath = "/tmp/worker.kubeconfig"
)
var _ = Describe("Test cluster rest api", func() {
Context("Test basic cluster CURD", func() {
var clusterName string
BeforeEach(func() {
clusterName = WorkerClusterName + "-" + util.RandomString(8)
kubeconfigBytes, err := ioutil.ReadFile(WorkerClusterKubeConfigPath)
Expect(err).Should(Succeed())
resp := post("/clusters", v1.CreateClusterRequest{
Name: clusterName,
KubeConfig: string(kubeconfigBytes),
})
Expect(decodeResponseBody(resp, nil)).Should(Succeed())
})
AfterEach(func() {
resp := delete("/clusters/" + clusterName)
Expect(decodeResponseBody(resp, nil)).Should(Succeed())
})
It("Test get cluster", func() {
resp := get("/clusters/" + clusterName)
clusterResp := &v1.DetailClusterResponse{}
Expect(decodeResponseBody(resp, clusterResp)).Should(Succeed())
Expect(clusterResp.Status).Should(Equal("Healthy"))
})
It("Test list clusters", func() {
defer GinkgoRecover()
resp := get("/clusters/?page=1&pageSize=5")
clusterResp := &v1.ListClusterResponse{}
Expect(decodeResponseBody(resp, clusterResp)).Should(Succeed())
Expect(len(clusterResp.Clusters) >= 2).Should(BeTrue())
Expect(clusterResp.Clusters[0].Name).Should(Equal(multicluster.ClusterLocalName))
Expect(clusterResp.Clusters[1].Name).Should(Equal(clusterName))
resp = get("/clusters/?page=1&pageSize=5&query=" + WorkerClusterName)
clusterResp = &v1.ListClusterResponse{}
Expect(decodeResponseBody(resp, clusterResp)).Should(Succeed())
Expect(len(clusterResp.Clusters) >= 1).Should(BeTrue())
Expect(clusterResp.Clusters[0].Name).Should(Equal(clusterName))
})
It("Test modify cluster", func() {
kubeconfigBytes, err := ioutil.ReadFile(WorkerClusterKubeConfigPath)
Expect(err).Should(Succeed())
resp := put("/clusters/"+clusterName, v1.CreateClusterRequest{
Name: clusterName,
KubeConfig: string(kubeconfigBytes),
Description: "Example description",
})
clusterResp := &v1.ClusterBase{}
Expect(decodeResponseBody(resp, clusterResp)).Should(Succeed())
Expect(clusterResp.Description).ShouldNot(Equal(""))
})
It("Test create ns in cluster", func() {
testNamespace := fmt.Sprintf("test-%d", time.Now().Unix())
resp := post("/clusters/"+clusterName+"/namespaces", v1.CreateClusterNamespaceRequest{Namespace: testNamespace})
nsResp := &v1.CreateClusterNamespaceResponse{}
Expect(decodeResponseBody(resp, nsResp)).Should(Succeed())
Expect(nsResp.Exists).Should(Equal(false))
resp = post("/clusters/"+clusterName+"/namespaces", v1.CreateClusterNamespaceRequest{Namespace: testNamespace})
nsResp = &v1.CreateClusterNamespaceResponse{}
Expect(decodeResponseBody(resp, nsResp)).Should(Succeed())
Expect(nsResp.Exists).Should(Equal(true))
})
})
// Pending ("P") context: these cloud-cluster specs are skipped by ginkgo.
// NOTE(review): when enabled they read ALIYUN_ACCESS_KEY_ID and
// ALIYUN_ACCESS_KEY_SECRET from the environment — confirm credentials are
// provisioned before un-pending.
PContext("Test cloud cluster rest api", func() {
	var clusterName string
	BeforeEach(func() {
		// Unique cluster name per spec to avoid collisions between runs.
		clusterName = WorkerClusterName + "-" + util.RandomString(8)
	})
	AfterEach(func() {
		// Detach whatever cluster the spec connected.
		resp := delete("/clusters/" + clusterName)
		Expect(decodeResponseBody(resp, nil)).Should(Succeed())
	})
	// Lists Aliyun-managed clusters via the cloud-clusters API and connects
	// the first one under the generated name, expecting a Healthy status.
	It("Test list aliyun cloud cluster and connect", func() {
		AccessKeyID := os.Getenv("ALIYUN_ACCESS_KEY_ID")
		AccessKeySecret := os.Getenv("ALIYUN_ACCESS_KEY_SECRET")
		resp := post("/clusters/cloud-clusters/aliyun/?page=1&pageSize=5", v1.AccessKeyRequest{
			AccessKeyID:     AccessKeyID,
			AccessKeySecret: AccessKeySecret,
		})
		clusterResp := &v1.ListCloudClusterResponse{}
		Expect(decodeResponseBody(resp, clusterResp)).Should(Succeed())
		Expect(len(clusterResp.Clusters)).ShouldNot(Equal(0))
		ClusterID := clusterResp.Clusters[0].ID
		resp = post("/clusters/cloud-clusters/aliyun/connect", v1.ConnectCloudClusterRequest{
			AccessKeyID:     AccessKeyID,
			AccessKeySecret: AccessKeySecret,
			ClusterID:       ClusterID,
			Name:            clusterName,
		})
		clusterBase := &v1.ClusterBase{}
		Expect(decodeResponseBody(resp, clusterBase)).Should(Succeed())
		Expect(clusterBase.Status).Should(Equal("Healthy"))
	})
})
})
|
[
"\"ALIYUN_ACCESS_KEY_ID\"",
"\"ALIYUN_ACCESS_KEY_SECRET\""
] |
[] |
[
"ALIYUN_ACCESS_KEY_SECRET",
"ALIYUN_ACCESS_KEY_ID"
] |
[]
|
["ALIYUN_ACCESS_KEY_SECRET", "ALIYUN_ACCESS_KEY_ID"]
|
go
| 2 | 0 | |
napari/utils/perf/__init__.py
|
"""Performance Monitoring.
The perfmon module lets you instrument your code and visualize its run-time
behavior and timings in Chrome's Tracing GUI.
To enable perfmon define the env var NAPARI_PERFMON as follows:
NAPARI_PERFMON=1
Activates perfmon, trace using Debug -> Performance Trace menu.
NAPARI_PERFMON=/path/to/config.json
Configure perfmon using the config.json configuration. See the
PerfmonConfig docs for the spec of the config file.
Chrome Tracing
---------------
Chrome has a nice built-in performance tool called chrome://tracing. Chrome
can record traces of web applications. But the format is well-documented and
anyone can create the files and use the nice GUI. And other programs accept
the format including:
1) https://www.speedscope.app/ which does flamegraphs (Chrome doesn't).
2) Qt Creator's performance tools.
Monkey Patching
---------------
The best way to add perf_timers is using the perfmon config file. You can
list which methods or functions you want to time, and a perf_timer will be
monkey-patched into each callable on startup. The monkey patching
is done only if perfmon is enabled.
Trace On Start
---------------
Add a line to the config file like:
"trace_file_on_start": "/Path/to/my/trace.json"
Perfmon will start tracing on startup. You must quit napari with the Quit
command for napari to write the trace file. See the PerfmonConfig docs.
Manual Timing
-------------
You can also manually add "perf_timer" context objects and
"add_counter_event()" and "add_instant_event()" functions to your code. All
three of these should be removed before merging the PR into master. While
they have almost zero overhead when perfmon is disabled, it's still better
not to leave them in the code. Think of them as similar to debug prints.
"""
import os
from ._compat import perf_counter_ns
from ._config import perf_config
from ._event import PerfEvent
from ._timers import add_counter_event, add_instant_event, perf_timer, timers
USE_PERFMON = os.getenv("NAPARI_PERFMON", "0") != "0"
|
[] |
[] |
[
"NAPARI_PERFMON"
] |
[]
|
["NAPARI_PERFMON"]
|
python
| 1 | 0 | |
vendor/k8s.io/kubernetes/test/e2e/framework/util.go
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"text/tabwriter"
"time"
"github.com/golang/glog"
"golang.org/x/crypto/ssh"
"golang.org/x/net/websocket"
"google.golang.org/api/googleapi"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
apps "k8s.io/api/apps/v1"
batch "k8s.io/api/batch/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/controller"
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubectl"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
sshutil "k8s.io/kubernetes/pkg/ssh"
"k8s.io/kubernetes/pkg/util/system"
taintutils "k8s.io/kubernetes/pkg/util/taints"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
)
// Shared timeouts, poll intervals, and tuning knobs for the e2e framework.
const (
	// How long to wait for the pod to be listable
	PodListTimeout = time.Minute
	// Initial pod start can be delayed O(minutes) by slow docker pulls
	// TODO: Make this 30 seconds once #4566 is resolved.
	PodStartTimeout = 5 * time.Minute
	// Same as `PodStartTimeout` to wait for the pod to be started, but shorter.
	// Use it case by case when we are sure pod start will not be delayed
	// minutes by slow docker pulls or something else.
	PodStartShortTimeout = 1 * time.Minute
	// How long to wait for a pod to be deleted
	PodDeleteTimeout = 5 * time.Minute
	// If there are any orphaned namespaces to clean up, this test is running
	// on a long lived cluster. A long wait here is preferable to spurious test
	// failures caused by leaked resources from a previous test run.
	NamespaceCleanupTimeout = 15 * time.Minute
	// Some pods can take much longer to get ready due to volume attach/detach latency.
	slowPodStartTimeout = 15 * time.Minute
	// How long to wait for a service endpoint to be resolvable.
	ServiceStartTimeout = 3 * time.Minute
	// How often to Poll pods, nodes and claims.
	Poll = 2 * time.Second
	// Generic short/long poll timeouts for internal helpers.
	pollShortTimeout = 1 * time.Minute
	pollLongTimeout  = 5 * time.Minute
	// service accounts are provisioned after namespace creation
	// a service account is required to support pod creation in a namespace as part of admission control
	ServiceAccountProvisionTimeout = 2 * time.Minute
	// How long to try single API calls (like 'get' or 'list'). Used to prevent
	// transient failures from failing tests.
	// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
	SingleCallTimeout = 5 * time.Minute
	// How long nodes have to be "ready" when a test begins. They should already
	// be "ready" before the test starts, so this is small.
	NodeReadyInitialTimeout = 20 * time.Second
	// How long pods have to be "ready" when a test begins.
	PodReadyBeforeTimeout = 5 * time.Minute
	// How long pods have to become scheduled onto nodes
	podScheduledBeforeTimeout = PodListTimeout + (20 * time.Second)
	podRespondingTimeout      = 15 * time.Minute
	ServiceRespondingTimeout  = 2 * time.Minute
	EndpointRegisterTimeout   = time.Minute
	// How long claims have to become dynamically provisioned
	ClaimProvisionTimeout = 5 * time.Minute
	// Same as `ClaimProvisionTimeout` to wait for claim to be dynamically provisioned, but shorter.
	// Use it case by case when we are sure this timeout is enough.
	ClaimProvisionShortTimeout = 1 * time.Minute
	// How long claims have to become bound
	ClaimBindingTimeout = 3 * time.Minute
	// How long claims have to become deleted
	ClaimDeletingTimeout = 3 * time.Minute
	// How long PVs have to become reclaimed
	PVReclaimingTimeout = 3 * time.Minute
	// How long PVs have to become bound
	PVBindingTimeout = 3 * time.Minute
	// How long PVs have to become deleted
	PVDeletingTimeout = 3 * time.Minute
	// How long a node is allowed to become "Ready" after it is restarted before
	// the test is considered failed.
	RestartNodeReadyAgainTimeout = 5 * time.Minute
	// How long a pod is allowed to become "running" and "ready" after a node
	// restart before test is considered failed.
	RestartPodReadyAgainTimeout = 5 * time.Minute
	// Number of objects that gc can delete in a second.
	// GC issues 2 requests for single delete.
	gcThroughput = 10
	// Minimal number of nodes for the cluster to be considered large.
	largeClusterThreshold = 100
	// TODO(justinsb): Avoid hardcoding this.
	awsMasterIP = "172.20.0.9"
	// ssh port
	sshPort = "22"
	// ImagePrePullingTimeout is the time we wait for the e2e-image-puller
	// static pods to pull the list of seeded images. If they don't pull
	// images within this time we simply log their output and carry on
	// with the tests.
	ImagePrePullingTimeout = 5 * time.Minute
)

var (
	// BusyBoxImage is the default busybox image used by e2e tests.
	BusyBoxImage = "busybox"
	// Label allocated to the image puller static pod that runs on each node
	// before e2es.
	ImagePullerLabels = map[string]string{"name": "e2e-image-puller"}
	// For parsing Kubectl version for version-skewed testing.
	gitVersionRegexp = regexp.MustCompile("GitVersion:\"(v.+?)\"")
	// Slice of regexps for names of pods that have to be running to consider a Node "healthy"
	requiredPerNodePods = []*regexp.Regexp{
		regexp.MustCompile(".*kube-proxy.*"),
		regexp.MustCompile(".*fluentd-elasticsearch.*"),
		regexp.MustCompile(".*node-problem-detector.*"),
	}
	// Serve hostname image name
	ServeHostnameImage = imageutils.GetE2EImage(imageutils.ServeHostname)
)

// Address groups the ways a node can be addressed: internal IP, external IP,
// and hostname.
type Address struct {
	internalIP string
	externalIP string
	hostname   string
}
// GetServerArchitecture fetches the architecture of the cluster's apiserver.
// Falls back to "amd64" when the server version cannot be queried or does
// not report a platform.
func GetServerArchitecture(c clientset.Interface) string {
	version, err := c.Discovery().ServerVersion()
	if err != nil || version.Platform == "" {
		// If we failed to get the server version for some reason, default to amd64.
		return "amd64"
	}
	// Platform is "<os>/<arch>", e.g. "linux/amd64" or "windows/amd64";
	// keep only the architecture component.
	return strings.Split(version.Platform, "/")[1]
}
// GetPauseImageName fetches the pause image name for the same architecture as the apiserver.
func GetPauseImageName(c clientset.Interface) string {
	return imageutils.GetE2EImageWithArch(imageutils.Pause, GetServerArchitecture(c))
}

// GetServicesProxyRequest rewrites the given API request to target the
// services "proxy" subresource. The client parameter is currently unused and
// the error return is always nil; both are kept for interface stability.
func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) {
	return request.Resource("services").SubResource("proxy"), nil
}

// unique identifier of the e2e run
var RunId = uuid.NewUUID()

// CreateTestingNSFn is the signature of a factory that creates a testing
// namespace with the given base name and labels.
type CreateTestingNSFn func(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error)

// ContainerFailures records a container's terminated state and how many
// times it restarted.
type ContainerFailures struct {
	status   *v1.ContainerStateTerminated
	Restarts int
}
// GetMasterHost returns the host portion (host[:port]) of the configured
// apiserver URL; fails the test if the URL does not parse.
func GetMasterHost() string {
	masterUrl, err := url.Parse(TestContext.Host)
	ExpectNoError(err)
	return masterUrl.Host
}

// nowStamp returns the current time with millisecond precision, used as a
// prefix for every log line.
func nowStamp() string {
	return time.Now().Format(time.StampMilli)
}

// log writes one timestamped, level-tagged line to the ginkgo writer.
func log(level string, format string, args ...interface{}) {
	fmt.Fprintf(GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
}

// Logf logs an informational message to the ginkgo writer.
func Logf(format string, args ...interface{}) {
	log("INFO", format, args...)
}

// Failf logs the message and fails the current test, attributing the
// failure location to Failf's caller.
func Failf(format string, args ...interface{}) {
	FailfWithOffset(1, format, args...)
}

// FailfWithOffset calls "Fail" and logs the error at "offset" levels above its caller
// (for example, for call chain f -> g -> FailfWithOffset(1, ...) error would be logged for "f").
// NOTE(review): the message is logged at INFO level before failing — confirm
// whether a failure-level tag is expected by log consumers.
func FailfWithOffset(offset int, format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	log("INFO", msg)
	ginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
}

// Skipf logs the message and skips the current test via the ginkgo wrapper.
func Skipf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	log("INFO", msg)
	ginkgowrapper.Skip(nowStamp() + ": " + msg)
}
// SkipUnlessNodeCountIsAtLeast skips the test unless the cluster has at
// least minNodeCount nodes.
func SkipUnlessNodeCountIsAtLeast(minNodeCount int) {
	if TestContext.CloudConfig.NumNodes < minNodeCount {
		Skipf("Requires at least %d nodes (not %d)", minNodeCount, TestContext.CloudConfig.NumNodes)
	}
}

// SkipUnlessNodeCountIsAtMost skips the test unless the cluster has at most
// maxNodeCount nodes.
func SkipUnlessNodeCountIsAtMost(maxNodeCount int) {
	if TestContext.CloudConfig.NumNodes > maxNodeCount {
		Skipf("Requires at most %d nodes (not %d)", maxNodeCount, TestContext.CloudConfig.NumNodes)
	}
}

// SkipUnlessAtLeast skips with the given message when value < minValue.
func SkipUnlessAtLeast(value int, minValue int, message string) {
	if value < minValue {
		Skipf(message)
	}
}

// SkipIfProviderIs skips the test when running on any of the given providers.
func SkipIfProviderIs(unsupportedProviders ...string) {
	if ProviderIs(unsupportedProviders...) {
		Skipf("Not supported for providers %v (found %s)", unsupportedProviders, TestContext.Provider)
	}
}

// SkipUnlessLocalEphemeralStorageEnabled skips the test unless the
// LocalStorageCapacityIsolation feature gate is enabled.
func SkipUnlessLocalEphemeralStorageEnabled() {
	if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
		Skipf("Only supported when %v feature is enabled", features.LocalStorageCapacityIsolation)
	}
}

// SkipUnlessSSHKeyPresent skips the test when no SSH signer/key is available
// for the configured provider.
func SkipUnlessSSHKeyPresent() {
	if _, err := GetSigner(TestContext.Provider); err != nil {
		Skipf("No SSH Key for provider %s: '%v'", TestContext.Provider, err)
	}
}

// SkipUnlessProviderIs skips the test unless running on one of the given
// providers.
func SkipUnlessProviderIs(supportedProviders ...string) {
	if !ProviderIs(supportedProviders...) {
		Skipf("Only supported for providers %v (not %s)", supportedProviders, TestContext.Provider)
	}
}

// SkipUnlessMultizone skips the test unless the cluster spans more than one
// zone (or the zone listing fails).
func SkipUnlessMultizone(c clientset.Interface) {
	zones, err := GetClusterZones(c)
	if err != nil {
		Skipf("Error listing cluster zones")
	}
	if zones.Len() <= 1 {
		Skipf("Requires more than one zone")
	}
}
// SkipIfMultizone skips the current test when the cluster spans more than
// one zone; some tests only make sense on a single-zone cluster.
func SkipIfMultizone(c clientset.Interface) {
	zones, err := GetClusterZones(c)
	if err != nil {
		Skipf("Error listing cluster zones")
	}
	if zones.Len() > 1 {
		// Fix: this branch fires on multi-zone clusters, but the previous
		// message ("Requires more than one zone") — copied from
		// SkipUnlessMultizone — described the opposite condition.
		Skipf("Requires at most one zone")
	}
}
// SkipUnlessClusterMonitoringModeIs skips the test unless the configured
// cluster monitoring mode is one of the supported modes.
func SkipUnlessClusterMonitoringModeIs(supportedMonitoring ...string) {
	if !ClusterMonitoringModeIs(supportedMonitoring...) {
		Skipf("Only next monitoring modes are supported %v (not %s)", supportedMonitoring, TestContext.ClusterMonitoringMode)
	}
}

// SkipUnlessPrometheusMonitoringIsEnabled skips the test unless Prometheus
// monitoring is enabled in the test context.
// NOTE(review): the supportedMonitoring parameter is accepted but never
// used — confirm whether it is vestigial.
func SkipUnlessPrometheusMonitoringIsEnabled(supportedMonitoring ...string) {
	if !TestContext.EnablePrometheusMonitoring {
		Skipf("Skipped because prometheus monitoring is not enabled")
	}
}

// SkipUnlessMasterOSDistroIs skips the test unless the master OS distro is
// one of the supported distros.
func SkipUnlessMasterOSDistroIs(supportedMasterOsDistros ...string) {
	if !MasterOSDistroIs(supportedMasterOsDistros...) {
		Skipf("Only supported for master OS distro %v (not %s)", supportedMasterOsDistros, TestContext.MasterOSDistro)
	}
}

// SkipUnlessNodeOSDistroIs skips the test unless the node OS distro is one
// of the supported distros.
func SkipUnlessNodeOSDistroIs(supportedNodeOsDistros ...string) {
	if !NodeOSDistroIs(supportedNodeOsDistros...) {
		Skipf("Only supported for node OS distro %v (not %s)", supportedNodeOsDistros, TestContext.NodeOSDistro)
	}
}
// SkipUnlessSecretExistsAfterWait polls every 15s, up to timeout, for the
// named secret to appear in the namespace and skips the test if it never does.
func SkipUnlessSecretExistsAfterWait(c clientset.Interface, name, namespace string, timeout time.Duration) {
	Logf("Waiting for secret %v in namespace %v to exist in duration %v", name, namespace, timeout)
	start := time.Now()
	if wait.PollImmediate(15*time.Second, timeout, func() (bool, error) {
		_, err := c.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			// Not found (or transient error): keep polling until timeout.
			Logf("Secret %v in namespace %v still does not exist after duration %v", name, namespace, time.Since(start))
			return false, nil
		}
		return true, nil
	}) != nil {
		Skipf("Secret %v in namespace %v did not exist after timeout of %v", name, namespace, timeout)
	}
	Logf("Secret %v in namespace %v found after duration %v", name, namespace, time.Since(start))
}
// SkipIfContainerRuntimeIs skips the test when the configured container
// runtime is any of the given runtimes.
func SkipIfContainerRuntimeIs(runtimes ...string) {
	for _, runtime := range runtimes {
		if runtime == TestContext.ContainerRuntime {
			Skipf("Not supported under container runtime %s", runtime)
		}
	}
}

// RunIfContainerRuntimeIs skips the test unless the configured container
// runtime is one of the given runtimes.
func RunIfContainerRuntimeIs(runtimes ...string) {
	for _, runtime := range runtimes {
		if runtime == TestContext.ContainerRuntime {
			return
		}
	}
	Skipf("Skipped because container runtime %q is not in %s", TestContext.ContainerRuntime, runtimes)
}

// RunIfSystemSpecNameIs skips the test unless the configured system spec
// name is one of the given names.
func RunIfSystemSpecNameIs(names ...string) {
	for _, name := range names {
		if name == TestContext.SystemSpecName {
			return
		}
	}
	Skipf("Skipped because system spec name %q is not in %v", TestContext.SystemSpecName, names)
}
// ProviderIs reports whether the configured test provider matches any of the
// given provider names, case-insensitively.
func ProviderIs(providers ...string) bool {
	for _, provider := range providers {
		// strings.EqualFold replaces ToLower(a) == ToLower(b): equivalent
		// for these ASCII identifiers, and avoids allocating two lowered
		// copies per comparison (staticcheck SA6005).
		if strings.EqualFold(provider, TestContext.Provider) {
			return true
		}
	}
	return false
}

// ClusterMonitoringModeIs reports whether the configured cluster monitoring
// mode matches any of the given modes, case-insensitively.
func ClusterMonitoringModeIs(monitoringModes ...string) bool {
	for _, mode := range monitoringModes {
		if strings.EqualFold(mode, TestContext.ClusterMonitoringMode) {
			return true
		}
	}
	return false
}

// MasterOSDistroIs reports whether the configured master OS distro matches
// any of the given distros, case-insensitively.
func MasterOSDistroIs(supportedMasterOsDistros ...string) bool {
	for _, distro := range supportedMasterOsDistros {
		if strings.EqualFold(distro, TestContext.MasterOSDistro) {
			return true
		}
	}
	return false
}

// NodeOSDistroIs reports whether the configured node OS distro matches any
// of the given distros, case-insensitively.
func NodeOSDistroIs(supportedNodeOsDistros ...string) bool {
	for _, distro := range supportedNodeOsDistros {
		if strings.EqualFold(distro, TestContext.NodeOSDistro) {
			return true
		}
	}
	return false
}
// ProxyMode detects which proxy mode the node's kube-proxy is running in
// (e.g. "iptables" or "ipvs"). It starts a temporary host-network pod in the
// framework's namespace, queries kube-proxy's local metrics endpoint
// (localhost:10249/proxyMode) from inside that pod, and returns the raw
// response body. The detector pod is deleted before returning.
func ProxyMode(f *Framework) (string, error) {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "kube-proxy-mode-detector",
			Namespace: f.Namespace.Name,
		},
		Spec: v1.PodSpec{
			// Host networking is required so localhost:10249 reaches the
			// node's kube-proxy rather than the pod's own netns.
			HostNetwork: true,
			Containers: []v1.Container{
				{
					Name:    "detector",
					Image:   imageutils.GetE2EImage(imageutils.Net),
					Command: []string{"/bin/sleep", "3600"},
				},
			},
		},
	}
	f.PodClient().CreateSync(pod)
	defer f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
	cmd := "curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode"
	stdout, err := RunHostCmd(pod.Namespace, pod.Name, cmd)
	if err != nil {
		return "", err
	}
	Logf("ProxyMode: %s", stdout)
	return stdout, nil
}
// SkipUnlessServerVersionGTE skips the test unless the apiserver version is
// at least v; fails the test if the version cannot be determined.
func SkipUnlessServerVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) {
	gte, err := ServerVersionGTE(v, c)
	if err != nil {
		Failf("Failed to get server version: %v", err)
	}
	if !gte {
		Skipf("Not supported for server versions before %q", v)
	}
}

// SkipIfMissingResource skips the test when the given group/version/resource
// is not served by the apiserver (probed with a List in the namespace).
func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, namespace string) {
	resourceClient := dynamicClient.Resource(gvr).Namespace(namespace)
	_, err := resourceClient.List(metav1.ListOptions{})
	if err != nil {
		// not all resources support list, so we ignore those
		if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
			Skipf("Could not find %s resource, skipping test: %#v", gvr, err)
		}
		Failf("Unexpected error getting %v: %v", gvr, err)
	}
}

// ProvidersWithSSH are those providers where each node is accessible with SSH
var ProvidersWithSSH = []string{"gce", "gke", "aws", "local"}

// podCondition is a predicate over a pod's current state: (done, error).
type podCondition func(pod *v1.Pod) (bool, error)
// logPodStates logs basic info of provided pods for debugging.
func logPodStates(pods []v1.Pod) {
	// Find maximum widths for pod, node, and phase strings for column printing.
	maxPodW, maxNodeW, maxPhaseW, maxGraceW := len("POD"), len("NODE"), len("PHASE"), len("GRACE")
	for i := range pods {
		pod := &pods[i]
		if len(pod.ObjectMeta.Name) > maxPodW {
			maxPodW = len(pod.ObjectMeta.Name)
		}
		if len(pod.Spec.NodeName) > maxNodeW {
			maxNodeW = len(pod.Spec.NodeName)
		}
		if len(pod.Status.Phase) > maxPhaseW {
			maxPhaseW = len(pod.Status.Phase)
		}
		// NOTE(review): maxGraceW is never widened in this loop, so a long
		// grace string can break column alignment — confirm if intentional.
	}
	// Increase widths by one to separate by a single space.
	maxPodW++
	maxNodeW++
	maxPhaseW++
	maxGraceW++
	// Log pod info. * does space padding, - makes them left-aligned.
	Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
		maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS")
	for _, pod := range pods {
		grace := ""
		if pod.DeletionGracePeriodSeconds != nil {
			grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds)
		}
		Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
			maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions)
	}
	Logf("") // Final empty line helps for readability.
}
// errorBadPodsStates builds an error message summarizing pods that failed to
// reach the desired state within the timeout, rendered as an aligned table.
// When more than 10 pods are bad, only a summary line is produced.
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string {
	errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
	// Detailed per-pod info is only worth printing for a small set.
	if len(badPods) > 10 {
		return errStr + "There are too many bad pods. Please check log for details."
	}
	var out bytes.Buffer
	tw := tabwriter.NewWriter(&out, 0, 0, 1, ' ', 0)
	fmt.Fprintln(tw, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS")
	for i := range badPods {
		p := &badPods[i]
		grace := ""
		if p.DeletionGracePeriodSeconds != nil {
			grace = fmt.Sprintf("%ds", *p.DeletionGracePeriodSeconds)
		}
		fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%+v\n",
			p.ObjectMeta.Name, p.Spec.NodeName, p.Status.Phase, grace, p.Status.Conditions)
	}
	tw.Flush()
	return errStr + out.String()
}
// WaitForPodsSuccess waits till all labels matching the given selector enter
// the Success state. The caller is expected to only invoke this method once the
// pods have been created. Polls every 30s up to timeout; on timeout the bad
// pods' states and logs are dumped and an error is returned.
func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[string]string, timeout time.Duration) error {
	successPodSelector := labels.SelectorFromSet(successPodLabels)
	start, badPods, desiredPods := time.Now(), []v1.Pod{}, 0
	if wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
		podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: successPodSelector.String()})
		if err != nil {
			Logf("Error getting pods in namespace %q: %v", ns, err)
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		if len(podList.Items) == 0 {
			// No pod matches the selector at all: treated as success.
			Logf("Waiting for pods to enter Success, but no pods in %q match label %v", ns, successPodLabels)
			return true, nil
		}
		badPods = []v1.Pod{}
		desiredPods = len(podList.Items)
		for _, pod := range podList.Items {
			if pod.Status.Phase != v1.PodSucceeded {
				badPods = append(badPods, pod)
			}
		}
		successPods := len(podList.Items) - len(badPods)
		Logf("%d / %d pods in namespace %q are in Success state (%d seconds elapsed)",
			successPods, len(podList.Items), ns, int(time.Since(start).Seconds()))
		if len(badPods) == 0 {
			return true, nil
		}
		return false, nil
	}) != nil {
		// Timed out: dump state and logs of the lagging pods before failing.
		logPodStates(badPods)
		LogPodsWithLabels(c, ns, successPodLabels, Logf)
		return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "SUCCESS", timeout))
	}
	return nil
}
// WaitForPodsRunningReady waits up to timeout to ensure that all pods in
// namespace ns are either running and ready, or failed but controlled by a
// controller. Also, it ensures that at least minPods are running and
// ready. It has separate behavior from other 'wait for' pods functions in
// that it requests the list of pods on every iteration. This is useful, for
// example, in cluster startup, because the number of pods increases while
// waiting. All pods that are in SUCCESS state are not counted.
//
// If ignoreLabels is not empty, pods matching this selector are ignored.
func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
	ignoreSelector := labels.SelectorFromSet(ignoreLabels)
	start := time.Now()
	Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
		timeout, minPods, ns)
	// Fix: removed a dead sync.WaitGroup (Add(1) with no Done/Wait) that the
	// previous revision created here and never used.
	var ignoreNotReady bool
	badPods := []v1.Pod{}
	desiredPods := 0
	notReady := int32(0)
	if wait.PollImmediate(Poll, timeout, func() (bool, error) {
		// We get the new list of pods, replication controllers, and
		// replica sets in every iteration because more pods come
		// online during startup and we want to ensure they are also
		// checked.
		replicas, replicaOk := int32(0), int32(0)
		rcList, err := c.CoreV1().ReplicationControllers(ns).List(metav1.ListOptions{})
		if err != nil {
			Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		for _, rc := range rcList.Items {
			replicas += *rc.Spec.Replicas
			replicaOk += rc.Status.ReadyReplicas
		}
		rsList, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(metav1.ListOptions{})
		if err != nil {
			Logf("Error getting replication sets in namespace %q: %v", ns, err)
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		for _, rs := range rsList.Items {
			replicas += *rs.Spec.Replicas
			replicaOk += rs.Status.ReadyReplicas
		}
		podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
		if err != nil {
			Logf("Error getting pods in namespace '%s': %v", ns, err)
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		nOk := int32(0)
		notReady = int32(0)
		badPods = []v1.Pod{}
		desiredPods = len(podList.Items)
		for _, pod := range podList.Items {
			if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
				continue
			}
			res, err := testutils.PodRunningReady(&pod)
			switch {
			case res && err == nil:
				nOk++
			case pod.Status.Phase == v1.PodSucceeded:
				Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name)
				// it doesn't make sense to wait for this pod
				continue
			case pod.Status.Phase != v1.PodFailed:
				Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
				notReady++
				badPods = append(badPods, pod)
			default:
				if metav1.GetControllerOf(&pod) == nil {
					Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
					badPods = append(badPods, pod)
				}
				//ignore failed pods that are controlled by some controller
			}
		}
		Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
			nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
		Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
		if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
			return true, nil
		}
		ignoreNotReady = (notReady <= allowedNotReadyPods)
		logPodStates(badPods)
		return false, nil
	}) != nil {
		// Timed out: tolerate the failure only when the not-ready count is
		// within the caller's allowance.
		if !ignoreNotReady {
			return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout))
		}
		Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
	}
	return nil
}
// kubectlLogPod fetches and logs (via logFunc) the logs of every container in
// pod whose name contains containerNameSubstr; an empty substring matches all
// containers. Falls back to the previous container instance's logs when the
// current logs are unavailable; if both fetches fail, the error is logged and
// the (possibly empty) logs are still printed.
func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) {
	for _, container := range pod.Spec.Containers {
		if strings.Contains(container.Name, containerNameSubstr) {
			// Contains() matches all strings if substr is empty
			logs, err := GetPodLogs(c, pod.Namespace, pod.Name, container.Name)
			if err != nil {
				// Current logs unavailable; try the previous instance.
				logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name)
				if err != nil {
					logFunc("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
				}
			}
			logFunc("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName)
			logFunc("%s : STARTLOG\n%s\nENDLOG for container %v:%v:%v", containerNameSubstr, logs, pod.Namespace, pod.Name, container.Name)
		}
	}
}
// LogFailedContainers dumps, via logFunc, the container logs of every pod in
// namespace ns that is not running and ready.
func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) {
	podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
	if err != nil {
		logFunc("Error getting pods in namespace '%s': %v", ns, err)
		return
	}
	logFunc("Running kubectl logs on non-ready containers in %v", ns)
	for _, pod := range podList.Items {
		if res, err := testutils.PodRunningReady(&pod); !res || err != nil {
			// Fix: route pod logs through the caller-supplied logFunc
			// instead of the package-level Logf, so the caller controls the
			// sink (consistent with LogPodsWithLabels below).
			kubectlLogPod(c, pod, "", logFunc)
		}
	}
}
// LogPodsWithLabels dumps, via logFunc, the logs of every pod in ns whose
// labels match the given map.
func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string, logFunc func(ftm string, args ...interface{})) {
	podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()})
	if err != nil {
		logFunc("Error getting pods in namespace %q: %v", ns, err)
		return
	}
	logFunc("Running kubectl logs on pods with labels %v in %v", match, ns)
	for _, pod := range podList.Items {
		kubectlLogPod(c, pod, "", logFunc)
	}
}
// LogContainersInPodsWithLabels dumps, via logFunc, the logs of containers
// whose name contains containerSubstr, for every pod in ns matching the
// given labels.
func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[string]string, containerSubstr string, logFunc func(ftm string, args ...interface{})) {
	podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()})
	if err != nil {
		// Consistency fix: report the error through the caller-supplied
		// logFunc (this previously used the package-level Logf, ignoring
		// the caller's chosen sink, unlike LogPodsWithLabels).
		logFunc("Error getting pods in namespace %q: %v", ns, err)
		return
	}
	for _, pod := range podList.Items {
		kubectlLogPod(c, pod, containerSubstr, logFunc)
	}
}
// DeleteNamespaces deletes all namespaces that match the given delete and skip filters.
// Filter is by simple strings.Contains; first skip filter, then delete filter.
// Returns the list of deleted namespaces or an error.
func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) {
	By("Deleting namespaces")
	nsList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{})
	Expect(err).NotTo(HaveOccurred())
	var deleted []string
	var wg sync.WaitGroup
OUTER:
	for _, item := range nsList.Items {
		// The skip filter takes precedence over the delete filter.
		if skipFilter != nil {
			for _, pattern := range skipFilter {
				if strings.Contains(item.Name, pattern) {
					continue OUTER
				}
			}
		}
		if deleteFilter != nil {
			var shouldDelete bool
			for _, pattern := range deleteFilter {
				if strings.Contains(item.Name, pattern) {
					shouldDelete = true
					break
				}
			}
			if !shouldDelete {
				continue OUTER
			}
		}
		wg.Add(1)
		deleted = append(deleted, item.Name)
		// Issue the deletions concurrently; wait until all API calls return.
		go func(nsName string) {
			defer wg.Done()
			defer GinkgoRecover()
			Expect(c.CoreV1().Namespaces().Delete(nsName, nil)).To(Succeed())
			Logf("namespace : %v api call to delete is complete ", nsName)
		}(item.Name)
	}
	wg.Wait()
	return deleted, nil
}
// WaitForNamespacesDeleted polls every 2s until none of the given namespaces
// exist anymore, or the timeout expires.
func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeout time.Duration) error {
	By("Waiting for namespaces to vanish")
	// Set of namespace names we are still waiting on.
	pending := make(map[string]bool, len(namespaces))
	for _, name := range namespaces {
		pending[name] = true
	}
	// Poll until every tracked namespace has been eradicated.
	return wait.Poll(2*time.Second, timeout,
		func() (bool, error) {
			nsList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{})
			if err != nil {
				return false, err
			}
			for i := range nsList.Items {
				if pending[nsList.Items[i].Name] {
					// At least one target namespace still exists.
					return false, nil
				}
			}
			return true, nil
		})
}
func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error {
w, err := c.CoreV1().ServiceAccounts(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName}))
if err != nil {
return err
}
_, err = watch.Until(timeout, w, conditions.ServiceAccountHasSecrets)
return err
}
// WaitForPodCondition polls the named pod every Poll interval until condition
// reports done, the pod is NotFound, or timeout elapses.
//
// condition(pod) semantics:
//   - (true, nil): condition satisfied; returns nil.
//   - (true, err): stop waiting and return err.
//   - (false, _):  keep polling (an error paired with done=false is ignored).
func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
// A deleted pod can never satisfy the condition; fail immediately.
Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err)
return err
}
// Any other Get error is treated as transient and retried.
Logf("Get pod %q in namespace %q failed, ignoring for %v. Error: %v", podName, ns, Poll, err)
continue
}
// log now so that current pod info is reported before calling `condition()`
Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
podName, pod.Status.Phase, pod.Status.Reason, podutil.IsPodReady(pod), time.Since(start))
if done, err := condition(pod); done {
if err == nil {
Logf("Pod %q satisfied condition %q", podName, desc)
}
return err
}
}
return fmt.Errorf("Gave up after waiting %v for pod %q to be %q", timeout, podName, desc)
}
// WaitForMatchPodsCondition lists pods across all namespaces with the given
// ListOptions and waits until every matching pod satisfies condition, or the
// timeout elapses. A pod whose condition returns (true, err) aborts the wait
// with that error; (false, err) is treated as "not yet" and the error is
// dropped.
func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
	Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(opts)
		if err != nil {
			return err
		}
		conditionNotMatch := []string{}
		for _, pod := range pods.Items {
			// pod is a fresh copy on each iteration, so taking its address
			// inside the loop body is safe.
			done, err := condition(&pod)
			if done && err != nil {
				return fmt.Errorf("Unexpected error: %v", err)
			}
			if !done {
				conditionNotMatch = append(conditionNotMatch, format.Pod(&pod))
			}
		}
		if len(conditionNotMatch) == 0 {
			// All matching pods satisfied the condition. (The original
			// returned the stale `err` variable here, which is provably nil
			// at this point; return nil explicitly for clarity.)
			return nil
		}
		Logf("%d pods are not %s: %v", len(conditionNotMatch), desc, conditionNotMatch)
	}
	return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout)
}
// WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned
// the default service account is what is associated with pods when they do not specify a service account
// as a result, pods are not able to be provisioned in a namespace until the service account is provisioned
//
// Uses the framework-wide ServiceAccountProvisionTimeout as the deadline.
func WaitForDefaultServiceAccountInNamespace(c clientset.Interface, namespace string) error {
return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout)
}
// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
	Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
		if err != nil {
			// Lookup failures are treated as transient and retried.
			Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
			continue
		}
		if pv.Status.Phase == phase {
			Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
			return nil
		}
		Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
	}
	return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
}
// WaitForStatefulSetReplicasReady waits for all replicas of a StatefulSet to become ready or until timeout occurs, whichever comes first.
func WaitForStatefulSetReplicasReady(statefulSetName, ns string, c clientset.Interface, Poll, timeout time.Duration) error {
	Logf("Waiting up to %v for StatefulSet %s to have all replicas ready", timeout, statefulSetName)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		sts, err := c.AppsV1().StatefulSets(ns).Get(statefulSetName, metav1.GetOptions{})
		if err != nil {
			// Lookup failures are treated as transient and retried.
			Logf("Get StatefulSet %s failed, ignoring for %v: %v", statefulSetName, Poll, err)
			continue
		}
		if sts.Status.ReadyReplicas == *sts.Spec.Replicas {
			Logf("All %d replicas of StatefulSet %s are ready. (%v)", sts.Status.ReadyReplicas, statefulSetName, time.Since(start))
			return nil
		}
		Logf("StatefulSet %s found but there are %d ready replicas and %d total replicas.", statefulSetName, sts.Status.ReadyReplicas, *sts.Spec.Replicas)
	}
	return fmt.Errorf("StatefulSet %s still has unready pods within %v", statefulSetName, timeout)
}
// WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
	Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
		if err == nil {
			// Volume still present; keep waiting.
			Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start))
			continue
		}
		if apierrs.IsNotFound(err) {
			Logf("PersistentVolume %s was removed", pvName)
			return nil
		}
		// Any other Get error is treated as transient and retried.
		Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
	}
	return fmt.Errorf("PersistentVolume %s still exists within %v", pvName, timeout)
}
// WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
	Logf("Waiting up to %v for PersistentVolumeClaim %s to have phase %s", timeout, pvcName, phase)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
		if err != nil {
			// Lookup failures are treated as transient and retried.
			Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, Poll, err)
			continue
		}
		if pvc.Status.Phase == phase {
			Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
			return nil
		}
		Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
	}
	return fmt.Errorf("PersistentVolumeClaim %s not in phase %s within %v", pvcName, phase, timeout)
}
// CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name.
// Please see NewFramework instead of using this directly.
//
// The namespace name is generated by the server from the "e2e-tests-<base>-"
// prefix; every namespace is labeled with the current RunId so that leaked
// namespaces can be attributed to a run. May return a non-nil namespace
// together with a non-nil error (see below).
func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) {
if labels == nil {
labels = map[string]string{}
}
labels["e2e-run"] = string(RunId)
namespaceObj := &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
GenerateName: fmt.Sprintf("e2e-tests-%v-", baseName),
Namespace: "",
Labels: labels,
},
Status: v1.NamespaceStatus{},
}
// Be robust about making the namespace creation call.
// Retries for up to 30s; Create errors are logged and retried rather than
// failing the test immediately.
var got *v1.Namespace
if err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) {
var err error
got, err = c.CoreV1().Namespaces().Create(namespaceObj)
if err != nil {
Logf("Unexpected error while creating namespace: %v", err)
return false, nil
}
return true, nil
}); err != nil {
return nil, err
}
if TestContext.VerifyServiceAccount {
if err := WaitForDefaultServiceAccountInNamespace(c, got.Name); err != nil {
// Even if we fail to create serviceAccount in the namespace,
// we have successfully create a namespace.
// So, return the created namespace.
return got, err
}
}
return got, nil
}
// CheckTestingNSDeletedExcept checks whether all e2e based existing namespaces are in the Terminating state
// and waits until they are finally deleted. It ignores namespace skip.
//
// Returns an error immediately if any "e2e-tests-" namespace (other than
// skip) is still Active, and nil once none remain Terminating.
func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error {
// TODO: Since we don't have support for bulk resource deletion in the API,
// while deleting a namespace we are deleting all objects from that namespace
// one by one (one deletion == one API call). This basically exposes us to
// throttling - currently controller-manager has a limit of max 20 QPS.
// Once #10217 is implemented and used in namespace-controller, deleting all
// object from a given namespace should be much faster and we will be able
// to lower this timeout.
// However, now Density test is producing ~26000 events and Load capacity test
// is producing ~35000 events, thus assuming there are no other requests it will
// take ~30 minutes to fully delete the namespace. Thus I'm setting it to 60
// minutes to avoid any timeouts here.
timeout := 60 * time.Minute
Logf("Waiting for terminating namespaces to be deleted...")
for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) {
namespaces, err := c.CoreV1().Namespaces().List(metav1.ListOptions{})
if err != nil {
// List failures are transient; retry on the next tick.
Logf("Listing namespaces failed: %v", err)
continue
}
terminating := 0
for _, ns := range namespaces.Items {
if strings.HasPrefix(ns.ObjectMeta.Name, "e2e-tests-") && ns.ObjectMeta.Name != skip {
if ns.Status.Phase == v1.NamespaceActive {
// An Active e2e namespace means deletion was never requested.
return fmt.Errorf("Namespace %s is active", ns.ObjectMeta.Name)
}
terminating++
}
}
if terminating == 0 {
return nil
}
}
return fmt.Errorf("Waiting for terminating namespaces to be deleted timed out")
}
// deleteNS deletes the provided namespace, waits for it to be completely deleted, and then checks
// whether there are any pods remaining in a non-terminating state.
//
// On a deletion timeout, the error message distinguishes four cases to aid
// flake triage: pods remaining without deletion timestamps (namespace
// controller suspect), pods remaining that are all being deleted (kubelet
// suspect), non-pod content remaining, and an empty-but-present namespace.
func deleteNS(c clientset.Interface, dynamicClient dynamic.Interface, namespace string, timeout time.Duration) error {
startTime := time.Now()
if err := c.CoreV1().Namespaces().Delete(namespace, nil); err != nil {
return err
}
// wait for namespace to delete or timeout.
err := wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
if _, err := c.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{}); err != nil {
if apierrs.IsNotFound(err) {
return true, nil
}
// Transient Get errors are logged and the poll continues.
Logf("Error while waiting for namespace to be terminated: %v", err)
return false, nil
}
return false, nil
})
// verify there is no more remaining content in the namespace
remainingContent, cerr := hasRemainingContent(c, dynamicClient, namespace)
if cerr != nil {
return cerr
}
// if content remains, let's dump information about the namespace, and system for flake debugging.
remainingPods := 0
missingTimestamp := 0
if remainingContent {
// log information about namespace, and set of namespaces in api server to help flake detection
logNamespace(c, namespace)
logNamespaces(c, namespace)
// if we can, check if there were pods remaining with no timestamp.
remainingPods, missingTimestamp, _ = countRemainingPods(c, namespace)
}
// a timeout waiting for namespace deletion happened!
if err != nil {
// some content remains in the namespace
if remainingContent {
// pods remain
if remainingPods > 0 {
if missingTimestamp != 0 {
// pods remained, but were not undergoing deletion (namespace controller is probably culprit)
return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v, pods missing deletion timestamp: %v", namespace, err, remainingPods, missingTimestamp)
}
// but they were all undergoing deletion (kubelet is probably culprit, check NodeLost)
return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v", namespace, err, remainingPods)
}
// other content remains (namespace controller is probably screwed up)
return fmt.Errorf("namespace %v was not deleted with limit: %v, namespaced content other than pods remain", namespace, err)
}
// no remaining content, but namespace was not deleted (namespace controller is probably wedged)
return fmt.Errorf("namespace %v was not deleted with limit: %v, namespace is empty but is not yet removed", namespace, err)
}
Logf("namespace %v deletion completed in %s", namespace, time.Since(startTime))
return nil
}
// logNamespaces logs the number of namespaces by phase
// namespace is the namespace the test was operating against that failed to delete so it can be grepped in logs
func logNamespaces(c clientset.Interface, namespace string) {
	namespaceList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{})
	if err != nil {
		Logf("namespace: %v, unable to list namespaces: %v", namespace, err)
		return
	}
	// Tally namespaces by phase; anything not Active counts as Terminating.
	// (Loop variable renamed so it does not shadow the namespace parameter.)
	numActive, numTerminating := 0, 0
	for _, item := range namespaceList.Items {
		if item.Status.Phase == v1.NamespaceActive {
			numActive++
		} else {
			numTerminating++
		}
	}
	Logf("namespace: %v, total namespaces: %v, active: %v, terminating: %v", namespace, len(namespaceList.Items), numActive, numTerminating)
}
// logNamespace logs detail about a namespace
func logNamespace(c clientset.Interface, namespace string) {
	ns, err := c.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})
	switch {
	case apierrs.IsNotFound(err):
		Logf("namespace: %v no longer exists", namespace)
	case err != nil:
		Logf("namespace: %v, unable to get namespace due to error: %v", namespace, err)
	default:
		Logf("namespace: %v, DeletionTimetamp: %v, Finalizers: %v, Phase: %v", ns.Name, ns.DeletionTimestamp, ns.Spec.Finalizers, ns.Status.Phase)
	}
}
// countRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp.
func countRemainingPods(c clientset.Interface, namespace string) (int, int, error) {
	pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{})
	if err != nil {
		return 0, 0, err
	}
	// Nothing remains: report zeros without logging.
	if len(pods.Items) == 0 {
		return 0, 0, nil
	}
	// Something remains; dump the pod states for flake debugging.
	logPodStates(pods.Items)
	// Count pods that are not undergoing deletion (no deletion timestamp).
	missingTimestamp := 0
	for i := range pods.Items {
		if pods.Items[i].DeletionTimestamp == nil {
			missingTimestamp++
		}
	}
	return len(pods.Items), missingTimestamp, nil
}
// isDynamicDiscoveryError returns true if the error is a group discovery error
// only for groups expected to be created/deleted dynamically during e2e tests
func isDynamicDiscoveryError(err error) bool {
	if !discovery.IsGroupDiscoveryFailedError(err) {
		return false
	}
	// Groups the e2e suite creates and deletes dynamically; discovery
	// failures for them are expected:
	//   mygroup.example.com - custom_resource_definition, garbage_collector
	//   wardle.k8s.io       - aggregator
	//   metrics.k8s.io      - aggregated metrics server add-on, no persisted resources
	discoveryErr := err.(*discovery.ErrGroupDiscoveryFailed)
	for gv := range discoveryErr.Groups {
		if gv.Group != "mygroup.example.com" && gv.Group != "wardle.k8s.io" && gv.Group != "metrics.k8s.io" {
			Logf("discovery error for unexpected group: %#v", gv)
			return false
		}
	}
	return true
}
// hasRemainingContent checks if there is remaining content in the namespace via API discovery
//
// Returns (true, nil) when any namespaced, listable resource still has items
// in the namespace. A nil dynamicClient disables the check entirely.
func hasRemainingContent(c clientset.Interface, dynamicClient dynamic.Interface, namespace string) (bool, error) {
	// some tests generate their own framework.Client rather than the default
	// TODO: ensure every test call has a configured dynamicClient
	if dynamicClient == nil {
		return false, nil
	}
	// find out what content is supported on the server
	// Since extension apiserver is not always available, e.g. metrics server sometimes goes down,
	// add retry here.
	resources, err := waitForServerPreferredNamespacedResources(c.Discovery(), 30*time.Second)
	if err != nil {
		return false, err
	}
	groupVersionResources, err := discovery.GroupVersionResources(resources)
	if err != nil {
		return false, err
	}
	// TODO: temporary hack for https://github.com/kubernetes/kubernetes/issues/31798
	ignoredResources := sets.NewString("bindings")
	contentRemaining := false
	// dump how many of resource type is on the server in a log.
	for gvr := range groupVersionResources {
		apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
		if ignoredResources.Has(gvr.Resource) {
			Logf("namespace: %s, resource: %s, ignored listing per whitelist", namespace, apiResource.Name)
			continue
		}
		// Resource().Namespace() cannot fail, so list directly. (The original
		// re-checked a stale, provably-nil `err` here — dead code — and
		// shadowed the dynamicClient parameter; both removed.)
		resourceClient := dynamicClient.Resource(gvr).Namespace(namespace)
		unstructuredList, err := resourceClient.List(metav1.ListOptions{})
		if err != nil {
			// not all resources support list, so we ignore those
			if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
				continue
			}
			// skip unavailable servers
			if apierrs.IsServiceUnavailable(err) {
				continue
			}
			return false, err
		}
		if len(unstructuredList.Items) > 0 {
			Logf("namespace: %s, resource: %s, items remaining: %v", namespace, apiResource.Name, len(unstructuredList.Items))
			contentRemaining = true
		}
	}
	return contentRemaining, nil
}
// ContainerInitInvariant verifies that between two observations of the same
// pod the init-container state only moves forward: the init container list is
// unchanged, both snapshots are internally consistent, and the pod never goes
// from initialized back to uninitialized.
//
// NOTE(review): older and newer are assumed to be *v1.Pod; the type
// assertions below panic otherwise — confirm callers only pass pod events.
func ContainerInitInvariant(older, newer runtime.Object) error {
oldPod := older.(*v1.Pod)
newPod := newer.(*v1.Pod)
// Pods without init containers trivially satisfy the invariant.
if len(oldPod.Spec.InitContainers) == 0 {
return nil
}
if len(oldPod.Spec.InitContainers) != len(newPod.Spec.InitContainers) {
return fmt.Errorf("init container list changed")
}
if oldPod.UID != newPod.UID {
return fmt.Errorf("two different pods exist in the condition: %s vs %s", oldPod.UID, newPod.UID)
}
if err := initContainersInvariants(oldPod); err != nil {
return err
}
if err := initContainersInvariants(newPod); err != nil {
return err
}
// Errors from podInitialized are deliberately ignored here; consistency was
// already checked by initContainersInvariants above.
oldInit, _, _ := podInitialized(oldPod)
newInit, _, _ := podInitialized(newPod)
if oldInit && !newInit {
// TODO: we may in the future enable resetting PodInitialized = false if the kubelet needs to restart it
// from scratch
return fmt.Errorf("pod cannot be initialized and then regress to not being initialized")
}
return nil
}
// podInitialized reports whether all init containers of pod have finished
// (ok), whether any of them failed (failed), and an error when the statuses
// are internally inconsistent (e.g. a container positioned after an
// unfinished one that is not in the Waiting state).
func podInitialized(pod *v1.Pod) (ok bool, failed bool, err error) {
	allInit := true
	initFailed := false
	for _, s := range pod.Status.InitContainerStatuses {
		switch {
		case initFailed && s.State.Waiting == nil:
			return allInit, initFailed, fmt.Errorf("container %s is after a failed container but isn't waiting", s.Name)
		case !allInit && s.State.Waiting == nil:
			// BUGFIX: was `allInit &&`, which fired for the very first
			// successfully terminated init container (allInit still true,
			// Waiting nil) and flagged every initialized pod as
			// inconsistent. Per the error message, the check is for a
			// container that comes after one still initializing.
			return allInit, initFailed, fmt.Errorf("container %s is after an initializing container but isn't waiting", s.Name)
		case s.State.Terminated == nil:
			// Still waiting or running: not all init containers are done.
			allInit = false
		case s.State.Terminated.ExitCode != 0:
			allInit = false
			initFailed = true
		case !s.Ready:
			return allInit, initFailed, fmt.Errorf("container %s initialized but isn't marked as ready", s.Name)
		}
	}
	return allInit, initFailed, nil
}
// initContainersInvariants checks a single pod snapshot for internal
// consistency between its init-container statuses, its app-container
// statuses, and the PodInitialized condition.
func initContainersInvariants(pod *v1.Pod) error {
allInit, initFailed, err := podInitialized(pod)
if err != nil {
return err
}
// While initialization is incomplete or failed, every app container must
// still be waiting with reason PodInitializing and have never restarted.
if !allInit || initFailed {
for _, s := range pod.Status.ContainerStatuses {
// NOTE(review): this also fires when RestartCount != 0 even if the
// container IS waiting, which the message does not mention — confirm
// intent before rewording.
if s.State.Waiting == nil || s.RestartCount != 0 {
return fmt.Errorf("container %s is not waiting but initialization not complete", s.Name)
}
if s.State.Waiting.Reason != "PodInitializing" {
return fmt.Errorf("container %s should have reason PodInitializing: %s", s.Name, s.State.Waiting.Reason)
}
}
}
_, c := podutil.GetPodCondition(&pod.Status, v1.PodInitialized)
if c == nil {
return fmt.Errorf("pod does not have initialized condition")
}
if c.LastTransitionTime.IsZero() {
return fmt.Errorf("PodInitialized condition should always have a transition time")
}
// The condition's status must agree with the computed init-container state.
switch {
case c.Status == v1.ConditionUnknown:
return fmt.Errorf("PodInitialized condition should never be Unknown")
case c.Status == v1.ConditionTrue && (initFailed || !allInit):
return fmt.Errorf("PodInitialized condition was True but all not all containers initialized")
case c.Status == v1.ConditionFalse && (!initFailed && allInit):
return fmt.Errorf("PodInitialized condition was False but all containers initialized")
}
return nil
}
// InvariantFunc compares two consecutive observations of the same object and
// returns an error when an invariant between them is violated.
type InvariantFunc func(older, newer runtime.Object) error

// CheckInvariants applies every invariant to each consecutive pair of events
// and aggregates all distinct violations into a single error (nil when none).
func CheckInvariants(events []watch.Event, fns ...InvariantFunc) error {
	errs := sets.NewString()
	for i := 1; i < len(events); i++ {
		for _, fn := range fns {
			if err := fn(events[i-1].Object, events[i].Object); err != nil {
				errs.Insert(err.Error())
			}
		}
	}
	if errs.Len() > 0 {
		return fmt.Errorf("invariants violated:\n* %s", strings.Join(errs.List(), "\n* "))
	}
	return nil
}
// Waits default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or pod goes in to failed state.
func WaitForPodRunningInNamespace(c clientset.Interface, pod *v1.Pod) error {
// Fast path: the caller's snapshot already shows the pod running.
if pod.Status.Phase == v1.PodRunning {
return nil
}
return WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, PodStartTimeout)
}
// Waits default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or pod goes in to failed state.
// Like WaitForPodRunningInNamespace, but takes the pod by name instead of object.
func WaitForPodNameRunningInNamespace(c clientset.Interface, podName, namespace string) error {
return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, PodStartTimeout)
}
// Waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running.
// The resourceVersion is used when Watching object changes, it tells since when we care
// about changes to the pod. Returns an error if timeout occurs first, or pod goes in to failed state.
func waitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace string) error {
// Same as WaitForPodNameRunningInNamespace but with the slow-start deadline.
return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, slowPodStartTimeout)
}
// WaitTimeoutForPodRunningInNamespace polls until the named pod is Running,
// the pod completes (which aborts with conditions.ErrPodCompleted), or the
// given timeout elapses.
func WaitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return wait.PollImmediate(Poll, timeout, podRunning(c, podName, namespace))
}
// podRunning returns a ConditionFunc that is done once the named pod reaches
// the Running phase; a Failed or Succeeded pod aborts the wait with
// conditions.ErrPodCompleted.
func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if pod.Status.Phase == v1.PodRunning {
			return true, nil
		}
		if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded {
			// A completed pod can never become Running again.
			return false, conditions.ErrPodCompleted
		}
		return false, nil
	}
}
// Waits default amount of time (DefaultPodDeletionTimeout) for the specified pod to stop running.
// Returns an error if timeout occurs first.
func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string) error {
return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, DefaultPodDeletionTimeout)
}
// WaitTimeoutForPodNoLongerRunningInNamespace polls until the named pod has
// completed (Failed or Succeeded) or the timeout elapses.
func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return wait.PollImmediate(Poll, timeout, podCompleted(c, podName, namespace))
}
// podCompleted returns a ConditionFunc that is done once the named pod has
// reached a terminal phase (Failed or Succeeded).
func podCompleted(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		done := pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded
		return done, nil
	}
}
// waitTimeoutForPodReadyInNamespace polls until the named pod is Running and
// passes the readiness check, or the timeout elapses. A completed pod aborts
// the wait with conditions.ErrPodCompleted.
func waitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return wait.PollImmediate(Poll, timeout, podRunningAndReady(c, podName, namespace))
}
// podRunningAndReady returns a ConditionFunc that is done once the named pod
// is Running and reports Ready; a completed pod aborts the wait with
// conditions.ErrPodCompleted.
func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded {
			// A completed pod can never become ready again.
			return false, conditions.ErrPodCompleted
		}
		if pod.Status.Phase == v1.PodRunning {
			return podutil.IsPodReady(pod), nil
		}
		return false, nil
	}
}
// WaitForPodNotPending returns an error if it took too long for the pod to go out of pending state.
// The resourceVersion is used when Watching object changes, it tells since when we care
// about changes to the pod.
func WaitForPodNotPending(c clientset.Interface, ns, podName string) error {
// Uses the framework-wide PodStartTimeout as the deadline.
return wait.PollImmediate(Poll, PodStartTimeout, podNotPending(c, podName, ns))
}
// podNotPending returns a ConditionFunc that is done as soon as the named pod
// has left (or never entered) the Pending phase.
func podNotPending(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return pod.Status.Phase != v1.PodPending, nil
	}
}
// waitForPodTerminatedInNamespace returns an error if it takes too long for the pod to terminate,
// if the pod Get api returns an error (IsNotFound or other), or if the pod failed (and thus did not
// terminate) with an unexpected reason. Typically called to test that the passed-in pod is fully
// terminated (reason==""), but may be called to detect if a pod did *not* terminate according to
// the supplied reason.
func waitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, namespace string) error {
	return WaitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", PodStartTimeout, func(pod *v1.Pod) (bool, error) {
		// Only consider Failed pods. Successful pods will be deleted and detected in
		// waitForPodCondition's Get call returning `IsNotFound`
		if pod.Status.Phase != v1.PodFailed {
			return false, nil
		}
		if pod.Status.Reason == reason { // short-circuit waitForPodCondition's loop
			return true, nil
		}
		return true, fmt.Errorf("Expected pod %q in namespace %q to be terminated with reason %q, got reason: %q", podName, namespace, reason, pod.Status.Reason)
	})
}
// waitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate.
// Unlike `waitForPodTerminatedInNamespace`, the pod's Phase and Reason are ignored. If the pod Get
// api returns IsNotFound then the wait stops and nil is returned. If the Get api returns an error other
// than "not found" then that error is returned and the wait stops.
func waitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error {
	return wait.PollImmediate(Poll, timeout, func() (bool, error) {
		_, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
		switch {
		case err == nil:
			// Pod still present; keep polling.
			return false, nil
		case apierrs.IsNotFound(err):
			return true, nil // done
		default:
			return true, err // stop wait with error
		}
	})
}
// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName string, namespace string, timeout time.Duration) error {
	return WaitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *v1.Pod) (bool, error) {
		// A pod with RestartPolicy Always can never settle into Succeeded.
		if pod.Spec.RestartPolicy == v1.RestartPolicyAlways {
			return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName)
		}
		if pod.Status.Phase == v1.PodSucceeded {
			By("Saw pod success")
			return true, nil
		}
		if pod.Status.Phase == v1.PodFailed {
			return true, fmt.Errorf("pod %q failed with status: %+v", podName, pod.Status)
		}
		return false, nil
	})
}
// WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or until podStartupTimeout.
func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, PodStartTimeout)
}
// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or until slowPodStartupTimeout.
func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout)
}
// WaitForRCToStabilize waits till the RC has a matching generation/replica count between spec and status.
//
// Watches the single RC (selected by metadata.name + metadata.namespace field
// selector) and succeeds once status has caught up with spec; deletion of the
// RC aborts the wait with a NotFound error.
func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.Duration) error {
options := metav1.ListOptions{FieldSelector: fields.Set{
"metadata.name": name,
"metadata.namespace": ns,
}.AsSelector().String()}
w, err := c.CoreV1().ReplicationControllers(ns).Watch(options)
if err != nil {
return err
}
_, err = watch.Until(timeout, w, func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
// The RC vanished while we were waiting; surface it as NotFound.
return false, apierrs.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "")
}
switch rc := event.Object.(type) {
case *v1.ReplicationController:
// Stable when the controller has observed the latest generation and
// the actual replica count matches the desired count.
if rc.Name == name && rc.Namespace == ns &&
rc.Generation <= rc.Status.ObservedGeneration &&
*(rc.Spec.Replicas) == rc.Status.Replicas {
return true, nil
}
Logf("Waiting for rc %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d",
name, rc.Generation, rc.Status.ObservedGeneration, *(rc.Spec.Replicas), rc.Status.Replicas)
}
return false, nil
})
return err
}
// WaitForPodToDisappear polls the pods matching label in ns until none of
// them is named podName, or the timeout elapses. Retryable API errors are
// tolerated and retried.
func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		Logf("Waiting for pod %s to disappear", podName)
		options := metav1.ListOptions{LabelSelector: label.String()}
		pods, err := c.CoreV1().Pods(ns).List(options)
		if err != nil {
			// Transient API errors are retried; anything else aborts the wait.
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		for i := range pods.Items {
			if pods.Items[i].Name == podName {
				Logf("Pod %s still exists", podName)
				return false, nil
			}
		}
		Logf("Pod %s no longer exists", podName)
		return true, nil
	})
}
// WaitForPodNameUnschedulableInNamespace returns an error if it takes too long for the pod to become Pending
// and have condition Status equal to Unschedulable,
// if the pod Get api returns an error (IsNotFound or other), or if the pod failed with an unexpected reason.
// Typically called to test that the passed-in pod is Pending and Unschedulable.
func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, namespace string) error {
return WaitForPodCondition(c, namespace, podName, "Unschedulable", PodStartTimeout, func(pod *v1.Pod) (bool, error) {
// Only consider Failed pods. Successful pods will be deleted and detected in
// waitForPodCondition's Get call returning `IsNotFound`
// Success: Pending pod with PodScheduled=False/Unschedulable condition.
if pod.Status.Phase == v1.PodPending {
for _, cond := range pod.Status.Conditions {
if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" {
return true, nil
}
}
}
// Any phase past Pending means the pod was scheduled after all: error out.
if pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
return true, fmt.Errorf("Expected pod %q in namespace %q to be in phase Pending, but got phase: %v", podName, namespace, pod.Status.Phase)
}
return false, nil
})
}
// WaitForService waits until the service appears (exist == true), or disappears (exist == false)
func WaitForService(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		_, err := c.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
		if err == nil {
			Logf("Service %s in namespace %s found.", name, namespace)
			return exist, nil
		}
		if apierrs.IsNotFound(err) {
			Logf("Service %s in namespace %s disappeared.", name, namespace)
			return !exist, nil
		}
		if !testutils.IsRetryableAPIError(err) {
			Logf("Non-retryable failure while getting service.")
			return false, err
		}
		// Retryable failure: log and poll again.
		Logf("Get service %s in namespace %s failed: %v", name, namespace, err)
		return false, nil
	})
	if err != nil {
		stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
		return fmt.Errorf("error waiting for service %s/%s %s: %v", namespace, name, stateMsg[exist], err)
	}
	return nil
}
// WaitForServiceWithSelector waits until any service with given selector appears (exist == true), or disappears (exist == false)
func WaitForServiceWithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval,
	timeout time.Duration) error {
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		services, err := c.CoreV1().Services(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
		// BUGFIX: the error must be checked before touching services.Items —
		// the original switch evaluated len(services.Items) first, which
		// dereferences a nil list pointer on a List error (panic) or
		// misreports an errored, empty response as "disappeared".
		if err != nil {
			if !testutils.IsRetryableAPIError(err) {
				Logf("Non-retryable failure while listing service.")
				return false, err
			}
			Logf("List service with %s in namespace %s failed: %v", selector.String(), namespace, err)
			return false, nil
		}
		if len(services.Items) != 0 {
			Logf("Service with %s in namespace %s found.", selector.String(), namespace)
			return exist, nil
		}
		Logf("Service with %s in namespace %s disappeared.", selector.String(), namespace)
		return !exist, nil
	})
	if err != nil {
		stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
		return fmt.Errorf("error waiting for service with %s in namespace %s %s: %v", selector.String(), namespace, stateMsg[exist], err)
	}
	return nil
}
// WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum.
func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
	return wait.Poll(interval, timeout, func() (bool, error) {
		Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
		endpointsList, err := c.CoreV1().Endpoints(namespace).List(metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		// Scan for the matching Endpoints object and compare its address count.
		for i := range endpointsList.Items {
			ep := &endpointsList.Items[i]
			if ep.Name == serviceName && countEndpointsNum(ep) == expectNum {
				return true, nil
			}
		}
		return false, nil
	})
}
// countEndpointsNum returns the total number of addresses across all subsets
// of the given Endpoints object.
func countEndpointsNum(e *v1.Endpoints) int {
	total := 0
	for i := range e.Subsets {
		total += len(e.Subsets[i].Addresses)
	}
	return total
}
// WaitForEndpoint polls until the named Endpoints object exists and has at
// least one address in its first subset, or EndpointRegisterTimeout elapses.
func WaitForEndpoint(c clientset.Interface, ns, name string) error {
	for begin := time.Now(); time.Since(begin) < EndpointRegisterTimeout; time.Sleep(Poll) {
		endpoint, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{})
		if apierrs.IsNotFound(err) {
			Logf("Endpoint %s/%s is not ready yet", ns, name)
			continue
		}
		Expect(err).NotTo(HaveOccurred())
		if len(endpoint.Subsets) > 0 && len(endpoint.Subsets[0].Addresses) > 0 {
			return nil
		}
		Logf("Endpoint %s/%s is not ready yet", ns, name)
	}
	return fmt.Errorf("Failed to get endpoints for %s/%s", ns, name)
}
// Context for checking pods responses by issuing GETs to them (via the API
// proxy) and verifying that they answer with their own pod name.
type podProxyResponseChecker struct {
	c clientset.Interface // client used for the proxy GETs and re-listing
	ns string // namespace the pods live in
	label labels.Selector // selector used to re-list the current pods
	controllerName string // controller name; used only in log messages
	respondName bool // Whether the pod should respond with its own name.
	pods *v1.PodList // expected pod set; responses are checked against it
}
// PodProxyResponseChecker builds a checker context for verifying pod proxy
// responses; see podProxyResponseChecker.CheckAllResponses.
func PodProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) podProxyResponseChecker {
	return podProxyResponseChecker{
		c:              c,
		ns:             ns,
		label:          label,
		controllerName: controllerName,
		respondName:    respondName,
		pods:           pods,
	}
}
// CheckAllResponses issues GETs to all pods in the context and verify they
// reply with their own pod name.
// Shaped as a wait.ConditionFunc: returns (true, nil) once every pod answered
// as expected, (false, nil) to keep polling, and (false, err) to abort.
func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) {
	successes := 0
	options := metav1.ListOptions{LabelSelector: r.label.String()}
	currentPods, err := r.c.CoreV1().Pods(r.ns).List(options)
	Expect(err).NotTo(HaveOccurred())
	for i, pod := range r.pods.Items {
		// Check that the replica list remains unchanged, otherwise we have problems.
		if !isElementOf(pod.UID, currentPods) {
			return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
		}
		ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
		// NOTE(review): defer inside the loop means one live context per pod
		// until this function returns; acceptable for small replica counts.
		defer cancel()
		body, err := r.c.CoreV1().RESTClient().Get().
			Context(ctx).
			Namespace(r.ns).
			Resource("pods").
			SubResource("proxy").
			Name(string(pod.Name)).
			Do().
			Raw()
		if err != nil {
			if ctx.Err() != nil {
				// We may encounter errors here because of a race between the pod readiness and apiserver
				// proxy. So, we log the error and retry if this occurs.
				Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
				return false, nil
			}
			Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
			continue
		}
		// The response checker expects the pod's name unless !respondName, in
		// which case it just checks for a non-empty response.
		got := string(body)
		what := ""
		if r.respondName {
			what = "expected"
			want := pod.Name
			if got != want {
				Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
					r.controllerName, i+1, pod.Name, want, got)
				continue
			}
		} else {
			what = "non-empty"
			if len(got) == 0 {
				Logf("Controller %s: Replica %d [%s] expected non-empty response",
					r.controllerName, i+1, pod.Name)
				continue
			}
		}
		successes++
		Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
			r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
	}
	if successes < len(r.pods.Items) {
		return false, nil
	}
	return true, nil
}
// ServerVersionGTE returns true if v is greater than or equal to the server
// version.
//
// TODO(18726): This should be incorporated into client.VersionInterface.
func ServerVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) (bool, error) {
	info, err := c.ServerVersion()
	if err != nil {
		return false, fmt.Errorf("Unable to get server version: %v", err)
	}
	parsed, err := utilversion.ParseSemantic(info.GitVersion)
	if err != nil {
		return false, fmt.Errorf("Unable to parse server version %q: %v", info.GitVersion, err)
	}
	return parsed.AtLeast(v), nil
}
// SkipUnlessKubectlVersionGTE skips the current test unless the local kubectl
// binary is at least version v; it fails the test if the version can't be read.
func SkipUnlessKubectlVersionGTE(v *utilversion.Version) {
	ok, err := KubectlVersionGTE(v)
	if err != nil {
		Failf("Failed to get kubectl version: %v", err)
	}
	if !ok {
		Skipf("Not supported for kubectl versions before %q", v)
	}
}
// KubectlVersionGTE returns true if the kubectl version is greater than or
// equal to v.
func KubectlVersionGTE(v *utilversion.Version) (bool, error) {
	current, err := KubectlVersion()
	if err != nil {
		return false, err
	}
	return current.AtLeast(v), nil
}
// KubectlVersion gets the version of kubectl that's currently being used (see
// --kubectl-path in e2e.go to use an alternate kubectl).
func KubectlVersion() (*utilversion.Version, error) {
	out := RunKubectlOrDie("version", "--client")
	m := gitVersionRegexp.FindStringSubmatch(out)
	if len(m) != 2 {
		return nil, fmt.Errorf("Could not find kubectl version in output %v", out)
	}
	// Don't use the full match, as it contains "GitVersion:\"" and a
	// trailing "\"". Just use the submatch.
	return utilversion.ParseSemantic(m[1])
}
// PodsResponding polls until every pod in pods answers proxy GETs, optionally
// requiring each response to be the pod's own name (wantName).
func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error {
	By("trying to dial each unique pod")
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	checker := PodProxyResponseChecker(c, ns, selector, name, wantName, pods)
	return wait.PollImmediate(Poll, podRespondingTimeout, checker.CheckAllResponses)
}
// PodsCreated waits for `replicas` pods carrying the label name=<name> to be
// created in ns; see PodsCreatedByLabel.
func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	return PodsCreatedByLabel(c, ns, name, replicas, selector)
}
// PodsCreatedByLabel waits up to two minutes for exactly `replicas`
// non-terminating pods matching `label` to exist in `ns` and returns them.
func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) {
	timeout := 2 * time.Minute
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
		listOpts := metav1.ListOptions{LabelSelector: label.String()}
		// List the pods, making sure we observe all the replicas.
		pods, err := c.CoreV1().Pods(ns).List(listOpts)
		if err != nil {
			return nil, err
		}
		// Drop pods that are already being deleted.
		alive := []v1.Pod{}
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp == nil {
				alive = append(alive, pod)
			}
		}
		Logf("Pod name %s: Found %d pods out of %d", name, len(alive), replicas)
		if int32(len(alive)) == replicas {
			pods.Items = alive
			return pods, nil
		}
	}
	return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
// podsRunning waits in parallel for every pod in pods to reach Running and
// returns the collected errors (empty slice when all succeeded).
func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	By("ensuring each pod is running")
	errs := []error{}
	resultCh := make(chan error)
	for _, pod := range pods.Items {
		go func(p v1.Pod) {
			resultCh <- WaitForPodRunningInNamespace(c, &p)
		}(pod)
	}
	for range pods.Items {
		if err := <-resultCh; err != nil {
			errs = append(errs, err)
		}
	}
	return errs
}
// VerifyPods waits for `replicas` pods named `name` in `ns` to be created,
// running, and responding to proxy GETs.
func VerifyPods(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
	return podRunningMaybeResponding(c, ns, name, wantName, replicas, true)
}
// VerifyPodsRunning waits for `replicas` pods named `name` in `ns` to be
// created and running, without checking that they respond to requests.
func VerifyPodsRunning(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
	return podRunningMaybeResponding(c, ns, name, wantName, replicas, false)
}
// podRunningMaybeResponding waits for the expected pods to be created and
// running, and optionally (checkResponding) for them to answer proxy GETs.
func podRunningMaybeResponding(c clientset.Interface, ns, name string, wantName bool, replicas int32, checkResponding bool) error {
	pods, err := PodsCreated(c, ns, name, replicas)
	if err != nil {
		return err
	}
	if errs := podsRunning(c, pods); len(errs) > 0 {
		return fmt.Errorf("failed to wait for pods running: %v", errs)
	}
	if !checkResponding {
		return nil
	}
	if err := PodsResponding(c, ns, name, wantName, pods); err != nil {
		return fmt.Errorf("failed to wait for pods responding: %v", err)
	}
	return nil
}
// ServiceResponding polls the service's proxy endpoint until it returns a
// non-empty body. A proxy-timeout (context deadline) fails the test outright.
func ServiceResponding(c clientset.Interface, ns, name string) error {
	By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name))
	return wait.PollImmediate(Poll, ServiceRespondingTimeout, func() (done bool, err error) {
		proxyRequest, errProxy := GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
		if errProxy != nil {
			Logf("Failed to get services proxy request: %v:", errProxy)
			return false, nil
		}
		ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
		defer cancel()
		body, err := proxyRequest.Namespace(ns).
			Context(ctx).
			Name(name).
			Do().
			Raw()
		if err != nil {
			if ctx.Err() != nil {
				// Context deadline exceeded: treat as fatal rather than retrying.
				Failf("Failed to GET from service %s: %v", name, err)
				return true, err
			}
			Logf("Failed to GET from service %s: %v:", name, err)
			return false, nil
		}
		got := string(body)
		if len(got) == 0 {
			Logf("Service %s: expected non-empty response", name)
			// NOTE(review): err is nil here, so this actually continues polling
			// (the old "// stop polling" comment was wrong).
			return false, err
		}
		Logf("Service %s: found nonempty answer: %s", name, got)
		return true, nil
	})
}
// RestclientConfig loads the kubeconfig file named by TestContext.KubeConfig,
// overriding the current context with kubeContext when non-empty.
func RestclientConfig(kubeContext string) (*clientcmdapi.Config, error) {
	Logf(">>> kubeConfig: %s", TestContext.KubeConfig)
	if TestContext.KubeConfig == "" {
		return nil, fmt.Errorf("KubeConfig must be specified to load client config")
	}
	cfg, err := clientcmd.LoadFromFile(TestContext.KubeConfig)
	if err != nil {
		return nil, fmt.Errorf("error loading KubeConfig: %v", err.Error())
	}
	if kubeContext != "" {
		Logf(">>> kubeContext: %s", kubeContext)
		cfg.CurrentContext = kubeContext
	}
	return cfg, nil
}
// ClientConfigGetter is a func that obtains a complete client config.
type ClientConfigGetter func() (*restclient.Config, error)
// LoadConfig builds the rest client config for the test run: node-e2e host
// config, a kubeconfig-derived config, or the in-cluster config as fallback.
func LoadConfig() (*restclient.Config, error) {
	if TestContext.NodeE2E {
		// This is a node e2e test, apply the node e2e configuration
		return &restclient.Config{Host: TestContext.Host}, nil
	}
	c, err := RestclientConfig(TestContext.KubeContext)
	if err != nil {
		// No kubeconfig at all: assume we're running inside a cluster.
		if TestContext.KubeConfig == "" {
			return restclient.InClusterConfig()
		}
		return nil, err
	}
	overrides := &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}
	return clientcmd.NewDefaultClientConfig(*c, overrides).ClientConfig()
}
// LoadInternalClientset builds an internal clientset from the test config.
func LoadInternalClientset() (*internalclientset.Clientset, error) {
	cfg, err := LoadConfig()
	if err != nil {
		return nil, fmt.Errorf("error creating client: %v", err.Error())
	}
	return internalclientset.NewForConfig(cfg)
}
// LoadClientset builds a versioned clientset from the test config.
func LoadClientset() (*clientset.Clientset, error) {
	cfg, err := LoadConfig()
	if err != nil {
		return nil, fmt.Errorf("error creating client: %v", err.Error())
	}
	return clientset.NewForConfig(cfg)
}
// randomSuffix provides a random string to append to pods,services,rcs.
// TODO: Allow service names to have the same form as names
// for pods and replication controllers so we don't
// need to use such a function and can instead
// use the UUID utility function.
func randomSuffix() string {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
return strconv.Itoa(r.Int() % 10000)
}
// ExpectNoError checks if "err" is set, and if so, fails the assertion while
// logging the error; failure is attributed to the direct caller.
func ExpectNoError(err error, explain ...interface{}) {
	ExpectNoErrorWithOffset(1, err, explain...)
}
// ExpectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller
// (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f").
func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
	if err != nil {
		Logf("Unexpected error occurred: %v", err)
	}
	// +1 accounts for this function's own stack frame.
	ExpectWithOffset(1+offset, err).NotTo(HaveOccurred(), explain...)
}
// ExpectNoErrorWithRetries runs fn up to maxRetries times, returning as soon
// as it succeeds; if every attempt fails, the last error fails the assertion.
func ExpectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) {
	var lastErr error
	for attempt := 1; attempt <= maxRetries; attempt++ {
		lastErr = fn()
		if lastErr == nil {
			return
		}
		Logf("(Attempt %d of %d) Unexpected error occurred: %v", attempt, maxRetries, lastErr)
	}
	ExpectWithOffset(1, lastErr).NotTo(HaveOccurred(), explain...)
}
// Cleanup stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
func Cleanup(filePath, ns string, selectors ...string) {
	By("using delete to clean up resources")
	nsArg := ""
	if ns != "" {
		nsArg = fmt.Sprintf("--namespace=%s", ns)
	}
	RunKubectlOrDie("delete", "--grace-period=0", "-f", filePath, nsArg)
	AssertCleanup(ns, selectors...)
}
// AssertCleanup asserts that cleanup of a namespace wrt selectors occurred:
// polls until no rc/svc/pod matching any selector remains, failing on timeout.
func AssertCleanup(ns string, selectors ...string) {
	var nsArg string
	if ns != "" {
		nsArg = fmt.Sprintf("--namespace=%s", ns)
	}
	var lastErr error
	verify := func() (bool, error) {
		lastErr = nil
		for _, selector := range selectors {
			remaining := RunKubectlOrDie("get", "rc,svc", "-l", selector, "--no-headers", nsArg)
			if remaining != "" {
				lastErr = fmt.Errorf("Resources left running after stop:\n%s", remaining)
				return false, nil
			}
			remainingPods := RunKubectlOrDie("get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
			if remainingPods != "" {
				lastErr = fmt.Errorf("Pods left unterminated after stop:\n%s", remainingPods)
				return false, nil
			}
		}
		return true, nil
	}
	if err := wait.PollImmediate(500*time.Millisecond, 1*time.Minute, verify); err != nil {
		Failf(lastErr.Error())
	}
}
// KubectlCmd runs the kubectl executable through the wrapper script.
func KubectlCmd(args ...string) *exec.Cmd {
	defaultArgs := []string{}
	// Reference a --server option so tests can run anywhere.
	if TestContext.Host != "" {
		defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+TestContext.Host)
	}
	switch {
	case TestContext.KubeConfig != "":
		defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig)
		// Reference the KubeContext
		if TestContext.KubeContext != "" {
			defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+TestContext.KubeContext)
		}
	case TestContext.CertDir != "":
		// No kubeconfig: fall back to explicit certificate flags.
		defaultArgs = append(defaultArgs,
			fmt.Sprintf("--certificate-authority=%s", filepath.Join(TestContext.CertDir, "ca.crt")),
			fmt.Sprintf("--client-certificate=%s", filepath.Join(TestContext.CertDir, "kubecfg.crt")),
			fmt.Sprintf("--client-key=%s", filepath.Join(TestContext.CertDir, "kubecfg.key")))
	}
	kubectlArgs := append(defaultArgs, args...)
	// We allow users to specify path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh"
	// and so on.
	cmd := exec.Command(TestContext.KubectlPath, kubectlArgs...)
	// caller will invoke this and wait on it.
	return cmd
}
// kubectlBuilder is used to build, customize and execute a kubectl Command.
// Add more functions to customize the builder as needed.
type kubectlBuilder struct {
	cmd *exec.Cmd // the kubectl invocation to run
	timeout <-chan time.Time // optional deadline channel; nil means wait forever
}
// NewKubectlCommand creates a builder wrapping KubectlCmd(args...).
func NewKubectlCommand(args ...string) *kubectlBuilder {
	return &kubectlBuilder{cmd: KubectlCmd(args...)}
}
// WithEnv replaces the environment of the underlying command and returns the
// builder for chaining.
func (b *kubectlBuilder) WithEnv(env []string) *kubectlBuilder {
	b.cmd.Env = env
	return b
}
// WithTimeout sets the timeout channel consulted by Exec; when it fires the
// command is killed. Returns the builder for chaining.
func (b *kubectlBuilder) WithTimeout(t <-chan time.Time) *kubectlBuilder {
	b.timeout = t
	return b
}
// WithStdinData feeds the given string to the command's stdin.
// Note the value receiver: the builder is copied and a pointer to the copy is
// returned, leaving the original builder's stdin untouched.
func (b kubectlBuilder) WithStdinData(data string) *kubectlBuilder {
	b.cmd.Stdin = strings.NewReader(data)
	return &b
}
// WithStdinReader connects the given reader to the command's stdin.
// Like WithStdinData, the value receiver means a copy of the builder is
// modified and returned.
func (b kubectlBuilder) WithStdinReader(reader io.Reader) *kubectlBuilder {
	b.cmd.Stdin = reader
	return &b
}
// ExecOrDie runs the built kubectl command and fails the test on error,
// probing the apiserver once more after an i/o timeout for triage visibility.
func (b kubectlBuilder) ExecOrDie() string {
	out, err := b.Exec()
	// In case of i/o timeout error, try talking to the apiserver again after 2s before dying.
	// Note that we're still dying after retrying so that we can get visibility to triage it further.
	if isTimeout(err) {
		Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.")
		time.Sleep(2 * time.Second)
		retryOut, retryErr := RunKubectl("version")
		Logf("stdout: %q", retryOut)
		Logf("err: %v", retryErr)
	}
	Expect(err).NotTo(HaveOccurred())
	return out
}
func isTimeout(err error) bool {
switch err := err.(type) {
case net.Error:
if err.Timeout() {
return true
}
case *url.Error:
if err, ok := err.Err.(net.Error); ok && err.Timeout() {
return true
}
}
return false
}
// Exec runs the built kubectl command and returns its stdout.
// On non-zero exit it returns a uexec.CodeExitError carrying the exit code
// (127 when the code cannot be determined). If a timeout channel was set via
// WithTimeout, the process is killed when it fires.
func (b kubectlBuilder) Exec() (string, error) {
	var stdout, stderr bytes.Buffer
	cmd := b.cmd
	cmd.Stdout, cmd.Stderr = &stdout, &stderr
	Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately
	if err := cmd.Start(); err != nil {
		return "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err)
	}
	// Wait in a goroutine so completion can race the timeout channel below;
	// the buffer of 1 lets the goroutine exit even if we return on timeout.
	errCh := make(chan error, 1)
	go func() {
		errCh <- cmd.Wait()
	}()
	select {
	case err := <-errCh:
		if err != nil {
			var rc int = 127
			if ee, ok := err.(*exec.ExitError); ok {
				rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus())
				Logf("rc: %d", rc)
			}
			return "", uexec.CodeExitError{
				Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err),
				Code: rc,
			}
		}
	case <-b.timeout:
		// Timeout fired first: kill the process; the abandoned Wait goroutine
		// drains into the buffered channel once the process is reaped.
		b.cmd.Process.Kill()
		return "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v\n", cmd, cmd.Stdout, cmd.Stderr)
	}
	Logf("stderr: %q", stderr.String())
	Logf("stdout: %q", stdout.String())
	return stdout.String(), nil
}
// RunKubectlOrDie is a convenience wrapper over kubectlBuilder that fails the
// test if the command errors; returns stdout.
func RunKubectlOrDie(args ...string) string {
	return NewKubectlCommand(args...).ExecOrDie()
}
// RunKubectl is a convenience wrapper over kubectlBuilder; returns stdout and
// any execution error.
func RunKubectl(args ...string) (string, error) {
	return NewKubectlCommand(args...).Exec()
}
// RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin
// and fails the test if the command errors; returns stdout.
func RunKubectlOrDieInput(data string, args ...string) string {
	return NewKubectlCommand(args...).WithStdinData(data).ExecOrDie()
}
// RunKubemciWithKubeconfig is a convenience wrapper over RunKubemciCmd
func RunKubemciWithKubeconfig(args ...string) (string, error) {
	if TestContext.KubeConfig != "" {
		kubeconfigArg := "--" + clientcmd.RecommendedConfigPathFlag + "=" + TestContext.KubeConfig
		args = append(args, kubeconfigArg)
	}
	return RunKubemciCmd(args...)
}
// RunKubemciCmd is a convenience wrapper over kubectlBuilder to run kubemci.
// It assumes that kubemci exists in PATH.
func RunKubemciCmd(args ...string) (string, error) {
// kubemci is assumed to be in PATH.
kubemci := "kubemci"
b := new(kubectlBuilder)
args = append(args, "--gcp-project="+TestContext.CloudConfig.ProjectID)
b.cmd = exec.Command(kubemci, args...)
return b.Exec()
}
// StartCmdAndStreamOutput starts cmd asynchronously and returns pipes to its
// stdout and stderr; the caller is responsible for waiting on the command.
func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
	if stdout, err = cmd.StdoutPipe(); err != nil {
		return stdout, stderr, err
	}
	if stderr, err = cmd.StderrPipe(); err != nil {
		return stdout, stderr, err
	}
	Logf("Asynchronously running '%s %s'", cmd.Path, strings.Join(cmd.Args, " "))
	err = cmd.Start()
	return stdout, stderr, err
}
// TryKill is a rough equivalent of ctrl+c for cleaning up processes. Intended to be run in defer.
// A failed kill is only logged — the process may leak.
func TryKill(cmd *exec.Cmd) {
	if err := cmd.Process.Kill(); err != nil {
		Logf("ERROR failed to kill command %v! The process may leak", cmd)
	}
}
// testContainerOutputMatcher runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using the given matcher.
func (f *Framework) testContainerOutputMatcher(scenarioName string,
	pod *v1.Pod,
	containerIndex int,
	expectedOutput []string,
	matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
	By(fmt.Sprintf("Creating a pod to test %v", scenarioName))
	if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
		Failf("Invalid container index: %d", containerIndex)
	}
	container := pod.Spec.Containers[containerIndex]
	ExpectNoError(f.MatchContainerOutput(pod, container.Name, expectedOutput, matcher))
}
// MatchContainerOutput creates a pod and waits for all it's containers to exit with success.
// It then tests that the matcher with each expectedOutput matches the output of the specified container.
// The pod is deleted (synchronously) when this function returns.
func (f *Framework) MatchContainerOutput(
	pod *v1.Pod,
	containerName string,
	expectedOutput []string,
	matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
	ns := pod.ObjectMeta.Namespace
	if ns == "" {
		ns = f.Namespace.Name
	}
	podClient := f.PodClientNS(ns)
	createdPod := podClient.Create(pod)
	defer func() {
		By("delete the pod")
		podClient.DeleteSync(createdPod.Name, &metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
	}()
	// Wait for client pod to complete.
	podErr := WaitForPodSuccessInNamespace(f.ClientSet, createdPod.Name, ns)
	// Grab its logs. Get host first.
	podStatus, err := podClient.Get(createdPod.Name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to get pod status: %v", err)
	}
	if podErr != nil {
		// Pod failed. Dump all logs from all containers to see what's wrong
		for _, container := range podStatus.Spec.Containers {
			logs, err := GetPodLogs(f.ClientSet, ns, podStatus.Name, container.Name)
			if err != nil {
				Logf("Failed to get logs from node %q pod %q container %q: %v",
					podStatus.Spec.NodeName, podStatus.Name, container.Name, err)
				continue
			}
			Logf("Output of node %q pod %q container %q: %s", podStatus.Spec.NodeName, podStatus.Name, container.Name, logs)
		}
		return fmt.Errorf("expected pod %q success: %v", createdPod.Name, podErr)
	}
	// Bug fix: the old log line formatted a stale err value here, which is
	// provably nil at this point (checked right after Get above).
	Logf("Trying to get logs from node %s pod %s container %s",
		podStatus.Spec.NodeName, podStatus.Name, containerName)
	// Sometimes the actual containers take a second to get started, try to get logs for 60s
	logs, err := GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName)
	if err != nil {
		Logf("Failed to get logs from node %q pod %q container %q. %v",
			podStatus.Spec.NodeName, podStatus.Name, containerName, err)
		return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
	}
	// Verify every expected fragment against the captured container log.
	for _, expected := range expectedOutput {
		m := matcher(expected)
		matches, err := m.Match(logs)
		if err != nil {
			return fmt.Errorf("expected %q in container output: %v", expected, err)
		} else if !matches {
			return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
		}
	}
	return nil
}
// EventsLister abstracts listing events in a namespace so that event dumping
// can be driven by any client (see DumpEventsInNamespace).
type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error)
// DumpEventsInNamespace logs all events in the namespace, ordered by first
// timestamp, using the supplied lister.
func DumpEventsInNamespace(eventsLister EventsLister, namespace string) {
	By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
	events, err := eventsLister(metav1.ListOptions{}, namespace)
	Expect(err).NotTo(HaveOccurred())
	By(fmt.Sprintf("Found %d events.", len(events.Items)))
	// Sort events by their first timestamp
	sorted := events.Items
	if len(sorted) > 1 {
		sort.Sort(byFirstTimestamp(sorted))
	}
	for _, ev := range sorted {
		Logf("At %v - event for %v: %v %v: %v", ev.FirstTimestamp, ev.InvolvedObject.Name, ev.Source, ev.Reason, ev.Message)
	}
	// Note that we don't wait for any Cleanup to propagate, which means
	// that if you delete a bunch of pods right before ending your test,
	// you may or may not see the killing/deletion/Cleanup events.
}
// DumpAllNamespaceInfo logs the namespace's events and, for small clusters,
// all pod and node debug info.
func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
	DumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) {
		return c.CoreV1().Events(ns).List(opts)
	}, namespace)
	// If cluster is large, then the following logs are basically useless, because:
	// 1. it takes tens of minutes or hours to grab all of them
	// 2. there are so many of them that working with them are mostly impossible
	// So we dump them only if the cluster is relatively small.
	const maxNodesForDump = 20
	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		Logf("unable to fetch node list: %v", err)
		return
	}
	if len(nodes.Items) > maxNodesForDump {
		Logf("skipping dumping cluster info - cluster too large")
		return
	}
	dumpAllPodInfo(c)
	dumpAllNodeInfo(c)
}
// byFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker.
type byFirstTimestamp []v1.Event
func (o byFirstTimestamp) Len() int { return len(o) }
func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
// Less orders by FirstTimestamp; identical timestamps fall back to the
// involved object's name so the ordering is deterministic.
func (o byFirstTimestamp) Less(i, j int) bool {
	if o[i].FirstTimestamp.Equal(&o[j].FirstTimestamp) {
		return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name
	}
	return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp)
}
// dumpAllPodInfo logs the state of every pod in the cluster.
func dumpAllPodInfo(c clientset.Interface) {
	pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{})
	if err != nil {
		Logf("unable to fetch pod debug info: %v", err)
		// Bug fix: previously fell through and dereferenced the nil pod list,
		// panicking inside logPodStates whenever List failed.
		return
	}
	logPodStates(pods.Items)
}
// dumpAllNodeInfo logs debug info for every node in the cluster.
func dumpAllNodeInfo(c clientset.Interface) {
	// It should be OK to list unschedulable Nodes here.
	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		Logf("unable to fetch node list: %v", err)
		return
	}
	names := make([]string, 0, len(nodes.Items))
	for i := range nodes.Items {
		names = append(names, nodes.Items[i].Name)
	}
	DumpNodeDebugInfo(c, names, Logf)
}
// DumpNodeDebugInfo logs, for each named node: the node object, recent kubelet
// events, the pods the kubelet reports, and high-latency kubelet operations.
func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
	for _, n := range nodeNames {
		logFunc("\nLogging node info for node %v", n)
		node, err := c.CoreV1().Nodes().Get(n, metav1.GetOptions{})
		if err != nil {
			// NOTE(review): not fatal — the (possibly nil) node is still
			// logged below; confirm this is the intended best-effort behavior.
			logFunc("Error getting node info %v", err)
		}
		logFunc("Node Info: %v", node)
		logFunc("\nLogging kubelet events for node %v", n)
		for _, e := range getNodeEvents(c, n) {
			logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
				e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
		}
		logFunc("\nLogging pods the kubelet thinks is on node %v", n)
		podList, err := GetKubeletPods(c, n)
		if err != nil {
			logFunc("Unable to retrieve kubelet pods for node %v: %v", n, err)
			continue
		}
		for _, p := range podList.Items {
			logFunc("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
			for _, c := range p.Status.InitContainerStatuses {
				logFunc("\tInit container %v ready: %v, restart count %v",
					c.Name, c.Ready, c.RestartCount)
			}
			for _, c := range p.Status.ContainerStatuses {
				logFunc("\tContainer %v ready: %v, restart count %v",
					c.Name, c.Ready, c.RestartCount)
			}
		}
		HighLatencyKubeletOperations(c, 10*time.Second, n, logFunc)
		// TODO: Log node resource info
	}
}
// getNodeEvents returns kubelet events for the given node. This includes
// kubelet restart and node unhealthy events. Note that listing events like
// this will mess with latency metrics, beware of calling it during a test.
func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
	fieldSelector := fields.Set{
		"involvedObject.kind":      "Node",
		"involvedObject.name":      nodeName,
		"involvedObject.namespace": metav1.NamespaceAll,
		"source":                   "kubelet",
	}.AsSelector().String()
	listOpts := metav1.ListOptions{FieldSelector: fieldSelector}
	events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(listOpts)
	if err != nil {
		Logf("Unexpected error retrieving node events %v", err)
		return []v1.Event{}
	}
	return events.Items
}
// waitListSchedulableNodes is a wrapper around listing nodes supporting retries.
// On timeout it returns the last-seen node list together with the last List
// error — which may be nil if every failed attempt was a retryable API error.
func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
	var nodes *v1.NodeList
	var err error
	if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
		nodes, err = c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector().String()})
		if err != nil {
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		return true, nil
	}) != nil {
		return nodes, err
	}
	return nodes, nil
}
// waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
// It fails the test instead of returning an error.
func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList {
	nodes, err := waitListSchedulableNodes(c)
	if err != nil {
		ExpectNoError(err, "Non-retryable failure or timed out while listing nodes for e2e cluster.")
	}
	return nodes
}
// isNodeSchedulable reports whether a node can take work:
// 1) doesn't have "unschedulable" field set
// 2) it's Ready condition is set to true
// 3) doesn't have NetworkUnavailable condition set to true
// Both condition checks are evaluated unconditionally (they may log).
func isNodeSchedulable(node *v1.Node) bool {
	ready := IsNodeConditionSetAsExpected(node, v1.NodeReady, true)
	networkOK := IsNodeConditionUnset(node, v1.NodeNetworkUnavailable) ||
		IsNodeConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false)
	if node.Spec.Unschedulable {
		return false
	}
	return ready && networkOK
}
// isNodeUntainted tests whether a fake pod can be scheduled on "node", given its current taints.
func isNodeUntainted(node *v1.Node) bool {
	probePod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "fake-not-scheduled",
			Namespace: "fake-not-scheduled",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "fake-not-scheduled",
					Image: "fake-not-scheduled",
				},
			},
		},
	}
	info := schedulercache.NewNodeInfo()
	info.SetNode(node)
	fits, _, err := predicates.PodToleratesNodeTaints(probePod, nil, info)
	if err != nil {
		Failf("Can't test predicates for node %s: %v", node.Name, err)
		return false
	}
	return fits
}
// GetReadySchedulableNodesOrDie addresses the common use case of getting nodes you can do work on.
// 1) Needs to be schedulable.
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList) {
	nodes = waitListSchedulableNodesOrDie(c)
	// previous tests may have cause failures of some nodes. Let's skip
	// 'Not Ready' nodes, just in case (there is no need to fail the test).
	FilterNodes(nodes, func(n v1.Node) bool {
		if !isNodeSchedulable(&n) {
			return false
		}
		return isNodeUntainted(&n)
	})
	return nodes
}
// GetReadyNodesIncludingTaintedOrDie returns all ready nodes, even those which are tainted.
// There are cases when we care about tainted nodes
// E.g. in tests related to nodes with gpu we care about nodes despite
// presence of nvidia.com/gpu=present:NoSchedule taint
func GetReadyNodesIncludingTaintedOrDie(c clientset.Interface) (nodes *v1.NodeList) {
	nodes = waitListSchedulableNodesOrDie(c)
	// Keep only schedulable nodes; taints are deliberately ignored here.
	FilterNodes(nodes, func(n v1.Node) bool {
		return isNodeSchedulable(&n)
	})
	return nodes
}
// WaitForAllNodesSchedulable polls (every 30s, up to timeout) until at most
// TestContext.AllowedNotReadyNodes nodes are unschedulable.
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
	Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
	var notSchedulable []*v1.Node
	attempt := 0
	return wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
		attempt++
		notSchedulable = nil
		opts := metav1.ListOptions{
			// ResourceVersion "0" allows a (possibly stale) cached list.
			ResourceVersion: "0",
			FieldSelector:   fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
		}
		nodes, err := c.CoreV1().Nodes().List(opts)
		if err != nil {
			Logf("Unexpected error listing nodes: %v", err)
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		for i := range nodes.Items {
			node := &nodes.Items[i]
			if !isNodeSchedulable(node) {
				notSchedulable = append(notSchedulable, node)
			}
		}
		// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
		// to make it possible e.g. for incorrect deployment of some small percentage
		// of nodes (which we allow in cluster validation). Some nodes that are not
		// provisioned correctly at startup will never become ready (e.g. when something
		// won't install correctly), so we can't expect them to be ready at any point.
		//
		// However, we only allow non-ready nodes with some specific reasons.
		if len(notSchedulable) > 0 {
			// In large clusters, log them only every 10th pass.
			if len(nodes.Items) >= largeClusterThreshold && attempt%10 == 0 {
				Logf("Unschedulable nodes:")
				for i := range notSchedulable {
					Logf("-> %s Ready=%t Network=%t",
						notSchedulable[i].Name,
						IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true),
						IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false))
				}
				Logf("================================")
			}
		}
		return len(notSchedulable) <= TestContext.AllowedNotReadyNodes, nil
	})
}
// GetPodSecretUpdateTimeout returns how long a test should wait for a
// secret/configmap update to be projected into a pod's volume.
func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration {
	// With SecretManager(ConfigMapManager), we may have to wait up to full sync period +
	// TTL of secret(configmap) to elapse before the Kubelet projects the update into the
	// volume and the container picks it up.
	// So this timeout is based on default Kubelet sync period (1 minute) + maximum TTL for
	// secret(configmap) that's based on cluster size + additional time as a fudge factor.
	ttl, err := GetNodeTTLAnnotationValue(c)
	if err != nil {
		Logf("Couldn't get node TTL annotation (using default value of 0): %v", err)
	}
	return ttl + 240*time.Second
}
// GetNodeTTLAnnotationValue reads the object TTL annotation from the first
// node in the cluster and returns it as a duration in seconds.
// The kubelet stores its TTL in the node object; all nodes should agree, so
// inspecting the first one suffices.
func GetNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) {
	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil || len(nodes.Items) == 0 {
		return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err)
	}
	// Since TTL the kubelet is using is stored in node object, for the timeout
	// purpose we take it from the first node (all of them should be the same).
	node := &nodes.Items[0]
	if node.Annotations == nil {
		return time.Duration(0), fmt.Errorf("No annotations found on the node")
	}
	value, ok := node.Annotations[v1.ObjectTTLAnnotationKey]
	if !ok {
		return time.Duration(0), fmt.Errorf("No TTL annotation found on the node")
	}
	intValue, err := strconv.Atoi(value)
	if err != nil {
		// Report the offending annotation value and the parse error instead of
		// dumping the entire node object and discarding err.
		return time.Duration(0), fmt.Errorf("Cannot convert TTL annotation %q to int: %v", value, err)
	}
	return time.Duration(intValue) * time.Second, nil
}
// AddOrUpdateLabelOnNode sets labelKey=labelValue on the named node, failing
// the test on error.
func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
	lbls := map[string]string{labelKey: labelValue}
	ExpectNoError(testutils.AddLabelsToNode(c, nodeName, lbls))
}
// AddOrUpdateLabelOnNodeAndReturnOldValue sets labelKey=labelValue on the
// node and returns the previous value of that label ("" if unset).
func AddOrUpdateLabelOnNodeAndReturnOldValue(c clientset.Interface, nodeName string, labelKey, labelValue string) string {
	node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
	ExpectNoError(err)
	// Capture the old value before overwriting it.
	oldValue := node.Labels[labelKey]
	ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
	return oldValue
}
// ExpectNodeHasLabel fails the test unless the node carries labelKey=labelValue.
func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
	By("verifying the node has the label " + labelKey + " " + labelValue)
	node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
	ExpectNoError(err)
	actual := node.Labels[labelKey]
	Expect(actual).To(Equal(labelValue))
}
// RemoveTaintOffNode removes the taint from the node and verifies it is gone,
// failing the test on error.
func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint) {
	err := controller.RemoveTaintOffNode(c, nodeName, nil, &taint)
	ExpectNoError(err)
	VerifyThatTaintIsGone(c, nodeName, &taint)
}
// AddOrUpdateTaintOnNode applies the taint to the node, failing the test on error.
func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Taint) {
	err := controller.AddOrUpdateTaintOnNode(c, nodeName, &taint)
	ExpectNoError(err)
}
// RemoveLabelOffNode is for cleaning up labels temporarily added to node,
// won't fail if target label doesn't exist or has been removed.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) {
	keys := []string{labelKey}
	By("removing the label " + labelKey + " off the node " + nodeName)
	ExpectNoError(testutils.RemoveLabelOffNode(c, nodeName, keys))
	By("verifying the node doesn't have the label " + labelKey)
	ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, keys))
}
// VerifyThatTaintIsGone fails the test if the node still carries the taint.
func VerifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) {
	By("verifying the node doesn't have the taint " + taint.ToString())
	node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
	ExpectNoError(err)
	stillThere := taintutils.TaintExists(node.Spec.Taints, taint)
	if stillThere {
		Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
	}
}
// ExpectNodeHasTaint fails the test unless the node carries the taint.
func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) {
	By("verifying the node has the taint " + taint.ToString())
	has, err := NodeHasTaint(c, nodeName, taint)
	if !has {
		// Surface a lookup error first, then the missing-taint failure.
		ExpectNoError(err)
		Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
	}
}
// NodeHasTaint reports whether the named node currently carries the taint.
func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
	node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	taints := node.Spec.Taints
	if len(taints) > 0 && taintutils.TaintExists(taints, taint) {
		return true, nil
	}
	return false, nil
}
// AddOrUpdateAvoidPodOnNode adds avoidPods annotations to node, will override if it exists.
// Retries on retryable API errors and on update conflicts until SingleCallTimeout.
func AddOrUpdateAvoidPodOnNode(c clientset.Interface, nodeName string, avoidPods v1.AvoidPods) {
	err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
		node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
		if err != nil {
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		taintsData, err := json.Marshal(avoidPods)
		ExpectNoError(err)
		if node.Annotations == nil {
			node.Annotations = make(map[string]string)
		}
		node.Annotations[v1.PreferAvoidPodsAnnotationKey] = string(taintsData)
		_, err = c.CoreV1().Nodes().Update(node)
		if err != nil {
			if !apierrs.IsConflict(err) {
				ExpectNoError(err)
			} else {
				// Fixed typo ("avoidPonds") and, crucially, return false so the
				// poll re-fetches the node and retries instead of silently
				// dropping the update on a conflict.
				Logf("Conflict when trying to add/update avoidPods %v to %v", avoidPods, nodeName)
				return false, nil
			}
		}
		return true, nil
	})
	ExpectNoError(err)
}
// RemoveAvoidPodsOffNode removes the AvoidPods annotation from the node.
// It does not fail if no such annotation exists; retries on retryable API
// errors and on update conflicts until SingleCallTimeout.
func RemoveAvoidPodsOffNode(c clientset.Interface, nodeName string) {
	err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
		node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
		if err != nil {
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		if node.Annotations == nil {
			// Nothing to remove.
			return true, nil
		}
		delete(node.Annotations, v1.PreferAvoidPodsAnnotationKey)
		_, err = c.CoreV1().Nodes().Update(node)
		if err != nil {
			if !apierrs.IsConflict(err) {
				ExpectNoError(err)
			} else {
				// On a conflict, retry from a fresh Get instead of silently
				// returning success with the annotation still present.
				Logf("Conflict when trying to remove avoidPods to %v", nodeName)
				return false, nil
			}
		}
		return true, nil
	})
	ExpectNoError(err)
}
// ScaleResource scales the named resource of the given kind in namespace ns
// to size replicas, and (when wait is true) blocks until the controlled pods
// are running.
func ScaleResource(
	clientset clientset.Interface,
	scalesGetter scaleclient.ScalesGetter,
	ns, name string,
	size uint,
	wait bool,
	kind schema.GroupKind,
	gr schema.GroupResource,
) error {
	By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size))
	scaler := kubectl.NewScaler(scalesGetter)
	if err := testutils.ScaleResourceWithRetries(scaler, ns, name, size, gr); err != nil {
		// Report the actual kind instead of hardcoding "RC" — this helper
		// scales Deployments, ReplicaSets, etc. as well.
		return fmt.Errorf("error while scaling %v %s to %d replicas: %v", kind, name, size, err)
	}
	if !wait {
		return nil
	}
	return WaitForControlledPodsRunning(clientset, ns, name, kind)
}
// WaitForControlledPodsRunning waits up to 10 minutes for the pods controlled
// by the named resource of the given kind to become Running.
func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind schema.GroupKind) error {
	rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
	if err != nil {
		return err
	}
	selector, err := getSelectorFromRuntimeObject(rtObject)
	if err != nil {
		return err
	}
	replicas, err := getReplicasFromRuntimeObject(rtObject)
	if err != nil {
		return err
	}
	err = testutils.WaitForEnoughPodsWithLabelRunning(c, ns, selector, int(replicas))
	if err != nil {
		// Name the actual kind; this helper is not limited to replication controllers.
		return fmt.Errorf("Error while waiting for %v %s pods to be running: %v", kind, name, err)
	}
	return nil
}
// WaitForControlledPods waits up to PodListTimeout for the pods controlled by
// the named resource of the given kind and returns them.
func WaitForControlledPods(c clientset.Interface, ns, name string, kind schema.GroupKind) (pods *v1.PodList, err error) {
	obj, err := getRuntimeObjectForKind(c, kind, ns, name)
	if err != nil {
		return nil, err
	}
	selector, err := getSelectorFromRuntimeObject(obj)
	if err != nil {
		return nil, err
	}
	return WaitForPodsWithLabel(c, ns, selector)
}
// podsWithLabelScheduled returns true when at least one pod matches the label
// and every matching pod has been assigned a node; false otherwise.
func podsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (bool, error) {
	store, err := testutils.NewPodStore(c, ns, label, fields.Everything())
	if err != nil {
		return false, err
	}
	defer store.Stop()
	pods := store.List()
	// No matching pods at all counts as "not scheduled".
	if len(pods) == 0 {
		return false, nil
	}
	for _, p := range pods {
		if p.Spec.NodeName == "" {
			return false, nil
		}
	}
	return true, nil
}
// WaitForPodsWithLabelScheduled waits until every pod matching the label is
// scheduled (and at least one matching pod exists), then returns them.
func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
	allScheduled := func() (bool, error) {
		// Assigns the named returns so the latest list is what we hand back.
		pods, err = WaitForPodsWithLabel(c, ns, label)
		if err != nil {
			return false, err
		}
		for _, pod := range pods.Items {
			if pod.Spec.NodeName == "" {
				return false, nil
			}
		}
		return true, nil
	}
	err = wait.PollImmediate(Poll, podScheduledBeforeTimeout, allScheduled)
	return pods, err
}
// Wait up to PodListTimeout for getting pods with certain label.
// Returns the first non-empty pod list observed; sets err if the deadline
// passes without any matching pod, or on a non-retryable list error.
func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
	for t := time.Now(); time.Since(t) < PodListTimeout; time.Sleep(Poll) {
		options := metav1.ListOptions{LabelSelector: label.String()}
		pods, err = c.CoreV1().Pods(ns).List(options)
		if err != nil {
			// Retryable API errors keep the loop going; anything else is
			// returned immediately via the named returns.
			if testutils.IsRetryableAPIError(err) {
				continue
			}
			return
		}
		if len(pods.Items) > 0 {
			break
		}
	}
	// Either the deadline passed or every successful list came back empty.
	if pods == nil || len(pods.Items) == 0 {
		err = fmt.Errorf("Timeout while waiting for pods with label %v", label)
	}
	return
}
// WaitForPodsWithLabelRunningReady waits for exactly num matching pods to
// become running and ready, and returns the list of matching pods.
func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) {
	var current int
	err = wait.Poll(Poll, timeout,
		func() (bool, error) {
			// Plain assignment (not ':=') so the named return 'pods' is
			// populated; the original shadowed it and always returned nil.
			pods, err = WaitForPodsWithLabel(c, ns, label)
			if err != nil {
				Logf("Failed to list pods: %v", err)
				if testutils.IsRetryableAPIError(err) {
					return false, nil
				}
				return false, err
			}
			current = 0
			for _, pod := range pods.Items {
				if flag, err := testutils.PodRunningReady(&pod); err == nil && flag {
					current++
				}
			}
			if current != num {
				Logf("Got %v pods running and ready, expect: %v", current, num)
				return false, nil
			}
			return true, nil
		})
	return pods, err
}
// getRuntimeObjectForKind fetches the named object of the given group/kind
// from namespace ns. Only the controller kinds listed below are supported;
// any other kind yields an error.
func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
	case extensionsinternal.Kind("ReplicaSet"), appsinternal.Kind("ReplicaSet"):
		// Both the extensions and apps groups are served via extensions/v1beta1 here.
		return c.ExtensionsV1beta1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
	case extensionsinternal.Kind("Deployment"), appsinternal.Kind("Deployment"):
		return c.ExtensionsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{})
	case extensionsinternal.Kind("DaemonSet"):
		return c.ExtensionsV1beta1().DaemonSets(ns).Get(name, metav1.GetOptions{})
	case batchinternal.Kind("Job"):
		return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
	default:
		return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind)
	}
}
// getSelectorFromRuntimeObject extracts the pod label selector from a
// supported controller object; errors on any other type.
func getSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
	switch typed := obj.(type) {
	case *v1.ReplicationController:
		// RC selectors are plain map[string]string, not LabelSelector structs.
		return labels.SelectorFromSet(typed.Spec.Selector), nil
	case *extensions.ReplicaSet:
		return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
	case *extensions.Deployment:
		return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
	case *extensions.DaemonSet:
		return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
	case *batch.Job:
		return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
	default:
		return nil, fmt.Errorf("Unsupported kind when getting selector: %v", obj)
	}
}
// getReplicasFromRuntimeObject extracts the desired replica count from a
// supported controller object. DaemonSets have no replica field and report 0;
// unsupported types yield -1 and an error.
func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) {
	// orZero dereferences an optional replica pointer, defaulting to 0.
	orZero := func(p *int32) int32 {
		if p != nil {
			return *p
		}
		return 0
	}
	switch typed := obj.(type) {
	case *v1.ReplicationController:
		return orZero(typed.Spec.Replicas), nil
	case *extensions.ReplicaSet:
		return orZero(typed.Spec.Replicas), nil
	case *extensions.Deployment:
		return orZero(typed.Spec.Replicas), nil
	case *extensions.DaemonSet:
		return 0, nil
	case *batch.Job:
		// TODO: currently we use pause pods so that's OK. When we'll want to switch to Pods
		// that actually finish we need a better way to do this.
		return orZero(typed.Spec.Parallelism), nil
	default:
		return -1, fmt.Errorf("Unsupported kind when getting number of replicas: %v", obj)
	}
}
// DeleteResourceAndWaitForGC deletes only given resource and waits for GC to delete the pods.
// A missing resource is treated as success. Poll interval and inactivity
// timeout are scaled with the replica count of the deleted controller.
func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns, name string) error {
	By(fmt.Sprintf("deleting %v %s in namespace %s, will wait for the garbage collector to delete the pods", kind, name, ns))
	rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
	if err != nil {
		if apierrs.IsNotFound(err) {
			Logf("%v %s not found: %v", kind, name, err)
			return nil
		}
		return err
	}
	selector, err := getSelectorFromRuntimeObject(rtObject)
	if err != nil {
		return err
	}
	replicas, err := getReplicasFromRuntimeObject(rtObject)
	if err != nil {
		return err
	}
	// The pod store tracks the controlled pods so we can watch them drain.
	ps, err := testutils.NewPodStore(c, ns, selector, fields.Everything())
	if err != nil {
		return err
	}
	defer ps.Stop()
	// OrphanDependents=false ensures the GC actually deletes the pods.
	falseVar := false
	deleteOption := &metav1.DeleteOptions{OrphanDependents: &falseVar}
	startTime := time.Now()
	if err := testutils.DeleteResourceWithRetries(c, kind, ns, name, deleteOption); err != nil {
		return err
	}
	deleteTime := time.Since(startTime)
	Logf("Deleting %v %s took: %v", kind, name, deleteTime)
	// Poll more gently the larger the workload.
	var interval, timeout time.Duration
	switch {
	case replicas < 100:
		interval = 100 * time.Millisecond
	case replicas < 1000:
		interval = 1 * time.Second
	default:
		interval = 10 * time.Second
	}
	if replicas < 5000 {
		timeout = 10 * time.Minute
	} else {
		timeout = time.Duration(replicas/gcThroughput) * time.Second
		// gcThroughput is pretty strict now, add a bit more to it
		timeout = timeout + 3*time.Minute
	}
	err = waitForPodsInactive(ps, interval, timeout)
	if err != nil {
		return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
	}
	terminatePodTime := time.Since(startTime) - deleteTime
	Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
	err = waitForPodsGone(ps, interval, 10*time.Minute)
	if err != nil {
		return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
	}
	return nil
}
// waitForPodsInactive waits until there are no active pods left in the PodStore.
// This is to make a fair comparison of deletion time between DeleteRCAndPods
// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
// when the pod is inactive.
func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		pods := ps.List()
		for _, pod := range pods {
			// One active pod is enough to keep polling.
			if controller.IsPodActive(pod) {
				return false, nil
			}
		}
		return true, nil
	})
}
// waitForPodsGone waits until there are no pods left in the PodStore.
func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		remaining := ps.List()
		return len(remaining) == 0, nil
	})
}
// WaitForPodsReady polls for up to 5 minutes until every pod labelled
// name=<name> in the namespace has been available for minReadySeconds.
func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	listOpts := metav1.ListOptions{LabelSelector: selector.String()}
	minReady := int32(minReadySeconds)
	return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
		pods, err := c.CoreV1().Pods(ns).List(listOpts)
		if err != nil {
			// List errors are treated as transient; keep polling.
			return false, nil
		}
		for i := range pods.Items {
			if !podutil.IsPodAvailable(&pods.Items[i], minReady, metav1.Now()) {
				return false, nil
			}
		}
		return true, nil
	})
}
// WaitForEvents waits (up to 5 minutes) for the number of events on the given
// object to reach exactly desiredEventsCount; overshooting is an error.
func WaitForEvents(c clientset.Interface, ns string, objOrRef runtime.Object, desiredEventsCount int) error {
	return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
		events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, objOrRef)
		if err != nil {
			return false, fmt.Errorf("error in listing events: %s", err)
		}
		switch got := len(events.Items); {
		case got == desiredEventsCount:
			return true, nil
		case got < desiredEventsCount:
			return false, nil
		default:
			// Number of events has exceeded the desired count.
			return false, fmt.Errorf("number of events has exceeded the desired count, eventsCount: %d, desiredCount: %d", got, desiredEventsCount)
		}
	})
}
// WaitForPartialEvents waits (up to 5 minutes) until the given object has at
// least atLeastEventsCount events.
func WaitForPartialEvents(c clientset.Interface, ns string, objOrRef runtime.Object, atLeastEventsCount int) error {
	return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
		events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, objOrRef)
		if err != nil {
			return false, fmt.Errorf("error in listing events: %s", err)
		}
		return len(events.Items) >= atLeastEventsCount, nil
	})
}
// updateDSFunc mutates a DaemonSet in place before it is pushed to the apiserver.
type updateDSFunc func(*apps.DaemonSet)

// UpdateDaemonSetWithRetries fetches the named DaemonSet, applies applyUpdate
// to it and pushes the result, retrying retryable API errors and update
// conflicts every 10ms for up to one minute.
func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *apps.DaemonSet, err error) {
	daemonsets := c.AppsV1().DaemonSets(namespace)
	var updateErr error
	pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
		if ds, err = daemonsets.Get(name, metav1.GetOptions{}); err != nil {
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(ds)
		if ds, err = daemonsets.Update(ds); err == nil {
			Logf("Updating DaemonSet %s", name)
			return true, nil
		}
		updateErr = err
		return false, nil
	})
	if pollErr == wait.ErrWaitTimeout {
		// Surface the last update error instead of a generic timeout.
		// (Also fixes the "provided updated" typo in the message.)
		pollErr = fmt.Errorf("couldn't apply the provided update to DaemonSet %q: %v", name, updateErr)
	}
	return ds, pollErr
}
// NodeAddresses returns the first address of the given type of each node.
func NodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string {
	hosts := []string{}
	for _, n := range nodelist.Items {
		// Use the first matching address we find on the node, and
		// use at most one per node.
		// TODO(roberthbailey): Use the "preferred" address for the node, once
		// such a thing is defined (#2462).
		for _, addr := range n.Status.Addresses {
			if addr.Type != addrType {
				continue
			}
			hosts = append(hosts, addr.Address)
			break
		}
	}
	return hosts
}
// NodeSSHHosts returns SSH-able host names for all schedulable nodes - this excludes master node.
// It returns an error if it can't find an external IP for every node, though it still returns all
// hosts that it found in that case.
func NodeSSHHosts(c clientset.Interface) ([]string, error) {
	nodelist := waitListSchedulableNodesOrDie(c)
	// TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462).
	hosts := NodeAddresses(nodelist, v1.NodeExternalIP)
	if len(hosts) != len(nodelist.Items) {
		// Note: on this error path the raw (port-less) host list is returned.
		return hosts, fmt.Errorf(
			"only found %d external IPs on nodes, but found %d nodes. Nodelist: %v",
			len(hosts), len(nodelist.Items), nodelist)
	}
	sshHosts := make([]string, 0, len(hosts))
	for _, addr := range hosts {
		sshHosts = append(sshHosts, net.JoinHostPort(addr, sshPort))
	}
	return sshHosts, nil
}
// SSHResult holds the outcome of running a single command over SSH.
type SSHResult struct {
	User   string // user the command ran as
	Host   string // host:port the command was sent to
	Cmd    string // the command that was executed
	Stdout string // captured standard output
	Stderr string // captured standard error
	Code   int    // remote exit code
}
// NodeExec execs the given cmd on node via SSH. Note that the nodeName is an sshable name,
// eg: the name returned by framework.GetMasterHost(). This is also not guaranteed to work across
// cloud providers since it involves ssh.
func NodeExec(nodeName, cmd string) (SSHResult, error) {
	host := net.JoinHostPort(nodeName, sshPort)
	return SSH(cmd, host, TestContext.Provider)
}
// SSH synchronously SSHs to a node running on provider and runs cmd. If there
// is no error performing the SSH, the stdout, stderr, and exit code are
// returned.
func SSH(cmd, host, provider string) (SSHResult, error) {
	result := SSHResult{Host: host, Cmd: cmd}
	// Get a signer for the provider.
	signer, err := GetSigner(provider)
	if err != nil {
		return result, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err)
	}
	// RunSSHCommand will default to Getenv("USER") if user == "", but we're
	// defaulting here as well for logging clarity.
	if result.User = os.Getenv("KUBE_SSH_USER"); result.User == "" {
		result.User = os.Getenv("USER")
	}
	result.Stdout, result.Stderr, result.Code, err = sshutil.RunSSHCommand(cmd, result.User, host, signer)
	return result, err
}
// LogSSHResult dumps the full SSH result (command, output, exit code) to the log.
func LogSSHResult(result SSHResult) {
	remote := result.User + "@" + result.Host
	Logf("ssh %s: command: %s", remote, result.Cmd)
	Logf("ssh %s: stdout: %q", remote, result.Stdout)
	Logf("ssh %s: stderr: %q", remote, result.Stderr)
	Logf("ssh %s: exit code: %d", remote, result.Code)
}
// IssueSSHCommandWithResult runs cmd on the node over SSH, preferring its
// external IP and falling back to the internal one, and returns the result.
// A non-zero exit code or transport error is returned as an error.
func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*SSHResult, error) {
	Logf("Getting external IP address for %s", node.Name)
	host := ""
	// Try address types in order of preference: external first, then internal.
	for _, wanted := range []v1.NodeAddressType{v1.NodeExternalIP, v1.NodeInternalIP} {
		for _, a := range node.Status.Addresses {
			if a.Type == wanted {
				host = net.JoinHostPort(a.Address, sshPort)
				break
			}
		}
		if host != "" {
			break
		}
	}
	if host == "" {
		return nil, fmt.Errorf("couldn't find any IP address for node %s", node.Name)
	}
	Logf("SSH %q on %s(%s)", cmd, node.Name, host)
	result, err := SSH(cmd, host, provider)
	LogSSHResult(result)
	if result.Code != 0 || err != nil {
		return nil, fmt.Errorf("failed running %q: %v (exit code %d)",
			cmd, err, result.Code)
	}
	return &result, nil
}
// IssueSSHCommand runs cmd on the node over SSH, discarding the output and
// returning only the error, if any.
func IssueSSHCommand(cmd, provider string, node *v1.Node) error {
	_, err := IssueSSHCommandWithResult(cmd, provider, node)
	return err
}
// NewHostExecPodSpec returns the pod spec of hostexec pod: a host-network pod
// used to run arbitrary commands on a node via `kubectl exec`.
func NewHostExecPodSpec(ns, name string) *v1.Pod {
	// Zero grace period so cleanup is immediate.
	immediate := int64(0)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:            "hostexec",
					Image:           imageutils.GetE2EImage(imageutils.Hostexec),
					ImagePullPolicy: v1.PullIfNotPresent,
				},
			},
			// HostNetwork gives the container the node's network namespace.
			HostNetwork:                   true,
			SecurityContext:               &v1.PodSecurityContext{},
			TerminationGracePeriodSeconds: &immediate,
		},
	}
	return pod
}
// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {
	nsArg := fmt.Sprintf("--namespace=%v", ns)
	return RunKubectl("exec", nsArg, name, "--", "/bin/sh", "-c", cmd)
}
// RunHostCmdOrDie calls RunHostCmd and dies on error.
func RunHostCmdOrDie(ns, name, cmd string) string {
	out, err := RunHostCmd(ns, name, cmd)
	Logf("stdout: %v", out)
	ExpectNoError(err)
	return out
}
// RunHostCmdWithRetries calls RunHostCmd and retries all errors
// until it succeeds or the specified timeout expires.
// This can be used with idempotent commands to deflake transient Node issues.
func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration) (string, error) {
	begin := time.Now()
	for {
		out, err := RunHostCmd(ns, name, cmd)
		if err == nil {
			return out, nil
		}
		elapsed := time.Since(begin)
		if elapsed > timeout {
			return out, fmt.Errorf("RunHostCmd still failed after %v: %v", elapsed, err)
		}
		Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err)
		time.Sleep(interval)
	}
}
// LaunchHostExecPod launches a hostexec pod in the given namespace and waits
// until it's Running
func LaunchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod {
	spec := NewHostExecPodSpec(ns, name)
	pod, err := client.CoreV1().Pods(ns).Create(spec)
	ExpectNoError(err)
	ExpectNoError(WaitForPodRunningInNamespace(client, pod))
	return pod
}
// newExecPodSpec returns the pod spec of exec pod: a busybox container that
// sleeps forever (but exits promptly on TERM), used as a vessel for
// `kubectl exec`.
func newExecPodSpec(ns, generateName string) *v1.Pod {
	// Zero grace period so the pod dies immediately when deleted.
	immediate := int64(0)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: generateName,
			Namespace:    ns,
		},
		Spec: v1.PodSpec{
			TerminationGracePeriodSeconds: &immediate,
			Containers: []v1.Container{
				{
					Name:    "exec",
					Image:   BusyBoxImage,
					Command: []string{"sh", "-c", "trap exit TERM; while true; do sleep 5; done"},
				},
			},
		},
	}
	return pod
}
// CreateExecPodOrFail creates a simple busybox pod in a sleep loop used as a
// vessel for kubectl exec commands.
// Returns the name of the created pod.
func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) string {
	Logf("Creating new exec pod")
	execPod := newExecPodSpec(ns, generateName)
	// Allow the caller to customize the spec before creation.
	if tweak != nil {
		tweak(execPod)
	}
	created, err := client.CoreV1().Pods(ns).Create(execPod)
	Expect(err).NotTo(HaveOccurred())
	isRunning := func() (bool, error) {
		p, err := client.CoreV1().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{})
		if err != nil {
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		return p.Status.Phase == v1.PodRunning, nil
	}
	err = wait.PollImmediate(Poll, 5*time.Minute, isRunning)
	Expect(err).NotTo(HaveOccurred())
	return created.Name
}
// CreatePodOrFail creates a single pause-container pod with the given labels
// and container ports in namespace ns, failing the test on error.
func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort) {
	By(fmt.Sprintf("Creating pod %s in namespace %s", name, ns))
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "pause",
					Image: imageutils.GetPauseImageName(),
					Ports: containerPorts,
					// Add a dummy environment variable to work around a docker issue.
					// https://github.com/docker/docker/issues/14203
					Env: []v1.EnvVar{{Name: "FOO", Value: " "}},
				},
			},
		},
	}
	_, err := c.CoreV1().Pods(ns).Create(pod)
	Expect(err).NotTo(HaveOccurred())
}
// DeletePodOrFail deletes the named pod from namespace ns, failing the test on error.
func DeletePodOrFail(c clientset.Interface, ns, name string) {
	By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns))
	deleteErr := c.CoreV1().Pods(ns).Delete(name, nil)
	Expect(deleteErr).NotTo(HaveOccurred())
}
// GetSigner returns an ssh.Signer for the provider ("gce", etc.) that can be
// used to SSH to their nodes.
func GetSigner(provider string) (ssh.Signer, error) {
	// SSH keys are expected under $HOME/.ssh.
	keydir := filepath.Join(os.Getenv("HOME"), ".ssh")
	// Select the key itself to use. When implementing more providers here,
	// please also add them to any SSH tests that are disabled because of signer
	// support.
	keyfile := ""
	switch provider {
	case "gce", "gke", "kubemark":
		keyfile = "google_compute_engine"
	case "aws":
		// An environment variable override takes precedence.
		if awsKeyfile := os.Getenv("AWS_SSH_KEY"); len(awsKeyfile) != 0 {
			return sshutil.MakePrivateKeySignerFromFile(awsKeyfile)
		}
		// Otherwise revert to home dir
		keyfile = "kube_aws_rsa"
	case "local", "vsphere":
		if keyfile = os.Getenv("LOCAL_SSH_KEY"); len(keyfile) == 0 {
			keyfile = "id_rsa"
		}
	case "skeleton":
		if keyfile = os.Getenv("KUBE_SSH_KEY"); len(keyfile) == 0 {
			keyfile = "id_rsa"
		}
	default:
		return nil, fmt.Errorf("GetSigner(...) not implemented for %s", provider)
	}
	return sshutil.MakePrivateKeySignerFromFile(filepath.Join(keydir, keyfile))
}
// CheckPodsRunningReady returns whether all pods whose names are listed in
// podNames in namespace ns are running and ready, using c and waiting at most
// timeout.
func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
	// Thin wrapper: the condition is testutils.PodRunningReady.
	return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready")
}
// CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are
// listed in podNames in namespace ns are running and ready, or succeeded; use
// c and waiting at most timeout.
func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
	// Thin wrapper: the condition additionally accepts Succeeded pods.
	return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded")
}
// CheckPodsCondition returns whether all pods whose names are listed in podNames
// in namespace ns are in the condition, using c and waiting at most timeout.
// All pods are checked concurrently; each one gets the full timeout.
func CheckPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
	np := len(podNames)
	Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
	type waitPodResult struct {
		success bool
		podName string
	}
	// Buffered so every goroutine can send without a receiver being ready.
	result := make(chan waitPodResult, len(podNames))
	for _, podName := range podNames {
		// Launch off pod readiness checkers.
		go func(name string) {
			err := WaitForPodCondition(c, ns, name, desc, timeout, condition)
			result <- waitPodResult{err == nil, name}
		}(podName)
	}
	// Wait for them all to finish.
	success := true
	for range podNames {
		res := <-result
		if !res.success {
			Logf("Pod %[1]s failed to be %[2]s.", res.podName, desc)
			success = false
		}
	}
	Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
	return success
}
// WaitForNodeToBeReady returns whether node name is ready within timeout.
func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool {
	// Ready means NodeReady condition status is ConditionTrue.
	return WaitForNodeToBe(c, name, v1.NodeReady, true, timeout)
}
// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the
// readiness condition is anything but ready, e.g false or unknown) within
// timeout.
func WaitForNodeToBeNotReady(c clientset.Interface, name string, timeout time.Duration) bool {
	return WaitForNodeToBe(c, name, v1.NodeReady, false, timeout)
}
// isNodeConditionSetAsExpected reports whether the node's condition of the
// given type matches wantTrue. For NodeReady with wantTrue, the node must
// additionally be free of the node-controller NotReady/Unreachable taints.
// With silent set, mismatches are not logged.
func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue, silent bool) bool {
	// Check the node readiness condition (logging all).
	for _, cond := range node.Status.Conditions {
		// Ensure that the condition type and the status matches as desired.
		if cond.Type == conditionType {
			// For NodeReady condition we need to check Taints as well
			if cond.Type == v1.NodeReady {
				hasNodeControllerTaints := false
				// For NodeReady we need to check if Taints are gone as well
				taints := node.Spec.Taints
				for _, taint := range taints {
					if taint.MatchTaint(nodectlr.UnreachableTaintTemplate) || taint.MatchTaint(nodectlr.NotReadyTaintTemplate) {
						hasNodeControllerTaints = true
						break
					}
				}
				if wantTrue {
					if (cond.Status == v1.ConditionTrue) && !hasNodeControllerTaints {
						return true
					} else {
						// Distinguish "condition not true" from "true but tainted".
						msg := ""
						if !hasNodeControllerTaints {
							msg = fmt.Sprintf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
								conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
						} else {
							msg = fmt.Sprintf("Condition %s of node %s is %v, but Node is tainted by NodeController with %v. Failure",
								conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
						}
						if !silent {
							Logf(msg)
						}
						return false
					}
				} else {
					// TODO: check if the Node is tainted once we enable NC notReady/unreachable taints by default
					if cond.Status != v1.ConditionTrue {
						return true
					}
					if !silent {
						Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
							conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
					}
					return false
				}
			}
			// Non-NodeReady conditions: plain status comparison, no taint check.
			if (wantTrue && (cond.Status == v1.ConditionTrue)) || (!wantTrue && (cond.Status != v1.ConditionTrue)) {
				return true
			} else {
				if !silent {
					Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
						conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
				}
				return false
			}
		}
	}
	// The condition type was not present on the node at all.
	if !silent {
		Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
	}
	return false
}
// IsNodeConditionSetAsExpected reports whether the node's condition of the
// given type matches wantTrue, logging details on mismatch.
func IsNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
	const silent = false
	return isNodeConditionSetAsExpected(node, conditionType, wantTrue, silent)
}
// IsNodeConditionSetAsExpectedSilent is IsNodeConditionSetAsExpected without
// any logging on mismatch.
func IsNodeConditionSetAsExpectedSilent(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
	const silent = true
	return isNodeConditionSetAsExpected(node, conditionType, wantTrue, silent)
}
// IsNodeConditionUnset reports whether the node carries no condition of the
// given type at all.
func IsNodeConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool {
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == conditionType {
			return false
		}
	}
	return true
}
// WaitForNodeToBe returns whether node "name's" condition state matches wantTrue
// within timeout. If wantTrue is true, it will ensure the node condition status
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func WaitForNodeToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
	Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
	// Poll every Poll interval until the condition is observed or timeout elapses.
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
		if err != nil {
			// Transient Get failures are tolerated; keep retrying until timeout.
			Logf("Couldn't get node %s", name)
			continue
		}
		if IsNodeConditionSetAsExpected(node, conditionType, wantTrue) {
			return true
		}
	}
	Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
	return false
}
// AllNodesReady checks whether all registered nodes are ready, allowing up to
// TestContext.AllowedNotReadyNodes exceptions.
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
// default test add-ons.
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
	Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, TestContext.AllowedNotReadyNodes)
	var notReady []*v1.Node
	err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
		// Reset on every attempt so the final list reflects the last poll.
		notReady = nil
		// It should be OK to list unschedulable Nodes here.
		nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
		if err != nil {
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		for i := range nodes.Items {
			// Take the address of the slice element (not the range variable)
			// so the pointer stays valid after the loop.
			node := &nodes.Items[i]
			if !IsNodeConditionSetAsExpected(node, v1.NodeReady, true) {
				notReady = append(notReady, node)
			}
		}
		// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
		// to make it possible e.g. for incorrect deployment of some small percentage
		// of nodes (which we allow in cluster validation). Some nodes that are not
		// provisioned correctly at startup will never become ready (e.g. when something
		// won't install correctly), so we can't expect them to be ready at any point.
		return len(notReady) <= TestContext.AllowedNotReadyNodes, nil
	})
	// On timeout, fall through to report the offending node names below;
	// only propagate unexpected (non-timeout) errors directly.
	if err != nil && err != wait.ErrWaitTimeout {
		return err
	}
	if len(notReady) > TestContext.AllowedNotReadyNodes {
		msg := ""
		for _, node := range notReady {
			msg = fmt.Sprintf("%s, %s", msg, node.Name)
		}
		return fmt.Errorf("Not ready nodes: %#v", msg)
	}
	return nil
}
// WaitForAllNodesHealthy checks whether all registered nodes are ready and all
// required system Pods (requiredPerNodePods) are running on them.
func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error {
	Logf("Waiting up to %v for all nodes to be ready", timeout)
	var notReady []v1.Node
	var missingPodsPerNode map[string][]string
	err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
		notReady = nil
		// It should be OK to list unschedulable Nodes here.
		// ResourceVersion "0" allows the apiserver to serve from cache.
		nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
		if err != nil {
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		for _, node := range nodes.Items {
			if !IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
				notReady = append(notReady, node)
			}
		}
		pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: "0"})
		if err != nil {
			return false, err
		}
		// Index running kube-system pods by the node they are scheduled on.
		systemPodsPerNode := make(map[string][]string)
		for _, pod := range pods.Items {
			if pod.Namespace == metav1.NamespaceSystem && pod.Status.Phase == v1.PodRunning {
				if pod.Spec.NodeName != "" {
					systemPodsPerNode[pod.Spec.NodeName] = append(systemPodsPerNode[pod.Spec.NodeName], pod.Name)
				}
			}
		}
		// Every non-master node must run each required per-node pod (matched
		// by regexp against the pod names found above).
		missingPodsPerNode = make(map[string][]string)
		for _, node := range nodes.Items {
			if !system.IsMasterNode(node.Name) {
				for _, requiredPod := range requiredPerNodePods {
					foundRequired := false
					for _, presentPod := range systemPodsPerNode[node.Name] {
						if requiredPod.MatchString(presentPod) {
							foundRequired = true
							break
						}
					}
					if !foundRequired {
						missingPodsPerNode[node.Name] = append(missingPodsPerNode[node.Name], requiredPod.String())
					}
				}
			}
		}
		return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
	})
	// A timeout is reported with details below; propagate other errors as-is.
	if err != nil && err != wait.ErrWaitTimeout {
		return err
	}
	if len(notReady) > 0 {
		return fmt.Errorf("Not ready nodes: %v", notReady)
	}
	if len(missingPodsPerNode) > 0 {
		return fmt.Errorf("Not running system Pods: %v", missingPodsPerNode)
	}
	return nil
}
// FilterNodes filters nodes in NodeList in place, removing nodes that do not
// satisfy the given condition.
// TODO: consider merging with pkg/client/cache.NodeLister
func FilterNodes(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
	var kept []v1.Node
	for _, candidate := range nodeList.Items {
		if fn(candidate) {
			kept = append(kept, candidate)
		}
	}
	nodeList.Items = kept
}
// ParseKVLines parses output that looks like lines containing "<key>: <val>"
// and returns <val> if <key> is found. Otherwise, it returns the empty string.
func ParseKVLines(output, key string) string {
	const delim = ":"
	wanted := key + delim
	for _, line := range strings.Split(output, "\n") {
		// SplitAfterN keeps the delimiter on the key part, so the key
		// comparison below includes the trailing ":".
		pieces := strings.SplitAfterN(line, delim, 2)
		if len(pieces) != 2 {
			continue
		}
		if pieces[0] == wanted {
			return strings.TrimSpace(pieces[1])
		}
	}
	return ""
}
// RestartKubeProxy kills kube-proxy on the given host over ssh and waits for
// it to come back up. Supported on gce, gke and aws only.
func RestartKubeProxy(host string) error {
	// TODO: Make it work for all providers.
	if !ProviderIs("gce", "gke", "aws") {
		return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
	}
	// kubelet will restart the kube-proxy since it's running in a static pod
	Logf("Killing kube-proxy on node %v", host)
	result, err := SSH("sudo pkill kube-proxy", host, TestContext.Provider)
	if err != nil || result.Code != 0 {
		LogSSHResult(result)
		return fmt.Errorf("couldn't restart kube-proxy: %v", err)
	}
	// wait for kube-proxy to come back up
	sshCmd := "sudo /bin/sh -c 'pgrep kube-proxy | wc -l'"
	err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
		Logf("Waiting for kubeproxy to come back up with %v on %v", sshCmd, host)
		result, err := SSH(sshCmd, host, TestContext.Provider)
		if err != nil {
			return false, err
		}
		if result.Code != 0 {
			LogSSHResult(result)
			return false, fmt.Errorf("failed to run command, exited %d", result.Code)
		}
		// pgrep|wc -l prints the number of kube-proxy processes; "0\n" means
		// it has not restarted yet, so keep polling.
		if result.Stdout == "0\n" {
			return false, nil
		}
		Logf("kube-proxy is back up.")
		return true, nil
	})
	if err != nil {
		return fmt.Errorf("kube-proxy didn't recover: %v", err)
	}
	return nil
}
// RestartKubelet restarts the kubelet on the given host over ssh, choosing the
// restart mechanism appropriate for the provider/distro (init script,
// systemctl or service). Supported on gce (debian/gci nodes), aws and vsphere.
func RestartKubelet(host string) error {
	// TODO: Make it work for all providers and distros.
	supportedProviders := []string{"gce", "aws", "vsphere"}
	if !ProviderIs(supportedProviders...) {
		return fmt.Errorf("unsupported provider: %s, supported providers are: %v", TestContext.Provider, supportedProviders)
	}
	if ProviderIs("gce") && !NodeOSDistroIs("debian", "gci") {
		return fmt.Errorf("unsupported node OS distro: %s", TestContext.NodeOSDistro)
	}
	var cmd string
	if ProviderIs("gce") && NodeOSDistroIs("debian") {
		cmd = "sudo /etc/init.d/kubelet restart"
	} else if ProviderIs("vsphere") {
		// vsphere node images vary: probe for sudo and systemd first.
		var sudoPresent bool
		sshResult, err := SSH("sudo --version", host, TestContext.Provider)
		if err != nil {
			return fmt.Errorf("Unable to ssh to host %s with error %v", host, err)
		}
		if !strings.Contains(sshResult.Stderr, "command not found") {
			sudoPresent = true
		}
		sshResult, err = SSH("systemctl --version", host, TestContext.Provider)
		if err != nil {
			// Fix: this ssh error was previously ignored, which could lead to
			// inspecting Stderr of a failed/empty result below.
			return fmt.Errorf("Unable to ssh to host %s with error %v", host, err)
		}
		if !strings.Contains(sshResult.Stderr, "command not found") {
			cmd = "systemctl restart kubelet"
		} else {
			cmd = "service kubelet restart"
		}
		if sudoPresent {
			cmd = fmt.Sprintf("sudo %s", cmd)
		}
	} else {
		cmd = "sudo systemctl restart kubelet"
	}
	Logf("Restarting kubelet via ssh on host %s with command %s", host, cmd)
	result, err := SSH(cmd, host, TestContext.Provider)
	if err != nil || result.Code != 0 {
		LogSSHResult(result)
		return fmt.Errorf("couldn't restart kubelet: %v", err)
	}
	return nil
}
// WaitForKubeletUp polls the kubelet's read-only /healthz endpoint on the
// given host over ssh until it answers "ok" or one minute has elapsed.
func WaitForKubeletUp(host string) error {
	cmd := "curl http://localhost:" + strconv.Itoa(ports.KubeletReadOnlyPort) + "/healthz"
	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
		result, err := SSH(cmd, host, TestContext.Provider)
		if err != nil || result.Code != 0 {
			LogSSHResult(result)
		}
		// result is a value, so inspecting it after an ssh failure is safe:
		// Stdout is then empty and the loop simply retries.
		if result.Stdout == "ok" {
			return nil
		}
	}
	return fmt.Errorf("waiting for kubelet timed out")
}
// RestartApiserver restarts the kube-apiserver. On gce/aws it kills the
// process over ssh and waits for the pod restart count to increase; GKE
// masters are not ssh-able, so a same-version master upgrade is used to
// teardown/recreate the master instead.
func RestartApiserver(cs clientset.Interface) error {
	// TODO: Make it work for all providers.
	if !ProviderIs("gce", "gke", "aws") {
		return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
	}
	if ProviderIs("gce", "aws") {
		// Record the restart count first so the wait below can detect the bump.
		initialRestartCount, err := getApiserverRestartCount(cs)
		if err != nil {
			return fmt.Errorf("failed to get apiserver's restart count: %v", err)
		}
		if err := sshRestartMaster(); err != nil {
			return fmt.Errorf("failed to restart apiserver: %v", err)
		}
		return waitForApiserverRestarted(cs, initialRestartCount)
	}
	// GKE doesn't allow ssh access, so use a same-version master
	// upgrade to teardown/recreate master.
	v, err := cs.Discovery().ServerVersion()
	if err != nil {
		return err
	}
	return masterUpgradeGKE(v.GitVersion[1:]) // strip leading 'v'
}
// sshRestartMaster restarts the kube-apiserver on the master over ssh. On gce
// the process is killed (the static pod machinery restarts it); on aws the
// init script is invoked.
func sshRestartMaster() error {
	if !ProviderIs("gce", "aws") {
		return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
	}
	command := "sudo /etc/init.d/kube-apiserver restart"
	if ProviderIs("gce") {
		command = "pidof kube-apiserver | xargs sudo kill"
	}
	Logf("Restarting master via ssh, running: %v", command)
	sshResult, sshErr := SSH(command, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
	if sshErr != nil || sshResult.Code != 0 {
		LogSSHResult(sshResult)
		return fmt.Errorf("couldn't restart apiserver: %v", sshErr)
	}
	return nil
}
// WaitForApiserverUp polls the apiserver's /healthz endpoint until it answers
// "ok" or one minute has elapsed.
func WaitForApiserverUp(c clientset.Interface) error {
	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
		body, err := c.CoreV1().RESTClient().Get().AbsPath("/healthz").Do().Raw()
		if err == nil && string(body) == "ok" {
			return nil
		}
	}
	return fmt.Errorf("waiting for apiserver timed out")
}
// waitForApiserverRestarted waits until apiserver's restart count increased
// past initialRestartCount, polling every 5s for up to a minute.
func waitForApiserverRestarted(c clientset.Interface, initialRestartCount int32) error {
	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
		restartCount, err := getApiserverRestartCount(c)
		if err != nil {
			// Transient failures (e.g. apiserver mid-restart) are expected; retry.
			Logf("Failed to get apiserver's restart count: %v", err)
			continue
		}
		if restartCount > initialRestartCount {
			Logf("Apiserver has restarted.")
			return nil
		}
		Logf("Waiting for apiserver restart count to increase")
	}
	return fmt.Errorf("timed out waiting for apiserver to be restarted")
}
// getApiserverRestartCount returns the restart count of the kube-apiserver
// container of the single apiserver pod in kube-system, or -1 with an error.
func getApiserverRestartCount(c clientset.Interface) (int32, error) {
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"component": "kube-apiserver"}))
	listOpts := metav1.ListOptions{LabelSelector: selector.String()}
	pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts)
	if err != nil {
		return -1, err
	}
	if len(pods.Items) != 1 {
		return -1, fmt.Errorf("unexpected number of apiserver pod: %d", len(pods.Items))
	}
	for _, status := range pods.Items[0].Status.ContainerStatuses {
		if status.Name == "kube-apiserver" {
			return status.RestartCount, nil
		}
	}
	return -1, fmt.Errorf("failed to find kube-apiserver container in pod")
}
// RestartControllerManager kills kube-controller-manager on the master over
// ssh; the static pod machinery restarts it. Supported on gce (gci master)
// and aws only.
func RestartControllerManager() error {
	// TODO: Make it work for all providers and distros.
	if !ProviderIs("gce", "aws") {
		return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
	}
	if ProviderIs("gce") && !MasterOSDistroIs("gci") {
		return fmt.Errorf("unsupported master OS distro: %s", TestContext.MasterOSDistro)
	}
	killCmd := "pidof kube-controller-manager | xargs sudo kill"
	Logf("Restarting controller-manager via ssh, running: %v", killCmd)
	sshResult, sshErr := SSH(killCmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
	if sshErr != nil || sshResult.Code != 0 {
		LogSSHResult(sshResult)
		return fmt.Errorf("couldn't restart controller-manager: %v", sshErr)
	}
	return nil
}
// WaitForControllerManagerUp polls the controller-manager's insecure /healthz
// endpoint on the master over ssh until it answers "ok" or a minute elapses.
func WaitForControllerManagerUp() error {
	cmd := "curl http://localhost:" + strconv.Itoa(ports.InsecureKubeControllerManagerPort) + "/healthz"
	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
		result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
		if err != nil || result.Code != 0 {
			LogSSHResult(result)
		}
		// On ssh failure result.Stdout is empty, so the loop just retries.
		if result.Stdout == "ok" {
			return nil
		}
	}
	return fmt.Errorf("waiting for controller-manager timed out")
}
// CheckForControllerManagerHealthy checks that the controller manager does not
// crash within "duration": it samples the process PID over ssh every 5s and
// fails if the PID changes after being observed once.
func CheckForControllerManagerHealthy(duration time.Duration) error {
	var PID string
	cmd := "pidof kube-controller-manager"
	for start := time.Now(); time.Since(start) < duration; time.Sleep(5 * time.Second) {
		result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
		if err != nil {
			// We don't necessarily know that it crashed, pipe could just be broken
			LogSSHResult(result)
			return fmt.Errorf("master unreachable after %v", time.Since(start))
		} else if result.Code != 0 {
			LogSSHResult(result)
			return fmt.Errorf("SSH result code not 0. actually: %v after %v", result.Code, time.Since(start))
		} else if result.Stdout != PID {
			if PID == "" {
				// First observation: remember the PID to compare against.
				PID = result.Stdout
			} else {
				//its dead
				return fmt.Errorf("controller manager crashed, old PID: %s, new PID: %s", PID, result.Stdout)
			}
		} else {
			Logf("kube-controller-manager still healthy after %v", time.Since(start))
		}
	}
	return nil
}
// NumberOfRegisteredNodes returns number of registered Nodes excluding Master Node.
func NumberOfRegisteredNodes(c clientset.Interface) (int, error) {
	nodeList, err := waitListSchedulableNodes(c)
	if err != nil {
		Logf("Failed to list nodes: %v", err)
		return 0, err
	}
	return len(nodeList.Items), nil
}
// NumberOfReadyNodes returns number of ready Nodes excluding Master Node.
func NumberOfReadyNodes(c clientset.Interface) (int, error) {
	nodeList, err := waitListSchedulableNodes(c)
	if err != nil {
		Logf("Failed to list nodes: %v", err)
		return 0, err
	}
	// Filter out not-ready nodes.
	isReady := func(node v1.Node) bool {
		return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
	}
	FilterNodes(nodeList, isReady)
	return len(nodeList.Items), nil
}
// CheckNodesReady waits up to timeout for cluster to has desired size and
// there is no not-ready nodes in it. By cluster size we mean number of Nodes
// excluding Master Node.
func CheckNodesReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		nodes, err := waitListSchedulableNodes(c)
		if err != nil {
			// Transient List failures are tolerated; keep polling until timeout.
			Logf("Failed to list nodes: %v", err)
			continue
		}
		numNodes := len(nodes.Items)
		// Filter out not-ready nodes.
		FilterNodes(nodes, func(node v1.Node) bool {
			return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
		})
		numReady := len(nodes.Items)
		// Require both the registered count and the ready count to match size.
		if numNodes == size && numReady == size {
			Logf("Cluster has reached the desired number of ready nodes %d", size)
			return nodes.Items, nil
		}
		Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady)
	}
	return nil, fmt.Errorf("timeout waiting %v for number of ready nodes to be %d", timeout, size)
}
// WaitForReadyNodes waits up to timeout for the cluster to reach the desired
// size (number of Nodes excluding the master) with no not-ready nodes in it.
func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) error {
	if _, err := CheckNodesReady(c, size, timeout); err != nil {
		return err
	}
	return nil
}
// GenerateMasterRegexp returns a regexp that matches the master node name and
// any HA replica of it (the prefix optionally followed by "-" plus three
// characters).
func GenerateMasterRegexp(prefix string) string {
	return fmt.Sprintf("%s(-...)?", prefix)
}
// WaitForMasters waits until the cluster has the desired number of ready
// masters in it, polling every 20s. Master replicas are identified by
// matching node names against GenerateMasterRegexp(masterPrefix).
func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
		if err != nil {
			// Transient List failures are tolerated; keep polling until timeout.
			Logf("Failed to list nodes: %v", err)
			continue
		}
		// Filter out nodes that are not master replicas
		FilterNodes(nodes, func(node v1.Node) bool {
			res, err := regexp.Match(GenerateMasterRegexp(masterPrefix), ([]byte)(node.Name))
			if err != nil {
				Logf("Failed to match regexp to node name: %v", err)
				return false
			}
			return res
		})
		numNodes := len(nodes.Items)
		// Filter out not-ready nodes.
		FilterNodes(nodes, func(node v1.Node) bool {
			return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
		})
		numReady := len(nodes.Items)
		if numNodes == size && numReady == size {
			Logf("Cluster has reached the desired number of masters %d", size)
			return nil
		}
		Logf("Waiting for the number of masters %d, current %d, not ready master nodes %d", size, numNodes, numNodes-numReady)
	}
	return fmt.Errorf("timeout waiting %v for the number of masters to be %d", timeout, size)
}
// GetHostExternalAddress gets the node for a pod and returns the first External
// address. Returns an error if the node the pod is on doesn't have an External
// address.
func GetHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddress string, err error) {
	node, err := client.CoreV1().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	for _, address := range node.Status.Addresses {
		if address.Type == v1.NodeExternalIP {
			if address.Address != "" {
				// Take the first non-empty ExternalIP entry.
				externalAddress = address.Address
				break
			}
		}
	}
	if externalAddress == "" {
		err = fmt.Errorf("No external address for pod %v on node %v",
			p.Name, p.Spec.NodeName)
	}
	// Named results: returns (externalAddress, err) as set above.
	return
}
// extractRT is an http.RoundTripper that merely records the headers of the
// request it receives, so client auth headers produced by a rest config's
// wrappers can be captured (see headersForConfig).
type extractRT struct {
	http.Header
}

// RoundTrip records the request headers and returns an empty response without
// performing any network I/O.
func (rt *extractRT) RoundTrip(req *http.Request) (*http.Response, error) {
	rt.Header = req.Header
	return &http.Response{}, nil
}
// headersForConfig extracts any http client logic necessary for the provided
// config by running a no-op request through the config's transport wrappers
// and recording the headers they add.
func headersForConfig(c *restclient.Config) (http.Header, error) {
	recorder := &extractRT{}
	wrapped, err := restclient.HTTPWrappersForConfig(c, recorder)
	if err != nil {
		return nil, err
	}
	if _, err = wrapped.RoundTrip(&http.Request{}); err != nil {
		return nil, err
	}
	return recorder.Header, nil
}
// OpenWebSocketForURL constructs a websocket connection to the provided URL, using the client
// config, with the specified protocols.
func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []string) (*websocket.Conn, error) {
	tlsConfig, err := restclient.TLSConfigFor(config)
	if err != nil {
		return nil, fmt.Errorf("failed to create tls config: %v", err)
	}
	// Note: mutates the caller's url in place (scheme and possibly host).
	if tlsConfig != nil {
		url.Scheme = "wss"
		if !strings.Contains(url.Host, ":") {
			url.Host += ":443"
		}
	} else {
		url.Scheme = "ws"
		if !strings.Contains(url.Host, ":") {
			url.Host += ":80"
		}
	}
	// Carry over auth headers the rest config would normally inject.
	headers, err := headersForConfig(config)
	if err != nil {
		return nil, fmt.Errorf("failed to load http headers: %v", err)
	}
	cfg, err := websocket.NewConfig(url.String(), "http://localhost")
	if err != nil {
		return nil, fmt.Errorf("failed to create websocket config: %v", err)
	}
	cfg.Header = headers
	cfg.TlsConfig = tlsConfig
	cfg.Protocol = protocols
	return websocket.DialConfig(cfg)
}
// Looks for the given string in the log of a specific pod container
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
return RunKubectlOrDie("logs", podName, container, fmt.Sprintf("--namespace=%v", ns))
})
}
// Looks for the given string in a file in a specific pod container
func LookForStringInFile(ns, podName, container, file, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
return RunKubectlOrDie("exec", podName, "-c", container, fmt.Sprintf("--namespace=%v", ns), "--", "cat", file)
})
}
// Looks for the given string in the output of a command executed in a specific pod container
func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
// use the first container
args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"}
args = append(args, command...)
return RunKubectlOrDie(args...)
})
}
// LookForString looks for the given string in the output of fn, repeatedly
// calling fn until the timeout is reached or the string is found. Returns the
// last output, and an error if the string was never found.
func LookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		result = fn()
		if strings.Contains(result, expectedString) {
			return result, nil
		}
	}
	return result, fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result)
}
// getSvcNodePort returns the node port for the given service:port.
func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) {
	svc, err := client.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
	if err != nil {
		return 0, err
	}
	for _, port := range svc.Spec.Ports {
		if port.Port == int32(svcPort) && port.NodePort != 0 {
			return int(port.NodePort), nil
		}
	}
	return 0, fmt.Errorf(
		"No node port found for service %v, port %v", name, svcPort)
}
// GetNodePortURL returns the url to a nodeport Service, built from the first
// non-empty ExternalIP of any schedulable node plus the service's node port.
func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (string, error) {
	nodePort, err := getSvcNodePort(client, ns, name, svcPort)
	if err != nil {
		return "", err
	}
	// This list of nodes must not include the master, which is marked
	// unschedulable, since the master doesn't run kube-proxy. Without
	// kube-proxy NodePorts won't work.
	var nodes *v1.NodeList
	pollErr := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
		nodes, err = client.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector().String()})
		if err != nil {
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		return true, nil
	})
	if pollErr != nil {
		// Return the poll error itself: the previous code returned the
		// closure's last List error, which could be nil or stale when the
		// poll failed, silently yielding ("", nil).
		return "", pollErr
	}
	if len(nodes.Items) == 0 {
		return "", fmt.Errorf("Unable to list nodes in cluster.")
	}
	for _, node := range nodes.Items {
		for _, address := range node.Status.Addresses {
			if address.Type == v1.NodeExternalIP && address.Address != "" {
				return fmt.Sprintf("http://%v:%v", address.Address, nodePort), nil
			}
		}
	}
	return "", fmt.Errorf("Failed to find external address for service %v", name)
}
// GetPodLogs returns the current logs of the given container of the pod.
// TODO(random-liu): Change this to be a member function of the framework.
func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
	const previous = false
	return getPodLogsInternal(c, namespace, podName, containerName, previous)
}
// getPreviousPodLogs returns the logs of the previous instance of the given
// container of the pod.
func getPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
	const previous = true
	return getPodLogsInternal(c, namespace, podName, containerName, previous)
}
// getPodLogsInternal fetches the (optionally previous) logs of a container
// via the pods/log subresource. Utility function for gomega Eventually.
func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool) (string, error) {
	logs, err := c.CoreV1().RESTClient().Get().
		Resource("pods").
		Namespace(namespace).
		Name(podName).SubResource("log").
		Param("container", containerName).
		Param("previous", strconv.FormatBool(previous)).
		Do().
		Raw()
	if err != nil {
		return "", err
	}
	// The apiserver may return a 200 whose body is an error string; treat
	// that as a failure rather than returning it as log content. (The
	// previous `err == nil &&` guard here was dead code: err is always nil
	// after the early return above.)
	if strings.Contains(string(logs), "Internal Error") {
		return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q.", string(logs))
	}
	return string(logs), nil
}
// GetGCECloud returns the GCE cloud provider from the test context, or an
// error if the configured provider is not GCE.
func GetGCECloud() (*gcecloud.GCECloud, error) {
	cloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
	if !ok {
		return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
	}
	return cloud, nil
}
// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
	switch TestContext.Provider {
	case "gce", "gke":
		return ensureGCELoadBalancerResourcesDeleted(ip, portRange)
	default:
		// Other providers: nothing to verify.
		return nil
	}
}
// ensureGCELoadBalancerResourcesDeleted polls the GCE forwarding-rule list
// until no rule matching the given ip/portRange remains (up to 5 minutes).
func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error {
	gceCloud, err := GetGCECloud()
	if err != nil {
		return err
	}
	project := TestContext.CloudConfig.ProjectID
	region, err := gcecloud.GetGCERegion(TestContext.CloudConfig.Zone)
	if err != nil {
		return fmt.Errorf("could not get region for zone %q: %v", TestContext.CloudConfig.Zone, err)
	}
	return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
		service := gceCloud.ComputeServices().GA
		list, err := service.ForwardingRules.List(project, region).Do()
		if err != nil {
			return false, err
		}
		for _, item := range list.Items {
			// A still-existing rule with the same port range and IP means
			// cleanup has not finished; keep polling.
			if item.PortRange == portRange && item.IPAddress == ip {
				Logf("found a load balancer: %v", item)
				return false, nil
			}
		}
		return true, nil
	})
}
// The following helper functions can block/unblock network from source
// host to destination host by manipulating iptable rules.
// This function assumes it can ssh to the source host.
//
// Caution:
// Recommend to input IP instead of hostnames. Using hostnames will cause iptables to
// do a DNS lookup to resolve the name to an IP address, which will
// slow down the test and cause it to fail if DNS is absent or broken.
//
// Suggested usage pattern:
// func foo() {
//	...
//	defer UnblockNetwork(from, to)
//	BlockNetwork(from, to)
//	...
// }
//
// BlockNetwork inserts an iptables REJECT rule on host "from" that drops all
// traffic destined for "to". Fails the test if the rule cannot be installed.
func BlockNetwork(from string, to string) {
	Logf("block network traffic from %s to %s", from, to)
	iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
	dropCmd := fmt.Sprintf("sudo iptables --insert %s", iptablesRule)
	if result, err := SSH(dropCmd, from, TestContext.Provider); result.Code != 0 || err != nil {
		LogSSHResult(result)
		Failf("Unexpected error: %v", err)
	}
}
// UnblockNetwork removes the iptables REJECT rule installed by BlockNetwork,
// retrying for up to 30 seconds. Fails the test if the rule cannot be removed.
func UnblockNetwork(from string, to string) {
	Logf("Unblock network traffic from %s to %s", from, to)
	iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
	undropCmd := fmt.Sprintf("sudo iptables --delete %s", iptablesRule)
	// Undrop command may fail if the rule has never been created.
	// In such case we just lose 30 seconds, but the cluster is healthy.
	// But if the rule had been created and removing it failed, the node is broken and
	// not coming back. Subsequent tests will run or fewer nodes (some of the tests
	// may fail). Manual intervention is required in such case (recreating the
	// cluster solves the problem too).
	err := wait.Poll(time.Millisecond*100, time.Second*30, func() (bool, error) {
		result, err := SSH(undropCmd, from, TestContext.Provider)
		if result.Code == 0 && err == nil {
			return true, nil
		}
		LogSSHResult(result)
		if err != nil {
			Logf("Unexpected error: %v", err)
		}
		// Never abort the poll early: keep retrying until success or timeout.
		return false, nil
	})
	if err != nil {
		Failf("Failed to remove the iptable REJECT rule. Manual intervention is "+
			"required on host %s: remove rule %s, if exists", from, iptablesRule)
	}
}
// isElementOf reports whether a pod with the given UID is present in pods.
func isElementOf(podUID types.UID, pods *v1.PodList) bool {
	for i := range pods.Items {
		if pods.Items[i].UID == podUID {
			return true
		}
	}
	return false
}
// proxyTimeout bounds node proxy requests (see NodeProxyRequest); the proxy
// can hang when a Node is not ready.
const proxyTimeout = 2 * time.Minute
// NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client.
func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.Result, error) {
	// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
	// This will leak a goroutine if proxy hangs. #22165
	var result restclient.Result
	finished := make(chan struct{})
	go func() {
		result = c.CoreV1().RESTClient().Get().
			Resource("nodes").
			SubResource("proxy").
			Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
			Suffix(endpoint).
			Do()
		finished <- struct{}{}
	}()
	select {
	case <-finished:
		return result, nil
	case <-proxyTimeout elapsing: return a zero Result; the goroutine (and its
	// pending send on finished) is leaked intentionally — see #22165.
	case <-time.After(proxyTimeout):
		return restclient.Result{}, nil
	}
}
// GetKubeletPods retrieves the list of pods on the kubelet.
func GetKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
	const resource = "pods"
	return getKubeletPods(c, node, resource)
}
// GetKubeletRunningPods retrieves the list of running pods on the kubelet. The pods
// includes necessary information (e.g., UID, name, namespace for
// pods/containers), but do not contain the full spec.
func GetKubeletRunningPods(c clientset.Interface, node string) (*v1.PodList, error) {
	const resource = "runningpods"
	return getKubeletPods(c, node, resource)
}
// getKubeletPods queries the given kubelet resource ("pods" or "runningpods")
// through the node proxy and decodes the response into a PodList.
func getKubeletPods(c clientset.Interface, node, resource string) (*v1.PodList, error) {
	podList := &v1.PodList{}
	proxyResult, err := NodeProxyRequest(c, node, resource)
	if err != nil {
		return &v1.PodList{}, err
	}
	if err = proxyResult.Into(podList); err != nil {
		return &v1.PodList{}, err
	}
	return podList, nil
}
// LaunchWebserverPod launches a pod serving http on port 8080 to act
// as the target for networking connectivity checks. The ip address
// of the created pod will be returned if the pod is launched
// successfully.
func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) {
	containerName := fmt.Sprintf("%s-container", podName)
	port := 8080
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  containerName,
					Image: imageutils.GetE2EImage(imageutils.Porter),
					// Porter serves the env var's value on the port encoded
					// in the variable name (SERVE_PORT_8080 -> ":8080").
					Env:   []v1.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}},
					Ports: []v1.ContainerPort{{ContainerPort: int32(port)}},
				},
			},
			NodeName:      nodeName,
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
	podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
	_, err := podClient.Create(pod)
	ExpectNoError(err)
	ExpectNoError(f.WaitForPodRunning(podName))
	// Re-fetch the pod to learn the IP assigned by the scheduler/runtime.
	createdPod, err := podClient.Get(podName, metav1.GetOptions{})
	ExpectNoError(err)
	ip = net.JoinHostPort(createdPod.Status.PodIP, strconv.Itoa(port))
	Logf("Target pod IP:port is %s", ip)
	return
}
// PingCommand is the name of the ping binary run inside a test pod by
// CheckConnectivityToHost.
type PingCommand string

const (
	// IPv4PingCommand pings over IPv4.
	IPv4PingCommand PingCommand = "ping"
	// IPv6PingCommand pings over IPv6.
	IPv6PingCommand PingCommand = "ping6"
)
// CheckConnectivityToHost launches a pod to test connectivity to the specified
// host. An error will be returned if the host is not reachable from the pod.
//
// An empty nodeName will use the schedule to choose where the pod is executed.
func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, pingCmd PingCommand, timeout int) error {
	contName := fmt.Sprintf("%s-container", podName)
	command := []string{
		string(pingCmd),
		"-c", "3", // send 3 pings
		"-W", "2", // wait at most 2 seconds for a reply
		"-w", strconv.Itoa(timeout),
		host,
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    contName,
					Image:   BusyBoxImage,
					Command: command,
				},
			},
			NodeName:      nodeName,
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
	podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
	_, err := podClient.Create(pod)
	if err != nil {
		return err
	}
	// The pod succeeds iff ping exits 0, i.e. the host was reachable.
	err = WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name)
	if err != nil {
		// Best-effort: dump the ping output to aid debugging before returning.
		logs, logErr := GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, contName)
		if logErr != nil {
			Logf("Warning: Failed to get logs from pod %q: %v", pod.Name, logErr)
		} else {
			Logf("pod %s/%s logs:\n%s", f.Namespace.Name, pod.Name, logs)
		}
	}
	return err
}
// CoreDump SSHs to the master and all nodes and dumps their logs into dir.
// It shells out to cluster/log-dump/log-dump.sh to accomplish this.
func CoreDump(dir string) {
if TestContext.DisableLogDump {
Logf("Skipping dumping logs from cluster")
return
}
var cmd *exec.Cmd
if TestContext.LogexporterGCSPath != "" {
Logf("Dumping logs from nodes to GCS directly at path: %s", TestContext.LogexporterGCSPath)
cmd = exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump", "log-dump.sh"), dir, TestContext.LogexporterGCSPath)
} else {
Logf("Dumping logs locally to: %s", dir)
cmd = exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump", "log-dump.sh"), dir)
}
cmd.Env = append(os.Environ(), fmt.Sprintf("LOG_DUMP_SYSTEMD_SERVICES=%s", parseSystemdServices(TestContext.SystemdServices)))
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
Logf("Error running cluster/log-dump/log-dump.sh: %v", err)
}
}
// parseSystemdServices converts services separator from comma to space.
func parseSystemdServices(services string) string {
return strings.TrimSpace(strings.Replace(services, ",", " ", -1))
}
func UpdatePodWithRetries(client clientset.Interface, ns, name string, update func(*v1.Pod)) (*v1.Pod, error) {
for i := 0; i < 3; i++ {
pod, err := client.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("Failed to get pod %q: %v", name, err)
}
update(pod)
pod, err = client.CoreV1().Pods(ns).Update(pod)
if err == nil {
return pod, nil
}
if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
return nil, fmt.Errorf("Failed to update pod %q: %v", name, err)
}
}
return nil, fmt.Errorf("Too many retries updating Pod %q", name)
}
func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) {
pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
if err != nil {
return []*v1.Pod{}, err
}
ignoreSelector := labels.SelectorFromSet(ignoreLabels)
filtered := []*v1.Pod{}
for _, p := range pods.Items {
if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(p.Labels)) {
continue
}
filtered = append(filtered, &p)
}
return filtered, nil
}
// RunCmd runs cmd using args and returns its stdout and stderr. It also outputs
// cmd's stdout and stderr to their respective OS streams.
func RunCmd(command string, args ...string) (string, string, error) {
return RunCmdEnv(nil, command, args...)
}
// RunCmdEnv runs cmd with the provided environment and args and
// returns its stdout and stderr. It also outputs cmd's stdout and
// stderr to their respective OS streams.
func RunCmdEnv(env []string, command string, args ...string) (string, string, error) {
Logf("Running %s %v", command, args)
var bout, berr bytes.Buffer
cmd := exec.Command(command, args...)
// We also output to the OS stdout/stderr to aid in debugging in case cmd
// hangs and never returns before the test gets killed.
//
// This creates some ugly output because gcloud doesn't always provide
// newlines.
cmd.Stdout = io.MultiWriter(os.Stdout, &bout)
cmd.Stderr = io.MultiWriter(os.Stderr, &berr)
cmd.Env = env
err := cmd.Run()
stdout, stderr := bout.String(), berr.String()
if err != nil {
return "", "", fmt.Errorf("error running %s %v; got error %v, stdout %q, stderr %q",
command, args, err, stdout, stderr)
}
return stdout, stderr, nil
}
// retryCmd runs cmd using args and retries it for up to SingleCallTimeout if
// it returns an error. It returns stdout and stderr.
func retryCmd(command string, args ...string) (string, string, error) {
var err error
stdout, stderr := "", ""
wait.Poll(Poll, SingleCallTimeout, func() (bool, error) {
stdout, stderr, err = RunCmd(command, args...)
if err != nil {
Logf("Got %v", err)
return false, nil
}
return true, nil
})
return stdout, stderr, err
}
// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods.
func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
for _, pod := range pods.Items {
if !masterNodes.Has(pod.Spec.NodeName) {
if pod.Spec.NodeName != "" {
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
Expect(scheduledCondition != nil).To(Equal(true))
Expect(scheduledCondition.Status).To(Equal(v1.ConditionTrue))
scheduledPods = append(scheduledPods, pod)
} else {
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
Expect(scheduledCondition != nil).To(Equal(true))
Expect(scheduledCondition.Status).To(Equal(v1.ConditionFalse))
if scheduledCondition.Reason == "Unschedulable" {
notScheduledPods = append(notScheduledPods, pod)
}
}
}
}
return
}
// WaitForStableCluster waits until all existing pods are scheduled and returns their amount.
func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
timeout := 10 * time.Minute
startTime := time.Now()
allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
ExpectNoError(err)
// API server returns also Pods that succeeded. We need to filter them out.
currentPods := make([]v1.Pod, 0, len(allPods.Items))
for _, pod := range allPods.Items {
if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
currentPods = append(currentPods, pod)
}
}
allPods.Items = currentPods
scheduledPods, currentlyNotScheduledPods := GetPodsScheduled(masterNodes, allPods)
for len(currentlyNotScheduledPods) != 0 {
time.Sleep(2 * time.Second)
allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
ExpectNoError(err)
scheduledPods, currentlyNotScheduledPods = GetPodsScheduled(masterNodes, allPods)
if startTime.Add(timeout).Before(time.Now()) {
Failf("Timed out after %v waiting for stable cluster.", timeout)
break
}
}
return len(scheduledPods)
}
// GetMasterAndWorkerNodesOrDie will return a list masters and schedulable worker nodes
func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList) {
nodes := &v1.NodeList{}
masters := sets.NewString()
all, _ := c.CoreV1().Nodes().List(metav1.ListOptions{})
for _, n := range all.Items {
if system.IsMasterNode(n.Name) {
masters.Insert(n.Name)
} else if isNodeSchedulable(&n) && isNodeUntainted(&n) {
nodes.Items = append(nodes.Items, n)
}
}
return masters, nodes
}
func ListNamespaceEvents(c clientset.Interface, ns string) error {
ls, err := c.CoreV1().Events(ns).List(metav1.ListOptions{})
if err != nil {
return err
}
for _, event := range ls.Items {
glog.Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message)
}
return nil
}
// E2ETestNodePreparer implements testutils.TestNodePreparer interface, which is used
// to create/modify Nodes before running a test.
type E2ETestNodePreparer struct {
client clientset.Interface
// Specifies how many nodes should be modified using the given strategy.
// Only one strategy can be applied to a single Node, so there needs to
// be at least <sum_of_keys> Nodes in the cluster.
countToStrategy []testutils.CountToStrategy
nodeToAppliedStrategy map[string]testutils.PrepareNodeStrategy
}
func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy) testutils.TestNodePreparer {
return &E2ETestNodePreparer{
client: client,
countToStrategy: countToStrategy,
nodeToAppliedStrategy: make(map[string]testutils.PrepareNodeStrategy),
}
}
func (p *E2ETestNodePreparer) PrepareNodes() error {
nodes := GetReadySchedulableNodesOrDie(p.client)
numTemplates := 0
for k := range p.countToStrategy {
numTemplates += k
}
if numTemplates > len(nodes.Items) {
return fmt.Errorf("Can't prepare Nodes. Got more templates than existing Nodes.")
}
index := 0
sum := 0
for _, v := range p.countToStrategy {
sum += v.Count
for ; index < sum; index++ {
if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
glog.Errorf("Aborting node preparation: %v", err)
return err
}
p.nodeToAppliedStrategy[nodes.Items[index].Name] = v.Strategy
}
}
return nil
}
func (p *E2ETestNodePreparer) CleanupNodes() error {
var encounteredError error
nodes := GetReadySchedulableNodesOrDie(p.client)
for i := range nodes.Items {
var err error
name := nodes.Items[i].Name
strategy, found := p.nodeToAppliedStrategy[name]
if found {
if err = testutils.DoCleanupNode(p.client, name, strategy); err != nil {
glog.Errorf("Skipping cleanup of Node: failed update of %v: %v", name, err)
encounteredError = err
}
}
}
return encounteredError
}
func GetClusterID(c clientset.Interface) (string, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{})
if err != nil || cm == nil {
return "", fmt.Errorf("error getting cluster ID: %v", err)
}
clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster]
providerID, providerIDExists := cm.Data[gcecloud.UIDProvider]
if !clusterIDExists {
return "", fmt.Errorf("cluster ID not set")
}
if providerIDExists {
return providerID, nil
}
return clusterID, nil
}
// CleanupGCEResources cleans up GCE Service Type=LoadBalancer resources with
// the given name. The name is usually the UUID of the Service prefixed with an
// alpha-numeric character ('a') to work around cloudprovider rules.
func CleanupGCEResources(c clientset.Interface, loadBalancerName, region, zone string) (retErr error) {
gceCloud, err := GetGCECloud()
if err != nil {
return err
}
if region == "" {
// Attempt to parse region from zone if no region is given.
region, err = gcecloud.GetGCERegion(zone)
if err != nil {
return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
}
}
if err := gceCloud.DeleteFirewall(gcecloud.MakeFirewallName(loadBalancerName)); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = err
}
if err := gceCloud.DeleteRegionForwardingRule(loadBalancerName, region); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, err)
}
if err := gceCloud.DeleteRegionAddress(loadBalancerName, region); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, err)
}
clusterID, err := GetClusterID(c)
if err != nil {
retErr = fmt.Errorf("%v\n%v", retErr, err)
return
}
hcNames := []string{gcecloud.MakeNodesHealthCheckName(clusterID)}
hc, getErr := gceCloud.GetHttpHealthCheck(loadBalancerName)
if getErr != nil && !IsGoogleAPIHTTPErrorCode(getErr, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, getErr)
return
}
if hc != nil {
hcNames = append(hcNames, hc.Name)
}
if err := gceCloud.DeleteExternalTargetPoolAndChecks(&v1.Service{}, loadBalancerName, region, clusterID, hcNames...); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, err)
}
return
}
// IsHTTPErrorCode returns true if the error is a google api
// error matching the corresponding HTTP error code.
func IsGoogleAPIHTTPErrorCode(err error, code int) bool {
apiErr, ok := err.(*googleapi.Error)
return ok && apiErr.Code == code
}
// getMaster populates the externalIP, internalIP and hostname fields of the master.
// If any of these is unavailable, it is set to "".
func getMaster(c clientset.Interface) Address {
master := Address{}
// Populate the internal IP.
eps, err := c.CoreV1().Endpoints(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
if err != nil {
Failf("Failed to get kubernetes endpoints: %v", err)
}
if len(eps.Subsets) != 1 || len(eps.Subsets[0].Addresses) != 1 {
Failf("There are more than 1 endpoints for kubernetes service: %+v", eps)
}
master.internalIP = eps.Subsets[0].Addresses[0].IP
// Populate the external IP/hostname.
url, err := url.Parse(TestContext.Host)
if err != nil {
Failf("Failed to parse hostname: %v", err)
}
if net.ParseIP(url.Host) != nil {
// TODO: Check that it is external IP (not having a reserved IP address as per RFC1918).
master.externalIP = url.Host
} else {
master.hostname = url.Host
}
return master
}
// GetMasterAddress returns the hostname/external IP/internal IP as appropriate for e2e tests on a particular provider
// which is the address of the interface used for communication with the kubelet.
func GetMasterAddress(c clientset.Interface) string {
master := getMaster(c)
switch TestContext.Provider {
case "gce", "gke":
return master.externalIP
case "aws":
return awsMasterIP
default:
Failf("This test is not supported for provider %s and should be disabled", TestContext.Provider)
}
return ""
}
// GetNodeExternalIP returns node external IP concatenated with port 22 for ssh
// e.g. 1.2.3.4:22
func GetNodeExternalIP(node *v1.Node) string {
Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP {
host = net.JoinHostPort(a.Address, sshPort)
break
}
}
if host == "" {
Failf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
return host
}
// SimpleGET executes a get on the given url, returns error if non-200 returned.
func SimpleGET(c *http.Client, url, host string) (string, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return "", err
}
req.Host = host
res, err := c.Do(req)
if err != nil {
return "", err
}
defer res.Body.Close()
rawBody, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
body := string(rawBody)
if res.StatusCode != http.StatusOK {
err = fmt.Errorf(
"GET returned http error %v", res.StatusCode)
}
return body, err
}
// PollURL polls till the url responds with a healthy http code. If
// expectUnreachable is true, it breaks on first non-healthy http code instead.
func PollURL(route, host string, timeout time.Duration, interval time.Duration, httpClient *http.Client, expectUnreachable bool) error {
var lastBody string
pollErr := wait.PollImmediate(interval, timeout, func() (bool, error) {
var err error
lastBody, err = SimpleGET(httpClient, route, host)
if err != nil {
Logf("host %v path %v: %v unreachable", host, route, err)
return expectUnreachable, nil
}
Logf("host %v path %v: reached", host, route)
return !expectUnreachable, nil
})
if pollErr != nil {
return fmt.Errorf("Failed to execute a successful GET within %v, Last response body for %v, host %v:\n%v\n\n%v\n",
timeout, route, host, lastBody, pollErr)
}
return nil
}
func DescribeIng(ns string) {
Logf("\nOutput of kubectl describe ing:\n")
desc, _ := RunKubectl(
"describe", "ing", fmt.Sprintf("--namespace=%v", ns))
Logf(desc)
}
// NewTestPod returns a pod that has the specified requests and limits
func (f *Framework) NewTestPod(name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: GetPauseImageName(f.ClientSet),
Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,
},
},
},
},
}
}
// create empty file at given path on the pod.
func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error {
_, err := RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
return err
}
// GetAzureCloud returns azure cloud provider
func GetAzureCloud() (*azure.Cloud, error) {
cloud, ok := TestContext.CloudConfig.Provider.(*azure.Cloud)
if !ok {
return nil, fmt.Errorf("failed to convert CloudConfig.Provider to Azure: %#v", TestContext.CloudConfig.Provider)
}
return cloud, nil
}
func PrintSummaries(summaries []TestDataSummary, testBaseName string) {
now := time.Now()
for i := range summaries {
Logf("Printing summary: %v", summaries[i].SummaryKind())
switch TestContext.OutputPrintType {
case "hr":
if TestContext.ReportDir == "" {
Logf(summaries[i].PrintHumanReadable())
} else {
// TODO: learn to extract test name and append it to the kind instead of timestamp.
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".txt")
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintHumanReadable()), 0644); err != nil {
Logf("Failed to write file %v with test performance data: %v", filePath, err)
}
}
case "json":
fallthrough
default:
if TestContext.OutputPrintType != "json" {
Logf("Unknown output type: %v. Printing JSON", TestContext.OutputPrintType)
}
if TestContext.ReportDir == "" {
Logf("%v JSON\n%v", summaries[i].SummaryKind(), summaries[i].PrintJSON())
Logf("Finished")
} else {
// TODO: learn to extract test name and append it to the kind instead of timestamp.
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".json")
Logf("Writing to %s", filePath)
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintJSON()), 0644); err != nil {
Logf("Failed to write file %v with test performance data: %v", filePath, err)
}
}
}
}
}
func DumpDebugInfo(c clientset.Interface, ns string) {
sl, _ := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
for _, s := range sl.Items {
desc, _ := RunKubectl("describe", "po", s.Name, fmt.Sprintf("--namespace=%v", ns))
Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc)
l, _ := RunKubectl("logs", s.Name, fmt.Sprintf("--namespace=%v", ns), "--tail=100")
Logf("\nLast 100 log lines of %v:\n%v", s.Name, l)
}
}
// DsFromManifest reads a .json/yaml file and returns the daemonset in it.
func DsFromManifest(url string) (*apps.DaemonSet, error) {
var controller apps.DaemonSet
Logf("Parsing ds from %v", url)
var response *http.Response
var err error
for i := 1; i <= 5; i++ {
response, err = http.Get(url)
if err == nil && response.StatusCode == 200 {
break
}
time.Sleep(time.Duration(i) * time.Second)
}
if err != nil {
return nil, fmt.Errorf("failed to get url: %v", err)
}
if response.StatusCode != 200 {
return nil, fmt.Errorf("invalid http response status: %v", response.StatusCode)
}
defer response.Body.Close()
data, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, fmt.Errorf("failed to read html response body: %v", err)
}
json, err := utilyaml.ToJSON(data)
if err != nil {
return nil, fmt.Errorf("failed to parse data to json: %v", err)
}
err = runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), json, &controller)
if err != nil {
return nil, fmt.Errorf("failed to decode DaemonSet spec: %v", err)
}
return &controller, nil
}
// waitForServerPreferredNamespacedResources waits until server preferred namespaced resources could be successfully discovered.
// TODO: Fix https://github.com/kubernetes/kubernetes/issues/55768 and remove the following retry.
func waitForServerPreferredNamespacedResources(d discovery.DiscoveryInterface, timeout time.Duration) ([]*metav1.APIResourceList, error) {
Logf("Waiting up to %v for server preferred namespaced resources to be successfully discovered", timeout)
var resources []*metav1.APIResourceList
if err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
var err error
resources, err = d.ServerPreferredNamespacedResources()
if err == nil || isDynamicDiscoveryError(err) {
return true, nil
}
if !discovery.IsGroupDiscoveryFailedError(err) {
return false, err
}
Logf("Error discoverying server preferred namespaced resources: %v, retrying in %v.", err, Poll)
return false, nil
}); err != nil {
return nil, err
}
return resources, nil
}
// WaitForPersistentVolumeClaimDeleted waits for a PersistentVolumeClaim to be removed from the system until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
_, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns)
return nil
}
Logf("Failed to get claim %q in namespace %q, retrying in %v. Error: %v", pvcName, ns, Poll, err)
}
}
return fmt.Errorf("PersistentVolumeClaim %s is not removed from the system within %v", pvcName, timeout)
}
func GetClusterZones(c clientset.Interface) (sets.String, error) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
}
// collect values of zone label from all nodes
zones := sets.NewString()
for _, node := range nodes.Items {
if zone, found := node.Labels[kubeletapis.LabelZoneFailureDomain]; found {
zones.Insert(zone)
}
}
return zones, nil
}
|
[
"\"KUBE_SSH_USER\"",
"\"USER\"",
"\"HOME\"",
"\"AWS_SSH_KEY\"",
"\"LOCAL_SSH_KEY\"",
"\"KUBE_SSH_KEY\""
] |
[] |
[
"KUBE_SSH_KEY",
"LOCAL_SSH_KEY",
"AWS_SSH_KEY",
"KUBE_SSH_USER",
"USER",
"HOME"
] |
[]
|
["KUBE_SSH_KEY", "LOCAL_SSH_KEY", "AWS_SSH_KEY", "KUBE_SSH_USER", "USER", "HOME"]
|
go
| 6 | 0 | |
userbot/__init__.py
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot initialization. """
import os
from sys import version_info
from logging import basicConfig, getLogger, INFO, DEBUG
from distutils.util import strtobool as sb
from pylast import LastFMNetwork, md5
from pySmartDL import SmartDL
from dotenv import load_dotenv
from requests import get
from telethon import TelegramClient
from telethon.sessions import StringSession
load_dotenv("config.env")
# Bot Logs setup:
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
if CONSOLE_LOGGER_VERBOSE:
basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=DEBUG,
)
else:
basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=INFO)
LOGS = getLogger(__name__)
if version_info[0] < 3 or version_info[1] < 8:
LOGS.info("You MUST have a python version of at least 3.8."
"Multiple features depend on this. Bot quitting.")
quit(1)
# Check if the config was edited by using the already used variable.
# Basically, its the 'virginity check' for the config file ;)
CONFIG_CHECK = os.environ.get(
"___________PLOX_______REMOVE_____THIS_____LINE__________", None)
if CONFIG_CHECK:
LOGS.info(
"Please remove the line mentioned in the first hashtag from the config.env file"
)
quit(1)
# Telegram App KEY and HASH
API_KEY = os.environ.get("API_KEY", None)
API_HASH = os.environ.get("API_HASH", None)
# Userbot Session String
STRING_SESSION = os.environ.get("STRING_SESSION", None)
# Logging channel/group ID configuration.
BOTLOG_CHATID = int(os.environ.get("BOTLOG_CHATID", None))
# Userbot logging feature switch.
BOTLOG = sb(os.environ.get("BOTLOG", "False"))
LOGSPAMMER = sb(os.environ.get("LOGSPAMMER", "False"))
# Default .alive name
ALIVE_NAME = os.environ.get("ALIVE_NAME", None)
# Bleep Blop, this is a bot ;)
PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN", "False"))
# Heroku Credentials for updater.
HEROKU_MEMEZ = sb(os.environ.get("HEROKU_MEMEZ", "False"))
HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME", None)
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY", None)
# Github Credentials for updater and Gitupload.
GIT_REPO_NAME = os.environ.get("GIT_REPO_NAME", None)
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN", None)
# Custom (forked) repo URL for updater.
UPSTREAM_REPO_URL = os.environ.get(
"UPSTREAM_REPO_URL",
"https://github.com/alfiananda84/ProjectBish.git")
# UPSTREAM_REPO_URL branch, the default is master
UPSTREAM_REPO_BRANCH = os.environ.get(
"UPSTREAM_REPO_BRANCH", "master")
# Console verbose logging
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
# SQL Database URI
DB_URI = os.environ.get("DATABASE_URL", None)
# OCR API key
OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY", None)
# remove.bg API key
REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY", None)
# Chrome Driver and Headless Google Chrome Binaries
CHROME_DRIVER = os.environ.get("CHROME_DRIVER", None)
GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN", None)
# OpenWeatherMap API Key
OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID", None)
WEATHER_DEFCITY = os.environ.get("WEATHER_DEFCITY", None)
# Quotes API Token
QUOTES_API_TOKEN = os.environ.get("QUOTES_API_TOKEN", None)
# Anti Spambot Config
ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT", "False"))
ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT", "False"))
# Youtube API key
YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", None)
# Time & Date - Country and Time Zone
COUNTRY = str(os.environ.get("COUNTRY", ""))
TZ_NUMBER = int(os.environ.get("TZ_NUMBER", 1))
# Clean Welcome
CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME", "True"))
# Last.fm Module
BIO_PREFIX = os.environ.get("BIO_PREFIX", None)
DEFAULT_BIO = os.environ.get("DEFAULT_BIO", None)
LASTFM_API = os.environ.get("LASTFM_API", None)
LASTFM_SECRET = os.environ.get("LASTFM_SECRET", None)
LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME", None)
LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD", None)
LASTFM_PASS = md5(LASTFM_PASSWORD_PLAIN)
if LASTFM_API and LASTFM_SECRET and LASTFM_USERNAME and LASTFM_PASS:
lastfm = LastFMNetwork(api_key=LASTFM_API,
api_secret=LASTFM_SECRET,
username=LASTFM_USERNAME,
password_hash=LASTFM_PASS)
else:
lastfm = None
# Google Drive Module
G_DRIVE_DATA = os.environ.get("G_DRIVE_DATA", None)
G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID", None)
G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET", None)
G_DRIVE_AUTH_TOKEN_DATA = os.environ.get("G_DRIVE_AUTH_TOKEN_DATA", None)
G_DRIVE_FOLDER_ID = os.environ.get("G_DRIVE_FOLDER_ID", None)
TEMP_DOWNLOAD_DIRECTORY = os.environ.get("TMP_DOWNLOAD_DIRECTORY",
"./downloads")
# Terminal Alias
TERM_ALIAS = os.environ.get("TERM_ALIAS", None)
# Genius Lyrics API
GENIUS = os.environ.get("GENIUS_ACCESS_TOKEN", None)
# IMG Stuff
IMG_LIMIT = os.environ.get("IMG_LIMIT", None)
# Deezloader
DEEZER_ARL_TOKEN = os.environ.get("DEEZER_ARL_TOKEN", None)
# JustWatch Country
WATCH_COUNTRY = os.environ.get("WATCH_COUNTRY", None)
# Setting Up CloudMail.ru and MEGA.nz extractor binaries,
# and giving them correct perms to work properly.
if not os.path.exists('bin'):
os.mkdir('bin')
binaries = {
"https://raw.githubusercontent.com/adekmaulana/megadown/master/megadown":
"bin/megadown",
"https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py":
"bin/cmrudl"
}
for binary, path in binaries.items():
downloader = SmartDL(binary, path, progress_bar=False)
downloader.start()
os.chmod(path, 0o755)
# 'bot' variable
if STRING_SESSION:
# pylint: disable=invalid-name
bot = TelegramClient(StringSession(STRING_SESSION), API_KEY, API_HASH)
else:
# pylint: disable=invalid-name
bot = TelegramClient("userbot", API_KEY, API_HASH)
async def check_botlog_chatid():
if not BOTLOG_CHATID and LOGSPAMMER:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the private error log storage to work."
)
quit(1)
elif not BOTLOG_CHATID and BOTLOG:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the userbot logging feature to work."
)
quit(1)
elif not BOTLOG or not LOGSPAMMER:
return
entity = await bot.get_entity(BOTLOG_CHATID)
if entity.default_banned_rights.send_messages:
LOGS.info(
"Your account doesn't have rights to send messages to BOTLOG_CHATID "
"group. Check if you typed the Chat ID correctly.")
quit(1)
with bot:
try:
bot.loop.run_until_complete(check_botlog_chatid())
except:
LOGS.info(
"BOTLOG_CHATID environment variable isn't a "
"valid entity. Check your environment variables/config.env file.")
quit(1)
# Global Variables
COUNT_MSG = 0
USERS = {}
COUNT_PM = {}
LASTMSG = {}
CMD_HELP = {}
ISAFK = False
AFKREASON = None
ZALG_LIST = {}
|
[] |
[] |
[
"WATCH_COUNTRY",
"GOOGLE_CHROME_BIN",
"G_DRIVE_CLIENT_SECRET",
"COUNTRY",
"LASTFM_API",
"ANTI_SPAMBOT_SHOUT",
"UPSTREAM_REPO_URL",
"OCR_SPACE_API_KEY",
"BIO_PREFIX",
"LOGSPAMMER",
"TZ_NUMBER",
"G_DRIVE_FOLDER_ID",
"LASTFM_PASSWORD",
"DATABASE_URL",
"HEROKU_APP_NAME",
"___________PLOX_______REMOVE_____THIS_____LINE__________",
"GIT_REPO_NAME",
"HEROKU_API_KEY",
"DEEZER_ARL_TOKEN",
"CHROME_DRIVER",
"YOUTUBE_API_KEY",
"HEROKU_MEMEZ",
"LASTFM_USERNAME",
"G_DRIVE_CLIENT_ID",
"IMG_LIMIT",
"API_KEY",
"PM_AUTO_BAN",
"DEFAULT_BIO",
"ANTI_SPAMBOT",
"OPEN_WEATHER_MAP_APPID",
"LASTFM_SECRET",
"G_DRIVE_AUTH_TOKEN_DATA",
"UPSTREAM_REPO_BRANCH",
"WEATHER_DEFCITY",
"STRING_SESSION",
"QUOTES_API_TOKEN",
"CONSOLE_LOGGER_VERBOSE",
"GITHUB_ACCESS_TOKEN",
"ALIVE_NAME",
"BOTLOG_CHATID",
"TMP_DOWNLOAD_DIRECTORY",
"CLEAN_WELCOME",
"GENIUS_ACCESS_TOKEN",
"G_DRIVE_DATA",
"REM_BG_API_KEY",
"BOTLOG",
"API_HASH",
"TERM_ALIAS"
] |
[]
|
["WATCH_COUNTRY", "GOOGLE_CHROME_BIN", "G_DRIVE_CLIENT_SECRET", "COUNTRY", "LASTFM_API", "ANTI_SPAMBOT_SHOUT", "UPSTREAM_REPO_URL", "OCR_SPACE_API_KEY", "BIO_PREFIX", "LOGSPAMMER", "TZ_NUMBER", "G_DRIVE_FOLDER_ID", "LASTFM_PASSWORD", "DATABASE_URL", "HEROKU_APP_NAME", "___________PLOX_______REMOVE_____THIS_____LINE__________", "GIT_REPO_NAME", "HEROKU_API_KEY", "DEEZER_ARL_TOKEN", "CHROME_DRIVER", "YOUTUBE_API_KEY", "HEROKU_MEMEZ", "LASTFM_USERNAME", "G_DRIVE_CLIENT_ID", "IMG_LIMIT", "API_KEY", "PM_AUTO_BAN", "DEFAULT_BIO", "ANTI_SPAMBOT", "OPEN_WEATHER_MAP_APPID", "LASTFM_SECRET", "G_DRIVE_AUTH_TOKEN_DATA", "UPSTREAM_REPO_BRANCH", "WEATHER_DEFCITY", "STRING_SESSION", "QUOTES_API_TOKEN", "CONSOLE_LOGGER_VERBOSE", "GITHUB_ACCESS_TOKEN", "ALIVE_NAME", "BOTLOG_CHATID", "TMP_DOWNLOAD_DIRECTORY", "CLEAN_WELCOME", "GENIUS_ACCESS_TOKEN", "G_DRIVE_DATA", "REM_BG_API_KEY", "BOTLOG", "API_HASH", "TERM_ALIAS"]
|
python
| 48 | 0 | |
src/main/java/org/spray/keyauth/util/HWID.java
|
package org.spray.keyauth.util;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
/**
*
* @author superblaubeere27
*
*/
public class HWID {
private final static char[] hexArray = "0123456789ABCDEF".toCharArray();
public static String getHWID() {
return bytesToHex(generateHWID());
}
public static byte[] generateHWID() {
try {
MessageDigest hash = MessageDigest.getInstance("MD5");
String s = System.getProperty("os.name") + System.getProperty("os.arch") + System.getProperty("os.version")
+ Runtime.getRuntime().availableProcessors() + System.getenv("PROCESSOR_IDENTIFIER")
+ System.getenv("PROCESSOR_ARCHITECTURE") + System.getenv("PROCESSOR_ARCHITEW6432")
+ System.getenv("NUMBER_OF_PROCESSORS");
return hash.digest(s.getBytes());
} catch (NoSuchAlgorithmException e) {
throw new Error("Algorithm wasn't found.", e);
}
}
public static byte[] hexStringToByteArray(String s) {
int len = s.length();
byte[] data = new byte[len / 2];
for (int i = 0; i < len; i += 2) {
data[i / 2] = (byte) ((Character.digit(s.charAt(i), 16) << 4) + Character.digit(s.charAt(i + 1), 16));
}
return data;
}
public static String bytesToHex(byte[] bytes) {
char[] hexChars = new char[bytes.length * 2];
for (int j = 0; j < bytes.length; j++) {
int v = bytes[j] & 0xFF;
hexChars[j * 2] = hexArray[v >>> 4];
hexChars[j * 2 + 1] = hexArray[v & 0x0F];
}
return new String(hexChars);
}
}
|
[
"\"PROCESSOR_IDENTIFIER\"",
"\"PROCESSOR_ARCHITECTURE\"",
"\"PROCESSOR_ARCHITEW6432\"",
"\"NUMBER_OF_PROCESSORS\""
] |
[] |
[
"PROCESSOR_ARCHITECTURE",
"PROCESSOR_ARCHITEW6432",
"NUMBER_OF_PROCESSORS",
"PROCESSOR_IDENTIFIER"
] |
[]
|
["PROCESSOR_ARCHITECTURE", "PROCESSOR_ARCHITEW6432", "NUMBER_OF_PROCESSORS", "PROCESSOR_IDENTIFIER"]
|
java
| 4 | 0 | |
sparkctl/cmd/create.go
|
/*
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"context"
"fmt"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"reflect"
"unicode/utf8"
"github.com/google/go-cloud/blob"
"github.com/spf13/cobra"
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/yaml"
clientset "k8s.io/client-go/kubernetes"
"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
crdclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
)
const bufferSize = 1024
var DeleteIfExists bool
var LogsEnabled bool
var RootPath string
var UploadToPath string
var UploadToEndpoint string
var UploadToRegion string
var Public bool
var S3ForcePathStyle bool
var Override bool
var From string
// createCmd implements "sparkctl create". It submits a new SparkApplication
// built either from a YAML specification file or, when --from is given, from
// an existing ScheduledSparkApplication.
var createCmd = &cobra.Command{
	Use:   "create <yaml file>",
	Short: "Create a SparkApplication object",
	Long:  `Create a SparkApplication from a given YAML file storing the application specification.`,
	Run: func(cmd *cobra.Command, args []string) {
		// Exactly one positional argument is required in both modes: the
		// new application's name (--from mode) or the YAML file path.
		if From != "" && len(args) != 1 {
			fmt.Fprintln(os.Stderr, "must specify the name of a ScheduledSparkApplication")
			return
		}
		if len(args) != 1 {
			fmt.Fprintln(os.Stderr, "must specify a YAML file of a SparkApplication")
			return
		}
		kubeClient, err := getKubeClient()
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to get Kubernetes client: %v\n", err)
			return
		}
		crdClient, err := getSparkApplicationClient()
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to get SparkApplication client: %v\n", err)
			return
		}
		// Errors below are printed rather than returned: cobra Run has no
		// error return, so this is the command's reporting boundary.
		if From != "" {
			if err := createFromScheduledSparkApplication(args[0], kubeClient, crdClient); err != nil {
				fmt.Fprintf(os.Stderr, "%v\n", err)
			}
		} else {
			if err := createFromYaml(args[0], kubeClient, crdClient); err != nil {
				fmt.Fprintf(os.Stderr, "%v\n", err)
			}
		}
	},
}
// init registers the command-line flags for "sparkctl create".
func init() {
	// -d: best-effort delete of an existing SparkApplication with the same name.
	createCmd.Flags().BoolVarP(&DeleteIfExists, "delete", "d", false,
		"delete the SparkApplication if already exists")
	// -l: stream the application logs after creation.
	createCmd.Flags().BoolVarP(&LogsEnabled, "logs", "l", false,
		"watch the SparkApplication logs")
	// -u: destination bucket URL for local dependency uploads.
	createCmd.Flags().StringVarP(&UploadToPath, "upload-to", "u", "",
		"the name of the bucket where local application dependencies are to be uploaded")
	// -p: note this flag binds RootPath, used as the key prefix in the bucket.
	createCmd.Flags().StringVarP(&RootPath, "upload-prefix", "p", "",
		"the prefix to use for the dependency uploads")
	createCmd.Flags().StringVarP(&UploadToRegion, "upload-to-region", "r", "",
		"the GCS or S3 storage region for the bucket")
	createCmd.Flags().StringVarP(&UploadToEndpoint, "upload-to-endpoint", "e",
		"https://storage.googleapis.com", "the GCS or S3 storage api endpoint url")
	createCmd.Flags().BoolVarP(&Public, "public", "c", false,
		"whether to make uploaded files publicly available")
	// No shorthand: long-form only flag.
	createCmd.Flags().BoolVar(&S3ForcePathStyle, "s3-force-path-style", false,
		"whether to force path style URLs for S3 objects")
	createCmd.Flags().BoolVarP(&Override, "override", "o", false,
		"whether to override remote files with the same names")
	createCmd.Flags().StringVarP(&From, "from", "f", "",
		"the name of ScheduledSparkApplication from which a forced SparkApplication run is created")
}
// createFromYaml reads a SparkApplication specification from yamlFile and
// submits it to the cluster, wrapping any failure with context about which
// step went wrong.
func createFromYaml(yamlFile string, kubeClient clientset.Interface, crdClient crdclientset.Interface) error {
	app, loadErr := loadFromYAML(yamlFile)
	if loadErr != nil {
		return fmt.Errorf("failed to read a SparkApplication from %s: %v", yamlFile, loadErr)
	}
	if createErr := createSparkApplication(app, kubeClient, crdClient); createErr != nil {
		return fmt.Errorf("failed to create SparkApplication %s: %v", app.Name, createErr)
	}
	return nil
}
// createFromScheduledSparkApplication creates a one-off SparkApplication run
// named `name` from the ScheduledSparkApplication selected by the --from
// flag. The new application is owned by the scheduled application and uses a
// deep copy of its template spec.
func createFromScheduledSparkApplication(name string, kubeClient clientset.Interface, crdClient crdclientset.Interface) error {
	// NOTE(review): the lookup deliberately uses the package-level From flag,
	// not the name parameter — name only becomes the new object's name.
	sapp, err := crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(Namespace).Get(context.TODO(), From, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to get ScheduledSparkApplication %s: %v", From, err)
	}
	app := &v1beta2.SparkApplication{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: Namespace,
			Name:      name,
			// Owner reference ties the run's lifecycle to the scheduled app,
			// so it is garbage-collected when the owner is deleted.
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion: v1beta2.SchemeGroupVersion.String(),
					Kind:       reflect.TypeOf(v1beta2.ScheduledSparkApplication{}).Name(),
					Name:       sapp.Name,
					UID:        sapp.UID,
				},
			},
		},
		// DeepCopy so later mutations (defaults, dependency rewriting) never
		// touch the scheduled application's template.
		Spec: *sapp.Spec.Template.DeepCopy(),
	}
	if err := createSparkApplication(app, kubeClient, crdClient); err != nil {
		return fmt.Errorf("failed to create SparkApplication %s: %v", app.Name, err)
	}
	return nil
}
// createSparkApplication submits app to the cluster. It optionally deletes a
// pre-existing application of the same name (--delete), fills in defaults,
// validates the spec, uploads local dependencies, attaches Hadoop
// configuration from HADOOP_CONF_DIR when set, and finally watches logs when
// --logs is given.
func createSparkApplication(app *v1beta2.SparkApplication, kubeClient clientset.Interface, crdClient crdclientset.Interface) error {
	if DeleteIfExists {
		// NOTE(review): the delete result is ignored — presumably best-effort
		// so creation proceeds when the application does not exist; confirm.
		deleteSparkApplication(app.Name, crdClient)
	}
	v1beta2.SetSparkApplicationDefaults(app)
	if err := validateSpec(app.Spec); err != nil {
		return err
	}
	if err := handleLocalDependencies(app); err != nil {
		return err
	}
	if hadoopConfDir := os.Getenv("HADOOP_CONF_DIR"); hadoopConfDir != "" {
		fmt.Println("creating a ConfigMap for Hadoop configuration files in HADOOP_CONF_DIR")
		if err := handleHadoopConfiguration(app, hadoopConfDir, kubeClient); err != nil {
			return err
		}
	}
	if _, err := crdClient.SparkoperatorV1beta2().SparkApplications(Namespace).Create(
		context.TODO(),
		app,
		metav1.CreateOptions{},
	); err != nil {
		return err
	}
	fmt.Printf("SparkApplication \"%s\" created\n", app.Name)
	if LogsEnabled {
		// Stream the application's logs (doLog's second argument presumably
		// enables follow mode — confirm against doLog's definition).
		doLog(app.Name, true, kubeClient, crdClient)
	}
	return nil
}
// loadFromYAML decodes a SparkApplication object from a YAML (or JSON) file
// on disk.
func loadFromYAML(yamlFile string) (*v1beta2.SparkApplication, error) {
	f, err := os.Open(yamlFile)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var app v1beta2.SparkApplication
	if err := yaml.NewYAMLOrJSONDecoder(f, bufferSize).Decode(&app); err != nil {
		return nil, err
	}
	return &app, nil
}
// validateSpec checks that a container image is specified either globally
// via spec.image or individually for both the driver and the executor.
func validateSpec(spec v1beta2.SparkApplicationSpec) error {
	if spec.Image != nil {
		return nil
	}
	if spec.Driver.Image != nil && spec.Executor.Image != nil {
		return nil
	}
	return fmt.Errorf("'spec.driver.image' and 'spec.executor.image' cannot be empty when 'spec.image' " +
		"is not set")
}
// handleLocalDependencies uploads every local file referenced by the
// application — the main application file plus the jars, files, and pyfiles
// dependency lists — to the location configured via --upload-to, and
// rewrites the corresponding spec fields to point at the uploaded copies.
// Fields with no local entries are left untouched.
//
// The three dependency lists previously used triplicated copy-paste logic;
// it is factored into uploadLocalDependencyList below (behavior unchanged).
func handleLocalDependencies(app *v1beta2.SparkApplication) error {
	if app.Spec.MainApplicationFile != nil {
		isMainAppFileLocal, err := isLocalFile(*app.Spec.MainApplicationFile)
		if err != nil {
			return err
		}
		if isMainAppFileLocal {
			uploadedMainFile, err := uploadLocalDependencies(app, []string{*app.Spec.MainApplicationFile})
			if err != nil {
				return fmt.Errorf("failed to upload local main application file: %v", err)
			}
			app.Spec.MainApplicationFile = &uploadedMainFile[0]
		}
	}
	jars, err := uploadLocalDependencyList(app, app.Spec.Deps.Jars, "jars")
	if err != nil {
		return err
	}
	app.Spec.Deps.Jars = jars
	files, err := uploadLocalDependencyList(app, app.Spec.Deps.Files, "files")
	if err != nil {
		return err
	}
	app.Spec.Deps.Files = files
	pyFiles, err := uploadLocalDependencyList(app, app.Spec.Deps.PyFiles, "pyfiles")
	if err != nil {
		return err
	}
	app.Spec.Deps.PyFiles = pyFiles
	return nil
}

// uploadLocalDependencyList filters the local entries out of deps and, when
// any exist, uploads them and returns the uploaded paths (replacing the whole
// list, exactly as the previous inline code did). kind is used only in error
// messages ("jars", "files", "pyfiles").
func uploadLocalDependencyList(app *v1beta2.SparkApplication, deps []string, kind string) ([]string, error) {
	localDeps, err := filterLocalFiles(deps)
	if err != nil {
		return nil, fmt.Errorf("failed to filter local %s: %v", kind, err)
	}
	if len(localDeps) == 0 {
		return deps, nil
	}
	uploaded, err := uploadLocalDependencies(app, localDeps)
	if err != nil {
		return nil, fmt.Errorf("failed to upload local %s: %v", kind, err)
	}
	return uploaded, nil
}
// filterLocalFiles returns the subset of files that refer to the local
// filesystem (no URL scheme, or the explicit "file" scheme), propagating any
// URL parse error.
func filterLocalFiles(files []string) ([]string, error) {
	var locals []string
	for _, candidate := range files {
		local, err := isLocalFile(candidate)
		if err != nil {
			return nil, err
		}
		if local {
			locals = append(locals, candidate)
		}
	}
	return locals, nil
}
// isLocalFile reports whether file refers to the local filesystem: either it
// has no URL scheme at all or it uses the explicit "file" scheme.
func isLocalFile(file string) (bool, error) {
	parsed, err := url.Parse(file)
	if err != nil {
		return false, err
	}
	scheme := parsed.Scheme
	return scheme == "" || scheme == "file", nil
}
// blobHandler abstracts the provider-specific operation of making an
// uploaded object publicly readable.
type blobHandler interface {
	// TODO: With go-cloud supporting setting ACLs, remove implementations of interface
	setPublicACL(ctx context.Context, bucket string, filePath string) error
}

// uploadHandler bundles everything needed to upload local files into a
// single blob-storage bucket.
type uploadHandler struct {
	blob             blobHandler     // provider-specific ACL operations
	blobUploadBucket string          // destination bucket name
	blobEndpoint     string          // storage API endpoint URL (used to build public URLs)
	hdpScheme        string          // hadoop-connector scheme used in returned URIs
	ctx              context.Context // context used for all bucket operations
	b                *blob.Bucket    // open go-cloud bucket handle
}
// uploadToBucket uploads the file at localFilePath to uploadPath inside the
// handler's bucket, unless an object with the same name already exists and
// --override is not set. When --public is set the object is made publicly
// readable and its full endpoint URL is returned; otherwise a
// "<hdpScheme>://<bucket>/<path>" URI is returned.
//
// Bug fix: the write-failure branch previously formatted the outer err
// variable — guaranteed nil at that point — instead of writeErr, producing
// "failed to write to bucket: %!s(<nil>)". It now reports writeErr.
func (uh uploadHandler) uploadToBucket(uploadPath, localFilePath string) (string, error) {
	fileName := filepath.Base(localFilePath)
	uploadFilePath := filepath.Join(uploadPath, fileName)
	// Check if exists by trying to fetch metadata (zero-length range read).
	reader, err := uh.b.NewRangeReader(uh.ctx, uploadFilePath, 0, 0)
	if err == nil {
		reader.Close()
	}
	if (blob.IsNotExist(err)) || (err == nil && Override) {
		fmt.Printf("uploading local file: %s\n", fileName)
		// Prepare the file for upload.
		data, err := ioutil.ReadFile(localFilePath)
		if err != nil {
			return "", fmt.Errorf("failed to read file: %s", err)
		}
		// Open Bucket
		w, err := uh.b.NewWriter(uh.ctx, uploadFilePath, nil)
		if err != nil {
			return "", fmt.Errorf("failed to obtain bucket writer: %s", err)
		}
		// Write data to bucket and close bucket writer; Close must happen
		// even when the write failed, and a Close failure takes precedence.
		_, writeErr := w.Write(data)
		if err := w.Close(); err != nil {
			return "", fmt.Errorf("failed to close bucket writer: %s", err)
		}
		// Check if write has been successful
		if writeErr != nil {
			return "", fmt.Errorf("failed to write to bucket: %s", writeErr)
		}
		// Set public ACL if needed
		if Public {
			err := uh.blob.setPublicACL(uh.ctx, uh.blobUploadBucket, uploadFilePath)
			if err != nil {
				return "", err
			}
			endpointURL, err := url.Parse(uh.blobEndpoint)
			if err != nil {
				return "", err
			}
			// Public needs full bucket endpoint
			return fmt.Sprintf("%s://%s/%s/%s",
				endpointURL.Scheme,
				endpointURL.Host,
				uh.blobUploadBucket,
				uploadFilePath), nil
		}
	} else if err == nil {
		fmt.Printf("not uploading file %s as it already exists remotely\n", fileName)
	} else {
		return "", err
	}
	// Return path to file with proper hadoop-connector scheme
	return fmt.Sprintf("%s://%s/%s", uh.hdpScheme, uh.blobUploadBucket, uploadFilePath), nil
}
// uploadLocalDependencies uploads the given local files to the bucket named
// by --upload-to, under <upload-prefix>/<namespace>/<app name>/, and returns
// the remote URIs to substitute into the application spec. Only the gs://
// and s3:// schemes are supported.
func uploadLocalDependencies(app *v1beta2.SparkApplication, files []string) ([]string, error) {
	if UploadToPath == "" {
		return nil, fmt.Errorf(
			"unable to upload local dependencies: no upload location specified via --upload-to")
	}
	uploadLocationUrl, err := url.Parse(UploadToPath)
	if err != nil {
		return nil, err
	}
	uploadBucket := uploadLocationUrl.Host
	var uh *uploadHandler
	ctx := context.Background()
	// Pick the provider-specific bucket constructor from the URL scheme.
	switch uploadLocationUrl.Scheme {
	case "gs":
		uh, err = newGCSBlob(ctx, uploadBucket, UploadToEndpoint, UploadToRegion)
	case "s3":
		uh, err = newS3Blob(ctx, uploadBucket, UploadToEndpoint, UploadToRegion, S3ForcePathStyle)
	default:
		return nil, fmt.Errorf("unsupported upload location URL scheme: %s", uploadLocationUrl.Scheme)
	}
	// Check if bucket has been successfully setup
	if err != nil {
		return nil, err
	}
	var uploadedFilePaths []string
	// Namespace/name in the key prefix keeps uploads from different
	// applications separate within the same bucket.
	uploadPath := filepath.Join(RootPath, app.Namespace, app.Name)
	for _, localFilePath := range files {
		uploadFilePath, err := uh.uploadToBucket(uploadPath, localFilePath)
		if err != nil {
			return nil, err
		}
		uploadedFilePaths = append(uploadedFilePaths, uploadFilePath)
	}
	return uploadedFilePaths, nil
}
// handleHadoopConfiguration packages the Hadoop configuration files found in
// hadoopConfDir into a ConfigMap (replacing any existing one with the same
// name) and records the ConfigMap's name on the application spec.
func handleHadoopConfiguration(
	app *v1beta2.SparkApplication,
	hadoopConfDir string,
	kubeClientset clientset.Interface) error {
	configMap, err := buildHadoopConfigMap(app.Name, hadoopConfDir)
	if err != nil {
		return fmt.Errorf("failed to create a ConfigMap for Hadoop configuration files in %s: %v",
			hadoopConfDir, err)
	}
	// Delete-then-create to replace a stale ConfigMap; NotFound is expected
	// on first creation and deliberately not treated as an error.
	err = kubeClientset.CoreV1().ConfigMaps(Namespace).Delete(context.TODO(), configMap.Name, metav1.DeleteOptions{})
	if err != nil && !errors.IsNotFound(err) {
		return fmt.Errorf("failed to delete existing ConfigMap %s: %v", configMap.Name, err)
	}
	if configMap, err = kubeClientset.CoreV1().ConfigMaps(Namespace).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
		return fmt.Errorf("failed to create ConfigMap %s: %v", configMap.Name, err)
	}
	app.Spec.HadoopConfigMap = &configMap.Name
	return nil
}
// buildHadoopConfigMap assembles a ConfigMap named "<appName>-hadoop-config"
// from the regular files directly inside hadoopConfDir. Valid UTF-8 files go
// into Data, everything else into BinaryData. It fails when the path is not
// a directory or contains no entries. Subdirectories are skipped, not
// recursed into.
func buildHadoopConfigMap(appName string, hadoopConfDir string) (*apiv1.ConfigMap, error) {
	info, err := os.Stat(hadoopConfDir)
	if err != nil {
		return nil, err
	}
	if !info.IsDir() {
		return nil, fmt.Errorf("%s is not a directory", hadoopConfDir)
	}
	files, err := ioutil.ReadDir(hadoopConfDir)
	if err != nil {
		return nil, err
	}
	if len(files) == 0 {
		return nil, fmt.Errorf("no Hadoop configuration file found in %s", hadoopConfDir)
	}
	hadoopStringConfigFiles := make(map[string]string)
	hadoopBinaryConfigFiles := make(map[string][]byte)
	for _, file := range files {
		if file.IsDir() {
			continue
		}
		content, err := ioutil.ReadFile(filepath.Join(hadoopConfDir, file.Name()))
		if err != nil {
			return nil, err
		}
		// ConfigMap Data must be valid UTF-8; route other content to
		// BinaryData instead.
		if utf8.Valid(content) {
			hadoopStringConfigFiles[file.Name()] = string(content)
		} else {
			hadoopBinaryConfigFiles[file.Name()] = content
		}
	}
	configMap := &apiv1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      appName + "-hadoop-config",
			Namespace: Namespace,
		},
		Data:       hadoopStringConfigFiles,
		BinaryData: hadoopBinaryConfigFiles,
	}
	return configMap, nil
}
|
[
"\"HADOOP_CONF_DIR\""
] |
[] |
[
"HADOOP_CONF_DIR"
] |
[]
|
["HADOOP_CONF_DIR"]
|
go
| 1 | 0 | |
http/server.go
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// HTTP server. See RFC 7230 through 7235.
package http
import (
"bufio"
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"log"
"math/rand"
"net"
"os"
"path"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"net/textproto"
"net/url"
urlpkg "net/url"
"github.com/SandwichDev/net/http/httpguts"
)
// Errors used by the HTTP server.
var (
// ErrBodyNotAllowed is returned by ResponseWriter.Write calls
// when the HTTP method or response code does not permit a
// body.
ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")
// ErrHijacked is returned by ResponseWriter.Write calls when
// the underlying connection has been hijacked using the
// Hijacker interface. A zero-byte write on a hijacked
// connection will return ErrHijacked without any other side
// effects.
ErrHijacked = errors.New("http: connection has been hijacked")
// ErrContentLength is returned by ResponseWriter.Write calls
// when a Handler set a Content-Length response header with a
// declared size and then attempted to write more bytes than
// declared.
ErrContentLength = errors.New("http: wrote more than the declared Content-Length")
// Deprecated: ErrWriteAfterFlush is no longer returned by
// anything in the net/http package. Callers should not
// compare errors against this variable.
ErrWriteAfterFlush = errors.New("unused")
)
// A Handler responds to an HTTP request.
//
// ServeHTTP should write reply headers and data to the ResponseWriter
// and then return. Returning signals that the request is finished; it
// is not valid to use the ResponseWriter or read from the
// Request.Body after or concurrently with the completion of the
// ServeHTTP call.
//
// Depending on the HTTP client software, HTTP protocol version, and
// any intermediaries between the client and the Go server, it may not
// be possible to read from the Request.Body after writing to the
// ResponseWriter. Cautious handlers should read the Request.Body
// first, and then reply.
//
// Except for reading the body, handlers should not modify the
// provided Request.
//
// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
// that the effect of the panic was isolated to the active request.
// It recovers the panic, logs a stack trace to the server error log,
// and either closes the network connection or sends an HTTP/2
// RST_STREAM, depending on the HTTP protocol. To abort a handler so
// the client sees an interrupted response but the server doesn't log
// an error, panic with the value ErrAbortHandler.
type Handler interface {
ServeHTTP(ResponseWriter, *Request)
}
// A ResponseWriter interface is used by an HTTP handler to
// construct an HTTP response.
//
// A ResponseWriter may not be used after the Handler.ServeHTTP method
// has returned.
type ResponseWriter interface {
// Header returns the header map that will be sent by
// WriteHeader. The Header map also is the mechanism with which
// Handlers can set HTTP trailers.
//
// Changing the header map after a call to WriteHeader (or
// Write) has no effect unless the modified headers are
// trailers.
//
// There are two ways to set Trailers. The preferred way is to
// predeclare in the headers which trailers you will later
// send by setting the "Trailer" header to the names of the
// trailer keys which will come later. In this case, those
// keys of the Header map are treated as if they were
// trailers. See the example. The second way, for trailer
// keys not known to the Handler until after the first Write,
// is to prefix the Header map keys with the TrailerPrefix
// constant value. See TrailerPrefix.
//
// To suppress automatic response headers (such as "Date"), set
// their value to nil.
Header() Header
// Write writes the data to the connection as part of an HTTP reply.
//
// If WriteHeader has not yet been called, Write calls
// WriteHeader(http.StatusOK) before writing the data. If the Header
// does not contain a Content-Type line, Write adds a Content-Type set
// to the result of passing the initial 512 bytes of written data to
// DetectContentType. Additionally, if the total size of all written
// data is under a few KB and there are no Flush calls, the
// Content-Length header is added automatically.
//
// Depending on the HTTP protocol version and the client, calling
// Write or WriteHeader may prevent future reads on the
// Request.Body. For HTTP/1.x requests, handlers should read any
// needed request body data before writing the response. Once the
// headers have been flushed (due to either an explicit Flusher.Flush
// call or writing enough data to trigger a flush), the request body
// may be unavailable. For HTTP/2 requests, the Go HTTP server permits
// handlers to continue to read the request body while concurrently
// writing the response. However, such behavior may not be supported
// by all HTTP/2 clients. Handlers should read before writing if
// possible to maximize compatibility.
Write([]byte) (int, error)
// WriteHeader sends an HTTP response header with the provided
// status code.
//
// If WriteHeader is not called explicitly, the first call to Write
// will trigger an implicit WriteHeader(http.StatusOK).
// Thus explicit calls to WriteHeader are mainly used to
// send error codes.
//
// The provided code must be a valid HTTP 1xx-5xx status code.
// Only one header may be written. Go does not currently
// support sending user-defined 1xx informational headers,
// with the exception of 100-continue response header that the
// Server sends automatically when the Request.Body is read.
WriteHeader(statusCode int)
}
// The Flusher interface is implemented by ResponseWriters that allow
// an HTTP handler to flush buffered data to the client.
//
// The default HTTP/1.x and HTTP/2 ResponseWriter implementations
// support Flusher, but ResponseWriter wrappers may not. Handlers
// should always test for this ability at runtime.
//
// Note that even for ResponseWriters that support Flush,
// if the client is connected through an HTTP proxy,
// the buffered data may not reach the client until the response
// completes.
type Flusher interface {
// Flush sends any buffered data to the client.
Flush()
}
// The Hijacker interface is implemented by ResponseWriters that allow
// an HTTP handler to take over the connection.
//
// The default ResponseWriter for HTTP/1.x connections supports
// Hijacker, but HTTP/2 connections intentionally do not.
// ResponseWriter wrappers may also not support Hijacker. Handlers
// should always test for this ability at runtime.
type Hijacker interface {
// Hijack lets the caller take over the connection.
// After a call to Hijack the HTTP server library
// will not do anything else with the connection.
//
// It becomes the caller's responsibility to manage
// and close the connection.
//
// The returned net.Conn may have read or write deadlines
// already set, depending on the configuration of the
// Server. It is the caller's responsibility to set
// or clear those deadlines as needed.
//
// The returned bufio.Reader may contain unprocessed buffered
// data from the client.
//
// After a call to Hijack, the original Request.Body must not
// be used. The original Request's Context remains valid and
// is not canceled until the Request's ServeHTTP method
// returns.
Hijack() (net.Conn, *bufio.ReadWriter, error)
}
// The CloseNotifier interface is implemented by ResponseWriters which
// allow detecting when the underlying connection has gone away.
//
// This mechanism can be used to cancel long operations on the server
// if the client has disconnected before the response is ready.
//
// Deprecated: the CloseNotifier interface predates Go's context package.
// New code should use Request.Context instead.
type CloseNotifier interface {
// CloseNotify returns a channel that receives at most a
// single value (true) when the client connection has gone
// away.
//
// CloseNotify may wait to notify until Request.Body has been
// fully read.
//
// After the Handler has returned, there is no guarantee
// that the channel receives a value.
//
// If the protocol is HTTP/1.1 and CloseNotify is called while
// processing an idempotent request (such a GET) while
// HTTP/1.1 pipelining is in use, the arrival of a subsequent
// pipelined request may cause a value to be sent on the
// returned channel. In practice HTTP/1.1 pipelining is not
// enabled in browsers and not seen often in the wild. If this
// is a problem, use HTTP/2 or only use CloseNotify on methods
// such as POST.
CloseNotify() <-chan bool
}
var (
// ServerContextKey is a context key. It can be used in HTTP
// handlers with Context.Value to access the server that
// started the handler. The associated value will be of
// type *Server.
ServerContextKey = &contextKey{"http-server"}
// LocalAddrContextKey is a context key. It can be used in
// HTTP handlers with Context.Value to access the local
// address the connection arrived on.
// The associated value will be of type net.Addr.
LocalAddrContextKey = &contextKey{"local-addr"}
)
// A conn represents the server side of an HTTP connection.
type conn struct {
// server is the server on which the connection arrived.
// Immutable; never nil.
server *Server
// cancelCtx cancels the connection-level context.
cancelCtx context.CancelFunc
// rwc is the underlying network connection.
// This is never wrapped by other types and is the value given out
// to CloseNotifier callers. It is usually of type *net.TCPConn or
// *tls.Conn.
rwc net.Conn
// remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously
// inside the Listener's Accept goroutine, as some implementations block.
// It is populated immediately inside the (*conn).serve goroutine.
// This is the value of a Handler's (*Request).RemoteAddr.
remoteAddr string
// tlsState is the TLS connection state when using TLS.
// nil means not TLS.
tlsState *tls.ConnectionState
// werr is set to the first write error to rwc.
// It is set via checkConnErrorWriter{w}, where bufw writes.
werr error
// r is bufr's read source. It's a wrapper around rwc that provides
// io.LimitedReader-style limiting (while reading request headers)
// and functionality to support CloseNotifier. See *connReader docs.
r *connReader
// bufr reads from r.
bufr *bufio.Reader
// bufw writes to checkConnErrorWriter{c}, which populates werr on error.
bufw *bufio.Writer
// lastMethod is the method of the most recent request
// on this connection, if any.
lastMethod string
curReq atomic.Value // of *response (which has a Request in it)
curState struct{ atomic uint64 } // packed (unixtime<<8|uint8(ConnState))
// mu guards hijackedv
mu sync.Mutex
// hijackedv is whether this connection has been hijacked
// by a Handler with the Hijacker interface.
// It is guarded by mu.
hijackedv bool
}
// hijacked reports whether this connection has been taken over by a Handler
// via the Hijacker interface. Guarded by c.mu.
func (c *conn) hijacked() bool {
	c.mu.Lock()
	v := c.hijackedv
	c.mu.Unlock()
	return v
}
// c.mu must be held.
// hijackLocked transfers ownership of the underlying net.Conn to the caller.
// It aborts any in-flight background read, clears the connection deadlines,
// and returns a ReadWriter whose read side may still contain buffered,
// unprocessed client data. A connection can only be hijacked once.
func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
	if c.hijackedv {
		return nil, nil, ErrHijacked
	}
	c.r.abortPendingRead()
	c.hijackedv = true
	rwc = c.rwc
	// Deadlines become the caller's responsibility from here on.
	rwc.SetDeadline(time.Time{})
	buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc))
	if c.r.hasByte {
		// Fold the single byte held by connReader back into bufr so the
		// caller sees all buffered input.
		if _, err := c.bufr.Peek(c.bufr.Buffered() + 1); err != nil {
			return nil, nil, fmt.Errorf("unexpected Peek failure reading buffered byte: %v", err)
		}
	}
	c.setState(rwc, StateHijacked, runHooks)
	return
}
// This should be >= 512 bytes for DetectContentType,
// but otherwise it's somewhat arbitrary.
const bufferBeforeChunkingSize = 2048
// chunkWriter writes to a response's conn buffer, and is the writer
// wrapped by the response.bufw buffered writer.
//
// chunkWriter also is responsible for finalizing the Header, including
// conditionally setting the Content-Type and setting a Content-Length
// in cases where the handler's final output is smaller than the buffer
// size. It also conditionally adds chunk headers, when in chunking mode.
//
// See the comment above (*response).Write for the entire write flow.
type chunkWriter struct {
res *response
// header is either nil or a deep clone of res.handlerHeader
// at the time of res.writeHeader, if res.writeHeader is
// called and extra buffering is being done to calculate
// Content-Type and/or Content-Length.
header Header
// wroteHeader tells whether the header's been written to "the
// wire" (or rather: w.conn.buf). this is unlike
// (*response).wroteHeader, which tells only whether it was
// logically written.
wroteHeader bool
// set by the writeHeader method:
chunking bool // using chunked transfer encoding for reply body
}
var (
crlf = []byte("\r\n")
colonSpace = []byte(": ")
)
// Write writes p as part of the response body, lazily finalizing and writing
// the response header first and adding chunked-transfer framing when
// cw.chunking is set. Writes for HEAD requests are counted but discarded.
// Any write error closes the underlying connection.
func (cw *chunkWriter) Write(p []byte) (n int, err error) {
	if !cw.wroteHeader {
		// p is passed so the header code can inspect the initial body bytes
		// (presumably for Content-Type sniffing — see writeHeader).
		cw.writeHeader(p)
	}
	if cw.res.req.Method == "HEAD" {
		// Eat writes.
		return len(p), nil
	}
	if cw.chunking {
		// Chunk header: the chunk length in hex followed by CRLF.
		_, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p))
		if err != nil {
			cw.res.conn.rwc.Close()
			return
		}
	}
	n, err = cw.res.conn.bufw.Write(p)
	if cw.chunking && err == nil {
		// Chunk trailer: CRLF terminating the chunk data.
		_, err = cw.res.conn.bufw.Write(crlf)
	}
	if err != nil {
		cw.res.conn.rwc.Close()
	}
	return
}
// flush writes the response header if it has not been written yet and then
// flushes all buffered output to the underlying connection.
func (cw *chunkWriter) flush() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	cw.res.conn.bufw.Flush()
}
// close finalizes the response body: it writes the header if still pending
// and, in chunked mode, emits the terminating zero-length chunk, any
// trailers the handler declared, and the final blank line.
func (cw *chunkWriter) close() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	if cw.chunking {
		bw := cw.res.conn.bufw // conn's bufio writer
		// zero chunk to mark EOF
		bw.WriteString("0\r\n")
		if trailers := cw.res.finalTrailers(); trailers != nil {
			trailers.Write(bw) // the writer handles noting errors
		}
		// final blank line after the trailers (whether
		// present or not)
		bw.WriteString("\r\n")
	}
}
// A response represents the server side of an HTTP response.
type response struct {
conn *conn
req *Request // request for this response
reqBody io.ReadCloser
cancelCtx context.CancelFunc // when ServeHTTP exits
wroteHeader bool // reply header has been (logically) written
wroteContinue bool // 100 Continue response was written
wants10KeepAlive bool // HTTP/1.0 w/ Connection "keep-alive"
wantsClose bool // HTTP request has Connection "close"
// canWriteContinue is a boolean value accessed as an atomic int32
// that says whether or not a 100 Continue header can be written
// to the connection.
// writeContinueMu must be held while writing the header.
// These two fields together synchronize the body reader
// (the expectContinueReader, which wants to write 100 Continue)
// against the main writer.
canWriteContinue atomicBool
writeContinueMu sync.Mutex
w *bufio.Writer // buffers output in chunks to chunkWriter
cw chunkWriter
// handlerHeader is the Header that Handlers get access to,
// which may be retained and mutated even after WriteHeader.
// handlerHeader is copied into cw.header at WriteHeader
// time, and privately mutated thereafter.
handlerHeader Header
calledHeader bool // handler accessed handlerHeader via Header
written int64 // number of bytes written in body
contentLength int64 // explicitly-declared Content-Length; or -1
status int // status code passed to WriteHeader
// close connection after this reply. set on request and
// updated after response from handler if there's a
// "Connection: keep-alive" response header and a
// Content-Length.
closeAfterReply bool
// requestBodyLimitHit is set by requestTooLarge when
// maxBytesReader hits its max size. It is checked in
// WriteHeader, to make sure we don't consume the
// remaining request body to try to advance to the next HTTP
// request. Instead, when this is set, we stop reading
// subsequent requests on this connection and stop reading
// input from it.
requestBodyLimitHit bool
// trailers are the headers to be sent after the handler
// finishes writing the body. This field is initialized from
// the Trailer response header when the response header is
// written.
trailers []string
handlerDone atomicBool // set true when the handler exits
// Buffers for Date, Content-Length, and status code
dateBuf [len(TimeFormat)]byte
clenBuf [10]byte
statusBuf [3]byte
// closeNotifyCh is the channel returned by CloseNotify.
// TODO(bradfitz): this is currently (for Go 1.8) always
// non-nil. Make this lazily-created again as it used to be?
closeNotifyCh chan bool
didCloseNotify int32 // atomic (only 0->1 winner should send)
}
// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
// that, if present, signals that the map entry is actually for
// the response trailers, and not the response headers. The prefix
// is stripped after the ServeHTTP call finishes and the values are
// sent in the trailers.
//
// This mechanism is intended only for trailers that are not known
// prior to the headers being written. If the set of trailers is fixed
// or known before the header is written, the normal Go trailers mechanism
// is preferred:
// https://golang.org/pkg/net/http/#ResponseWriter
// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
const TrailerPrefix = "Trailer:"
// finalTrailers is called after the Handler exits and returns a non-nil
// Header if the Handler set any trailers, combining TrailerPrefix-keyed
// entries of handlerHeader (prefix stripped) with the pre-declared trailer
// names in w.trailers.
func (w *response) finalTrailers() Header {
	var trailers Header
	ensure := func() {
		if trailers == nil {
			trailers = make(Header)
		}
	}
	for key, values := range w.handlerHeader {
		if !strings.HasPrefix(key, TrailerPrefix) {
			continue
		}
		ensure()
		trailers[strings.TrimPrefix(key, TrailerPrefix)] = values
	}
	for _, key := range w.trailers {
		ensure()
		for _, value := range w.handlerHeader[key] {
			trailers.Add(key, value)
		}
	}
	return trailers
}
// atomicBool is a boolean flag safe for concurrent use, backed by an int32
// manipulated through sync/atomic.
type atomicBool int32

// isSet reports whether the flag is currently true.
func (b *atomicBool) isSet() bool {
	return atomic.LoadInt32((*int32)(b)) != 0
}

// setTrue sets the flag to true.
func (b *atomicBool) setTrue() {
	atomic.StoreInt32((*int32)(b), 1)
}

// setFalse sets the flag to false.
func (b *atomicBool) setFalse() {
	atomic.StoreInt32((*int32)(b), 0)
}
// declareTrailer is called for each Trailer header when the
// response header is written. It notes that a header will need to be
// written in the trailers at the end of the response.
// Keys that are not valid trailer headers are silently ignored.
func (w *response) declareTrailer(k string) {
	// Canonicalize first so later lookups in handlerHeader match.
	k = CanonicalHeaderKey(k)
	if !httpguts.ValidTrailerHeader(k) {
		// Forbidden by RFC 7230, section 4.1.2
		return
	}
	w.trailers = append(w.trailers, k)
}
// requestTooLarge is called by maxBytesReader when too much input has been
// read from the client. It forces the connection closed after this reply
// and, if the header has not been written yet, advertises that to the
// client via "Connection: close".
func (w *response) requestTooLarge() {
	w.closeAfterReply = true
	w.requestBodyLimitHit = true
	if w.wroteHeader {
		return
	}
	w.Header().Set("Connection", "close")
}
// needsSniff reports whether a Content-Type still needs to be sniffed.
// That is only the case while the header has not hit the wire, the handler
// has not set an explicit Content-Type, and fewer than sniffLen body bytes
// have been written so far.
func (w *response) needsSniff() bool {
	_, haveType := w.handlerHeader["Content-Type"]
	return !w.cw.wroteHeader && !haveType && w.written < sniffLen
}
// writerOnly hides an io.Writer value's optional ReadFrom method
// from io.Copy.
type writerOnly struct {
io.Writer
}
// ReadFrom is here to optimize copying from an *os.File regular file
// to a *net.TCPConn with sendfile, or from a supported src type such
// as a *net.TCPConn on Linux with splice.
//
// It first copies up to sniffLen bytes through the normal Write path (so
// header finalization and Content-Type sniffing still happen), then — when
// the response is not chunked and a body is allowed — hands the rest of src
// directly to the connection's ReaderFrom.
func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
	bufp := copyBufPool.Get().(*[]byte)
	buf := *bufp
	defer copyBufPool.Put(bufp)
	// Our underlying w.conn.rwc is usually a *TCPConn (with its
	// own ReadFrom method). If not, just fall back to the normal
	// copy method.
	rf, ok := w.conn.rwc.(io.ReaderFrom)
	if !ok {
		return io.CopyBuffer(writerOnly{w}, src, buf)
	}
	// sendfile path:
	// Do not start actually writing response until src is readable.
	// If body length is <= sniffLen, sendfile/splice path will do
	// little anyway. This small read also satisfies sniffing the
	// body in case Content-Type is missing.
	nr, er := src.Read(buf[:sniffLen])
	atEOF := errors.Is(er, io.EOF)
	n += int64(nr)
	if nr > 0 {
		// Write the small amount read normally.
		nw, ew := w.Write(buf[:nr])
		if ew != nil {
			err = ew
		} else if nr != nw {
			err = io.ErrShortWrite
		}
	}
	if err == nil && er != nil && !atEOF {
		err = er
	}
	// Do not send StatusOK in the error case where nothing has been written.
	if err == nil && !w.wroteHeader {
		w.WriteHeader(StatusOK) // nr == 0, no error (or EOF)
	}
	if err != nil || atEOF {
		return n, err
	}
	w.w.Flush()  // get rid of any previous writes
	w.cw.flush() // make sure Header is written; flush data to rwc
	// Now that cw has been flushed, its chunking field is guaranteed initialized.
	if !w.cw.chunking && w.bodyAllowed() {
		// Fast path: let the connection pull directly from src
		// (sendfile/splice where the platform supports it).
		n0, err := rf.ReadFrom(src)
		n += n0
		w.written += n0
		return n, err
	}
	// Chunked responses must keep going through the chunkWriter framing,
	// so fall back to a generic copy.
	n0, err := io.Copy(writerOnly{w}, src)
	n += n0
	return n, err
}
// debugServerConnections controls whether all server connections are wrapped
// with a verbose logging wrapper.
// It is a compile-time switch consulted by newConn.
const debugServerConnections = false
// Create new connection from rwc.
func (srv *Server) newConn(rwc net.Conn) *conn {
	c := &conn{server: srv, rwc: rwc}
	if debugServerConnections {
		// Wrap the raw connection so all traffic is logged.
		c.rwc = newLoggingConn("server", c.rwc)
	}
	return c
}
// readResult is the outcome of a single Read call: the count and error
// as returned by Read, plus the byte itself when exactly one was read.
type readResult struct {
	_   incomparable
	n   int
	err error
	b   byte // byte read, if n == 1
}
// connReader is the io.Reader wrapper used by *conn. It combines a
// selectively-activated io.LimitedReader (to bound request header
// read sizes) with support for selectively keeping an io.Reader.Read
// call blocked in a background goroutine to wait for activity and
// trigger a CloseNotifier channel.
type connReader struct {
	conn *conn

	mu      sync.Mutex // guards following
	hasByte bool       // byteBuf holds an unconsumed byte from a background read
	byteBuf [1]byte    // staging area for the background 1-byte read
	cond    *sync.Cond // lazily created by lock(); signalled when inRead clears
	inRead  bool       // a Read (foreground or background) is in flight
	aborted bool       // set true before conn.rwc deadline is set to past
	remain  int64      // bytes remaining
}
// lock acquires cr.mu and lazily creates the condition variable used
// to coordinate with the background-read goroutine.
func (cr *connReader) lock() {
	cr.mu.Lock()
	if cr.cond != nil {
		return
	}
	cr.cond = sync.NewCond(&cr.mu)
}

// unlock releases cr.mu.
func (cr *connReader) unlock() {
	cr.mu.Unlock()
}
// startBackgroundRead launches a goroutine that blocks in a 1-byte
// Read on the connection, so connection activity (or death) can be
// noticed between requests. Panics if a Read is already in flight.
func (cr *connReader) startBackgroundRead() {
	cr.lock()
	defer cr.unlock()
	if cr.inRead {
		panic("invalid concurrent Body.Read call")
	}
	if cr.hasByte {
		// A previously read byte is still buffered; nothing to do.
		return
	}
	cr.inRead = true
	// Clear any read deadline so the background Read can block indefinitely.
	cr.conn.rwc.SetReadDeadline(time.Time{})
	go cr.backgroundRead()
}
// backgroundRead runs in its own goroutine (started by
// startBackgroundRead) and performs a single 1-byte Read on the
// connection, stashing any byte read into byteBuf for the next
// foreground Read to consume.
func (cr *connReader) backgroundRead() {
	n, err := cr.conn.rwc.Read(cr.byteBuf[:])
	cr.lock()
	if n == 1 {
		cr.hasByte = true
		// We were past the end of the previous request's body already
		// (since we wouldn't be in a background read otherwise), so
		// this is a pipelined HTTP request. Prior to Go 1.11 we used to
		// send on the CloseNotify channel and cancel the context here,
		// but the behavior was documented as only "may", and we only
		// did that because that's how CloseNotify accidentally behaved
		// in very early Go releases prior to context support. Once we
		// added context support, people used a Handler's
		// Request.Context() and passed it along. Having that context
		// cancel on pipelined HTTP requests caused problems.
		// Fortunately, almost nothing uses HTTP/1.x pipelining.
		// Unfortunately, apt-get does, or sometimes does.
		// New Go 1.11 behavior: don't fire CloseNotify or cancel
		// contexts on pipelined requests. Shouldn't affect people, but
		// fixes cases like Issue 23921. This does mean that a client
		// closing their TCP connection after sending a pipelined
		// request won't cancel the context, but we'll catch that on any
		// write failure (in checkConnErrorWriter.Write).
		// If the server never writes, yes, there are still contrived
		// server & client behaviors where this fails to ever cancel the
		// context, but that's kinda why HTTP/1.x pipelining died
		// anyway.
	}
	if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() {
		// Ignore this error. It's the expected error from
		// another goroutine calling abortPendingRead.
	} else if err != nil {
		cr.handleReadError(err)
	}
	cr.aborted = false
	cr.inRead = false
	cr.unlock()
	// Broadcast after unlocking; abortPendingRead may be blocked in Wait.
	cr.cond.Broadcast()
}
// abortPendingRead forces any in-flight Read on the connection to
// return by moving the read deadline into the past, waits for the
// reading goroutine to finish, then restores a blocking (zero) deadline.
func (cr *connReader) abortPendingRead() {
	cr.lock()
	defer cr.unlock()
	if !cr.inRead {
		return
	}
	// aborted tells backgroundRead to swallow the resulting timeout error.
	cr.aborted = true
	cr.conn.rwc.SetReadDeadline(aLongTimeAgo)
	for cr.inRead {
		// cond is broadcast by the reader once inRead clears.
		cr.cond.Wait()
	}
	cr.conn.rwc.SetReadDeadline(time.Time{})
}
// setReadLimit caps subsequent Reads at remain bytes.
func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain }

// setInfiniteReadLimit effectively removes the read cap.
func (cr *connReader) setInfiniteReadLimit() { cr.remain = maxInt64 }

// hitReadLimit reports whether the read cap has been exhausted.
func (cr *connReader) hitReadLimit() bool { return cr.remain <= 0 }
// handleReadError is called whenever a Read from the client returns a
// non-nil error.
//
// The provided non-nil err is almost always io.EOF or a "use of
// closed network connection". In any case, the error is not
// particularly interesting, except perhaps for debugging during
// development. Any error means the connection is dead and we should
// down its context.
//
// It may be called from multiple goroutines.
func (cr *connReader) handleReadError(_ error) {
	cr.conn.cancelCtx() // cancel the active request's context
	cr.closeNotify()    // fire CloseNotifier (at most once, via CAS)
}
// closeNotify delivers the CloseNotifier signal for the connection's
// current response, at most once per response (guarded by the
// didCloseNotify compare-and-swap).
// may be called from multiple goroutines.
func (cr *connReader) closeNotify() {
	res, _ := cr.conn.curReq.Load().(*response)
	if res != nil && atomic.CompareAndSwapInt32(&res.didCloseNotify, 0, 1) {
		// closeNotifyCh is created with capacity 1, so this send
		// cannot block.
		res.closeNotifyCh <- true
	}
}
// Read implements io.Reader, enforcing the configured read limit and
// coordinating with the background-read goroutine. A byte left over
// from a background read (byteBuf) is served before reading from the
// connection directly.
func (cr *connReader) Read(p []byte) (n int, err error) {
	cr.lock()
	if cr.inRead {
		cr.unlock()
		if cr.conn.hijacked() {
			panic("invalid Body.Read call. After hijacked, the original Request must not be used")
		}
		panic("invalid concurrent Body.Read call")
	}
	if cr.hitReadLimit() {
		cr.unlock()
		return 0, io.EOF
	}
	if len(p) == 0 {
		cr.unlock()
		return 0, nil
	}
	// Clamp the read so it cannot exceed the remaining byte budget.
	if int64(len(p)) > cr.remain {
		p = p[:cr.remain]
	}
	if cr.hasByte {
		// Serve the byte the background goroutine already read.
		p[0] = cr.byteBuf[0]
		cr.hasByte = false
		cr.unlock()
		return 1, nil
	}
	cr.inRead = true
	cr.unlock()
	// The blocking connection read happens without the lock held.
	n, err = cr.conn.rwc.Read(p)

	cr.lock()
	cr.inRead = false
	if err != nil {
		cr.handleReadError(err)
	}
	cr.remain -= int64(n)
	cr.unlock()

	// Wake anyone waiting in abortPendingRead.
	cr.cond.Broadcast()
	return n, err
}
// Free lists for bufio readers/writers, keyed by buffer size for writers.
var (
	bufioReaderPool   sync.Pool
	bufioWriter2kPool sync.Pool
	bufioWriter4kPool sync.Pool
)

// copyBufPool holds 32KB scratch buffers (as *[]byte to avoid an
// allocation per Put) shared by body-copying code paths.
var copyBufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 32*1024)
		return &b
	},
}

// bufioWriterPool returns the free list for bufio.Writers with the
// given buffer size, or nil if writers of that size are not pooled.
func bufioWriterPool(size int) *sync.Pool {
	if size == 2<<10 {
		return &bufioWriter2kPool
	}
	if size == 4<<10 {
		return &bufioWriter4kPool
	}
	return nil
}
// newBufioReader returns a bufio.Reader reading from r, reusing a
// pooled one when available.
func newBufioReader(r io.Reader) *bufio.Reader {
	v := bufioReaderPool.Get()
	if v == nil {
		// Note: if this reader size is ever changed, update
		// TestHandlerBodyClose's assumptions.
		return bufio.NewReader(r)
	}
	br := v.(*bufio.Reader)
	br.Reset(r)
	return br
}

// putBufioReader returns br to the pool, clearing its reference to the
// underlying reader so it cannot be retained.
func putBufioReader(br *bufio.Reader) {
	br.Reset(nil)
	bufioReaderPool.Put(br)
}
// newBufioWriterSize returns a bufio.Writer of the requested size
// writing to w, reusing a pooled writer when one exists for that size.
func newBufioWriterSize(w io.Writer, size int) *bufio.Writer {
	if pool := bufioWriterPool(size); pool != nil {
		if v := pool.Get(); v != nil {
			bw := v.(*bufio.Writer)
			bw.Reset(w)
			return bw
		}
	}
	return bufio.NewWriterSize(w, size)
}

// putBufioWriter returns bw to the pool matching its buffer size,
// clearing its writer reference first. Unpooled sizes are left for GC.
func putBufioWriter(bw *bufio.Writer) {
	bw.Reset(nil)
	pool := bufioWriterPool(bw.Available())
	if pool == nil {
		return
	}
	pool.Put(bw)
}
// DefaultMaxHeaderBytes is the maximum permitted size of the headers
// in an HTTP request.
// This can be overridden by setting Server.MaxHeaderBytes.
const DefaultMaxHeaderBytes = 1 << 20 // 1 MB

// maxHeaderBytes returns the configured request-header size limit,
// falling back to DefaultMaxHeaderBytes when unset or non-positive.
func (srv *Server) maxHeaderBytes() int {
	if srv.MaxHeaderBytes <= 0 {
		return DefaultMaxHeaderBytes
	}
	return srv.MaxHeaderBytes
}

// initialReadLimitSize is the byte cap applied while reading the
// request line and headers: the header limit plus some slack.
func (srv *Server) initialReadLimitSize() int64 {
	return int64(srv.maxHeaderBytes()) + 4096 // bufio slop
}
// wrapper around io.ReadCloser which on first read, sends an
// HTTP/1.1 100 Continue header
type expectContinueReader struct {
	resp       *response
	readCloser io.ReadCloser // the wrapped request body
	closed     atomicBool    // set by Close; later Reads fail with ErrBodyReadAfterClose
	sawEOF     atomicBool    // set once the underlying body returns io.EOF
}
// Read writes the deferred "100 Continue" response the first time the
// handler reads the body (if still permitted), then reads from the
// underlying body, tracking EOF.
func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
	if ecr.closed.isSet() {
		return 0, ErrBodyReadAfterClose
	}
	w := ecr.resp
	// Send 100 Continue at most once, and only while the response still
	// allows it (canWriteContinue) and the conn isn't hijacked.
	if !w.wroteContinue && w.canWriteContinue.isSet() && !w.conn.hijacked() {
		w.wroteContinue = true
		w.writeContinueMu.Lock()
		// Re-check under the mutex: response.write clears the flag
		// under the same lock when a body write is about to start.
		if w.canWriteContinue.isSet() {
			w.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
			w.conn.bufw.Flush()
			w.canWriteContinue.setFalse()
		}
		w.writeContinueMu.Unlock()
	}
	n, err = ecr.readCloser.Read(p)
	if err == io.EOF {
		ecr.sawEOF.setTrue()
	}
	return
}
// Close marks the body as closed (so later Reads fail with
// ErrBodyReadAfterClose) and closes the wrapped ReadCloser.
func (ecr *expectContinueReader) Close() error {
	// Set the flag first so concurrent Reads observe the close.
	ecr.closed.setTrue()
	err := ecr.readCloser.Close()
	return err
}
// TimeFormat is the time format to use when generating times in HTTP
// headers. It is like time.RFC1123 but hard-codes GMT as the time
// zone. The time being formatted must be in UTC for Format to
// generate the correct format.
//
// For parsing this time format, see ParseTime.
// (The reference moment is Go's usual Mon Jan 2 15:04:05 2006.)
const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
// appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat))
func appendTime(b []byte, t time.Time) []byte {
const days = "SunMonTueWedThuFriSat"
const months = "JanFebMarAprMayJunJulAugSepOctNovDec"
t = t.UTC()
yy, mm, dd := t.Date()
hh, mn, ss := t.Clock()
day := days[3*t.Weekday():]
mon := months[3*(mm-1):]
return append(b,
day[0], day[1], day[2], ',', ' ',
byte('0'+dd/10), byte('0'+dd%10), ' ',
mon[0], mon[1], mon[2], ' ',
byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ',
byte('0'+hh/10), byte('0'+hh%10), ':',
byte('0'+mn/10), byte('0'+mn%10), ':',
byte('0'+ss/10), byte('0'+ss%10), ' ',
'G', 'M', 'T')
}
// errTooLarge is returned when the request headers exceed the server's
// read limit.
var errTooLarge = errors.New("http: request too large")

// Read next request from connection.
func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
	if c.hijacked() {
		return nil, ErrHijacked
	}

	var (
		wholeReqDeadline time.Time // or zero if none
		hdrDeadline      time.Time // or zero if none
	)
	t0 := time.Now()
	if d := c.server.readHeaderTimeout(); d != 0 {
		hdrDeadline = t0.Add(d)
	}
	if d := c.server.ReadTimeout; d != 0 {
		wholeReqDeadline = t0.Add(d)
	}
	c.rwc.SetReadDeadline(hdrDeadline)
	if d := c.server.WriteTimeout; d != 0 {
		defer func() {
			c.rwc.SetWriteDeadline(time.Now().Add(d))
		}()
	}

	// Cap how much we read while parsing the request line and headers.
	c.r.setReadLimit(c.server.initialReadLimitSize())
	if c.lastMethod == "POST" {
		// RFC 7230 section 3 tolerance for old buggy clients.
		peek, _ := c.bufr.Peek(4) // ReadRequest will get err below
		c.bufr.Discard(numLeadingCRorLF(peek))
	}
	req, err := readRequest(c.bufr, keepHostHeader)
	if err != nil {
		if c.r.hitReadLimit() {
			return nil, errTooLarge
		}
		return nil, err
	}

	if !http1ServerSupportsRequest(req) {
		return nil, statusError{StatusHTTPVersionNotSupported, "unsupported protocol version"}
	}

	c.lastMethod = req.Method
	c.r.setInfiniteReadLimit()

	// HTTP/1.1 (and later) requests must carry exactly one valid Host
	// header, except for h2 upgrade requests and CONNECT.
	hosts, haveHost := req.Header["Host"]
	isH2Upgrade := req.isH2Upgrade()
	if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" {
		return nil, badRequestError("missing required Host header")
	}
	if len(hosts) > 1 {
		return nil, badRequestError("too many Host headers")
	}
	if len(hosts) == 1 && !httpguts.ValidHostHeader(hosts[0]) {
		return nil, badRequestError("malformed Host header")
	}
	// Reject any syntactically invalid header names or values.
	for k, vv := range req.Header {
		if !httpguts.ValidHeaderFieldName(k) {
			return nil, badRequestError("invalid header name")
		}
		for _, v := range vv {
			if !httpguts.ValidHeaderFieldValue(v) {
				return nil, badRequestError("invalid header value")
			}
		}
	}
	delete(req.Header, "Host")

	ctx, cancelCtx := context.WithCancel(ctx)
	req.ctx = ctx
	req.RemoteAddr = c.remoteAddr
	req.TLS = c.tlsState
	if body, ok := req.Body.(*body); ok {
		body.doEarlyClose = true
	}

	// Adjust the read deadline if necessary.
	if !hdrDeadline.Equal(wholeReqDeadline) {
		c.rwc.SetReadDeadline(wholeReqDeadline)
	}

	w = &response{
		conn:          c,
		cancelCtx:     cancelCtx,
		req:           req,
		reqBody:       req.Body,
		handlerHeader: make(Header),
		contentLength: -1,
		closeNotifyCh: make(chan bool, 1),

		// We populate these ahead of time so we're not
		// reading from req.Header after their Handler starts
		// and maybe mutates it (Issue 14940)
		wants10KeepAlive: req.wantsHttp10KeepAlive(),
		wantsClose:       req.wantsClose(),
	}
	if isH2Upgrade {
		w.closeAfterReply = true
	}
	w.cw.res = w
	// Buffer handler output so small responses can get a computed
	// Content-Length instead of chunking.
	w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize)
	return w, nil
}
// http1ServerSupportsRequest reports whether Go's HTTP/1.x server
// supports the given request.
func http1ServerSupportsRequest(req *Request) bool {
	switch {
	case req.ProtoMajor == 1:
		return true
	case req.ProtoMajor == 2 && req.ProtoMinor == 0 && req.Method == "PRI" && req.RequestURI == "*":
		// Accept "PRI * HTTP/2.0" upgrade requests, so Handlers can
		// wire up their own HTTP/2 upgrades.
		return true
	default:
		// Reject HTTP/0.x, and all other HTTP/2+ requests (which
		// aren't encoded in ASCII anyway).
		return false
	}
}
// Header returns the map the handler mutates to set response headers.
func (w *response) Header() Header {
	needSnapshot := w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader
	if needSnapshot {
		// Accessing the header between logically writing it
		// and physically writing it means we need to allocate
		// a clone to snapshot the logically written state.
		w.cw.header = w.handlerHeader.Clone()
	}
	w.calledHeader = true
	return w.handlerHeader
}
// maxPostHandlerReadBytes is the max number of Request.Body bytes not
// consumed by a handler that the server will read from the client
// in order to keep a connection alive. If there are more bytes than
// this then the server to be paranoid instead sends a "Connection:
// close" response.
//
// This number is approximately what a typical machine's TCP buffer
// size is anyway.  (if we have the bytes on the machine, we might as
// well read them)
const maxPostHandlerReadBytes = 256 << 10 // 256 KB
// checkWriteHeaderCode panics on WriteHeader status codes that cannot
// be serialized as three digits.
func checkWriteHeaderCode(code int) {
	// Issue 22880: require valid WriteHeader status codes.
	// For now we only enforce that it's three digits.
	// In the future we might block things over 599 (600 and above aren't defined
	// at https://httpwg.org/specs/rfc7231.html#status.codes)
	// and we might block under 200 (once we have more mature 1xx support).
	// But for now any three digits.
	//
	// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
	// no equivalent bogus thing we can realistically send in HTTP/2,
	// so we'll consistently panic instead and help people find their bugs
	// early. (We can't return an error from WriteHeader even if we wanted to.)
	if 100 <= code && code <= 999 {
		return
	}
	panic(fmt.Sprintf("invalid WriteHeader code %v", code))
}
// relevantCaller searches the call stack for the first function outside of net/http.
// The purpose of this function is to provide more helpful error messages.
func relevantCaller() runtime.Frame {
	pc := make([]uintptr, 16)
	n := runtime.Callers(1, pc)
	frames := runtime.CallersFrames(pc[:n])
	var frame runtime.Frame
	for {
		// NOTE(review): this := deliberately-or-not shadows the outer
		// frame var; if all 16 inspected frames are inside this package,
		// the zero-value Frame is returned below — presumably acceptable
		// since the result is only used for log messages. Confirm before
		// "fixing".
		frame, more := frames.Next()
		if !strings.HasPrefix(frame.Function, "github.com/SandwichDev/net/http.") {
			return frame
		}
		if !more {
			break
		}
	}
	return frame
}
// WriteHeader records the response status code. Calls after a hijack
// or a previous WriteHeader are logged and ignored. An invalid code
// panics (see checkWriteHeaderCode). A handler-set Content-Length
// header is parsed here into w.contentLength.
func (w *response) WriteHeader(code int) {
	if w.conn.hijacked() {
		caller := relevantCaller()
		w.conn.server.logf("http: response.WriteHeader on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
		return
	}
	if w.wroteHeader {
		caller := relevantCaller()
		w.conn.server.logf("http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
		return
	}
	checkWriteHeaderCode(code)
	w.wroteHeader = true
	w.status = code

	if w.calledHeader && w.cw.header == nil {
		// Snapshot the handler's header map as of WriteHeader time;
		// later mutations must not change what gets written.
		w.cw.header = w.handlerHeader.Clone()
	}

	if cl := w.handlerHeader.get("Content-Length"); cl != "" {
		v, err := strconv.ParseInt(cl, 10, 64)
		if err == nil && v >= 0 {
			w.contentLength = v
		} else {
			w.conn.server.logf("http: invalid Content-Length of %q", cl)
			w.handlerHeader.Del("Content-Length")
		}
	}
}
// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
// This type is used to avoid extra allocations from cloning and/or populating
// the response Header map and all its 1-element slices.
type extraHeader struct {
	contentType      string
	connection       string
	transferEncoding string
	date             []byte // written if not nil
	contentLength    []byte // written if not nil
}

// Sorted the same as extraHeader.Write's loop.
// NOTE: the order here must stay in sync with the order of the string
// fields iterated in extraHeader.Write.
var extraHeaderKeys = [][]byte{
	[]byte("Content-Type"),
	[]byte("Connection"),
	[]byte("Transfer-Encoding"),
}

// Pre-rendered "Key: " prefixes for the []byte-valued fields above.
var (
	headerContentLength = []byte("Content-Length: ")
	headerDate          = []byte("Date: ")
)
// Write writes the headers described in h to w.
//
// This method has a value receiver, despite the somewhat large size
// of h, because it prevents an allocation. The escape analysis isn't
// smart enough to realize this function doesn't mutate h.
func (h extraHeader) Write(w *bufio.Writer) {
	if h.date != nil {
		w.Write(headerDate)
		w.Write(h.date)
		w.Write(crlf)
	}
	if h.contentLength != nil {
		w.Write(headerContentLength)
		w.Write(h.contentLength)
		w.Write(crlf)
	}
	// Field order here must match extraHeaderKeys.
	values := [...]string{h.contentType, h.connection, h.transferEncoding}
	for i := range values {
		if values[i] == "" {
			continue
		}
		w.Write(extraHeaderKeys[i])
		w.Write(colonSpace)
		w.WriteString(values[i])
		w.Write(crlf)
	}
}
// writeHeader finalizes the header sent to the client and writes it
// to cw.res.conn.bufw.
//
// p is not written by writeHeader, but is the first chunk of the body
// that will be written. It is sniffed for a Content-Type if none is
// set explicitly. It's also used to set the Content-Length, if the
// total body size was small and the handler has already finished
// running.
func (cw *chunkWriter) writeHeader(p []byte) {
	if cw.wroteHeader {
		return
	}
	cw.wroteHeader = true

	w := cw.res
	keepAlivesEnabled := w.conn.server.doKeepAlives()
	isHEAD := w.req.Method == "HEAD"

	// header is written out to w.conn.buf below. Depending on the
	// state of the handler, we either own the map or not. If we
	// don't own it, the exclude map is created lazily for
	// WriteSubset to remove headers. The setHeader struct holds
	// headers we need to add.
	header := cw.header
	owned := header != nil
	if !owned {
		header = w.handlerHeader
	}
	var excludeHeader map[string]bool
	// delHeader removes key either by mutating the owned snapshot or by
	// recording it in the lazily-built exclude map for WriteSubset.
	delHeader := func(key string) {
		if owned {
			header.Del(key)
			return
		}
		if _, ok := header[key]; !ok {
			return
		}
		if excludeHeader == nil {
			excludeHeader = make(map[string]bool)
		}
		excludeHeader[key] = true
	}
	var setHeader extraHeader

	// Don't write out the fake "Trailer:foo" keys. See TrailerPrefix.
	trailers := false
	for k := range cw.header {
		if strings.HasPrefix(k, TrailerPrefix) {
			if excludeHeader == nil {
				excludeHeader = make(map[string]bool)
			}
			excludeHeader[k] = true
			trailers = true
		}
	}
	for _, v := range cw.header["Trailer"] {
		trailers = true
		foreachHeaderElement(v, cw.res.declareTrailer)
	}

	te := header.get("Transfer-Encoding")
	hasTE := te != ""

	// If the handler is done but never sent a Content-Length
	// response header and this is our first (and last) write, set
	// it, even to zero. This helps HTTP/1.0 clients keep their
	// "keep-alive" connections alive.
	// Exceptions: 304/204/1xx responses never get Content-Length, and if
	// it was a HEAD request, we don't know the difference between
	// 0 actual bytes and 0 bytes because the handler noticed it
	// was a HEAD request and chose not to write anything. So for
	// HEAD, the handler should either write the Content-Length or
	// write non-zero bytes. If it's actually 0 bytes and the
	// handler never looked at the Request.Method, we just don't
	// send a Content-Length header.
	// Further, we don't send an automatic Content-Length if they
	// set a Transfer-Encoding, because they're generally incompatible.
	if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
		w.contentLength = int64(len(p))
		setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
	}

	// If this was an HTTP/1.0 request with keep-alive and we sent a
	// Content-Length back, we can make this a keep-alive response ...
	if w.wants10KeepAlive && keepAlivesEnabled {
		sentLength := header.get("Content-Length") != ""
		if sentLength && header.get("Connection") == "keep-alive" {
			w.closeAfterReply = false
		}
	}

	// Check for an explicit (and valid) Content-Length header.
	hasCL := w.contentLength != -1

	if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) {
		_, connectionHeaderSet := header["Connection"]
		if !connectionHeaderSet {
			setHeader.connection = "keep-alive"
		}
	} else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose {
		w.closeAfterReply = true
	}

	if header.get("Connection") == "close" || !keepAlivesEnabled {
		w.closeAfterReply = true
	}

	// If the client wanted a 100-continue but we never sent it to
	// them (or, more strictly: we never finished reading their
	// request body), don't reuse this connection because it's now
	// in an unknown state: we might be sending this response at
	// the same time the client is now sending its request body
	// after a timeout.  (Some HTTP clients send Expect:
	// 100-continue but knowing that some servers don't support
	// it, the clients set a timer and send the body later anyway)
	// If we haven't seen EOF, we can't skip over the unread body
	// because we don't know if the next bytes on the wire will be
	// the body-following-the-timer or the subsequent request.
	// See Issue 11549.
	if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF.isSet() {
		w.closeAfterReply = true
	}

	// Per RFC 2616, we should consume the request body before
	// replying, if the handler hasn't already done so. But we
	// don't want to do an unbounded amount of reading here for
	// DoS reasons, so we only try up to a threshold.
	// TODO(bradfitz): where does RFC 2616 say that? See Issue 15527
	// about HTTP/1.x Handlers concurrently reading and writing, like
	// HTTP/2 handlers can do. Maybe this code should be relaxed?
	if w.req.ContentLength != 0 && !w.closeAfterReply {
		var discard, tooBig bool

		switch bdy := w.req.Body.(type) {
		case *expectContinueReader:
			if bdy.resp.wroteContinue {
				discard = true
			}
		case *body:
			bdy.mu.Lock()
			switch {
			case bdy.closed:
				if !bdy.sawEOF {
					// Body was closed in handler with non-EOF error.
					w.closeAfterReply = true
				}
			case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
				tooBig = true
			default:
				discard = true
			}
			bdy.mu.Unlock()
		default:
			discard = true
		}

		if discard {
			// Read one byte past the threshold to detect "too big".
			_, err := io.CopyN(io.Discard, w.reqBody, maxPostHandlerReadBytes+1)
			switch err {
			case nil:
				// There must be even more data left over.
				tooBig = true
			case ErrBodyReadAfterClose:
				// Body was already consumed and closed.
			case io.EOF:
				// The remaining body was just consumed, close it.
				err = w.reqBody.Close()
				if err != nil {
					w.closeAfterReply = true
				}
			default:
				// Some other kind of error occurred, like a read timeout, or
				// corrupt chunked encoding. In any case, whatever remains
				// on the wire must not be parsed as another HTTP request.
				w.closeAfterReply = true
			}
		}

		if tooBig {
			w.requestTooLarge()
			delHeader("Connection")
			setHeader.connection = "close"
		}
	}

	code := w.status
	if bodyAllowedForStatus(code) {
		// If no content type, apply sniffing algorithm to body.
		_, haveType := header["Content-Type"]

		// If the Content-Encoding was set and is non-blank,
		// we shouldn't sniff the body. See Issue 31753.
		ce := header.Get("Content-Encoding")
		hasCE := len(ce) > 0
		if !hasCE && !haveType && !hasTE && len(p) > 0 {
			setHeader.contentType = DetectContentType(p)
		}
	} else {
		for _, k := range suppressedHeaders(code) {
			delHeader(k)
		}
	}

	if !header.has("Date") {
		setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
	}

	if hasCL && hasTE && te != "identity" {
		// TODO: return an error if WriteHeader gets a return parameter
		// For now just ignore the Content-Length.
		w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
			te, w.contentLength)
		delHeader("Content-Length")
		hasCL = false
	}

	if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) {
		// do nothing
	} else if code == StatusNoContent {
		delHeader("Transfer-Encoding")
	} else if hasCL {
		delHeader("Transfer-Encoding")
	} else if w.req.ProtoAtLeast(1, 1) {
		// HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no
		// content-length has been provided. The connection must be closed after the
		// reply is written, and no chunking is to be done. This is the setup
		// recommended in the Server-Sent Events candidate recommendation 11,
		// section 8.
		if hasTE && te == "identity" {
			cw.chunking = false
			w.closeAfterReply = true
		} else {
			// HTTP/1.1 or greater: use chunked transfer encoding
			// to avoid closing the connection at EOF.
			cw.chunking = true
			setHeader.transferEncoding = "chunked"
			if hasTE && te == "chunked" {
				// We will send the chunked Transfer-Encoding header later.
				delHeader("Transfer-Encoding")
			}
		}
	} else {
		// HTTP version < 1.1: cannot do chunked transfer
		// encoding and we don't know the Content-Length so
		// signal EOF by closing connection.
		w.closeAfterReply = true
		delHeader("Transfer-Encoding") // in case already set
	}

	// Cannot use Content-Length with non-identity Transfer-Encoding.
	if cw.chunking {
		delHeader("Content-Length")
	}
	if !w.req.ProtoAtLeast(1, 0) {
		return
	}

	// Only override the Connection header if it is not a successful
	// protocol switch response and if KeepAlives are not enabled.
	// See https://golang.org/issue/36381.
	delConnectionHeader := w.closeAfterReply &&
		(!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) &&
		!isProtocolSwitchResponse(w.status, header)
	if delConnectionHeader {
		delHeader("Connection")
		if w.req.ProtoAtLeast(1, 1) {
			setHeader.connection = "close"
		}
	}

	writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
	cw.header.WriteSubset(w.conn.bufw, excludeHeader)
	setHeader.Write(w.conn.bufw)
	w.conn.bufw.Write(crlf)
}
// foreachHeaderElement splits v according to the "#rule" construction
// in RFC 7230 section 7 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) {
v = textproto.TrimString(v)
if v == "" {
return
}
if !strings.Contains(v, ",") {
fn(v)
return
}
for _, f := range strings.Split(v, ",") {
if f = textproto.TrimString(f); f != "" {
fn(f)
}
}
}
// writeStatusLine writes an HTTP/1.x Status-Line (RFC 7230 Section 3.1.2)
// to bw. is11 is whether the HTTP request is HTTP/1.1. false means HTTP/1.0.
// code is the response status code.
// scratch is an optional scratch buffer. If it has at least capacity 3, it's used.
func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) {
	proto := "HTTP/1.0 "
	if is11 {
		proto = "HTTP/1.1 "
	}
	bw.WriteString(proto)
	text, ok := statusText[code]
	if !ok {
		// don't worry about performance
		fmt.Fprintf(bw, "%03d status code %d\r\n", code, code)
		return
	}
	bw.Write(strconv.AppendInt(scratch[:0], int64(code), 10))
	bw.WriteByte(' ')
	bw.WriteString(text)
	bw.WriteString("\r\n")
}
// bodyAllowed reports whether a Write is allowed for this response type.
// It's illegal to call this before the header has been flushed.
func (w *response) bodyAllowed() bool {
	if !w.wroteHeader {
		// Fix: panic("") produced an empty, unhelpful panic message;
		// name the contract violation so the stack trace is actionable.
		panic("http: bodyAllowed called before WriteHeader")
	}
	return bodyAllowedForStatus(w.status)
}
// The Life Of A Write is like this:
//
// Handler starts. No header has been sent. The handler can either
// write a header, or just start writing. Writing before sending a header
// sends an implicitly empty 200 OK header.
//
// If the handler didn't declare a Content-Length up front, we either
// go into chunking mode or, if the handler finishes running before
// the chunking buffer size, we compute a Content-Length and send that
// in the header instead.
//
// Likewise, if the handler didn't set a Content-Type, we sniff that
// from the initial chunk of output.
//
// The Writers are wired together like:
//
// 1. *response (the ResponseWriter) ->
// 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes
// 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type)
// and which writes the chunk headers, if needed.
// 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to ->
// 5. checkConnErrorWriter{c}, which notes any non-nil error on Write
// and populates c.werr with it if so. but otherwise writes to:
// 6. the rwc, the net.Conn.
//
// TODO(bradfitz): short-circuit some of the buffering when the
// initial header contains both a Content-Type and Content-Length.
// Also short-circuit in (1) when the header's been sent and not in
// chunking mode, writing directly to (4) instead, if (2) has no
// buffered data. More generally, we could short-circuit from (1) to
// (3) even in chunking mode if the write size from (1) is over some
// threshold and nothing is in (2). The answer might be mostly making
// bufferBeforeChunkingSize smaller and having bufio's fast-paths deal
// with this instead.
// Write writes the data to the connection as part of an HTTP reply,
// delegating to the common write path with a []byte payload.
func (w *response) Write(data []byte) (n int, err error) {
	return w.write(len(data), data, "")
}

// WriteString is the string-based analogue of Write; it avoids a
// []byte conversion by passing the string through to w.write.
func (w *response) WriteString(data string) (n int, err error) {
	return w.write(len(data), nil, data)
}
// write is the shared implementation of Write and WriteString: it
// checks hijack/continue/header state, enforces Content-Length, and
// forwards exactly one of dataB or dataS to the buffered writer.
// either dataB or dataS is non-zero.
func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) {
	if w.conn.hijacked() {
		if lenData > 0 {
			caller := relevantCaller()
			w.conn.server.logf("http: response.Write on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
		}
		return 0, ErrHijacked
	}

	if w.canWriteContinue.isSet() {
		// Body reader wants to write 100 Continue but hasn't yet.
		// Tell it not to. The store must be done while holding the lock
		// because the lock makes sure that there is not an active write
		// this very moment.
		w.writeContinueMu.Lock()
		w.canWriteContinue.setFalse()
		w.writeContinueMu.Unlock()
	}

	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}
	if lenData == 0 {
		return 0, nil
	}
	if !w.bodyAllowed() {
		return 0, ErrBodyNotAllowed
	}

	w.written += int64(lenData) // ignoring errors, for errorKludge
	if w.contentLength != -1 && w.written > w.contentLength {
		return 0, ErrContentLength
	}
	if dataB != nil {
		return w.w.Write(dataB)
	} else {
		return w.w.WriteString(dataS)
	}
}
// finishRequest runs after the handler returns: it flushes all
// buffered output layers in order, closes the chunk writer, aborts
// any pending background read, and releases per-request resources.
func (w *response) finishRequest() {
	w.handlerDone.setTrue()

	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}

	// Flush the handler-facing buffer into cw, recycle it, then close
	// cw and flush the connection buffer — order matters here.
	w.w.Flush()
	putBufioWriter(w.w)
	w.cw.close()
	w.conn.bufw.Flush()

	w.conn.r.abortPendingRead()

	// Close the body (regardless of w.closeAfterReply) so we can
	// re-use its bufio.Reader later safely.
	w.reqBody.Close()

	if w.req.MultipartForm != nil {
		w.req.MultipartForm.RemoveAll()
	}
}
// shouldReuseConnection reports whether the underlying TCP connection can be reused.
// It must only be called after the handler is done executing.
func (w *response) shouldReuseConnection() bool {
	switch {
	case w.closeAfterReply:
		// The request or something set while executing the
		// handler indicated we shouldn't reuse this
		// connection.
		return false
	case w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written:
		// Did not write enough. Avoid getting out of sync.
		return false
	case w.conn.werr != nil:
		// There was some error writing to the underlying connection
		// during the request, so don't re-use this conn.
		return false
	case w.closedRequestBodyEarly():
		return false
	default:
		return true
	}
}
// closedRequestBodyEarly reports whether the request body was closed
// by the server before it was fully consumed.
func (w *response) closedRequestBodyEarly() bool {
	if b, ok := w.req.Body.(*body); ok {
		return b.didEarlyClose()
	}
	return false
}
// Flush pushes any buffered response data to the client, writing an
// implicit 200 OK header first if none has been sent.
func (w *response) Flush() {
	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}
	// Flush the handler buffer into cw, then cw into the connection.
	w.w.Flush()
	w.cw.flush()
}
// finalFlush flushes remaining buffered output and returns the conn's
// bufio objects to their pools for reuse by future connections.
func (c *conn) finalFlush() {
	if br := c.bufr; br != nil {
		// Steal the bufio.Reader (~4KB worth of memory) and its associated
		// reader for a future connection.
		putBufioReader(br)
		c.bufr = nil
	}

	if bw := c.bufw; bw != nil {
		bw.Flush()
		// Steal the bufio.Writer (~4KB worth of memory) and its associated
		// writer for a future connection.
		putBufioWriter(bw)
		c.bufw = nil
	}
}
// Close the connection.
// finalFlush runs first so buffered data is written and the bufio
// objects are recycled before the socket is closed.
func (c *conn) close() {
	c.finalFlush()
	c.rwc.Close()
}
// rstAvoidanceDelay is the amount of time we sleep after closing the
// write side of a TCP connection before closing the entire socket.
// By sleeping, we increase the chances that the client sees our FIN
// and processes its final data before they process the subsequent RST
// from closing a connection with known unread data.
// This RST seems to occur mostly on BSD systems. (And Windows?)
// This timeout is somewhat arbitrary (~latency around the planet).
const rstAvoidanceDelay = 500 * time.Millisecond
type closeWriter interface {
CloseWrite() error
}
var _ closeWriter = (*net.TCPConn)(nil)
// closeWrite flushes any outstanding data and sends a FIN packet (if
// client is connected via TCP), signalling that we're done. We then
// pause for a bit, hoping the client processes it before any
// subsequent RST.
//
// See https://golang.org/issue/3595
func (c *conn) closeWriteAndWait() {
	c.finalFlush()
	// Half-close only if the transport supports it (plain TCP does;
	// e.g. a TLS conn does not implement closeWriter).
	if tcp, ok := c.rwc.(closeWriter); ok {
		tcp.CloseWrite()
	}
	time.Sleep(rstAvoidanceDelay)
}
// validNextProto reports whether the proto is a valid ALPN protocol name.
// Everything is valid except the empty string and built-in protocol types,
// so that those can't be overridden with alternate implementations.
func validNextProto(proto string) bool {
	return proto != "" && proto != "http/1.1" && proto != "http/1.0"
}
// Values for the runHook parameter of (*conn).setState: whether the
// server's ConnState callback should be invoked for the transition.
const (
	runHooks  = true
	skipHooks = false
)
// setState records the connection's new state, (un)tracks the conn on
// the server for StateNew / StateHijacked / StateClosed, and — unless
// runHook is skipHooks — invokes the server's ConnState callback.
func (c *conn) setState(nc net.Conn, state ConnState, runHook bool) {
	srv := c.server
	switch state {
	case StateNew:
		srv.trackConn(c, true)
	case StateHijacked, StateClosed:
		srv.trackConn(c, false)
	}
	// The state must fit in the low byte of the packed word below.
	if state > 0xff || state < 0 {
		panic("internal error")
	}
	// Pack the unix time (seconds) and the state into a single word so
	// getState can read both atomically.
	packedState := uint64(time.Now().Unix()<<8) | uint64(state)
	atomic.StoreUint64(&c.curState.atomic, packedState)
	if !runHook {
		return
	}
	if hook := srv.ConnState; hook != nil {
		hook(nc, state)
	}
}
// getState returns the connection's last recorded state and the unix
// time (seconds) at which setState recorded it.
func (c *conn) getState() (state ConnState, unixSec int64) {
	packedState := atomic.LoadUint64(&c.curState.atomic)
	// Low byte holds the state; the remaining bits hold the timestamp.
	return ConnState(packedState & 0xff), int64(packedState >> 8)
}
// badRequestError is a literal string (used by in the server in HTML,
// unescaped) to tell the user why their request was bad. It should
// be plain text without user info or other embedded errors.
func badRequestError(e string) error { return statusError{StatusBadRequest, e} }

// statusError is an error used to respond to a request with an HTTP status.
// The text should be plain text without user info or other embedded errors.
type statusError struct {
	code int    // HTTP status code to respond with
	text string // plain-text detail appended after the status text
}

// Error formats the status as "<status text>: <detail>".
func (e statusError) Error() string { return StatusText(e.code) + ": " + e.text }

// ErrAbortHandler is a sentinel panic value to abort a handler.
// While any panic from ServeHTTP aborts the response to the client,
// panicking with ErrAbortHandler also suppresses logging of a stack
// trace to the server's error log.
var ErrAbortHandler = errors.New("github.com/SandwichDev/net/http: abort Handler")
// isCommonNetReadError reports whether err is a common error
// encountered during reading a request off the network when the
// client has gone away or had its read fail somehow. This is used to
// determine which logs are interesting enough to log about.
func isCommonNetReadError(err error) bool {
if err == io.EOF {
return true
}
if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
return true
}
if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
return true
}
return false
}
// Serve a new connection.
//
// serve performs the optional TLS handshake (possibly handing the
// connection off to an ALPN-negotiated protocol such as HTTP/2), then
// reads HTTP/1.x requests in a loop, dispatching each to the server's
// handler until an error, hijack, shutdown, or idle timeout ends the
// connection.
func (c *conn) serve(ctx context.Context) {
	c.remoteAddr = c.rwc.RemoteAddr().String()
	ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr())
	defer func() {
		// Log handler panics (except the ErrAbortHandler sentinel)
		// with a truncated stack trace, then close unless hijacked.
		if err := recover(); err != nil && err != ErrAbortHandler {
			const size = 64 << 10
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
		}
		if !c.hijacked() {
			c.close()
			c.setState(c.rwc, StateClosed, runHooks)
		}
	}()
	if tlsConn, ok := c.rwc.(*tls.Conn); ok {
		// Apply the server's deadlines to cover the handshake itself.
		if d := c.server.ReadTimeout; d != 0 {
			c.rwc.SetReadDeadline(time.Now().Add(d))
		}
		if d := c.server.WriteTimeout; d != 0 {
			c.rwc.SetWriteDeadline(time.Now().Add(d))
		}
		if err := tlsConn.Handshake(); err != nil {
			// If the handshake failed due to the client not speaking
			// TLS, assume they're speaking plaintext HTTP and write a
			// 400 response on the TLS conn's underlying net.Conn.
			if re, ok := err.(tls.RecordHeaderError); ok && re.Conn != nil && tlsRecordHeaderLooksLikeHTTP(re.RecordHeader) {
				io.WriteString(re.Conn, "HTTP/1.0 400 Bad Request\r\n\r\nClient sent an HTTP request to an HTTPS server.\n")
				re.Conn.Close()
				return
			}
			c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err)
			return
		}
		c.tlsState = new(tls.ConnectionState)
		*c.tlsState = tlsConn.ConnectionState()
		if proto := c.tlsState.NegotiatedProtocol; validNextProto(proto) {
			if fn := c.server.TLSNextProto[proto]; fn != nil {
				h := initALPNRequest{ctx, tlsConn, serverHandler{c.server}}
				// Mark freshly created HTTP/2 as active and prevent any server state hooks
				// from being run on these connections. This prevents closeIdleConns from
				// closing such connections. See issue https://golang.org/issue/39776.
				c.setState(c.rwc, StateActive, skipHooks)
				fn(c.server, tlsConn, h)
			}
			return
		}
	}
	// HTTP/1.x from here on.
	ctx, cancelCtx := context.WithCancel(ctx)
	c.cancelCtx = cancelCtx
	defer cancelCtx()
	c.r = &connReader{conn: c}
	c.bufr = newBufioReader(c.r)
	c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)
	for {
		w, err := c.readRequest(ctx)
		if c.r.remain != c.server.initialReadLimitSize() {
			// If we read any bytes off the wire, we're active.
			c.setState(c.rwc, StateActive, runHooks)
		}
		if err != nil {
			const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n"
			switch {
			case err == errTooLarge:
				// Their HTTP client may or may not be
				// able to read this if we're
				// responding to them and hanging up
				// while they're still writing their
				// request. Undefined behavior.
				const publicErr = "431 Request Header Fields Too Large"
				fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
				c.closeWriteAndWait()
				return
			case isUnsupportedTEError(err):
				// Respond as per RFC 7230 Section 3.3.1 which says,
				// A server that receives a request message with a
				// transfer coding it does not understand SHOULD
				// respond with 501 (Unimplemented).
				code := StatusNotImplemented
				// We purposefully aren't echoing back the transfer-encoding's value,
				// so as to mitigate the risk of cross side scripting by an attacker.
				fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s%sUnsupported transfer encoding", code, StatusText(code), errorHeaders)
				return
			case isCommonNetReadError(err):
				return // don't reply
			default:
				if v, ok := err.(statusError); ok {
					fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s: %s%s%d %s: %s", v.code, StatusText(v.code), v.text, errorHeaders, v.code, StatusText(v.code), v.text)
					return
				}
				publicErr := "400 Bad Request"
				fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
				return
			}
		}
		// Expect 100 Continue support
		req := w.req
		if req.expectsContinue() {
			if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
				// Wrap the Body reader with one that replies on the connection
				req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
				w.canWriteContinue.setTrue()
			}
		} else if req.Header.get("Expect") != "" {
			// Any Expect value other than 100-continue gets a 417.
			w.sendExpectationFailed()
			return
		}
		c.curReq.Store(w)
		if requestBodyRemains(req.Body) {
			registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead)
		} else {
			w.conn.r.startBackgroundRead()
		}
		// HTTP cannot have multiple simultaneous active requests.[*]
		// Until the server replies to this request, it can't read another,
		// so we might as well run the handler in this goroutine.
		// [*] Not strictly true: HTTP pipelining. We could let them all process
		// in parallel even if their responses need to be serialized.
		// But we're not going to implement HTTP pipelining because it
		// was never deployed in the wild and the answer is HTTP/2.
		serverHandler{c.server}.ServeHTTP(w, w.req)
		w.cancelCtx()
		if c.hijacked() {
			return
		}
		w.finishRequest()
		if !w.shouldReuseConnection() {
			if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
				c.closeWriteAndWait()
			}
			return
		}
		c.setState(c.rwc, StateIdle, runHooks)
		c.curReq.Store((*response)(nil))
		if !w.conn.server.doKeepAlives() {
			// We're in shutdown mode. We might've replied
			// to the user without "Connection: close" and
			// they might think they can send another
			// request, but such is life with HTTP/1.1.
			return
		}
		if d := c.server.idleTimeout(); d != 0 {
			// Wait (up to the idle timeout) for the start of the next
			// request before looping.
			c.rwc.SetReadDeadline(time.Now().Add(d))
			if _, err := c.bufr.Peek(4); err != nil {
				return
			}
		}
		// Clear the deadline; readRequest applies per-request timeouts.
		c.rwc.SetReadDeadline(time.Time{})
	}
}
// sendExpectationFailed replies with 417 Expectation Failed and closes
// the connection, for requests carrying an Expect header other than
// 100-continue.
func (w *response) sendExpectationFailed() {
	// TODO(bradfitz): let ServeHTTP handlers handle
	// requests with non-standard expectation[s]? Seems
	// theoretical at best, and doesn't fit into the
	// current ServeHTTP model anyway. We'd need to
	// make the ResponseWriter an optional
	// "ExpectReplier" interface or something.
	//
	// For now we'll just obey RFC 7231 5.1.1 which says
	// "A server that receives an Expect field-value other
	// than 100-continue MAY respond with a 417 (Expectation
	// Failed) status code to indicate that the unexpected
	// expectation cannot be met."
	w.Header().Set("Connection", "close")
	w.WriteHeader(StatusExpectationFailed)
	w.finishRequest()
}
// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter
// and a Hijacker.
//
// On success the caller owns the returned net.Conn and buffered
// ReadWriter; the server stops managing the connection.
func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
	if w.handlerDone.isSet() {
		panic("github.com/SandwichDev/net/http: Hijack called after ServeHTTP finished")
	}
	// Flush any headers/body already written so they reach the client
	// before the caller takes over the raw connection.
	if w.wroteHeader {
		w.cw.flush()
	}
	c := w.conn
	c.mu.Lock()
	defer c.mu.Unlock()
	// Release the bufioWriter that writes to the chunk writer, it is not
	// used after a connection has been hijacked.
	rwc, buf, err = c.hijackLocked()
	if err == nil {
		putBufioWriter(w.w)
		w.w = nil
	}
	return rwc, buf, err
}
// CloseNotify implements the CloseNotifier interface, returning a
// channel that receives a value when the client connection goes away.
// It must be called before the handler returns.
func (w *response) CloseNotify() <-chan bool {
	if w.handlerDone.isSet() {
		panic("github.com/SandwichDev/net/http: CloseNotify called after ServeHTTP finished")
	}
	return w.closeNotifyCh
}
// registerOnHitEOF arranges for fn to run when rc reaches EOF,
// unwrapping expectContinueReader layers down to the underlying *body.
// Panics on any other ReadCloser type: callers must first check
// requestBodyRemains, which accepts the same types.
func registerOnHitEOF(rc io.ReadCloser, fn func()) {
	switch v := rc.(type) {
	case *expectContinueReader:
		registerOnHitEOF(v.readCloser, fn)
	case *body:
		v.registerOnHitEOF(fn)
	default:
		panic("unexpected type " + fmt.Sprintf("%T", rc))
	}
}
// requestBodyRemains reports whether future calls to Read
// on rc might yield more data.
//
// Like registerOnHitEOF it unwraps expectContinueReader down to the
// underlying *body, and panics on any other ReadCloser type.
func requestBodyRemains(rc io.ReadCloser) bool {
	// NoBody trivially has nothing left.
	if rc == NoBody {
		return false
	}
	switch v := rc.(type) {
	case *expectContinueReader:
		return requestBodyRemains(v.readCloser)
	case *body:
		return v.bodyRemains()
	default:
		panic("unexpected type " + fmt.Sprintf("%T", rc))
	}
}
// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as HTTP handlers. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler that calls f.
type HandlerFunc func(ResponseWriter, *Request)

// ServeHTTP calls f(w, r).
func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) {
	f(w, r)
}
// Helper handlers

// Error replies to the request with the specified error message and HTTP code.
// It does not otherwise end the request; the caller should ensure no further
// writes are done to w.
// The error message should be plain text.
func Error(w ResponseWriter, error string, code int) {
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	// nosniff stops browsers from guessing a richer (scriptable)
	// content type for the error text.
	w.Header().Set("X-Content-Type-Options", "nosniff")
	w.WriteHeader(code)
	fmt.Fprintln(w, error)
}

// NotFound replies to the request with an HTTP 404 not found error.
func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) }

// NotFoundHandler returns a simple request handler
// that replies to each request with a ``404 page not found'' reply.
func NotFoundHandler() Handler { return HandlerFunc(NotFound) }
// StripPrefix returns a handler that serves HTTP requests by removing the
// given prefix from the request URL's Path (and RawPath if set) and invoking
// the handler h. StripPrefix handles a request for a path that doesn't begin
// with prefix by replying with an HTTP 404 not found error. The prefix must
// match exactly: if the prefix in the request contains escaped characters
// the reply is also an HTTP 404 not found error.
func StripPrefix(prefix string, h Handler) Handler {
	if prefix == "" {
		return h
	}
	return HandlerFunc(func(w ResponseWriter, r *Request) {
		trimmedPath := strings.TrimPrefix(r.URL.Path, prefix)
		trimmedRaw := strings.TrimPrefix(r.URL.RawPath, prefix)
		pathHadPrefix := len(trimmedPath) < len(r.URL.Path)
		rawOK := r.URL.RawPath == "" || len(trimmedRaw) < len(r.URL.RawPath)
		if !pathHadPrefix || !rawOK {
			// Prefix absent (or absent from the escaped form): 404.
			NotFound(w, r)
			return
		}
		// Shallow-copy the request and its URL so the caller's
		// *Request is left untouched.
		r2 := new(Request)
		*r2 = *r
		r2.URL = new(url.URL)
		*r2.URL = *r.URL
		r2.URL.Path = trimmedPath
		r2.URL.RawPath = trimmedRaw
		h.ServeHTTP(w, r2)
	})
}
// Redirect replies to the request with a redirect to url,
// which may be a path relative to the request path.
//
// The provided code should be in the 3xx range and is usually
// StatusMovedPermanently, StatusFound or StatusSeeOther.
//
// If the Content-Type header has not been set, Redirect sets it
// to "text/html; charset=utf-8" and writes a small HTML body.
// Setting the Content-Type header to any value, including nil,
// disables that behavior.
func Redirect(w ResponseWriter, r *Request, url string, code int) {
	if u, err := urlpkg.Parse(url); err == nil {
		// If url was relative, make its path absolute by
		// combining with request path.
		// The client would probably do this for us,
		// but doing it ourselves is more reliable.
		// See RFC 7231, section 7.1.2
		if u.Scheme == "" && u.Host == "" {
			oldpath := r.URL.Path
			if oldpath == "" { // should not happen, but avoid a crash if it does
				oldpath = "/"
			}
			// no leading http://server
			if url == "" || url[0] != '/' {
				// make relative path absolute
				olddir, _ := path.Split(oldpath)
				url = olddir + url
			}
			var query string
			if i := strings.Index(url, "?"); i != -1 {
				// Split the query off so path.Clean doesn't mangle it.
				url, query = url[:i], url[i:]
			}
			// clean up but preserve trailing slash
			trailing := strings.HasSuffix(url, "/")
			url = path.Clean(url)
			if trailing && !strings.HasSuffix(url, "/") {
				url += "/"
			}
			url += query
		}
	}
	h := w.Header()
	// RFC 7231 notes that a short HTML body is usually included in
	// the response because older user agents may not understand 301/307.
	// Do it only if the request didn't already have a Content-Type header.
	_, hadCT := h["Content-Type"]
	h.Set("Location", hexEscapeNonASCII(url))
	if !hadCT && (r.Method == "GET" || r.Method == "HEAD") {
		h.Set("Content-Type", "text/html; charset=utf-8")
	}
	w.WriteHeader(code)
	// Shouldn't send the body for POST or HEAD; that leaves GET.
	if !hadCT && r.Method == "GET" {
		body := "<a href=\"" + htmlEscape(url) + "\">" + statusText[code] + "</a>.\n"
		fmt.Fprintln(w, body)
	}
}
// htmlReplacer escapes the five HTML-special characters so that text
// (e.g. the URL in Redirect's anchor body) can be embedded safely in
// HTML. The replacement strings here had been corrupted (HTML-unescaped
// into identity mappings like `"&" -> "&"`), which made htmlEscape a
// no-op and re-opened an injection vector; the proper entities are
// restored below.
var htmlReplacer = strings.NewReplacer(
	"&", "&amp;",
	"<", "&lt;",
	">", "&gt;",
	// "&#34;" is shorter than "&quot;".
	`"`, "&#34;",
	// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
	"'", "&#39;",
)
// htmlEscape returns s with HTML-special characters replaced per
// htmlReplacer, for safe embedding in HTML output.
func htmlEscape(s string) string {
	return htmlReplacer.Replace(s)
}
// Redirect to a fixed URL
type redirectHandler struct {
	url  string // target URL passed to Redirect
	code int    // HTTP status code for the redirect (3xx)
}

// ServeHTTP redirects every request to the handler's fixed URL.
func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
	Redirect(w, r, rh.url, rh.code)
}

// RedirectHandler returns a request handler that redirects
// each request it receives to the given url using the given
// status code.
//
// The provided code should be in the 3xx range and is usually
// StatusMovedPermanently, StatusFound or StatusSeeOther.
func RedirectHandler(url string, code int) Handler {
	return &redirectHandler{url, code}
}
// ServeMux is an HTTP request multiplexer.
// It matches the URL of each incoming request against a list of registered
// patterns and calls the handler for the pattern that
// most closely matches the URL.
//
// Patterns name fixed, rooted paths, like "/favicon.ico",
// or rooted subtrees, like "/images/" (note the trailing slash).
// Longer patterns take precedence over shorter ones, so that
// if there are handlers registered for both "/images/"
// and "/images/thumbnails/", the latter handler will be
// called for paths beginning "/images/thumbnails/" and the
// former will receive requests for any other paths in the
// "/images/" subtree.
//
// Note that since a pattern ending in a slash names a rooted subtree,
// the pattern "/" matches all paths not matched by other registered
// patterns, not just the URL with Path == "/".
//
// If a subtree has been registered and a request is received naming the
// subtree root without its trailing slash, ServeMux redirects that
// request to the subtree root (adding the trailing slash). This behavior can
// be overridden with a separate registration for the path without
// the trailing slash. For example, registering "/images/" causes ServeMux
// to redirect a request for "/images" to "/images/", unless "/images" has
// been registered separately.
//
// Patterns may optionally begin with a host name, restricting matches to
// URLs on that host only. Host-specific patterns take precedence over
// general patterns, so that a handler might register for the two patterns
// "/codesearch" and "codesearch.google.com/" without also taking over
// requests for "http://www.google.com/".
//
// ServeMux also takes care of sanitizing the URL request path and the Host
// header, stripping the port number and redirecting any request containing . or
// .. elements or repeated slashes to an equivalent, cleaner URL.
type ServeMux struct {
	mu    sync.RWMutex          // guards m, es, hosts
	m     map[string]muxEntry   // exact pattern -> entry
	es    []muxEntry            // slice of entries sorted from longest to shortest.
	hosts bool                  // whether any patterns contain hostnames
}

// muxEntry pairs a registered pattern with its handler.
type muxEntry struct {
	h       Handler
	pattern string
}
// NewServeMux allocates and returns a new ServeMux.
func NewServeMux() *ServeMux { return new(ServeMux) }

// DefaultServeMux is the default ServeMux used by Serve.
// It points at an unexported value so the package-level Handle/HandleFunc
// helpers and user code share the same mux.
var DefaultServeMux = &defaultServeMux

var defaultServeMux ServeMux
// cleanPath returns the canonical path for p, eliminating . and .. elements.
// The result is always rooted ("/"-prefixed); an empty p yields "/".
func cleanPath(p string) string {
	switch {
	case p == "":
		return "/"
	case p[0] != '/':
		p = "/" + p
	}
	np := path.Clean(p)
	// path.Clean removes trailing slash except for root;
	// put the trailing slash back if necessary.
	if p[len(p)-1] != '/' || np == "/" {
		return np
	}
	// Fast path for common case of p being the string we want:
	if len(p) == len(np)+1 && strings.HasPrefix(p, np) {
		return p
	}
	return np + "/"
}
// stripHostPort returns h without any trailing ":<port>".
// A host that doesn't parse as host:port is returned unchanged.
func stripHostPort(h string) string {
	if !strings.Contains(h, ":") {
		// No port present; nothing to strip.
		return h
	}
	host, _, err := net.SplitHostPort(h)
	if err != nil {
		// on error, return unchanged (e.g. bare IPv6 literal)
		return h
	}
	return host
}
// Find a handler on a handler map given a path string.
// Most-specific (longest) pattern wins.
//
// Callers must hold mux.mu (at least read-locked).
func (mux *ServeMux) match(path string) (h Handler, pattern string) {
	// Check for exact match first.
	v, ok := mux.m[path]
	if ok {
		return v.h, v.pattern
	}
	// Check for longest valid match.  mux.es contains all patterns
	// that end in / sorted from longest to shortest.
	for _, e := range mux.es {
		if strings.HasPrefix(path, e.pattern) {
			return e.h, e.pattern
		}
	}
	return nil, ""
}
// redirectToPathSlash determines if the given path needs appending "/" to it.
// This occurs when a handler for path + "/" was already registered, but
// not for path itself. If the path needs appending to, it creates a new
// URL, setting the path to u.Path + "/" and returning true to indicate so.
func (mux *ServeMux) redirectToPathSlash(host, path string, u *url.URL) (*url.URL, bool) {
	mux.mu.RLock()
	shouldRedirect := mux.shouldRedirectRLocked(host, path)
	mux.mu.RUnlock()
	if !shouldRedirect {
		return u, false
	}
	// Build a fresh URL so the caller's is untouched; keep the query.
	path = path + "/"
	u = &url.URL{Path: path, RawQuery: u.RawQuery}
	return u, true
}
// shouldRedirectRLocked reports whether the given path and host should be redirected to
// path+"/". This should happen if a handler is registered for path+"/" but
// not path -- see comments at ServeMux.
//
// Callers must hold mux.mu read-locked.
func (mux *ServeMux) shouldRedirectRLocked(host, path string) bool {
	candidates := []string{path, host + path}
	// An exact registration for path (or host+path) wins; no redirect.
	for _, c := range candidates {
		if _, ok := mux.m[c]; ok {
			return false
		}
	}
	if path == "" {
		return false
	}
	// Redirect only when a subtree pattern path+"/" exists and the
	// request path itself lacks the trailing slash.
	for _, c := range candidates {
		if _, ok := mux.m[c+"/"]; ok {
			return path[len(path)-1] != '/'
		}
	}
	return false
}
// Handler returns the handler to use for the given request,
// consulting r.Method, r.Host, and r.URL.Path. It always returns
// a non-nil handler. If the path is not in its canonical form, the
// handler will be an internally-generated handler that redirects
// to the canonical path. If the host contains a port, it is ignored
// when matching handlers.
//
// The path and host are used unchanged for CONNECT requests.
//
// Handler also returns the registered pattern that matches the
// request or, in the case of internally-generated redirects,
// the pattern that will match after following the redirect.
//
// If there is no registered handler that applies to the request,
// Handler returns a ``page not found'' handler and an empty pattern.
func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) {
	// CONNECT requests are not canonicalized.
	if r.Method == "CONNECT" {
		// If r.URL.Path is /tree and its handler is not registered,
		// the /tree -> /tree/ redirect applies to CONNECT requests
		// but the path canonicalization does not.
		if u, ok := mux.redirectToPathSlash(r.URL.Host, r.URL.Path, r.URL); ok {
			return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
		}
		return mux.handler(r.Host, r.URL.Path)
	}
	// All other requests have any port stripped and path cleaned
	// before passing to mux.handler.
	host := stripHostPort(r.Host)
	path := cleanPath(r.URL.Path)
	// If the given path is /tree and its handler is not registered,
	// redirect for /tree/.
	if u, ok := mux.redirectToPathSlash(host, path, r.URL); ok {
		return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
	}
	if path != r.URL.Path {
		// The path was not canonical; redirect to the cleaned path,
		// reporting the pattern that will match after the redirect.
		_, pattern = mux.handler(host, path)
		url := *r.URL
		url.Path = path
		return RedirectHandler(url.String(), StatusMovedPermanently), pattern
	}
	return mux.handler(host, r.URL.Path)
}
// handler is the main implementation of Handler.
// The path is known to be in canonical form, except for CONNECT methods.
//
// It never returns a nil Handler: unmatched requests get NotFoundHandler.
func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) {
	mux.mu.RLock()
	defer mux.mu.RUnlock()

	// Host-specific pattern takes precedence over generic ones
	if mux.hosts {
		h, pattern = mux.match(host + path)
	}
	if h == nil {
		h, pattern = mux.match(path)
	}
	if h == nil {
		h, pattern = NotFoundHandler(), ""
	}
	return
}
// ServeHTTP dispatches the request to the handler whose
// pattern most closely matches the request URL.
func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) {
	// "*" (OPTIONS * / proxy form) is not routable; reject it.
	if r.RequestURI == "*" {
		if r.ProtoAtLeast(1, 1) {
			w.Header().Set("Connection", "close")
		}
		w.WriteHeader(StatusBadRequest)
		return
	}
	h, _ := mux.Handler(r)
	h.ServeHTTP(w, r)
}
// Handle registers the handler for the given pattern.
// If a handler already exists for pattern, Handle panics.
func (mux *ServeMux) Handle(pattern string, handler Handler) {
	mux.mu.Lock()
	defer mux.mu.Unlock()

	if pattern == "" {
		panic("http: invalid pattern")
	}
	if handler == nil {
		panic("http: nil handler")
	}
	if _, exist := mux.m[pattern]; exist {
		panic("http: multiple registrations for " + pattern)
	}

	// Lazily allocate the map on first registration.
	if mux.m == nil {
		mux.m = make(map[string]muxEntry)
	}
	e := muxEntry{h: handler, pattern: pattern}
	mux.m[pattern] = e
	// Subtree patterns (trailing slash) also go in the sorted slice
	// used for longest-prefix matching.
	if pattern[len(pattern)-1] == '/' {
		mux.es = appendSorted(mux.es, e)
	}

	// A pattern not starting with '/' begins with a hostname.
	if pattern[0] != '/' {
		mux.hosts = true
	}
}
// appendSorted inserts e into es, keeping es sorted from longest to
// shortest pattern, and returns the updated slice.
func appendSorted(es []muxEntry, e muxEntry) []muxEntry {
	// Find the first entry whose pattern is shorter than e's.
	idx := sort.Search(len(es), func(i int) bool {
		return len(es[i].pattern) < len(e.pattern)
	})
	if idx == len(es) {
		// e is the shortest (or es is empty): append at the end.
		return append(es, e)
	}
	// Insert e at idx: grow by one (duplicating es[idx]), then the
	// overlapping append shifts the shorter entries down a slot.
	es = append(es[:idx+1], es[idx:]...)
	es[idx] = e
	return es
}
// HandleFunc registers the handler function for the given pattern.
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
	if handler == nil {
		panic("http: nil handler")
	}
	mux.Handle(pattern, HandlerFunc(handler))
}

// Handle registers the handler for the given pattern
// in the DefaultServeMux.
// The documentation for ServeMux explains how patterns are matched.
func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }

// HandleFunc registers the handler function for the given pattern
// in the DefaultServeMux.
// The documentation for ServeMux explains how patterns are matched.
func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
	DefaultServeMux.HandleFunc(pattern, handler)
}
// Serve accepts incoming HTTP connections on the listener l,
// creating a new service goroutine for each. The service goroutines
// read requests and then call handler to reply to them.
//
// The handler is typically nil, in which case the DefaultServeMux is used.
//
// HTTP/2 support is only enabled if the Listener returns *tls.Conn
// connections and they were configured with "h2" in the TLS
// Config.NextProtos.
//
// Serve always returns a non-nil error.
func Serve(l net.Listener, handler Handler) error {
	srv := &Server{Handler: handler}
	return srv.Serve(l)
}

// ServeTLS accepts incoming HTTPS connections on the listener l,
// creating a new service goroutine for each. The service goroutines
// read requests and then call handler to reply to them.
//
// The handler is typically nil, in which case the DefaultServeMux is used.
//
// Additionally, files containing a certificate and matching private key
// for the server must be provided. If the certificate is signed by a
// certificate authority, the certFile should be the concatenation
// of the server's certificate, any intermediates, and the CA's certificate.
//
// ServeTLS always returns a non-nil error.
func ServeTLS(l net.Listener, handler Handler, certFile, keyFile string) error {
	srv := &Server{Handler: handler}
	return srv.ServeTLS(l, certFile, keyFile)
}
// A Server defines parameters for running an HTTP server.
// The zero value for Server is a valid configuration.
type Server struct {
	// Addr optionally specifies the TCP address for the server to listen on,
	// in the form "host:port". If empty, ":http" (port 80) is used.
	// The service names are defined in RFC 6335 and assigned by IANA.
	// See net.Dial for details of the address format.
	Addr string

	Handler Handler // handler to invoke, http.DefaultServeMux if nil

	// TLSConfig optionally provides a TLS configuration for use
	// by ServeTLS and ListenAndServeTLS. Note that this value is
	// cloned by ServeTLS and ListenAndServeTLS, so it's not
	// possible to modify the configuration with methods like
	// tls.Config.SetSessionTicketKeys. To use
	// SetSessionTicketKeys, use Server.Serve with a TLS Listener
	// instead.
	TLSConfig *tls.Config

	// ReadTimeout is the maximum duration for reading the entire
	// request, including the body.
	//
	// Because ReadTimeout does not let Handlers make per-request
	// decisions on each request body's acceptable deadline or
	// upload rate, most users will prefer to use
	// ReadHeaderTimeout. It is valid to use them both.
	ReadTimeout time.Duration

	// ReadHeaderTimeout is the amount of time allowed to read
	// request headers. The connection's read deadline is reset
	// after reading the headers and the Handler can decide what
	// is considered too slow for the body. If ReadHeaderTimeout
	// is zero, the value of ReadTimeout is used. If both are
	// zero, there is no timeout.
	ReadHeaderTimeout time.Duration

	// WriteTimeout is the maximum duration before timing out
	// writes of the response. It is reset whenever a new
	// request's header is read. Like ReadTimeout, it does not
	// let Handlers make decisions on a per-request basis.
	WriteTimeout time.Duration

	// IdleTimeout is the maximum amount of time to wait for the
	// next request when keep-alives are enabled. If IdleTimeout
	// is zero, the value of ReadTimeout is used. If both are
	// zero, there is no timeout.
	IdleTimeout time.Duration

	// MaxHeaderBytes controls the maximum number of bytes the
	// server will read parsing the request header's keys and
	// values, including the request line. It does not limit the
	// size of the request body.
	// If zero, DefaultMaxHeaderBytes is used.
	MaxHeaderBytes int

	// TLSNextProto optionally specifies a function to take over
	// ownership of the provided TLS connection when an ALPN
	// protocol upgrade has occurred. The map key is the protocol
	// name negotiated. The Handler argument should be used to
	// handle HTTP requests and will initialize the Request's TLS
	// and RemoteAddr if not already set. The connection is
	// automatically closed when the function returns.
	// If TLSNextProto is not nil, HTTP/2 support is not enabled
	// automatically.
	TLSNextProto map[string]func(*Server, *tls.Conn, Handler)

	// ConnState specifies an optional callback function that is
	// called when a client connection changes state. See the
	// ConnState type and associated constants for details.
	ConnState func(net.Conn, ConnState)

	// ErrorLog specifies an optional logger for errors accepting
	// connections, unexpected behavior from handlers, and
	// underlying FileSystem errors.
	// If nil, logging is done via the log package's standard logger.
	ErrorLog *log.Logger

	// BaseContext optionally specifies a function that returns
	// the base context for incoming requests on this server.
	// The provided Listener is the specific Listener that's
	// about to start accepting requests.
	// If BaseContext is nil, the default is context.Background().
	// If non-nil, it must return a non-nil context.
	BaseContext func(net.Listener) context.Context

	// ConnContext optionally specifies a function that modifies
	// the context used for a new connection c. The provided ctx
	// is derived from the base context and has a ServerContextKey
	// value.
	ConnContext func(ctx context.Context, c net.Conn) context.Context

	inShutdown atomicBool // true when the server is in shutdown

	disableKeepAlives int32     // accessed atomically.
	nextProtoOnce     sync.Once // guards setupHTTP2_* init
	nextProtoErr      error     // result of http2.ConfigureServer if used

	mu         sync.Mutex // guards listeners, activeConn, doneChan, onShutdown
	listeners  map[*net.Listener]struct{}
	activeConn map[*conn]struct{}
	doneChan   chan struct{}
	onShutdown []func()
}
// getDoneChan returns the channel that is closed when the server shuts
// down, creating it lazily under s.mu.
func (s *Server) getDoneChan() <-chan struct{} {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.getDoneChanLocked()
}

// getDoneChanLocked lazily allocates s.doneChan. Callers must hold s.mu.
func (s *Server) getDoneChanLocked() chan struct{} {
	if s.doneChan == nil {
		s.doneChan = make(chan struct{})
	}
	return s.doneChan
}

// closeDoneChanLocked closes s.doneChan exactly once. Callers must hold s.mu.
func (s *Server) closeDoneChanLocked() {
	ch := s.getDoneChanLocked()
	select {
	case <-ch:
		// Already closed. Don't close again.
	default:
		// Safe to close here. We're the only closer, guarded
		// by s.mu.
		close(ch)
	}
}
// Close immediately closes all active net.Listeners and any
// connections in state StateNew, StateActive, or StateIdle. For a
// graceful shutdown, use Shutdown.
//
// Close does not attempt to close (and does not even know about)
// any hijacked connections, such as WebSockets.
//
// Close returns any error returned from closing the Server's
// underlying Listener(s).
func (srv *Server) Close() error {
	srv.inShutdown.setTrue()
	srv.mu.Lock()
	defer srv.mu.Unlock()
	srv.closeDoneChanLocked()
	err := srv.closeListenersLocked()
	// Abruptly close every tracked connection, regardless of state.
	for c := range srv.activeConn {
		c.rwc.Close()
		delete(srv.activeConn, c)
	}
	return err
}
// shutdownPollIntervalMax is the max polling interval when checking
// quiescence during Server.Shutdown. Polling starts with a small
// interval and backs off to the max.
// Ideally we could find a solution that doesn't involve polling,
// but which also doesn't have a high runtime cost (and doesn't
// involve any contentious mutexes), but that is left as an
// exercise for the reader.
const shutdownPollIntervalMax = 500 * time.Millisecond

// Shutdown gracefully shuts down the server without interrupting any
// active connections. Shutdown works by first closing all open
// listeners, then closing all idle connections, and then waiting
// indefinitely for connections to return to idle and then shut down.
// If the provided context expires before the shutdown is complete,
// Shutdown returns the context's error, otherwise it returns any
// error returned from closing the Server's underlying Listener(s).
//
// When Shutdown is called, Serve, ListenAndServe, and
// ListenAndServeTLS immediately return ErrServerClosed. Make sure the
// program doesn't exit and waits instead for Shutdown to return.
//
// Shutdown does not attempt to close nor wait for hijacked
// connections such as WebSockets. The caller of Shutdown should
// separately notify such long-lived connections of shutdown and wait
// for them to close, if desired. See RegisterOnShutdown for a way to
// register shutdown notification functions.
//
// Once Shutdown has been called on a server, it may not be reused;
// future calls to methods such as Serve will return ErrServerClosed.
func (srv *Server) Shutdown(ctx context.Context) error {
	srv.inShutdown.setTrue()

	srv.mu.Lock()
	lnerr := srv.closeListenersLocked()
	srv.closeDoneChanLocked()
	// Fire registered shutdown hooks concurrently; they start
	// protocol-specific graceful shutdown but need not finish here.
	for _, f := range srv.onShutdown {
		go f()
	}
	srv.mu.Unlock()

	// Poll for quiescence with exponential backoff up to
	// shutdownPollIntervalMax.
	pollIntervalBase := time.Millisecond
	nextPollInterval := func() time.Duration {
		// Add 10% jitter.
		interval := pollIntervalBase + time.Duration(rand.Intn(int(pollIntervalBase/10)))
		// Double and clamp for next time.
		pollIntervalBase *= 2
		if pollIntervalBase > shutdownPollIntervalMax {
			pollIntervalBase = shutdownPollIntervalMax
		}
		return interval
	}

	timer := time.NewTimer(nextPollInterval())
	defer timer.Stop()
	for {
		if srv.closeIdleConns() && srv.numListeners() == 0 {
			return lnerr
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-timer.C:
			timer.Reset(nextPollInterval())
		}
	}
}
// RegisterOnShutdown registers a function that Shutdown will invoke in
// its own goroutine. This can be used to gracefully shut down
// connections that have undergone ALPN protocol upgrade or that have
// been hijacked. The registered function should begin
// protocol-specific graceful shutdown but must not block waiting for
// shutdown to complete.
func (srv *Server) RegisterOnShutdown(f func()) {
	srv.mu.Lock()
	defer srv.mu.Unlock()
	srv.onShutdown = append(srv.onShutdown, f)
}
// numListeners returns the number of currently tracked listeners.
func (s *Server) numListeners() int {
	s.mu.Lock()
	defer s.mu.Unlock()
	return len(s.listeners)
}

// closeIdleConns closes all idle connections and reports whether the
// server is quiescent.
func (s *Server) closeIdleConns() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	quiescent := true
	for c := range s.activeConn {
		st, unixSec := c.getState()
		// Issue 22682: treat StateNew connections as if
		// they're idle if we haven't read the first request's
		// header in over 5 seconds.
		if st == StateNew && unixSec < time.Now().Unix()-5 {
			st = StateIdle
		}
		if st != StateIdle || unixSec == 0 {
			// Assume unixSec == 0 means it's a very new
			// connection, without state set yet.
			quiescent = false
			continue
		}
		c.rwc.Close()
		delete(s.activeConn, c)
	}
	return quiescent
}

// closeListenersLocked closes every tracked listener, returning the
// first error encountered (later errors are dropped).
// Callers must hold s.mu.
func (s *Server) closeListenersLocked() error {
	var err error
	for ln := range s.listeners {
		if cerr := (*ln).Close(); cerr != nil && err == nil {
			err = cerr
		}
	}
	return err
}
// A ConnState represents the state of a client connection to a server.
// It's used by the optional Server.ConnState hook.
type ConnState int

const (
	// StateNew represents a new connection that is expected to
	// send a request immediately. Connections begin at this
	// state and then transition to either StateActive or
	// StateClosed.
	StateNew ConnState = iota

	// StateActive represents a connection that has read 1 or more
	// bytes of a request. The Server.ConnState hook for
	// StateActive fires before the request has entered a handler
	// and doesn't fire again until the request has been
	// handled. After the request is handled, the state
	// transitions to StateClosed, StateHijacked, or StateIdle.
	// For HTTP/2, StateActive fires on the transition from zero
	// to one active request, and only transitions away once all
	// active requests are complete. That means that ConnState
	// cannot be used to do per-request work; ConnState only notes
	// the overall state of the connection.
	StateActive

	// StateIdle represents a connection that has finished
	// handling a request and is in the keep-alive state, waiting
	// for a new request. Connections transition from StateIdle
	// to either StateActive or StateClosed.
	StateIdle

	// StateHijacked represents a hijacked connection.
	// This is a terminal state. It does not transition to StateClosed.
	StateHijacked

	// StateClosed represents a closed connection.
	// This is a terminal state. Hijacked connections do not
	// transition to StateClosed.
	StateClosed
)

// stateName maps each ConnState to its human-readable name.
var stateName = map[ConnState]string{
	StateNew:      "new",
	StateActive:   "active",
	StateIdle:     "idle",
	StateHijacked: "hijacked",
	StateClosed:   "closed",
}

// String returns the name of the state; an unknown value yields "".
func (c ConnState) String() string {
	return stateName[c]
}
// serverHandler delegates to either the server's Handler or
// DefaultServeMux and also handles "OPTIONS *" requests.
type serverHandler struct {
	srv *Server // the owning Server whose Handler is consulted
}
// ServeHTTP dispatches the request to the server's configured Handler,
// substituting DefaultServeMux when none is set, and routing
// "OPTIONS *" requests to the global options handler.
func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) {
	h := sh.srv.Handler
	if h == nil {
		h = DefaultServeMux
	}
	if req.Method == "OPTIONS" && req.RequestURI == "*" {
		h = globalOptionsHandler{}
	}
	h.ServeHTTP(rw, req)
}
// ListenAndServe listens on the TCP network address srv.Addr and then
// calls Serve to handle requests on incoming connections.
// Accepted connections are configured to enable TCP keep-alives.
//
// If srv.Addr is blank, ":http" is used.
//
// ListenAndServe always returns a non-nil error. After Shutdown or Close,
// the returned error is ErrServerClosed.
func (srv *Server) ListenAndServe() error {
	// Refuse to start (or restart) a server that has been shut down.
	if srv.shuttingDown() {
		return ErrServerClosed
	}
	addr := srv.Addr
	if addr == "" {
		addr = ":http"
	}
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	return srv.Serve(ln)
}
// testHookServerServe, if non-nil, is invoked by Serve with the
// unwrapped listener; it exists for tests only.
var testHookServerServe func(*Server, net.Listener) // used if non-nil

// shouldDoServeHTTP2 reports whether Server.Serve should configure
// automatic HTTP/2. (which sets up the srv.TLSNextProto map)
func (srv *Server) shouldConfigureHTTP2ForServe() bool {
	if srv.TLSConfig == nil {
		// Compatibility with Go 1.6:
		// If there's no TLSConfig, it's possible that the user just
		// didn't set it on the http.Server, but did pass it to
		// tls.NewListener and passed that listener to Serve.
		// So we should configure HTTP/2 (to set up srv.TLSNextProto)
		// in case the listener returns an "h2" *tls.Conn.
		return true
	}
	// The user specified a TLSConfig on their http.Server.
	// In this, case, only configure HTTP/2 if their tls.Config
	// explicitly mentions "h2". Otherwise http2.ConfigureServer
	// would modify the tls.Config to add it, but they probably already
	// passed this tls.Config to tls.NewListener. And if they did,
	// it's too late anyway to fix it. It would only be potentially racy.
	// See Issue 15908.
	return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS)
}
// ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe,
// and ListenAndServeTLS methods after a call to Shutdown or Close.
var ErrServerClosed = errors.New("http: Server closed")

// Serve accepts incoming connections on the Listener l, creating a
// new service goroutine for each. The service goroutines read requests and
// then call srv.Handler to reply to them.
//
// HTTP/2 support is only enabled if the Listener returns *tls.Conn
// connections and they were configured with "h2" in the TLS
// Config.NextProtos.
//
// Serve always returns a non-nil error and closes l.
// After Shutdown or Close, the returned error is ErrServerClosed.
func (srv *Server) Serve(l net.Listener) error {
	if fn := testHookServerServe; fn != nil {
		fn(srv, l) // call hook with unwrapped listener
	}
	origListener := l
	// Wrap l so the deferred Close below and closeListenersLocked
	// can both call Close without double-close errors.
	l = &onceCloseListener{Listener: l}
	defer l.Close()
	if err := srv.setupHTTP2_Serve(); err != nil {
		return err
	}
	if !srv.trackListener(&l, true) {
		return ErrServerClosed
	}
	defer srv.trackListener(&l, false)
	baseCtx := context.Background()
	if srv.BaseContext != nil {
		baseCtx = srv.BaseContext(origListener)
		if baseCtx == nil {
			panic("BaseContext returned a nil context")
		}
	}
	var tempDelay time.Duration // how long to sleep on accept failure
	ctx := context.WithValue(baseCtx, ServerContextKey, srv)
	for {
		rw, err := l.Accept()
		if err != nil {
			select {
			case <-srv.getDoneChan():
				return ErrServerClosed
			default:
			}
			// Retry temporary accept errors with exponential
			// backoff, capped at 1s; any other error is fatal.
			if ne, ok := err.(net.Error); ok && ne.Temporary() {
				if tempDelay == 0 {
					tempDelay = 5 * time.Millisecond
				} else {
					tempDelay *= 2
				}
				if max := 1 * time.Second; tempDelay > max {
					tempDelay = max
				}
				srv.logf("http: Accept error: %v; retrying in %v", err, tempDelay)
				time.Sleep(tempDelay)
				continue
			}
			return err
		}
		connCtx := ctx
		if cc := srv.ConnContext; cc != nil {
			connCtx = cc(connCtx, rw)
			if connCtx == nil {
				panic("ConnContext returned nil")
			}
		}
		tempDelay = 0 // reset backoff after a successful accept
		c := srv.newConn(rw)
		c.setState(c.rwc, StateNew, runHooks) // before Serve can return
		go c.serve(connCtx)
	}
}
// ServeTLS accepts incoming connections on the Listener l, creating a
// new service goroutine for each. The service goroutines perform TLS
// setup and then read requests, calling srv.Handler to reply to them.
//
// Files containing a certificate and matching private key for the
// server must be provided if neither the Server's
// TLSConfig.Certificates nor TLSConfig.GetCertificate are populated.
// If the certificate is signed by a certificate authority, the
// certFile should be the concatenation of the server's certificate,
// any intermediates, and the CA's certificate.
//
// ServeTLS always returns a non-nil error. After Shutdown or Close, the
// returned error is ErrServerClosed.
func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error {
	// Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig
	// before we clone it and create the TLS Listener.
	if err := srv.setupHTTP2_ServeTLS(); err != nil {
		return err
	}
	config := cloneTLSConfig(srv.TLSConfig)
	// Always advertise HTTP/1.1 alongside whatever else is configured.
	if !strSliceContains(config.NextProtos, "http/1.1") {
		config.NextProtos = append(config.NextProtos, "http/1.1")
	}
	configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil
	// Load the key pair from disk when the config supplies no cert,
	// or when the caller explicitly passed file paths.
	if !configHasCert || certFile != "" || keyFile != "" {
		var err error
		config.Certificates = make([]tls.Certificate, 1)
		config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			return err
		}
	}
	tlsListener := tls.NewListener(l, config)
	return srv.Serve(tlsListener)
}
// trackListener adds or removes a net.Listener to the set of tracked
// listeners.
//
// We store a pointer to interface in the map set, in case the
// net.Listener is not comparable. This is safe because we only call
// trackListener via Serve and can track+defer untrack the same
// pointer to local variable there. We never need to compare a
// Listener from another caller.
//
// It reports whether the server is still up (not Shutdown or Closed).
func (s *Server) trackListener(ln *net.Listener, add bool) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.listeners == nil {
		s.listeners = make(map[*net.Listener]struct{})
	}
	if add {
		// Refuse new listeners once shutdown has begun so Serve
		// returns ErrServerClosed instead of accepting connections.
		if s.shuttingDown() {
			return false
		}
		s.listeners[ln] = struct{}{}
	} else {
		delete(s.listeners, ln)
	}
	return true
}
// trackConn records (or forgets) c in the server's set of active
// connections, lazily allocating the set on first use.
func (s *Server) trackConn(c *conn, add bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.activeConn == nil {
		s.activeConn = make(map[*conn]struct{})
	}
	if !add {
		delete(s.activeConn, c)
		return
	}
	s.activeConn[c] = struct{}{}
}
// idleTimeout returns the keep-alive idle timeout, falling back to
// ReadTimeout when IdleTimeout is unset.
func (s *Server) idleTimeout() time.Duration {
	if s.IdleTimeout != 0 {
		return s.IdleTimeout
	}
	return s.ReadTimeout
}

// readHeaderTimeout returns the request-header read timeout, falling
// back to ReadTimeout when ReadHeaderTimeout is unset.
func (s *Server) readHeaderTimeout() time.Duration {
	if s.ReadHeaderTimeout != 0 {
		return s.ReadHeaderTimeout
	}
	return s.ReadTimeout
}

// doKeepAlives reports whether keep-alives should be honored: they are
// on unless explicitly disabled or the server is shutting down.
func (s *Server) doKeepAlives() bool {
	return atomic.LoadInt32(&s.disableKeepAlives) == 0 && !s.shuttingDown()
}

// shuttingDown reports whether Shutdown or Close has been called.
func (s *Server) shuttingDown() bool {
	return s.inShutdown.isSet()
}
// SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled.
// By default, keep-alives are always enabled. Only very
// resource-constrained environments or servers in the process of
// shutting down should disable them.
func (srv *Server) SetKeepAlivesEnabled(v bool) {
	if !v {
		atomic.StoreInt32(&srv.disableKeepAlives, 1)
		// Close idle HTTP/1 conns:
		srv.closeIdleConns()
		// TODO: Issue 26303: close HTTP/2 conns as soon as they become idle.
		return
	}
	atomic.StoreInt32(&srv.disableKeepAlives, 0)
}
// logf logs to the server's ErrorLog if set, else the standard logger.
func (s *Server) logf(format string, args ...interface{}) {
	if s.ErrorLog != nil {
		s.ErrorLog.Printf(format, args...)
	} else {
		log.Printf(format, args...)
	}
}

// logf prints to the ErrorLog of the *Server associated with request r
// via ServerContextKey. If there's no associated server, or if ErrorLog
// is nil, logging is done via the log package's standard logger.
func logf(r *Request, format string, args ...interface{}) {
	s, _ := r.Context().Value(ServerContextKey).(*Server)
	if s != nil && s.ErrorLog != nil {
		s.ErrorLog.Printf(format, args...)
	} else {
		log.Printf(format, args...)
	}
}
// ListenAndServe listens on the TCP network address addr and then calls
// Serve with handler to handle requests on incoming connections.
// Accepted connections are configured to enable TCP keep-alives.
//
// The handler is typically nil, in which case the DefaultServeMux is used.
//
// ListenAndServe always returns a non-nil error.
func ListenAndServe(addr string, handler Handler) error {
	srv := &Server{Addr: addr, Handler: handler}
	return srv.ListenAndServe()
}
// ListenAndServeTLS acts identically to ListenAndServe, except that it
// expects HTTPS connections. Additionally, files containing a certificate and
// matching private key for the server must be provided. If the certificate
// is signed by a certificate authority, the certFile should be the concatenation
// of the server's certificate, any intermediates, and the CA's certificate.
func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
	srv := &Server{Addr: addr, Handler: handler}
	return srv.ListenAndServeTLS(certFile, keyFile)
}
// ListenAndServeTLS listens on the TCP network address srv.Addr and
// then calls ServeTLS to handle requests on incoming TLS connections.
// Accepted connections are configured to enable TCP keep-alives.
//
// Filenames containing a certificate and matching private key for the
// server must be provided if neither the Server's TLSConfig.Certificates
// nor TLSConfig.GetCertificate are populated. If the certificate is
// signed by a certificate authority, the certFile should be the
// concatenation of the server's certificate, any intermediates, and
// the CA's certificate.
//
// If srv.Addr is blank, ":https" is used.
//
// ListenAndServeTLS always returns a non-nil error. After Shutdown or
// Close, the returned error is ErrServerClosed.
func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
	// Refuse to start (or restart) a server that has been shut down.
	if srv.shuttingDown() {
		return ErrServerClosed
	}
	addr := srv.Addr
	if addr == "" {
		addr = ":https"
	}
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	defer ln.Close()
	return srv.ServeTLS(ln, certFile, keyFile)
}
// setupHTTP2_ServeTLS conditionally configures HTTP/2 on
// srv and reports whether there was an error setting it up. If it is
// not configured for policy reasons, nil is returned.
func (srv *Server) setupHTTP2_ServeTLS() error {
	srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults)
	return srv.nextProtoErr
}

// setupHTTP2_Serve is called from (*Server).Serve and conditionally
// configures HTTP/2 on srv using a more conservative policy than
// setupHTTP2_ServeTLS because Serve is called after tls.Listen,
// and may be called concurrently. See shouldConfigureHTTP2ForServe.
//
// The tests named TestTransportAutomaticHTTP2* and
// TestConcurrentServerServe in server_test.go demonstrate some
// of the supported use cases and motivations.
func (srv *Server) setupHTTP2_Serve() error {
	srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve)
	return srv.nextProtoErr
}

// onceSetNextProtoDefaults_Serve applies the conservative policy:
// only configure HTTP/2 when shouldConfigureHTTP2ForServe allows it.
func (srv *Server) onceSetNextProtoDefaults_Serve() {
	if srv.shouldConfigureHTTP2ForServe() {
		srv.onceSetNextProtoDefaults()
	}
}

// onceSetNextProtoDefaults configures HTTP/2, if the user hasn't
// configured otherwise. (by setting srv.TLSNextProto non-nil)
// It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*).
func (srv *Server) onceSetNextProtoDefaults() {
	// GODEBUG=http2server=0 (or a build without bundled HTTP/2)
	// disables automatic HTTP/2 entirely.
	if omitBundledHTTP2 || strings.Contains(os.Getenv("GODEBUG"), "http2server=0") {
		return
	}
	// Enable HTTP/2 by default if the user hasn't otherwise
	// configured their TLSNextProto map.
	if srv.TLSNextProto == nil {
		conf := &http2Server{
			NewWriteScheduler: func() http2WriteScheduler { return http2NewPriorityWriteScheduler(nil) },
		}
		srv.nextProtoErr = http2ConfigureServer(srv, conf)
	}
}
// TimeoutHandler returns a Handler that runs h with the given time limit.
//
// The new Handler calls h.ServeHTTP to handle each request, but if a
// call runs for longer than its time limit, the handler responds with
// a 503 Service Unavailable error and the given message in its body.
// (If msg is empty, a suitable default message will be sent.)
// After such a timeout, writes by h to its ResponseWriter will return
// ErrHandlerTimeout.
//
// TimeoutHandler supports the Pusher interface but does not support
// the Hijacker or Flusher interfaces.
func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
	return &timeoutHandler{
		handler: h,
		body:    msg,
		dt:      dt,
	}
}

// ErrHandlerTimeout is returned on ResponseWriter Write calls
// in handlers which have timed out.
var ErrHandlerTimeout = errors.New("http: Handler timeout")

// timeoutHandler wraps another Handler, enforcing a per-request time
// limit on its execution.
type timeoutHandler struct {
	handler Handler       // the wrapped handler
	body    string        // optional custom 503 body ("" means default)
	dt      time.Duration // time limit per request

	// When set, no context will be created and this context will
	// be used instead.
	testContext context.Context
}
// errorBody returns the 503 response body sent after a timeout: the
// handler-supplied message if one was set, else a default HTML page.
func (h *timeoutHandler) errorBody() string {
	if h.body == "" {
		return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>"
	}
	return h.body
}
// ServeHTTP runs the wrapped handler in a goroutine against a buffered
// timeoutWriter and races its completion (or panic) against the
// request's timeout context. Only the winning branch touches the real
// ResponseWriter, guarded by tw.mu.
func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) {
	ctx := h.testContext
	if ctx == nil {
		var cancelCtx context.CancelFunc
		ctx, cancelCtx = context.WithTimeout(r.Context(), h.dt)
		defer cancelCtx()
	}
	r = r.WithContext(ctx)
	done := make(chan struct{})
	tw := &timeoutWriter{
		w:   w,
		h:   make(Header),
		req: r,
	}
	// Buffered so the goroutine never blocks delivering a panic value
	// even if this function has already returned via the timeout path.
	panicChan := make(chan interface{}, 1)
	go func() {
		defer func() {
			if p := recover(); p != nil {
				panicChan <- p
			}
		}()
		h.handler.ServeHTTP(tw, r)
		close(done)
	}()
	select {
	case p := <-panicChan:
		// Re-raise the handler's panic on the serving goroutine.
		panic(p)
	case <-done:
		// Handler finished in time: copy its buffered headers,
		// status, and body to the real writer.
		tw.mu.Lock()
		defer tw.mu.Unlock()
		dst := w.Header()
		for k, vv := range tw.h {
			dst[k] = vv
		}
		if !tw.wroteHeader {
			tw.code = StatusOK
		}
		w.WriteHeader(tw.code)
		w.Write(tw.wbuf.Bytes())
	case <-ctx.Done():
		// Timed out: respond 503 and mark tw so later writes by the
		// still-running handler return ErrHandlerTimeout.
		tw.mu.Lock()
		defer tw.mu.Unlock()
		w.WriteHeader(StatusServiceUnavailable)
		io.WriteString(w, h.errorBody())
		tw.timedOut = true
	}
}
// timeoutWriter buffers a handler's response so it can be either
// flushed to the real ResponseWriter (on completion) or discarded
// (on timeout). All mutable fields are guarded by mu.
type timeoutWriter struct {
	w    ResponseWriter // the real writer; written only by timeoutHandler.ServeHTTP
	h    Header         // buffered response headers
	wbuf bytes.Buffer   // buffered response body
	req  *Request       // for logging superfluous WriteHeader calls

	mu          sync.Mutex
	timedOut    bool // set once the 503 has been sent; writes then fail
	wroteHeader bool
	code        int
}

// Compile-time check that timeoutWriter supports HTTP/2 push.
var _ Pusher = (*timeoutWriter)(nil)
// Push implements the Pusher interface by delegating to the underlying
// ResponseWriter when it supports push, else reporting ErrNotSupported.
func (tw *timeoutWriter) Push(target string, opts *PushOptions) error {
	pusher, ok := tw.w.(Pusher)
	if !ok {
		return ErrNotSupported
	}
	return pusher.Push(target, opts)
}
// Header returns the buffered header map.
func (tw *timeoutWriter) Header() Header { return tw.h }

// Write appends p to the buffered body, implicitly writing a 200
// header first if none was written. After a timeout it fails with
// ErrHandlerTimeout.
func (tw *timeoutWriter) Write(p []byte) (int, error) {
	tw.mu.Lock()
	defer tw.mu.Unlock()
	if tw.timedOut {
		return 0, ErrHandlerTimeout
	}
	if !tw.wroteHeader {
		tw.writeHeaderLocked(StatusOK)
	}
	return tw.wbuf.Write(p)
}
// writeHeaderLocked records the status code once; duplicate calls are
// logged as superfluous and ignored, as are calls after a timeout.
// Callers must hold tw.mu.
func (tw *timeoutWriter) writeHeaderLocked(code int) {
	checkWriteHeaderCode(code)
	switch {
	case tw.timedOut:
		return
	case tw.wroteHeader:
		if tw.req != nil {
			caller := relevantCaller()
			logf(tw.req, "http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
		}
	default:
		tw.wroteHeader = true
		tw.code = code
	}
}

// WriteHeader records code as the buffered response status.
func (tw *timeoutWriter) WriteHeader(code int) {
	tw.mu.Lock()
	defer tw.mu.Unlock()
	tw.writeHeaderLocked(code)
}
// onceCloseListener wraps a net.Listener, protecting it from
// multiple Close calls.
type onceCloseListener struct {
	net.Listener
	once     sync.Once
	closeErr error // result of the single underlying Close, replayed to later callers
}

// Close closes the underlying listener exactly once; subsequent calls
// return the first call's error.
func (oc *onceCloseListener) Close() error {
	oc.once.Do(oc.close)
	return oc.closeErr
}

// close performs the real Close; invoked only via oc.once.
func (oc *onceCloseListener) close() { oc.closeErr = oc.Listener.Close() }
// globalOptionsHandler responds to "OPTIONS *" requests.
type globalOptionsHandler struct{}

// ServeHTTP replies with an empty body, draining at most 4KB of any
// request body so the connection can be reused.
func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) {
	w.Header().Set("Content-Length", "0")
	if r.ContentLength != 0 {
		// Read up to 4KB of OPTIONS body (as mentioned in the
		// spec as being reserved for future use), but anything
		// over that is considered a waste of server resources
		// (or an attack) and we abort and close the connection,
		// courtesy of MaxBytesReader's EOF behavior.
		mb := MaxBytesReader(w, r.Body, 4<<10)
		io.Copy(io.Discard, mb)
	}
}
// initALPNRequest is an HTTP handler that initializes certain
// uninitialized fields in its *Request. Such partially-initialized
// Requests come from ALPN protocol handlers.
type initALPNRequest struct {
	ctx context.Context // base context to expose via BaseContext
	c   *tls.Conn       // the underlying TLS connection
	h   serverHandler   // the real handler to delegate to
}

// BaseContext is an exported but unadvertised http.Handler method
// recognized by x/net/http2 to pass down a context; the TLSNextProto
// API predates context support so we shoehorn through the only
// interface we have available.
func (h initALPNRequest) BaseContext() context.Context { return h.ctx }

// ServeHTTP fills in TLS state, a non-nil Body, and RemoteAddr when
// missing, then delegates to the wrapped handler.
func (h initALPNRequest) ServeHTTP(rw ResponseWriter, req *Request) {
	if req.TLS == nil {
		req.TLS = &tls.ConnectionState{}
		*req.TLS = h.c.ConnectionState()
	}
	if req.Body == nil {
		req.Body = NoBody
	}
	if req.RemoteAddr == "" {
		req.RemoteAddr = h.c.RemoteAddr().String()
	}
	h.h.ServeHTTP(rw, req)
}
// loggingConn is used for debugging.
type loggingConn struct {
	name string // unique label ("<base>-<n>") prefixed to every log line
	net.Conn
}

var (
	uniqNameMu   sync.Mutex
	uniqNameNext = make(map[string]int) // per-baseName counter for unique conn names
)

// newLoggingConn wraps c so every Read/Write/Close is logged under a
// name unique for baseName.
func newLoggingConn(baseName string, c net.Conn) net.Conn {
	uniqNameMu.Lock()
	defer uniqNameMu.Unlock()
	uniqNameNext[baseName]++
	return &loggingConn{
		name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]),
		Conn: c,
	}
}

// Write logs before and after delegating to the wrapped connection.
func (c *loggingConn) Write(p []byte) (n int, err error) {
	log.Printf("%s.Write(%d) = ....", c.name, len(p))
	n, err = c.Conn.Write(p)
	log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err)
	return
}

// Read logs before and after delegating to the wrapped connection.
func (c *loggingConn) Read(p []byte) (n int, err error) {
	log.Printf("%s.Read(%d) = ....", c.name, len(p))
	n, err = c.Conn.Read(p)
	log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err)
	return
}

// Close logs before and after delegating to the wrapped connection.
func (c *loggingConn) Close() (err error) {
	log.Printf("%s.Close() = ...", c.name)
	err = c.Conn.Close()
	log.Printf("%s.Close() = %v", c.name, err)
	return
}
// checkConnErrorWriter writes to c.rwc and records any write errors to c.werr.
// It only contains one field (and a pointer field at that), so it
// fits in an interface value without an extra allocation.
type checkConnErrorWriter struct {
	c *conn
}

// Write forwards p to the connection; the first write error is latched
// into c.werr and cancels the connection's context.
func (w checkConnErrorWriter) Write(p []byte) (n int, err error) {
	n, err = w.c.rwc.Write(p)
	if err != nil && w.c.werr == nil {
		w.c.werr = err
		w.c.cancelCtx()
	}
	return
}
// numLeadingCRorLF counts how many consecutive bytes at the start of v
// are CR ('\r') or LF ('\n').
func numLeadingCRorLF(v []byte) (n int) {
	for n < len(v) && (v[n] == '\r' || v[n] == '\n') {
		n++
	}
	return
}
// strSliceContains reports whether s appears as an element of ss.
func strSliceContains(ss []string, s string) bool {
	for i := range ss {
		if ss[i] == s {
			return true
		}
	}
	return false
}
// tlsRecordHeaderLooksLikeHTTP reports whether a TLS record header
// looks like it might've been a misdirected plaintext HTTP request.
func tlsRecordHeaderLooksLikeHTTP(hdr [5]byte) bool {
	prefix := string(hdr[:])
	return prefix == "GET /" ||
		prefix == "HEAD " ||
		prefix == "POST " ||
		prefix == "PUT /" ||
		prefix == "OPTIO"
}
|
[
"\"GODEBUG\""
] |
[] |
[
"GODEBUG"
] |
[]
|
["GODEBUG"]
|
go
| 1 | 0 | |
pkg/meshdiscovery/utils/common.go
|
package utils
import (
"fmt"
"os"
"strings"
gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1"
"github.com/solo-io/go-utils/errors"
"github.com/solo-io/solo-kit/pkg/api/v1/resources/common/kubernetes"
"github.com/solo-io/solo-kit/pkg/api/v1/resources/core"
v1 "github.com/solo-io/supergloo/pkg/api/v1"
"github.com/solo-io/supergloo/pkg/translator/utils"
corev1 "k8s.io/api/core/v1"
)
const (
	// SelectorDiscoveredByPrefix labels resources with the discovery
	// mechanism that found them.
	SelectorDiscoveredByPrefix = "discovered_by"
	// SelectorCreatedByPrefix / SelectorCreatedByValue label resources
	// created by the mesh-discovery component.
	SelectorCreatedByPrefix = "created_by"
	SelectorCreatedByValue  = "mesh-discovery"
)
func MeshWriteNamespace() string {
if writeNamespace := os.Getenv("POD_NAMESPACE"); writeNamespace != "" {
return writeNamespace
}
return "supergloo-system"
}
// GetVersionFromPodWithMatchers scans pod's containers for the first
// image whose reference contains every substring in podStringMatchers,
// and returns that image's version tag. Returns an error when no
// container matches.
func GetVersionFromPodWithMatchers(pod *kubernetes.Pod, podStringMatchers []string) (string, error) {
	containers := pod.Spec.Containers
	for _, container := range containers {
		if StringContainsAll(podStringMatchers, container.Image) {
			return ImageVersion(container.Image)
		}
	}
	return "", errors.Errorf("unable to find matching container from pod")
}
// StringContainsAll reports whether matchString contains every
// substring in podStringMatchers. An empty or nil matcher list
// trivially matches.
func StringContainsAll(podStringMatchers []string, matchString string) bool {
	for i := range podStringMatchers {
		if !strings.Contains(matchString, podStringMatchers[i]) {
			return false
		}
	}
	return true
}
// BasicMeshInfo builds a skeletal Mesh resource named
// "<meshType>-<meshNamespace>" in the discovery write namespace,
// labeled with discoverySelector. Callers fill in mesh-type-specific
// config afterwards.
func BasicMeshInfo(meshNamespace string, discoverySelector map[string]string, meshType string) *v1.Mesh {
	mesh := &v1.Mesh{
		Metadata: core.Metadata{
			Namespace: MeshWriteNamespace(),
			Name:      fmt.Sprintf("%s-%s", meshType, meshNamespace),
			Labels:    discoverySelector,
		},
	}
	return mesh
}
// InjectedPodsByNamespace groups by namespace the pods that are
// running and carry a sidecar container named proxyContainerName.
func InjectedPodsByNamespace(pods kubernetes.PodList, proxyContainerName string) kubernetes.PodsByNamespace {
	result := make(kubernetes.PodsByNamespace)
	for _, pod := range pods {
		if isInjectedPodRunning(pod, proxyContainerName) {
			result.Add(pod)
		}
	}
	return result
}
// isInjectedPodRunning reports whether pod is in the Running phase and
// contains a container named proxyContainerName (i.e. has the sidecar
// injected).
func isInjectedPodRunning(pod *kubernetes.Pod, proxyContainerName string) bool {
	// The phase check does not depend on the container, so evaluate it
	// once up front instead of re-checking it on every loop iteration.
	if pod.Status.Phase != corev1.PodRunning {
		return false
	}
	for _, container := range pod.Spec.Containers {
		if container.Name == proxyContainerName {
			return true
		}
	}
	return false
}
// GetUpstreamsForInjectedPods filters upstreams down to those that
// select at least one of the given (already injected) pods.
func GetUpstreamsForInjectedPods(pods kubernetes.PodList, upstreams gloov1.UpstreamList) gloov1.UpstreamList {
	var result gloov1.UpstreamList
	for _, us := range upstreams {
		podsForUpstream := utils.PodsForUpstreams(gloov1.UpstreamList{us}, pods)
		if len(podsForUpstream) > 0 {
			result = append(result, us)
		}
	}
	return result
}
|
[
"\"POD_NAMESPACE\""
] |
[] |
[
"POD_NAMESPACE"
] |
[]
|
["POD_NAMESPACE"]
|
go
| 1 | 0 | |
deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/android.py
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
# Values substituted for gyp's generator variables when producing
# Android.mk files. Most expand to Android build-system make variables.
generator_default_variables = {
  'OS': 'android',
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_SUFFIX': '.so',
  'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
  'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
  'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
  'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
  'LIB_DIR': '$(obj).$(TOOLSET)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s',  # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s',  # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(RULE_SOURCES)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
  'CONFIGURATION_NAME': 'NOT_USED_ON_ANDROID',
}

# Make supports multiple toolsets
generator_supports_multiple_toolsets = True

# Generator-specific gyp specs.
generator_additional_non_configuration_keys = [
    # Boolean to declare that this target does not want its name mangled.
    'android_unmangled_name',
]
generator_additional_path_sections = []
generator_extra_sources_for_rules = []

# Appended once to the end of the top-level GypAndroid.mk.
SHARED_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""

# Written at the top of every generated .mk file.
header = """\
# This file is generated by gyp; do not edit.
"""

# Include paths the Android build system adds implicitly; gyp-specified
# includes that match these are redundant and can be skipped.
android_standard_include_paths = set([
    # JNI_H_INCLUDE in build/core/binary.mk
    'dalvik/libnativehelper/include/nativehelper',
    # from SRC_HEADERS in build/core/config.mk
    'system/core/include',
    'hardware/libhardware/include',
    'hardware/libhardware_legacy/include',
    'hardware/ril/include',
    'dalvik/libnativehelper/include',
    'frameworks/native/include',
    'frameworks/native/opengl/include',
    'frameworks/base/include',
    'frameworks/base/opengl/include',
    'frameworks/base/native/include',
    'external/skia/include',
    # TARGET_C_INCLUDES in build/core/combo/TARGET_linux-arm.mk
    'bionic/libc/arch-arm/include',
    'bionic/libc/include',
    'bionic/libstdc++/include',
    'bionic/libc/kernel/common',
    'bionic/libc/kernel/arch-arm',
    'bionic/libm/include',
    'bionic/libm/include/arm',
    'bionic/libthread_db/include',
])

# Map gyp target types to Android module classes.
MODULE_CLASSES = {
    'static_library': 'STATIC_LIBRARIES',
    'shared_library': 'SHARED_LIBRARIES',
    'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
  """Report whether file extension *ext* is compiled as C++ by the make backend."""
  compilable = make.COMPILABLE_EXTENSIONS
  return compilable.get(ext) == 'cxx'
def Sourceify(path):
  """Return *path* unchanged.

  Other backends rewrite paths relative to options.generator_output here,
  but the Android backend does not support that option, so this is a no-op
  kept only for interface parity with make.py.
  """
  return path
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
# Populated as AndroidMkWriter.Write processes each target.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class AndroidMkWriter(object):
"""AndroidMkWriter packages up the writing of one target-specific Android.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
  def __init__(self, android_top_dir):
    # Path of the Android source tree root, used when converting paths
    # in the generated makefiles.
    self.android_top_dir = android_top_dir
  def Write(self, qualified_target, base_path, output_filename, spec, configs,
            part_of_all):
    """The main entry point: writes a .mk file for a single target.

    Arguments:
      qualified_target: target we're generating
      base_path: path relative to source root we're building in, used to resolve
                 target-relative paths
      output_filename: output .mk file name to write
      spec, configs: gyp info
      part_of_all: flag indicating this target is part of 'all'
    """
    make.ensure_directory_exists(output_filename)

    self.fp = open(output_filename, 'w')

    self.fp.write(header)

    # Cache target identity on the instance so the Write* helpers below
    # can reference it without re-threading parameters.
    self.qualified_target = qualified_target
    self.path = base_path
    self.target = spec['target_name']
    self.type = spec['type']
    self.toolset = spec['toolset']

    deps, link_deps = self.ComputeDeps(spec)

    # Some of the generation below can add extra output, sources, or
    # link dependencies. All of the out params of the functions that
    # follow use names like extra_foo.
    extra_outputs = []
    extra_sources = []

    self.android_class = MODULE_CLASSES.get(self.type, 'GYP')
    self.android_module = self.ComputeAndroidModule(spec)
    (self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
    self.output = self.output_binary = self.ComputeOutput(spec)

    # Standard header.
    self.WriteLn('include $(CLEAR_VARS)\n')

    # Module class and name.
    self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
    self.WriteLn('LOCAL_MODULE := ' + self.android_module)
    # Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
    # The library module classes fail if the stem is set. ComputeOutputParts
    # makes sure that stem == modulename in these cases.
    if self.android_stem != self.android_module:
      self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
    self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
    self.WriteLn('LOCAL_MODULE_TAGS := optional')
    if self.toolset == 'host':
      self.WriteLn('LOCAL_IS_HOST_MODULE := true')

    # Grab output directories; needed for Actions and Rules.
    self.WriteLn('gyp_intermediate_dir := $(call local-intermediates-dir)')
    self.WriteLn('gyp_shared_intermediate_dir := '
                 '$(call intermediates-dir-for,GYP,shared)')
    self.WriteLn()

    # List files this target depends on so that actions/rules/copies/sources
    # can depend on the list.
    # TODO: doesn't pull in things through transitive link deps; needed?
    target_dependencies = [x[1] for x in deps if x[0] == 'path']
    self.WriteLn('# Make sure our deps are built first.')
    self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
                   local_pathify=True)

    # Actions must come first, since they can generate more OBJs for use below.
    if 'actions' in spec:
      self.WriteActions(spec['actions'], extra_sources, extra_outputs)

    # Rules must be early like actions.
    if 'rules' in spec:
      self.WriteRules(spec['rules'], extra_sources, extra_outputs)

    if 'copies' in spec:
      self.WriteCopies(spec['copies'], extra_outputs)

    # GYP generated outputs.
    self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)

    # Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
    # on both our dependency targets and our generated files.
    self.WriteLn('# Make sure our deps and generated files are built first.')
    self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
                 '$(GYP_GENERATED_OUTPUTS)')
    self.WriteLn()

    # Sources.
    if spec.get('sources', []) or extra_sources:
      self.WriteSources(spec, configs, extra_sources)

    self.WriteTarget(spec, configs, deps, link_deps, part_of_all)

    # Update global list of target outputs, used in dependency tracking.
    target_outputs[qualified_target] = ('path', self.output_binary)

    # Update global list of link dependencies.
    if self.type == 'static_library':
      target_link_deps[qualified_target] = ('static', self.android_module)
    elif self.type == 'shared_library':
      target_link_deps[qualified_target] = ('shared', self.android_module)

    self.fp.close()
    return self.android_module
def WriteActions(self, actions, extra_sources, extra_outputs):
  """Write Makefile code for any 'actions' from the gyp input.

  extra_sources: a list that will be filled in with newly generated source
                 files, if any
  extra_outputs: a list that will be filled in with any outputs of these
                 actions (used to make other pieces dependent on these
                 actions)
  """
  for action in actions:
    # Each action becomes a uniquely-named set of make rules.
    name = make.StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                    action['action_name']))
    self.WriteLn('### Rules for action "%s":' % action['action_name'])
    inputs = action['inputs']
    outputs = action['outputs']

    # Build up a list of outputs.
    # Collect the output dirs we'll need.
    dirs = set()
    for out in outputs:
      if not out.startswith('$'):
        # Outputs should land in a make-variable dir ($(gyp_*_dir)), not the
        # source tree; warn but keep going for compatibility.
        print ('WARNING: Action for target "%s" writes output to local path '
               '"%s".' % (self.target, out))
      dir = os.path.split(out)[0]
      if dir:
        dirs.add(dir)
    if int(action.get('process_outputs_as_sources', False)):
      extra_sources += outputs

    # Prepare the actual command.
    command = gyp.common.EncodePOSIXShellList(action['action'])
    if 'message' in action:
      quiet_cmd = 'Gyp action: %s ($@)' % action['message']
    else:
      quiet_cmd = 'Gyp action: %s ($@)' % name
    if len(dirs) > 0:
      # NOTE(review): set iteration order is arbitrary, so the mkdir list is
      # not deterministic across runs — harmless but diff-noisy.
      command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
    cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
    command = cd_action + command

    # The makefile rules are all relative to the top dir, but the gyp actions
    # are defined relative to their containing dir. This replaces the gyp_*
    # variables for the action rule with an absolute version so that the
    # output goes in the right place.
    # Only write the gyp_* rules for the "primary" output (:1);
    # it's superfluous for the "extra outputs", and this avoids accidentally
    # writing duplicate dummy rules for those outputs.
    main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
    self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
    self.WriteLn('%s: gyp_intermediate_dir := '
                 '$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_intermediate_dir)' %
                 main_output)
    self.WriteLn('%s: gyp_shared_intermediate_dir := '
                 '$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_shared_intermediate_dir)' %
                 main_output)

    # Spaces cannot be escaped reliably in make prerequisite lists.
    for input in inputs:
      assert ' ' not in input, (
          "Spaces in action input filenames not supported (%s)" % input)
    for output in outputs:
      assert ' ' not in output, (
          "Spaces in action output filenames not supported (%s)" % output)

    self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                 (main_output, ' '.join(map(self.LocalPathify, inputs))))
    self.WriteLn('\t@echo "%s"' % quiet_cmd)
    self.WriteLn('\t$(hide)%s\n' % command)
    for output in outputs[1:]:
      # Make each output depend on the main output, with an empty command
      # to force make to notice that the mtime has changed.
      self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))

    extra_outputs += outputs
    self.WriteLn()

  self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs):
  """Write Makefile code for any 'rules' from the gyp input.

  extra_sources: a list that will be filled in with newly generated source
                 files, if any
  extra_outputs: a list that will be filled in with any outputs of these
                 rules (used to make other pieces dependent on these rules)
  """
  if len(rules) == 0:
    return
  # Phony target that depends on every rule's primary output; appended to
  # extra_sources below so the rules always run.
  rule_trigger = '%s_rule_trigger' % self.android_module

  did_write_rule = False
  for rule in rules:
    if len(rule.get('rule_sources', [])) == 0:
      continue  # rule matched no sources; nothing to emit
    did_write_rule = True
    name = make.StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                    rule['rule_name']))
    self.WriteLn('\n### Generated for rule "%s":' % name)
    self.WriteLn('# "%s":' % rule)
    inputs = rule.get('inputs')
    # One make rule is written per matched source file.
    for rule_source in rule.get('rule_sources', []):
      (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
      (rule_source_root, rule_source_ext) = \
          os.path.splitext(rule_source_basename)

      # Expand %(INPUT_ROOT)s / %(INPUT_DIRNAME)s in each declared output.
      outputs = [self.ExpandInputRoot(out, rule_source_root,
                                      rule_source_dirname)
                 for out in rule['outputs']]
      dirs = set()
      for out in outputs:
        if not out.startswith('$'):
          print ('WARNING: Rule for target %s writes output to local path %s'
                 % (self.target, out))
        dir = os.path.dirname(out)
        if dir:
          dirs.add(dir)
      extra_outputs += outputs
      if int(rule.get('process_outputs_as_sources', False)):
        extra_sources.extend(outputs)

      # Build the shell command, substituting per-source placeholders.
      components = []
      for component in rule['action']:
        component = self.ExpandInputRoot(component, rule_source_root,
                                         rule_source_dirname)
        if '$(RULE_SOURCES)' in component:
          component = component.replace('$(RULE_SOURCES)',
                                        rule_source)
        components.append(component)

      command = gyp.common.EncodePOSIXShellList(components)
      cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
      command = cd_action + command
      if dirs:
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

      # We set up a rule to build the first output, and then set up
      # a rule for each additional output to depend on the first.
      # NOTE(review): map() returns an iterator under Python 3, so the
      # outputs[0] subscript below only works on Python 2 — confirm the
      # supported interpreter before porting.
      outputs = map(self.LocalPathify, outputs)
      main_output = outputs[0]
      self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
      self.WriteLn('%s: gyp_intermediate_dir := '
                   '$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_intermediate_dir)'
                   % main_output)
      self.WriteLn('%s: gyp_shared_intermediate_dir := '
                   '$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_shared_intermediate_dir)'
                   % main_output)

      # The primary output depends on the matched source plus any rule inputs.
      main_output_deps = self.LocalPathify(rule_source)
      if inputs:
        main_output_deps += ' '
        main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])

      self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                   (main_output, main_output_deps))
      self.WriteLn('\t%s\n' % command)
      for output in outputs[1:]:
        self.WriteLn('%s: %s' % (output, main_output))
      self.WriteLn('.PHONY: %s' % (rule_trigger))
      self.WriteLn('%s: %s' % (rule_trigger, main_output))
      self.WriteLn('')
  if did_write_rule:
    extra_sources.append(rule_trigger)  # Force all rules to run.
    self.WriteLn('### Finished generating for all rules')
    self.WriteLn('')
def WriteCopies(self, copies, extra_outputs):
  """Write Makefile code for any 'copies' from the gyp input.

  extra_outputs: a list that will be filled in with any outputs of this action
                 (used to make other pieces dependent on this action)
  """
  self.WriteLn('### Generated for copy rule.')

  # All copy outputs are collected into one make variable named after the
  # target, and that variable is exported through extra_outputs.
  variable = make.StringToMakefileVariable(self.qualified_target + '_copies')
  outputs = []
  for copy in copies:
    for path in copy['files']:
      # The Android build system does not allow generation of files into the
      # source tree. The destination should start with a variable, which will
      # typically be $(gyp_intermediate_dir) or
      # $(gyp_shared_intermediate_dir). Note that we can't use an assertion
      # because some of the gyp tests depend on this.
      if not copy['destination'].startswith('$'):
        print ('WARNING: Copy rule for target %s writes output to '
               'local path %s' % (self.target, copy['destination']))

      # LocalPathify() calls normpath, stripping trailing slashes.
      path = Sourceify(self.LocalPathify(path))
      filename = os.path.split(path)[1]
      output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
                                                        filename)))

      # Copy via $(ACP) (Android's cp replacement), creating the dir first.
      self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
                   (output, path))
      self.WriteLn('\t@echo Copying: $@')
      self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
      self.WriteLn('\t$(hide) $(ACP) -r $< $@')
      self.WriteLn()
      outputs.append(output)
  self.WriteLn('%s = %s' % (variable,
                            ' '.join(map(make.QuoteSpaces, outputs))))
  extra_outputs.append('$(%s)' % variable)
  self.WriteLn()
def WriteSourceFlags(self, spec, configs):
  """Write out the flags and include paths used to compile source files for
  the current target.

  Args:
    spec, configs: input from gyp.
  """
  # Only the default configuration's flags are emitted; Android's build has a
  # single configuration per module.
  config = configs[spec['default_configuration']]
  extracted_includes = []

  self.WriteLn('\n# Flags passed to both C and C++ files.')
  # "-I" entries are pulled out of cflags and merged into LOCAL_C_INCLUDES.
  cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
      config.get('cflags'))
  extracted_includes.extend(includes_from_cflags)
  self.WriteList(cflags, 'MY_CFLAGS')

  cflags_c, includes_from_cflags_c = self.ExtractIncludesFromCFlags(
      config.get('cflags_c'))
  extracted_includes.extend(includes_from_cflags_c)
  self.WriteList(cflags_c, 'MY_CFLAGS_C')

  self.WriteList(config.get('defines'), 'MY_DEFS', prefix='-D',
                 quoter=make.EscapeCppDefine)
  self.WriteLn('LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)')

  # Undefine ANDROID for host modules
  # TODO: the source code should not use macro ANDROID to tell if it's host or
  # target module.
  if self.toolset == 'host':
    self.WriteLn('# Undefine ANDROID for host modules')
    self.WriteLn('LOCAL_CFLAGS += -UANDROID')

  self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
  includes = list(config.get('include_dirs', []))
  includes.extend(extracted_includes)
  includes = map(Sourceify, map(self.LocalPathify, includes))
  includes = self.NormalizeIncludePaths(includes)
  self.WriteList(includes, 'LOCAL_C_INCLUDES')
  # Prepend the dirs that WriteSources() copied generated files from, so
  # headers next to copied sources are still found.
  self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
               '$(LOCAL_C_INCLUDES)')

  self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
  self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS')
def WriteSources(self, spec, configs, extra_sources):
  """Write Makefile code for any 'sources' from the gyp input.
  These are source files necessary to build the current target.
  We need to handle shared_intermediate directory source files as
  a special case by copying them to the intermediate directory and
  treating them as a genereated sources. Otherwise the Android build
  rules won't pick them up.

  Args:
    spec, configs: input from gyp.
    extra_sources: Sources generated from Actions or Rules.
  """
  # NOTE(review): this method relies on Python 2 semantics — filter() must
  # return a list for the '+' concatenation and append() calls below to work.
  sources = filter(make.Compilable, spec.get('sources', []))
  generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
  extra_sources = filter(make.Compilable, extra_sources)

  # Determine and output the C++ extension used by these sources.
  # We simply find the first C++ file and use that extension.
  all_sources = sources + extra_sources
  local_cpp_extension = '.cpp'
  for source in all_sources:
    (root, ext) = os.path.splitext(source)
    if IsCPPExtension(ext):
      local_cpp_extension = ext
      break
  if local_cpp_extension != '.cpp':
    self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)

  # We need to move any non-generated sources that are coming from the
  # shared intermediate directory out of LOCAL_SRC_FILES and put them
  # into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
  # that don't match our local_cpp_extension, since Android will only
  # generate Makefile rules for a single LOCAL_CPP_EXTENSION.
  local_files = []
  for source in sources:
    (root, ext) = os.path.splitext(source)
    if '$(gyp_shared_intermediate_dir)' in source:
      extra_sources.append(source)
    elif '$(gyp_intermediate_dir)' in source:
      extra_sources.append(source)
    elif IsCPPExtension(ext) and ext != local_cpp_extension:
      extra_sources.append(source)
    else:
      local_files.append(os.path.normpath(os.path.join(self.path, source)))

  # For any generated source, if it is coming from the shared intermediate
  # directory then we add a Make rule to copy them to the local intermediate
  # directory first. This is because the Android LOCAL_GENERATED_SOURCES
  # must be in the local module intermediate directory for the compile rules
  # to work properly. If the file has the wrong C++ extension, then we add
  # a rule to copy that to intermediates and use the new version.
  final_generated_sources = []
  # If a source file gets copied, we still need to add the orginal source
  # directory as header search path, for GCC searches headers in the
  # directory that contains the source file by default.
  origin_src_dirs = []
  for source in extra_sources:
    local_file = source
    if not '$(gyp_intermediate_dir)/' in local_file:
      basename = os.path.basename(local_file)
      local_file = '$(gyp_intermediate_dir)/' + basename
    (root, ext) = os.path.splitext(local_file)
    if IsCPPExtension(ext) and ext != local_cpp_extension:
      # Renaming to the canonical C++ extension so Android compiles it.
      local_file = root + local_cpp_extension
    if local_file != source:
      # Emit a copy rule from the original location to intermediates.
      self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
      self.WriteLn('\tmkdir -p $(@D); cp $< $@')
      origin_src_dirs.append(os.path.dirname(source))
    final_generated_sources.append(local_file)

  # We add back in all of the non-compilable stuff to make sure that the
  # make rules have dependencies on them.
  final_generated_sources.extend(generated_not_sources)
  self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')

  origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
  origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
  self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')

  self.WriteList(local_files, 'LOCAL_SRC_FILES')

  # Write out the flags used to compile the source; this must be done last
  # so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
  self.WriteSourceFlags(spec, configs)
def ComputeAndroidModule(self, spec):
  """Return the Android module name used for a gyp spec.

  We use the complete qualified target name to avoid collisions between
  duplicate targets in different directories. We also add a suffix to
  distinguish gyp-generated module names.
  """
  if int(spec.get('android_unmangled_name', 0)):
    # Caller opted out of mangling; shared libraries must then already
    # follow the 'lib' naming convention.
    assert self.type != 'shared_library' or self.target.startswith('lib')
    return self.target

  # For reasons of convention, the Android build system requires that all
  # shared library modules are named 'libfoo' when generating -l flags.
  prefix = 'lib_' if self.type == 'shared_library' else ''
  suffix = '_host_gyp' if spec['toolset'] == 'host' else '_gyp'
  if self.path:
    middle = '%s_%s' % (self.path, self.target)
  else:
    middle = self.target
  return make.StringToMakefileVariable(prefix + middle + suffix)
def ComputeOutputParts(self, spec):
  """Return the 'output basename' of a gyp spec, split into filename + ext.

  Android libraries must be named the same thing as their module name,
  otherwise the linker can't find them, so product_name and so on must be
  ignored if we are building a library, and the "lib" prepending is
  not done for Android.
  """
  assert self.type != 'loadable_module'  # TODO: not supported?

  target = spec['target_name']
  target_prefix = ''
  target_ext = ''
  is_library = self.type in ('static_library', 'shared_library')
  if is_library:
    # Libraries are named after the module; everything else is ignored.
    target = self.ComputeAndroidModule(spec)
    target_ext = '.a' if self.type == 'static_library' else '.so'
  elif self.type == 'none':
    target_ext = '.stamp'
  elif self.type != 'executable':
    print ("ERROR: What output file should be generated?",
           "type", self.type, "target", target)

  if not is_library:
    # Non-library targets honor the product_* customizations.
    target_prefix = spec.get('product_prefix', target_prefix)
    target = spec.get('product_name', target)
    product_ext = spec.get('product_extension')
    if product_ext:
      target_ext = '.' + product_ext

  return (target_prefix + target, target_ext)
def ComputeOutputBasename(self, spec):
  """Return the 'output basename' of a gyp spec.

  E.g., the loadable module 'foobar' in directory 'baz' will produce
  'libfoobar.so'
  """
  stem, ext = self.ComputeOutputParts(spec)
  return stem + ext
def ComputeOutput(self, spec):
  """Return the 'output' (full output path) of a gyp spec.

  E.g., the loadable module 'foobar' in directory 'baz' will produce
  '$(obj)/baz/libfoobar.so'
  """
  is_host = (self.toolset == 'host')
  if self.type == 'executable' and is_host:
    # We install host executables into shared_intermediate_dir so they can be
    # run by gyp rules that refer to PRODUCT_DIR.
    path = '$(gyp_shared_intermediate_dir)'
  elif self.type == 'shared_library':
    path = ('$(HOST_OUT_INTERMEDIATE_LIBRARIES)' if is_host
            else '$(TARGET_OUT_INTERMEDIATE_LIBRARIES)')
  else:
    # Other targets just get built into their intermediate dir.
    template = ('$(call intermediates-dir-for,%s,%s,true)' if is_host
                else '$(call intermediates-dir-for,%s,%s)')
    path = template % (self.android_class, self.android_module)

  assert spec.get('product_dir') is None  # TODO: not supported?
  return os.path.join(path, self.ComputeOutputBasename(spec))
def NormalizeLdFlags(self, ld_flags):
  """Clean up ldflags from gyp file.

  Remove any ldflags that contain android_top_dir.

  Args:
    ld_flags: ldflags from gyp files.
  Returns:
    clean ldflags
  """
  # Drop every flag that mentions the Android top directory.
  return [flag for flag in ld_flags if self.android_top_dir not in flag]
def NormalizeIncludePaths(self, include_paths):
  """Normalize include_paths.

  Convert absolute paths to relative to the Android top directory;
  filter out include paths that are already brought in by the Android build
  system.

  Args:
    include_paths: A list of unprocessed include paths.
  Returns:
    A list of normalized include paths.
  """
  normalized = []
  for path in include_paths:
    # Use startswith() rather than path[0] so an empty string cannot raise
    # IndexError; behavior for non-empty paths is unchanged.
    if path.startswith('/'):
      path = gyp.common.RelativePath(path, self.android_top_dir)
    # Filter out the Android standard search path.
    if path not in android_standard_include_paths:
      normalized.append(path)
  return normalized
def ExtractIncludesFromCFlags(self, cflags):
  """Extract includes "-I..." out from cflags.

  Args:
    cflags: A list of compiler flags (possibly None), which may contain
            "-I..." entries.
  Returns:
    A tuple of lists: (clean_cflags, include_paths). "-I.." is trimmed.
  """
  clean_cflags = []
  include_paths = []
  # 'cflags or []' handles a None/empty argument the same as the absence of
  # flags.
  for flag in cflags or []:
    if flag.startswith('-I'):
      include_paths.append(flag[2:])
    else:
      clean_cflags.append(flag)
  return (clean_cflags, include_paths)
def ComputeAndroidLibraryModuleNames(self, libraries):
  """Compute the Android module names from libraries, ie spec.get('libraries')

  Args:
    libraries: the value of spec.get('libraries')
  Returns:
    A tuple (static_lib_modules, dynamic_lib_modules)
  """
  static_lib_modules = []
  dynamic_lib_modules = []
  for libs in libraries:
    # Libs can have multiple words.
    for lib in libs.split():
      # Filter the system libraries, which are added by default by the Android
      # build system.
      if lib in ('-lc', '-lstdc++', '-lm') or lib.endswith('libgcc.a'):
        continue
      static_match = re.search(r'([^/]+)\.a$', lib)
      if static_match:
        static_lib_modules.append(static_match.group(1))
        continue
      shared_match = re.search(r'([^/]+)\.so$', lib)
      if shared_match:
        dynamic_lib_modules.append(shared_match.group(1))
        continue
      # "-lstlport" -> libstlport
      if lib.startswith('-l'):
        bucket = (static_lib_modules if lib.endswith('_static')
                  else dynamic_lib_modules)
        bucket.append('lib' + lib[2:])
  return (static_lib_modules, dynamic_lib_modules)
def ComputeDeps(self, spec):
  """Compute the dependencies of a gyp spec.

  Returns a tuple (deps, link_deps), where each is a list of
  filenames that will need to be put in front of make for either
  building (deps) or linking (link_deps).
  """
  deps = []
  link_deps = []
  dependencies = spec.get('dependencies', [])
  # Build outputs of every dependency (skipping entries with no output).
  for dep in dependencies:
    output = target_outputs[dep]
    if output:
      deps.append(output)
  # Library dependencies are both linked against and built first.
  for dep in dependencies:
    if dep in target_link_deps:
      link_deps.append(target_link_deps[dep])
  deps.extend(link_deps)
  return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteTargetFlags(self, spec, configs, link_deps):
  """Write Makefile code to specify the link flags and library dependencies.

  spec, configs: input from gyp.
  link_deps: link dependency list; see ComputeDeps()
  """
  config = configs[spec['default_configuration']]

  # LDFLAGS
  # Library references hidden inside ldflags also contribute module names.
  ldflags = list(config.get('ldflags', []))
  static_flags, dynamic_flags = self.ComputeAndroidLibraryModuleNames(
      ldflags)
  self.WriteLn('')
  self.WriteList(self.NormalizeLdFlags(ldflags), 'LOCAL_LDFLAGS')

  # Libraries (i.e. -lfoo)
  libraries = gyp.common.uniquer(spec.get('libraries', []))
  static_libs, dynamic_libs = self.ComputeAndroidLibraryModuleNames(
      libraries)

  # Link dependencies (i.e. libfoo.a, libfoo.so)
  static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
  shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']

  self.WriteLn('')
  self.WriteList(static_flags + static_libs + static_link_deps,
                 'LOCAL_STATIC_LIBRARIES')
  self.WriteLn('# Enable grouping to fix circular references')
  self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
  self.WriteLn('')
  self.WriteList(dynamic_flags + dynamic_libs + shared_link_deps,
                 'LOCAL_SHARED_LIBRARIES')
def WriteTarget(self, spec, configs, deps, link_deps, part_of_all):
  """Write Makefile code to produce the final target of the gyp spec.

  spec, configs: input from gyp.
  deps, link_deps: dependency lists; see ComputeDeps()
  part_of_all: flag indicating this target is part of 'all'
  """
  self.WriteLn('### Rules for final target.')

  if self.type != 'none':
    self.WriteTargetFlags(spec, configs, link_deps)

  # Add to the set of targets which represent the gyp 'all' target. We use the
  # name 'gyp_all_modules' as the Android build system doesn't allow the use
  # of the Make target 'all' and because 'all_modules' is the equivalent of
  # the Make target 'all' on Android.
  if part_of_all:
    self.WriteLn('# Add target alias to "gyp_all_modules" target.')
    self.WriteLn('.PHONY: gyp_all_modules')
    self.WriteLn('gyp_all_modules: %s' % self.android_module)
    self.WriteLn('')

  # Add an alias from the gyp target name to the Android module name. This
  # simplifies manual builds of the target, and is required by the test
  # framework.
  if self.target != self.android_module:
    self.WriteLn('# Alias gyp target name.')
    self.WriteLn('.PHONY: %s' % self.target)
    self.WriteLn('%s: %s' % (self.target, self.android_module))
    self.WriteLn('')

  # Add the command to trigger build of the target type depending
  # on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
  # NOTE: This has to come last!
  modifier = ''
  if self.toolset == 'host':
    modifier = 'HOST_'
  if self.type == 'static_library':
    self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
  elif self.type == 'shared_library':
    self.WriteLn('LOCAL_PRELINK_MODULE := false')
    self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
  elif self.type == 'executable':
    if self.toolset == 'host':
      self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
    else:
      # Don't install target executables for now, as it results in them being
      # included in ROM. This can be revisited if there's a reason to install
      # them later.
      self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
    self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
  else:
    # 'none' targets become uninstallable stamp files built via base_rules.mk.
    self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
    self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
    self.WriteLn()
    self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
    self.WriteLn()
    self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
    self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
    self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
    self.WriteLn('\t$(hide) touch $@')
def WriteList(self, value_list, variable=None, prefix='',
              quoter=make.QuoteIfNecessary, local_pathify=False):
  """Write a variable definition that is a list of values.

  E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
       foo = blaha blahb
  but in a pretty-printed style.
  """
  values = ''
  if value_list:
    quoted = [quoter(prefix + item) for item in value_list]
    if local_pathify:
      quoted = [self.LocalPathify(item) for item in quoted]
    separator = ' \\\n\t'
    values = separator + separator.join(quoted)
  self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteLn(self, text=''):
  """Write *text* to the output makefile, terminated with a newline."""
  self.fp.write('%s\n' % text)
def LocalPathify(self, path):
  """Convert a subdirectory-relative path into a normalized path which starts
  with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
  Absolute paths, or paths that contain variables, are just normalized."""
  if '$(' in path or os.path.isabs(path):
    # path is not a file in the project tree in this case, but calling
    # normpath is still important for trimming trailing slashes.
    return os.path.normpath(path)
  result = os.path.normpath(os.path.join('$(LOCAL_PATH)', self.path, path))
  # Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
  # - i.e. that the resulting path is still inside the project tree. The
  # path may legitimately have ended up containing just $(LOCAL_PATH), though,
  # so we don't look for a slash.
  assert result.startswith('$(LOCAL_PATH)'), (
      'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
  return result
def ExpandInputRoot(self, template, expansion, dirname):
  """Substitute %(INPUT_ROOT)s / %(INPUT_DIRNAME)s placeholders in template.

  Templates without either placeholder are returned unchanged.
  """
  substitutions = {
      'INPUT_ROOT': expansion,
      'INPUT_DIRNAME': dirname,
  }
  if '%(INPUT_ROOT)s' in template or '%(INPUT_DIRNAME)s' in template:
    return template % substitutions
  return template
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
                              build_files):
  """Write the target to regenerate the Makefile."""
  options = params['options']
  # Sort to avoid non-functional changes to makefile.
  build_files = sorted([os.path.join('$(LOCAL_PATH)', f) for f in build_files])
  # Re-run gyp with the same arguments it was originally invoked with, but
  # with paths rebased onto $(PRIVATE_LOCAL_PATH).
  build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
                      for filename in params['build_files_arg']]
  build_files_args = [os.path.join('$(PRIVATE_LOCAL_PATH)', f)
                      for f in build_files_args]
  gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
                                            options.toplevel_dir)
  makefile_path = os.path.join('$(LOCAL_PATH)', makefile_name)
  if not gyp_binary.startswith(os.sep):
    # Make sure a relative binary path is invoked as ./gyp, not via $PATH.
    gyp_binary = os.path.join('.', gyp_binary)
  # The makefile depends on every gyp/gypi file, so editing any of them
  # triggers regeneration.
  root_makefile.write('GYP_FILES := \\\n %s\n\n' %
                      '\\\n '.join(map(Sourceify, build_files)))
  root_makefile.write('%s: PRIVATE_LOCAL_PATH := $(LOCAL_PATH)\n' %
                      makefile_path)
  root_makefile.write('%s: $(GYP_FILES)\n' % makefile_path)
  root_makefile.write('\techo ACTION Regenerating $@\n\t%s\n\n' %
                      gyp.common.EncodePOSIXShellList([gyp_binary, '-fandroid'] +
                                                      gyp.RegenerateFlags(options) +
                                                      build_files_args))
def GenerateOutput(target_list, target_dicts, data, params):
  """Generator entry point: write the root GypAndroid.mk and one .mk per
  target, then (optionally) the auto-regeneration rule."""
  options = params['options']
  generator_flags = params.get('generator_flags', {})
  # NOTE(review): builddir_name is computed but never used below.
  builddir_name = generator_flags.get('output_dir', 'out')
  limit_to_target_all = generator_flags.get('limit_to_target_all', False)
  android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
  assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'

  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile. Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    assert not options.generator_output, (
        'The Android backend does not support options.generator_output.')
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file

  # TODO: search for the first non-'Default' target. This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  # NOTE(review): toolsets is computed but never used below.
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'

  # NOTE(review): srcdir is unused.
  srcdir = '.'
  makefile_name = 'GypAndroid.mk' + options.suffix
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  assert not options.generator_output, (
      'The Android backend does not support options.generator_output.')
  make.ensure_directory_exists(makefile_path)
  root_makefile = open(makefile_path, 'w')
  root_makefile.write(header)

  # We set LOCAL_PATH just once, here, to the top of the project tree. This
  # allows all the other paths we use to be relative to the Android.mk file,
  # as the Android build system expects.
  root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')

  # Find the list of targets that derive from the gyp file(s) being built.
  needed_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
      needed_targets.add(target)

  build_files = set()
  include_list = set()
  android_modules = {}
  for qualified_target in target_list:
    build_file, target, toolset = gyp.common.ParseQualifiedTarget(
        qualified_target)
    build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
    included_files = data[build_file]['included_files']
    for included_file in included_files:
      # The included_files entries are relative to the dir of the build file
      # that included them, so we have to undo that and then make them relative
      # to the root dir.
      relative_include_file = gyp.common.RelativePath(
          gyp.common.UnrelativePath(included_file, build_file),
          options.toplevel_dir)
      abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use absolute path
      # so that relocating the src dir doesn't break the path.
      if (params['home_dot_gyp'] and
          abs_include_file.startswith(params['home_dot_gyp'])):
        build_files.add(abs_include_file)
      else:
        build_files.add(relative_include_file)

    base_path, output_file = CalculateMakefilePath(build_file,
        target + '.' + toolset + options.suffix + '.mk')

    spec = target_dicts[qualified_target]
    configs = spec['configurations']

    part_of_all = (qualified_target in needed_targets and
                   not int(spec.get('suppress_wildcard', False)))
    if limit_to_target_all and not part_of_all:
      continue
    writer = AndroidMkWriter(android_top_dir)
    android_module = writer.Write(qualified_target, base_path, output_file,
                                  spec, configs, part_of_all=part_of_all)
    if android_module in android_modules:
      # Duplicate module names would silently overwrite each other in the
      # Android build; abort generation instead.
      print ('ERROR: Android module names must be unique. The following '
             'targets both generate Android module name %s.\n %s\n %s' %
             (android_module, android_modules[android_module],
              qualified_target))
      return
    android_modules[android_module] = qualified_target

    # Our root_makefile lives at the source root. Compute the relative path
    # from there to the output_file for including.
    mkfile_rel_path = gyp.common.RelativePath(output_file,
                                              os.path.dirname(makefile_path))
    include_list.add(mkfile_rel_path)

  # Some tools need to know the absolute path of the top directory.
  root_makefile.write('GYP_ABS_ANDROID_TOP_DIR := $(shell pwd)\n')

  # Write out the sorted list of includes.
  root_makefile.write('\n')
  for include_file in sorted(include_list):
    root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
  root_makefile.write('\n')

  if generator_flags.get('auto_regeneration', True):
    WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)

  root_makefile.write(SHARED_FOOTER)

  root_makefile.close()
|
[] |
[] |
[
"ANDROID_BUILD_TOP"
] |
[]
|
["ANDROID_BUILD_TOP"]
|
python
| 1 | 0 | |
capsul/study_config/test/test_fsl_config.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import os
import os.path as osp
import unittest
import sys
import tempfile
from glob import glob
from traits.api import File, Undefined
from capsul.study_config.study_config import StudyConfig, Process
from capsul.subprocess import fsl
class Bet(Process):
    """Minimal Capsul process wrapping the FSL ``bet`` brain-extraction tool."""
    # Image to skull-strip (required input).
    input_image = File(optional=False, output=False)
    # Extracted brain image produced by bet (required output).
    output_image = File(optional=False, output=True)

    def _run_process(self):
        # Run 'bet' through the study config so the FSL environment
        # configuration is applied to the subprocess.
        fsl.check_call(self.study_config, ['bet', self.input_image, self.output_image])
class TestFSL(unittest.TestCase):
    """Integration test of StudyConfig's automatic FSL configuration."""

    def setUp(self):
        # Nothing to prepare; configuration happens inside the test itself.
        pass

    def test_study_config_fsl(self):
        # FSL is not available on Windows; only run the test elsewhere.
        if not sys.platform.startswith('win'):
            try:
                study_config = StudyConfig(use_fsl=True)
            except EnvironmentError as e:
                # If FSL cannot be configured automatically, skip the test
                print('WARNING: Skip FSL test because it cannot be configured automatically:', str(e), file=sys.stderr)
                return
            # Preferred test image shipped by the fsl-mni152-templates package.
            test_image = '/usr/share/data/fsl-mni152-templates/MNI152_T1_1mm_brain.nii.gz'
            if not osp.exists(test_image):
                # Fall back to an anatomical image bundled with nibabel inside
                # the FSL python environment.
                fsl_dir = os.environ.get('FSLDIR')
                test_image = None
                if not fsl_dir and study_config.fsl_config is not Undefined:
                    # Derive the FSL root from the configured fsl_config path
                    # (three levels up from the config file).
                    fsl_dir = osp.dirname(osp.dirname(osp.dirname(study_config.fsl_config)))
                if fsl_dir:
                    test_image = glob(osp.join(fsl_dir, 'fslpython/envs/fslpython/lib/python*/site-packages/nibabel/tests/data/anatomical.nii'))
                    if test_image:
                        test_image = test_image[0]
                if not test_image:
                    print('WARNING: Skip FSL test because test data cannot be found', file=sys.stderr)
                    return
            bet = study_config.get_process_instance(Bet)
            with tempfile.NamedTemporaryFile(suffix='.nii.gz') as tmp:
                bet.run(
                    input_image=test_image,
                    output_image=tmp.name)
                # bet must have produced a non-empty output image.
                self.assertTrue(os.stat(tmp.name).st_size != 0)
def test():
""" Function to execute unitest.
"""
suite = unittest.TestLoader().loadTestsFromTestCase(TestFSL)
runtime = unittest.TextTestRunner(verbosity=2).run(suite)
return runtime.wasSuccessful()
if __name__ == "__main__":
print("RETURNCODE: ", test())
|
[] |
[] |
[
"FSLDIR"
] |
[]
|
["FSLDIR"]
|
python
| 1 | 0 | |
client/tests/utils.py
|
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
import os, json
from client import BaseClient, INLClient
from ci.tests import utils
from ci import models
def create_json_response(canceled=False, success=True):
ret = {'status': 'OK'}
if canceled:
ret['command'] = 'cancel'
else:
ret['command'] = 'none'
ret['success'] = success
return ret
def create_step_dict(script_sleep=2, num=1):
step = {"environment": {"foo": "bar", "step_var_with_root": "BUILD_ROOT/foo"},
'script': 'echo test_output1; sleep %s; echo test_output2' % script_sleep,
'stepresult_id': num,
'step_num': num,
'step_name': 'step {}'.format(num),
'step_id': num,
'abort_on_failure': True,
'allowed_to_fail': False,
}
return step
def create_job_dict(num_steps=1, pk=1):
job = {'environment': {'base_repo': 'base repo', "var_with_root": "BUILD_ROOT/bar"},
'recipe_name': 'test_job',
'prestep_sources': ['prestep'],
'abort_on_failure': True,
'job_id': pk,
'steps':[create_step_dict(num=i) for i in range(num_steps)]
}
return job
def default_client_info():
return {"url": "test_url",
"client_name": "client_name",
"server": "https:://<server0>",
"servers": ["https://<server0>", "https://<server1>"],
"build_configs": ["linux-gnu"],
"ssl_verify": False,
"ssl_cert": "",
"log_file": "",
"log_dir": os.path.abspath(os.path.dirname(__file__)),
"build_key": "1234",
"single_shot": "False",
"poll": 30,
"daemon_cmd": "",
"request_timeout": 30,
"update_step_time": 20,
"server_update_timeout": 5,
"server_update_interval": 30,
"max_output_size": 5*1024*1024,
}
def read_json_test_file(fname):
dirname = os.path.dirname(os.path.realpath(__file__))
full_name = os.path.join(dirname, fname)
with open(full_name, "r") as f:
json_data = f.read()
return json.loads(json_data)
def in_call_args(mock_obj, pat, idx):
for call in mock_obj.call_args_list:
if str(pat) in str(call[0][idx]):
return True
return False
def server_url(stage, client_info, step):
url_names = {"start_step": "start_step_result",
"update_step": "update_step_result",
"complete_step": "complete_step_result",
}
url = "%s/client/%s/%s/%s/%s/" % (client_info["server"], url_names[stage], client_info["build_key"], client_info["client_name"], step["stepresult_id"])
return url
def check_finished(test_obj, claimed_job, client_info, mock_obj):
finished = "%s/client/job_finished/%s/%s/%s/" % (client_info["server"], client_info["build_key"], client_info["client_name"], claimed_job["job_info"]["job_id"])
test_obj.assertTrue(in_call_args(mock_obj, finished, 0))
def check_step(test_obj, step, client_info, mock_obj):
start = "start %s" % step["step_name"]
done = "done %s" % step["step_name"]
env_line = "/foo/bar/global /foo/bar/%s" % step["step_name"]
line = "%s\\n%s\\n%s\\n" % (env_line, start, done)
test_obj.assertTrue(in_call_args(mock_obj, server_url("start_step", client_info, step), 0))
test_obj.assertTrue(in_call_args(mock_obj, server_url("update_step", client_info, step), 0))
test_obj.assertTrue(in_call_args(mock_obj, server_url("complete_step", client_info, step), 0))
test_obj.assertTrue(in_call_args(mock_obj, step["stepresult_id"], 1))
test_obj.assertTrue(in_call_args(mock_obj, env_line, 1))
test_obj.assertTrue(in_call_args(mock_obj, start, 1))
test_obj.assertTrue(in_call_args(mock_obj, done, 1))
test_obj.assertTrue(in_call_args(mock_obj, line, 1))
def check_calls(test_obj, claimed_job, client_info, mock_obj):
for step in claimed_job["job_info"]["steps"]:
check_step(test_obj, step, client_info, mock_obj)
check_finished(test_obj, claimed_job, client_info, mock_obj)
def create_base_client(log_dir=None, log_file=None):
client_info = default_client_info()
if log_dir != None:
client_info["log_dir"] = log_dir
if log_file != None:
client_info["log_file"] = log_file
BaseClient.setup_logger() # logger on stdout
return BaseClient.BaseClient(client_info)
def create_inl_client(log_dir=None, log_file=None):
client_info = default_client_info()
BaseClient.setup_logger() # logger on stdout
return INLClient.INLClient(client_info)
def create_client_job(recipe_dir, name="TestJob", sleep=1):
user = utils.get_test_user()
recipe = utils.create_recipe(user=user, name=name)
test_job = utils.create_job(user=user, recipe=recipe)
test_job.ready = True
test_job.client = None
test_job.status = models.JobStatus.NOT_STARTED
test_job.save()
# create a prestep to make sure sourcing functions work
prestep0 = utils.create_prestepsource(filename="prestep0.sh", recipe=recipe)
with open(os.path.join(recipe_dir, prestep0.filename), "w") as f:
f.write('function start_message()\n{\n echo start "$*"\n}')
# create a prestep to make sure sourcing functions work
prestep1 = utils.create_prestepsource(filename="prestep1.sh", recipe=recipe)
with open(os.path.join(recipe_dir, prestep1.filename), "w") as f:
f.write('function end_message()\n{\n echo end "$*"\n}')
# create a global environment variable to test env works
# as well as BUILD_ROOT replacement
utils.create_recipe_environment(name="GLOBAL_NAME", value="BUILD_ROOT/global", recipe=recipe)
count = 0
for s in ["step0", "step1", "step2"]:
step = utils.create_step(name=s, recipe=recipe, position=count)
# create a step environment variable to test env works
# as well as BUILD_ROOT replacement
utils.create_step_environment(name="STEP_NAME", value="BUILD_ROOT/%s" % s, step=step)
step.filename = "%s.sh" % s
step.save()
count += 1
script_filename = os.path.join(recipe_dir, step.filename)
with open(script_filename, "w") as f:
f.write("echo $GLOBAL_NAME $recipe_name $STEP_NAME\nstart_message {0}:{1}\nsleep {2}\nend_message {0}:{1}\n"
.format(recipe.name, s, sleep))
return test_job
def create_job_with_nested_bash(recipe_dir, name="TestJob", sleep=10):
user = utils.get_test_user()
recipe = utils.create_recipe(user=user, name=name)
test_job = utils.create_job(user=user, recipe=recipe)
test_job.ready = True
test_job.client = None
test_job.status = models.JobStatus.NOT_STARTED
test_job.save()
step = utils.create_step(name="step0", recipe=recipe, position=0)
step.filename = "step0.sh"
step.save()
script_filename = os.path.join(recipe_dir, step.filename)
sub_script_filename = os.path.join(recipe_dir, "step0_sub.sh")
sub_sub_script_filename = os.path.join(recipe_dir, "step0_sub_sub.sh")
with open(script_filename, "w") as f:
f.write("#!/bin/bash\necho 'Launching {0}'\n{0}\necho '{0} returned '".format(sub_script_filename))
with open(sub_script_filename, "w") as f:
f.write("#!/bin/bash\necho 'Launching {0}'\n{0}\necho '{0} returned'".format(sub_sub_script_filename))
import stat
st = os.stat(sub_script_filename)
os.chmod(sub_script_filename, st.st_mode | stat.S_IEXEC)
with open(sub_sub_script_filename, "w") as f:
f.write("#!/bin/bash\necho 'Sleeping {0}...'\nsleep {0}\necho 'Finished sleeping'".format(sleep))
st = os.stat(sub_sub_script_filename)
os.chmod(sub_sub_script_filename, st.st_mode | stat.S_IEXEC)
return test_job
def check_complete_step(self, job, result):
global_var = "%s/global" % os.environ["BUILD_ROOT"]
step_var = "%s/%s" % (os.environ["BUILD_ROOT"], result.name)
output = "{0} {1} {2}\nstart {1}:{3}\nend {1}:{3}\n".format(global_var, job.recipe.name, step_var, result.name)
self.assertEqual(result.output, output)
def check_complete_job(self, job):
job.refresh_from_db()
self.assertEqual(job.step_results.count(), 3)
for result in job.step_results.order_by("position"):
check_complete_step(self, job, result)
self.assertEqual(job.complete, True)
self.assertEqual(job.status, models.JobStatus.SUCCESS)
self.assertGreater(job.seconds.total_seconds(), 1)
def check_canceled_job(self, job):
job.refresh_from_db()
self.assertEqual(job.step_results.count(), 3)
found_cancel = False
for result in job.step_results.order_by("position"):
if result.status == models.JobStatus.CANCELED:
self.assertEqual(result.output, "")
self.assertGreater(job.seconds.total_seconds(), 1)
found_cancel = True
elif result.status == models.JobStatus.SUCCESS:
check_complete_step(self, job, result)
self.assertGreater(job.seconds.total_seconds(), 1)
self.assertEqual(found_cancel, True)
self.assertEqual(job.complete, True)
self.assertEqual(job.status, models.JobStatus.CANCELED)
def check_stopped_job(self, job):
job.refresh_from_db()
self.assertEqual(job.step_results.count(), 0)
self.assertEqual(job.complete, False)
self.assertEqual(job.status, models.JobStatus.NOT_STARTED)
|
[] |
[] |
[
"BUILD_ROOT"
] |
[]
|
["BUILD_ROOT"]
|
python
| 1 | 0 | |
my_university_api/config.py
|
# This file contain the configuration of our API
####################################################################
# import
####################################################################
import os # OS routines for NT or Posix depending on what system we're on
####################################################################
# class
####################################################################
class Config(object):
# Set the secret key of our api, if is present it is taken from the environment, in the other case
# we have a default string. This is used because various lib use this to work
SECRET_KEY = os.environ.get('SECRET_KEY') or 'TECNOLOGIE_WEB_2020'
SWAGGER_UI_DOC_EXPANSION = os.environ.get('SWAGGER_UI_DOC_EXPANSION') or 'none'
SWAGGER_UI_REQUEST_DURATION = os.environ.get('SWAGGER_UI_REQUEST_DURATION') or True
CORS_HEADERS = os.environ.get('CORS_HEADERS') or 'Content-Type'
|
[] |
[] |
[
"CORS_HEADERS",
"SECRET_KEY",
"SWAGGER_UI_DOC_EXPANSION",
"SWAGGER_UI_REQUEST_DURATION"
] |
[]
|
["CORS_HEADERS", "SECRET_KEY", "SWAGGER_UI_DOC_EXPANSION", "SWAGGER_UI_REQUEST_DURATION"]
|
python
| 4 | 0 | |
app/config.py
|
#!/usr/bin/env python3
import html
import cgi
import os
import http.cookies
import funct
import sql
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader('templates/'))
template = env.get_template('config.html')
funct.check_login()
form = cgi.FieldStorage()
serv = form.getvalue('serv')
config_read = ""
cfg = ""
stderr = ""
error = ""
aftersave = ""
try:
cookie = http.cookies.SimpleCookie(os.environ.get("HTTP_COOKIE"))
user_id = cookie.get('uuid')
user = sql.get_user_name_by_uuid(user_id.value)
servers = sql.get_dick_permit()
token = sql.get_token(user_id.value)
except:
pass
hap_configs_dir = funct.get_config_var('configs', 'haproxy_save_configs_dir')
if serv is not None:
cfg = hap_configs_dir + serv + "-" + funct.get_data('config') + ".cfg"
if serv is not None and form.getvalue('open') is not None :
try:
funct.logging(serv, "config.py open config")
except:
pass
error = funct.get_config(serv, cfg)
try:
conf = open(cfg, "r")
config_read = conf.read()
conf.close
except IOError:
error += '<br />Can\'t read import config file'
os.system("/bin/mv %s %s.old" % (cfg, cfg))
if serv is not None and form.getvalue('config') is not None:
try:
funct.logging(serv, "config.py edited config")
except:
pass
config = form.getvalue('config')
oldcfg = form.getvalue('oldconfig')
save = form.getvalue('save')
aftersave = 1
try:
with open(cfg, "a") as conf:
conf.write(config)
except IOError:
error = "Can't read import config file"
MASTERS = sql.is_master(serv)
for master in MASTERS:
if master[0] != None:
funct.upload_and_restart(master[0], cfg, just_save=save)
stderr = funct.upload_and_restart(serv, cfg, just_save=save)
funct.diff_config(oldcfg, cfg)
if save:
c = http.cookies.SimpleCookie(os.environ.get("HTTP_COOKIE"))
c["restart"] = form.getvalue('serv')
print(c)
os.system("/bin/rm -f " + hap_configs_dir + "*.old")
print('Content-type: text/html\n')
template = template.render(h2 = 1, title = "HAProxy 相关配置",
role = sql.get_user_role_by_uuid(user_id.value),
action = "config.py",
user = user,
select_id = "serv",
serv = serv,
aftersave = aftersave,
config = config_read,
cfg = cfg,
selects = servers,
stderr = stderr,
error = error,
note = 1,
token = token)
print(template)
|
[] |
[] |
[
"HTTP_COOKIE"
] |
[]
|
["HTTP_COOKIE"]
|
python
| 1 | 0 | |
test/services_slos_groups_test.go
|
package test
import (
"github.com/gruntwork-io/terratest/modules/terraform"
"os"
"testing"
)
func TestTerraformServicesSlosGroupsExample(t *testing.T) {
projectId := os.Getenv("PROJECT_ID")
terraformOptions := terraform.WithDefaultRetryableErrors(t, &terraform.Options{
TerraformDir: "../examples/services_slos_groups",
Vars: map[string]interface{}{
"project_id": projectId,
},
})
defer terraform.Destroy(t, terraformOptions)
terraform.InitAndApply(t, terraformOptions)
}
|
[
"\"PROJECT_ID\""
] |
[] |
[
"PROJECT_ID"
] |
[]
|
["PROJECT_ID"]
|
go
| 1 | 0 | |
pkg/cmd/dockerregistry/dockerregistry.go
|
package dockerregistry
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"time"
log "github.com/Sirupsen/logrus"
gorillahandlers "github.com/gorilla/handlers"
"github.com/docker/distribution/configuration"
"github.com/docker/distribution/context"
"github.com/docker/distribution/health"
"github.com/docker/distribution/registry/auth"
"github.com/docker/distribution/registry/handlers"
"github.com/docker/distribution/uuid"
"github.com/docker/distribution/version"
_ "github.com/docker/distribution/registry/auth/htpasswd"
_ "github.com/docker/distribution/registry/auth/token"
_ "github.com/docker/distribution/registry/proxy"
_ "github.com/docker/distribution/registry/storage/driver/azure"
_ "github.com/docker/distribution/registry/storage/driver/filesystem"
_ "github.com/docker/distribution/registry/storage/driver/gcs"
_ "github.com/docker/distribution/registry/storage/driver/inmemory"
_ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront"
_ "github.com/docker/distribution/registry/storage/driver/oss"
_ "github.com/docker/distribution/registry/storage/driver/s3-aws"
_ "github.com/docker/distribution/registry/storage/driver/swift"
"strings"
"github.com/openshift/origin/pkg/cmd/server/crypto"
"github.com/openshift/origin/pkg/cmd/util/clientcmd"
"github.com/openshift/origin/pkg/dockerregistry/server"
"github.com/openshift/origin/pkg/dockerregistry/server/api"
"github.com/openshift/origin/pkg/dockerregistry/server/audit"
registryconfig "github.com/openshift/origin/pkg/dockerregistry/server/configuration"
)
// Execute runs the Docker registry.
func Execute(configFile io.Reader) {
dockerConfig, extraConfig, err := registryconfig.Parse(configFile)
if err != nil {
log.Fatalf("error parsing configuration file: %s", err)
}
setDefaultMiddleware(dockerConfig)
setDefaultLogParameters(dockerConfig)
ctx := context.Background()
ctx = server.WithConfiguration(ctx, extraConfig)
ctx, err = configureLogging(ctx, dockerConfig)
if err != nil {
log.Fatalf("error configuring logger: %v", err)
}
registryClient := server.NewRegistryClient(clientcmd.NewConfig().BindToFile())
ctx = server.WithRegistryClient(ctx, registryClient)
log.Infof("version=%s", version.Version)
// inject a logger into the uuid library. warns us if there is a problem
// with uuid generation under low entropy.
uuid.Loggerf = context.GetLogger(ctx).Warnf
// add parameters for the auth middleware
if dockerConfig.Auth.Type() == server.OpenShiftAuth {
if dockerConfig.Auth[server.OpenShiftAuth] == nil {
dockerConfig.Auth[server.OpenShiftAuth] = make(configuration.Parameters)
}
dockerConfig.Auth[server.OpenShiftAuth][server.AccessControllerOptionParams] = server.AccessControllerParams{
Logger: context.GetLogger(ctx),
SafeClientConfig: registryClient.SafeClientConfig(),
}
}
app := handlers.NewApp(ctx, dockerConfig)
// Add a token handling endpoint
if options, usingOpenShiftAuth := dockerConfig.Auth[server.OpenShiftAuth]; usingOpenShiftAuth {
tokenRealm, err := server.TokenRealm(options)
if err != nil {
context.GetLogger(app).Fatalf("error setting up token auth: %s", err)
}
err = app.NewRoute().Methods("GET").PathPrefix(tokenRealm.Path).Handler(server.NewTokenHandler(ctx, registryClient)).GetError()
if err != nil {
context.GetLogger(app).Fatalf("error setting up token endpoint at %q: %v", tokenRealm.Path, err)
}
context.GetLogger(app).Debugf("configured token endpoint at %q", tokenRealm.String())
}
// TODO add https scheme
adminRouter := app.NewRoute().PathPrefix(api.AdminPrefix).Subrouter()
pruneAccessRecords := func(*http.Request) []auth.Access {
return []auth.Access{
{
Resource: auth.Resource{
Type: "admin",
},
Action: "prune",
},
}
}
app.RegisterRoute(
// DELETE /admin/blobs/<digest>
adminRouter.Path(api.AdminPath).Methods("DELETE"),
// handler
server.BlobDispatcher,
// repo name not required in url
handlers.NameNotRequired,
// custom access records
pruneAccessRecords,
)
// Registry extensions endpoint provides extra functionality to handle the image
// signatures.
server.RegisterSignatureHandler(app)
// Registry extensions endpoint provides prometheus metrics.
if extraConfig.Metrics.Enabled {
if len(extraConfig.Metrics.Secret) == 0 {
context.GetLogger(app).Fatalf("openshift.metrics.secret field cannot be empty when metrics are enabled")
}
server.RegisterMetricHandler(app)
}
// Advertise features supported by OpenShift
if app.Config.HTTP.Headers == nil {
app.Config.HTTP.Headers = http.Header{}
}
app.Config.HTTP.Headers.Set("X-Registry-Supports-Signatures", "1")
app.RegisterHealthChecks()
handler := alive("/", app)
// TODO: temporarily keep for backwards compatibility; remove in the future
handler = alive("/healthz", handler)
handler = health.Handler(handler)
handler = panicHandler(handler)
handler = gorillahandlers.CombinedLoggingHandler(os.Stdout, handler)
if dockerConfig.HTTP.TLS.Certificate == "" {
context.GetLogger(app).Infof("listening on %v", dockerConfig.HTTP.Addr)
if err := http.ListenAndServe(dockerConfig.HTTP.Addr, handler); err != nil {
context.GetLogger(app).Fatalln(err)
}
} else {
var (
minVersion uint16
cipherSuites []uint16
)
if s := os.Getenv("REGISTRY_HTTP_TLS_MINVERSION"); len(s) > 0 {
minVersion, err = crypto.TLSVersion(s)
if err != nil {
context.GetLogger(app).Fatalln(fmt.Errorf("invalid TLS version %q specified in REGISTRY_HTTP_TLS_MINVERSION: %v (valid values are %q)", s, err, crypto.ValidTLSVersions()))
}
}
if s := os.Getenv("REGISTRY_HTTP_TLS_CIPHERSUITES"); len(s) > 0 {
for _, cipher := range strings.Split(s, ",") {
cipherSuite, err := crypto.CipherSuite(cipher)
if err != nil {
context.GetLogger(app).Fatalln(fmt.Errorf("invalid cipher suite %q specified in REGISTRY_HTTP_TLS_CIPHERSUITES: %v (valid suites are %q)", s, err, crypto.ValidCipherSuites()))
}
cipherSuites = append(cipherSuites, cipherSuite)
}
}
tlsConf := crypto.SecureTLSConfig(&tls.Config{
ClientAuth: tls.NoClientCert,
MinVersion: minVersion,
CipherSuites: cipherSuites,
})
if len(dockerConfig.HTTP.TLS.ClientCAs) != 0 {
pool := x509.NewCertPool()
for _, ca := range dockerConfig.HTTP.TLS.ClientCAs {
caPem, err := ioutil.ReadFile(ca)
if err != nil {
context.GetLogger(app).Fatalln(err)
}
if ok := pool.AppendCertsFromPEM(caPem); !ok {
context.GetLogger(app).Fatalln(fmt.Errorf("Could not add CA to pool"))
}
}
for _, subj := range pool.Subjects() {
context.GetLogger(app).Debugf("CA Subject: %s", string(subj))
}
tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
tlsConf.ClientCAs = pool
}
context.GetLogger(app).Infof("listening on %v, tls", dockerConfig.HTTP.Addr)
server := &http.Server{
Addr: dockerConfig.HTTP.Addr,
Handler: handler,
TLSConfig: tlsConf,
}
if err := server.ListenAndServeTLS(dockerConfig.HTTP.TLS.Certificate, dockerConfig.HTTP.TLS.Key); err != nil {
context.GetLogger(app).Fatalln(err)
}
}
}
// configureLogging prepares the context with a logger using the
// configuration.
func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) {
if config.Log.Level == "" && config.Log.Formatter == "" {
// If no config for logging is set, fallback to deprecated "Loglevel".
log.SetLevel(logLevel(config.Loglevel))
ctx = context.WithLogger(ctx, context.GetLogger(ctx))
return ctx, nil
}
log.SetLevel(logLevel(config.Log.Level))
formatter := config.Log.Formatter
if formatter == "" {
formatter = "text" // default formatter
}
switch formatter {
case "json":
log.SetFormatter(&log.JSONFormatter{
TimestampFormat: time.RFC3339Nano,
})
case "text":
log.SetFormatter(&log.TextFormatter{
TimestampFormat: time.RFC3339Nano,
})
case "logstash":
// just let the library use default on empty string.
if config.Log.Formatter != "" {
return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter)
}
// "github.com/Sirupsen/logrus/formatters/logstash"
// log.SetFormatter(&logstash.LogstashFormatter{
// TimestampFormat: time.RFC3339Nano,
// })
default:
// just let the library use default on empty string.
if config.Log.Formatter != "" {
return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter)
}
}
if config.Log.Formatter != "" {
log.Debugf("using %q logging formatter", config.Log.Formatter)
}
if len(config.Log.Fields) > 0 {
// build up the static fields, if present.
var fields []interface{}
for k := range config.Log.Fields {
fields = append(fields, k)
}
ctx = context.WithValues(ctx, config.Log.Fields)
ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...))
}
return ctx, nil
}
func logLevel(level configuration.Loglevel) log.Level {
l, err := log.ParseLevel(string(level))
if err != nil {
l = log.InfoLevel
log.Warnf("error parsing level %q: %v, using %q ", level, err, l)
}
return l
}
// alive simply wraps the handler with a route that always returns an http 200
// response when the path is matched. If the path is not matched, the request
// is passed to the provided handler. There is no guarantee of anything but
// that the server is up. Wrap with other handlers (such as health.Handler)
// for greater affect.
func alive(path string, handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == path {
w.Header().Set("Cache-Control", "no-cache")
w.WriteHeader(http.StatusOK)
return
}
handler.ServeHTTP(w, r)
})
}
// panicHandler add a HTTP handler to web app. The handler recover the happening
// panic. logrus.Panic transmits panic message to pre-config log hooks, which is
// defined in config.yml.
func panicHandler(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer func() {
if err := recover(); err != nil {
log.Panic(fmt.Sprintf("%v", err))
}
}()
handler.ServeHTTP(w, r)
})
}
func setDefaultMiddleware(config *configuration.Configuration) {
// Default to openshift middleware for relevant types
// This allows custom configs based on old default configs to continue to work
if config.Middleware == nil {
config.Middleware = map[string][]configuration.Middleware{}
}
for _, middlewareType := range []string{"registry", "repository", "storage"} {
found := false
for _, middleware := range config.Middleware[middlewareType] {
if middleware.Name == "openshift" {
found = true
break
}
}
if found {
continue
}
config.Middleware[middlewareType] = append(config.Middleware[middlewareType], configuration.Middleware{
Name: "openshift",
})
log.Errorf("obsolete configuration detected, please add openshift %s middleware into registry config file", middlewareType)
}
return
}
func setDefaultLogParameters(config *configuration.Configuration) {
if len(config.Log.Fields) == 0 {
config.Log.Fields = make(map[string]interface{})
}
config.Log.Fields[audit.LogEntryType] = audit.DefaultLoggerType
}
|
[
"\"REGISTRY_HTTP_TLS_MINVERSION\"",
"\"REGISTRY_HTTP_TLS_CIPHERSUITES\""
] |
[] |
[
"REGISTRY_HTTP_TLS_MINVERSION",
"REGISTRY_HTTP_TLS_CIPHERSUITES"
] |
[]
|
["REGISTRY_HTTP_TLS_MINVERSION", "REGISTRY_HTTP_TLS_CIPHERSUITES"]
|
go
| 2 | 0 | |
test/e2e/systemd_test.go
|
// +build !remoteclient
package integration
import (
"io/ioutil"
"os"
. "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Podman systemd", func() {
var (
tempdir string
err error
podmanTest *PodmanTestIntegration
systemd_unit_file string
)
BeforeEach(func() {
SkipIfRootless()
tempdir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
podmanTest.SeedImages()
systemd_unit_file = `[Unit]
Description=redis container
[Service]
Restart=always
ExecStart=/usr/bin/podman start -a redis
ExecStop=/usr/bin/podman stop -t 10 redis
KillMode=process
[Install]
WantedBy=multi-user.target
`
})
AfterEach(func() {
podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
})
It("podman start container by systemd", func() {
if os.Getenv("SKIP_USERNS") != "" {
Skip("Skip userns tests.")
}
sys_file := ioutil.WriteFile("/etc/systemd/system/redis.service", []byte(systemd_unit_file), 0644)
Expect(sys_file).To(BeNil())
defer func() {
stop := SystemExec("bash", []string{"-c", "systemctl stop redis"})
os.Remove("/etc/systemd/system/redis.service")
SystemExec("bash", []string{"-c", "systemctl daemon-reload"})
Expect(stop.ExitCode()).To(Equal(0))
}()
create := podmanTest.Podman([]string{"create", "-d", "--name", "redis", "redis"})
create.WaitWithDefaultTimeout()
Expect(create.ExitCode()).To(Equal(0))
enable := SystemExec("bash", []string{"-c", "systemctl daemon-reload"})
Expect(enable.ExitCode()).To(Equal(0))
start := SystemExec("bash", []string{"-c", "systemctl start redis"})
Expect(start.ExitCode()).To(Equal(0))
logs := SystemExec("bash", []string{"-c", "journalctl -n 20 -u redis"})
Expect(logs.ExitCode()).To(Equal(0))
status := SystemExec("bash", []string{"-c", "systemctl status redis"})
Expect(status.OutputToString()).To(ContainSubstring("active (running)"))
})
})
|
[
"\"SKIP_USERNS\""
] |
[] |
[
"SKIP_USERNS"
] |
[]
|
["SKIP_USERNS"]
|
go
| 1 | 0 | |
distsql/request_builder_test.go
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package distsql
import (
"os"
"testing"
. "github.com/pingcap/check"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/disk"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tidb/util/testleak"
"github.com/pingcap/tipb/go-tipb"
)
var _ = Suite(&testSuite{})
func TestT(t *testing.T) {
CustomVerboseFlag = true
logLevel := os.Getenv("log_level")
logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
TestingT(t)
}
var _ = Suite(&testSuite{})
type testSuite struct {
sctx sessionctx.Context
}
func (s *testSuite) SetUpSuite(c *C) {
ctx := mock.NewContext()
ctx.GetSessionVars().StmtCtx = &stmtctx.StatementContext{
MemTracker: memory.NewTracker(-1, -1),
DiskTracker: disk.NewTracker(-1, -1),
}
ctx.Store = &mock.Store{
Client: &mock.Client{
MockResponse: &mockResponse{
ctx: ctx,
batch: 1,
total: 2,
},
},
}
s.sctx = ctx
}
func (s *testSuite) TearDownSuite(c *C) {
}
func (s *testSuite) SetUpTest(c *C) {
testleak.BeforeTest()
ctx := s.sctx.(*mock.Context)
store := ctx.Store.(*mock.Store)
store.Client = &mock.Client{
MockResponse: &mockResponse{
ctx: ctx,
batch: 1,
total: 2,
},
}
}
func (s *testSuite) TearDownTest(c *C) {
testleak.AfterTest(c)()
}
type handleRange struct {
start int64
end int64
}
func (s *testSuite) getExpectedRanges(tid int64, hrs []*handleRange) []kv.KeyRange {
krs := make([]kv.KeyRange, 0, len(hrs))
for _, hr := range hrs {
low := codec.EncodeInt(nil, hr.start)
high := codec.EncodeInt(nil, hr.end)
high = kv.Key(high).PrefixNext()
startKey := tablecodec.EncodeRowKey(tid, low)
endKey := tablecodec.EncodeRowKey(tid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
return krs
}
func (s *testSuite) TestTableHandlesToKVRanges(c *C) {
handles := []kv.Handle{kv.IntHandle(0), kv.IntHandle(2), kv.IntHandle(3), kv.IntHandle(4), kv.IntHandle(5),
kv.IntHandle(10), kv.IntHandle(11), kv.IntHandle(100), kv.IntHandle(9223372036854775806), kv.IntHandle(9223372036854775807)}
// Build expected key ranges.
hrs := make([]*handleRange, 0, len(handles))
hrs = append(hrs, &handleRange{start: 0, end: 0})
hrs = append(hrs, &handleRange{start: 2, end: 5})
hrs = append(hrs, &handleRange{start: 10, end: 11})
hrs = append(hrs, &handleRange{start: 100, end: 100})
hrs = append(hrs, &handleRange{start: 9223372036854775806, end: 9223372036854775807})
// Build key ranges.
expect := s.getExpectedRanges(1, hrs)
actual := TableHandlesToKVRanges(1, handles)
// Compare key ranges and expected key ranges.
c.Assert(len(actual), Equals, len(expect))
for i := range actual {
c.Assert(actual[i].StartKey, DeepEquals, expect[i].StartKey)
c.Assert(actual[i].EndKey, DeepEquals, expect[i].EndKey)
}
}
func (s *testSuite) TestTableRangesToKVRanges(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
},
{
LowVal: []types.Datum{types.NewIntDatum(2)},
HighVal: []types.Datum{types.NewIntDatum(4)},
LowExclude: true,
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(4)},
HighVal: []types.Datum{types.NewIntDatum(19)},
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(19)},
HighVal: []types.Datum{types.NewIntDatum(32)},
LowExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(34)},
HighVal: []types.Datum{types.NewIntDatum(34)},
LowExclude: true,
},
}
actual := TableRangesToKVRanges(13, ranges, nil)
expect := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
},
}
for i := 0; i < len(actual); i++ {
c.Assert(actual[i], DeepEquals, expect[i])
}
}
func (s *testSuite) TestIndexRangesToKVRanges(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
},
{
LowVal: []types.Datum{types.NewIntDatum(2)},
HighVal: []types.Datum{types.NewIntDatum(4)},
LowExclude: true,
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(4)},
HighVal: []types.Datum{types.NewIntDatum(19)},
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(19)},
HighVal: []types.Datum{types.NewIntDatum(32)},
LowExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(34)},
HighVal: []types.Datum{types.NewIntDatum(34)},
LowExclude: true,
},
}
expect := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
},
}
actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 12, 15, ranges, nil)
c.Assert(err, IsNil)
for i := range actual {
c.Assert(actual[i], DeepEquals, expect[i])
}
}
func (s *testSuite) TestRequestBuilder1(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
},
{
LowVal: []types.Datum{types.NewIntDatum(2)},
HighVal: []types.Datum{types.NewIntDatum(4)},
LowExclude: true,
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(4)},
HighVal: []types.Datum{types.NewIntDatum(19)},
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(19)},
HighVal: []types.Datum{types.NewIntDatum(32)},
LowExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(34)},
HighVal: []types.Datum{types.NewIntDatum(34)},
LowExclude: true,
},
}
actual, err := (&RequestBuilder{}).SetTableRanges(12, ranges, nil).
SetDAGRequest(&tipb.DAGRequest{}).
SetDesc(false).
SetKeepOrder(false).
SetFromSessionVars(variable.NewSessionVars()).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 103,
StartTs: 0x0,
Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
KeyRanges: []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
},
},
Cacheable: true,
KeepOrder: false,
Desc: false,
Concurrency: variable.DefDistSQLScanConcurrency,
IsolationLevel: 0,
Priority: 0,
NotFillCache: false,
SyncLog: false,
Streaming: false,
ReplicaRead: kv.ReplicaReadLeader,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder2(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
},
{
LowVal: []types.Datum{types.NewIntDatum(2)},
HighVal: []types.Datum{types.NewIntDatum(4)},
LowExclude: true,
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(4)},
HighVal: []types.Datum{types.NewIntDatum(19)},
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(19)},
HighVal: []types.Datum{types.NewIntDatum(32)},
LowExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(34)},
HighVal: []types.Datum{types.NewIntDatum(34)},
LowExclude: true,
},
}
actual, err := (&RequestBuilder{}).SetIndexRanges(new(stmtctx.StatementContext), 12, 15, ranges).
SetDAGRequest(&tipb.DAGRequest{}).
SetDesc(false).
SetKeepOrder(false).
SetFromSessionVars(variable.NewSessionVars()).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 103,
StartTs: 0x0,
Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
KeyRanges: []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
},
},
Cacheable: true,
KeepOrder: false,
Desc: false,
Concurrency: variable.DefDistSQLScanConcurrency,
IsolationLevel: 0,
Priority: 0,
NotFillCache: false,
SyncLog: false,
Streaming: false,
ReplicaRead: kv.ReplicaReadLeader,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder3(c *C) {
handles := []kv.Handle{kv.IntHandle(0), kv.IntHandle(2), kv.IntHandle(3), kv.IntHandle(4),
kv.IntHandle(5), kv.IntHandle(10), kv.IntHandle(11), kv.IntHandle(100)}
actual, err := (&RequestBuilder{}).SetTableHandles(15, handles).
SetDAGRequest(&tipb.DAGRequest{}).
SetDesc(false).
SetKeepOrder(false).
SetFromSessionVars(variable.NewSessionVars()).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 103,
StartTs: 0x0,
Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
KeyRanges: []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65},
},
},
Cacheable: true,
KeepOrder: false,
Desc: false,
Concurrency: variable.DefDistSQLScanConcurrency,
IsolationLevel: 0,
Priority: 0,
NotFillCache: false,
SyncLog: false,
Streaming: false,
ReplicaRead: kv.ReplicaReadLeader,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder4(c *C) {
keyRanges := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65},
},
}
actual, err := (&RequestBuilder{}).SetKeyRanges(keyRanges).
SetDAGRequest(&tipb.DAGRequest{}).
SetDesc(false).
SetKeepOrder(false).
SetStreaming(true).
SetFromSessionVars(variable.NewSessionVars()).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 103,
StartTs: 0x0,
Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
KeyRanges: keyRanges,
Cacheable: true,
KeepOrder: false,
Desc: false,
Concurrency: variable.DefDistSQLScanConcurrency,
IsolationLevel: 0,
Priority: 0,
Streaming: true,
NotFillCache: false,
SyncLog: false,
ReplicaRead: kv.ReplicaReadLeader,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder5(c *C) {
keyRanges := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65},
},
}
actual, err := (&RequestBuilder{}).SetKeyRanges(keyRanges).
SetAnalyzeRequest(&tipb.AnalyzeReq{}).
SetKeepOrder(true).
SetConcurrency(15).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 104,
StartTs: 0x0,
Data: []uint8{0x8, 0x0, 0x18, 0x0, 0x20, 0x0},
KeyRanges: keyRanges,
KeepOrder: true,
Desc: false,
Concurrency: 15,
IsolationLevel: kv.RC,
Priority: 1,
NotFillCache: true,
SyncLog: false,
Streaming: false,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder6(c *C) {
keyRanges := []kv.KeyRange{
{
StartKey: kv.Key{0x00, 0x01},
EndKey: kv.Key{0x02, 0x03},
},
}
concurrency := 10
actual, err := (&RequestBuilder{}).SetKeyRanges(keyRanges).
SetChecksumRequest(&tipb.ChecksumRequest{}).
SetConcurrency(concurrency).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 105,
StartTs: 0x0,
Data: []uint8{0x10, 0x0, 0x18, 0x0},
KeyRanges: keyRanges,
KeepOrder: false,
Desc: false,
Concurrency: concurrency,
IsolationLevel: 0,
Priority: 0,
NotFillCache: true,
SyncLog: false,
Streaming: false,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder7(c *C) {
vars := variable.NewSessionVars()
vars.SetReplicaRead(kv.ReplicaReadFollower)
concurrency := 10
actual, err := (&RequestBuilder{}).
SetFromSessionVars(vars).
SetConcurrency(concurrency).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 0,
StartTs: 0x0,
KeepOrder: false,
Desc: false,
Concurrency: concurrency,
IsolationLevel: 0,
Priority: 0,
NotFillCache: false,
SyncLog: false,
Streaming: false,
ReplicaRead: kv.ReplicaReadFollower,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder8(c *C) {
sv := variable.NewSessionVars()
sv.SnapshotInfoschema = infoschema.MockInfoSchemaWithSchemaVer(nil, 10000)
actual, err := (&RequestBuilder{}).
SetFromSessionVars(sv).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 0,
StartTs: 0x0,
Data: []uint8(nil),
Concurrency: variable.DefDistSQLScanConcurrency,
IsolationLevel: 0,
Priority: 0,
MemTracker: (*memory.Tracker)(nil),
ReplicaRead: 0x1,
SchemaVar: 10000,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestTableRangesToKVRangesWithFbs(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(4)},
},
}
hist := statistics.NewHistogram(1, 30, 30, 0, types.NewFieldType(mysql.TypeLonglong), chunk.InitialCapacity, 0)
for i := 0; i < 10; i++ {
hist.Bounds.AppendInt64(0, int64(i))
hist.Bounds.AppendInt64(0, int64(i+2))
hist.Buckets = append(hist.Buckets, statistics.Bucket{Repeat: 10, Count: int64(i + 30)})
}
fb := statistics.NewQueryFeedback(0, hist, 0, false)
lower, upper := types.NewIntDatum(2), types.NewIntDatum(3)
fb.Feedback = []statistics.Feedback{
{Lower: &lower, Upper: &upper, Count: 1, Repeat: 1},
}
actual := TableRangesToKVRanges(0, ranges, fb)
expect := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5},
},
}
for i := 0; i < len(actual); i++ {
c.Assert(actual[i], DeepEquals, expect[i])
}
}
func (s *testSuite) TestIndexRangesToKVRangesWithFbs(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(4)},
},
}
hist := statistics.NewHistogram(1, 30, 30, 0, types.NewFieldType(mysql.TypeLonglong), chunk.InitialCapacity, 0)
for i := 0; i < 10; i++ {
hist.Bounds.AppendInt64(0, int64(i))
hist.Bounds.AppendInt64(0, int64(i+2))
hist.Buckets = append(hist.Buckets, statistics.Bucket{Repeat: 10, Count: int64(i + 30)})
}
fb := statistics.NewQueryFeedback(0, hist, 0, false)
lower, upper := types.NewIntDatum(2), types.NewIntDatum(3)
fb.Feedback = []statistics.Feedback{
{Lower: &lower, Upper: &upper, Count: 1, Repeat: 1},
}
actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 0, 0, ranges, fb)
c.Assert(err, IsNil)
expect := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5},
},
}
for i := 0; i < len(actual); i++ {
c.Assert(actual[i], DeepEquals, expect[i])
}
}
|
[
"\"log_level\""
] |
[] |
[
"log_level"
] |
[]
|
["log_level"]
|
go
| 1 | 0 | |
azure-functions-maven-plugin/src/test/java/com/microsoft/azure/maven/function/invoker/CommonUtils.java
|
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*/
package com.microsoft.azure.maven.function.invoker;
import com.microsoft.azure.AzureEnvironment;
import com.microsoft.azure.credentials.ApplicationTokenCredentials;
import com.microsoft.azure.management.Azure;
import com.microsoft.rest.LogLevel;
import org.apache.commons.lang3.StringUtils;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.concurrent.TimeUnit;
public class CommonUtils {
private static final String clientId = System.getenv("CLIENT_ID");
private static final String tenantId = System.getenv("TENANT_ID");
private static final String key = System.getenv("KEY");
private static final String loginAzureCli = "az login --service-principal -u %s -p %s --tenant %s";
private static final String deleteResourceGroup = "az group delete -y -n %s%s";
private static final String windowsCommand = "cmd /c %s";
private static final String nonWindowsCommand = "bash -c %s";
private static final int RETRY_TIMES = 5;
private static final int WAIT_IN_SECOND = 5;
private static Azure azureClient = null;
private static final boolean isWindows = System.getProperty("os.name").contains("Windows");
private static void azureLogin() throws IOException, InterruptedException {
executeCommand(String.format(loginAzureCli, clientId, key, tenantId));
}
public static void deleteAzureResourceGroup(String resourceGroupName, boolean waitForOperationFinish)
throws InterruptedException, IOException {
executeCommand(
String.format(deleteResourceGroup,
resourceGroupName,
waitForOperationFinish ? "" : " --no-wait"));
}
/**
* @param command input command
* @return output of the process
* @throws IOException
* @throws InterruptedException
*/
public static String executeCommand(final String command) throws IOException, InterruptedException {
if (StringUtils.isNotEmpty(command)) {
final String wholeCommand = String.format(isWindows ? windowsCommand : nonWindowsCommand, command);
final Process process = Runtime.getRuntime().exec(wholeCommand);
process.waitFor();
final BufferedReader reader = new BufferedReader(
new InputStreamReader(process.getInputStream())
);
final StringBuilder builder = new StringBuilder();
String line;
while ((line = reader.readLine()) != null) {
builder.append(line);
}
return builder.toString();
}
return "";
}
/**
* @param verification the Runnable class which contains the verification logic
* @throws Exception
*/
public static void runVerification(Runnable verification) throws Exception {
int i = 0;
while (i < RETRY_TIMES) {
try {
verification.run();
return;
} catch (Exception e) {
// ignore warm-up exception and wait for 5 seconds
e.printStackTrace();
i++;
TimeUnit.SECONDS.sleep(WAIT_IN_SECOND);
}
}
throw new Exception("Integration test fails for 5 times.");
}
/**
* @return Azure Management client which could manage azure resources
* @throws IOException
*/
public static Azure getAzureClient() throws IOException {
if (azureClient == null) {
synchronized (CommonUtils.class) {
if (azureClient == null) {
final ApplicationTokenCredentials credentials = new ApplicationTokenCredentials(
System.getenv("CLIENT_ID"), System.getenv("TENANT_ID"),
System.getenv("KEY"), AzureEnvironment.AZURE);
azureClient = Azure.configure()
.withLogLevel(LogLevel.BODY)
.authenticate(credentials)
.withDefaultSubscription();
}
}
}
return azureClient;
}
}
|
[
"\"CLIENT_ID\"",
"\"TENANT_ID\"",
"\"KEY\"",
"\"CLIENT_ID\"",
"\"TENANT_ID\"",
"\"KEY\""
] |
[] |
[
"KEY",
"TENANT_ID",
"CLIENT_ID"
] |
[]
|
["KEY", "TENANT_ID", "CLIENT_ID"]
|
java
| 3 | 0 | |
tests/frontend/logging/plugins/logtest.py
|
from buildstream import Element
class LogTest(Element):
BST_MIN_VERSION = "2.0"
def configure(self, node):
pass
def preflight(self):
pass
def get_unique_key(self):
return {}
def configure_sandbox(self, sandbox):
pass
def stage(self, sandbox):
# Here we stage the artifacts of dependencies individually without
# using a timed activity or suppressing the logging.
#
# This allows us to test the logging behavior when log lines are
# triggered by an element which is not the element being processed.
#
# * The master build log should show the element name and cache key
# of the task element, i.e. the element currently being built, not
# the element issuing the message.
#
# * In the individual task log, we expect to see the name and cache
# key of the element issuing messages, since the entire log file
# is contextual to the task, it makes more sense to provide the
# full context of the element issuing the log in this case.
#
for dep in self.dependencies():
dep.stage_artifact(sandbox)
def assemble(self, sandbox):
return "/"
# Plugin entry point
def setup():
return LogTest
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
mitsune_if.py
|
# インストールした discord.py を読み込む
import discord
import threading
import queue
import asyncio
import cmdgroup.bb as cm_bb
import cmdgroup.common as cm_common
import easy_cmd_convert
import os
class mitsune_if(discord.Client):
# 自分のBotのアクセストークンに置き換えてください
TOKEN = os.environ["YOUR_CHANNEL_ACCESS_TOKEN"]
# queue定義
api_q = queue.Queue()
# コマンドコンバーター
cmd_conv = None
# 起動時に動作する処理
async def on_ready(self):
# 起動したらターミナルにログイン通知が表示される
print('ログインしました')
# メッセージ受信時に動作する処理
async def on_message(self, message):
# メッセージ送信者がBotだった場合は無視する
if message.author.bot:
return
for line in message.content.split('\n'):
# メッセージが / で始まってれう
if(line[0] == '/'):
mes = line.strip('/')
self.api_q.put([message, mes])
elif(line[0] == '@'):
# 簡単コマンド入力
mes = line.strip('@')
mes = self.cmd_conv.convert(mes)
self.api_q.put([message, mes])
else:
# do nothing
pass
async def my_background_task(self):
await self.wait_until_ready()
while not self.is_closed():
if self.api_q.empty():
# タスク処理
await self.bb.next_step()
await asyncio.sleep(0.1)
continue
# ブロッキング取り出し
item = self.api_q.get(False)
# メッセージインスタンスの取り出し
message = item[0]
# メッセージ文字列の取り出し
mes = item[1]
# サブコマンドパース
com_list = mes.split()
if len(com_list) == 0:
show_help()
else:
# コマンドグループ呼び出し
if com_list[0] == "bb":
await self.bb.exec(com_list[1:], message)
elif com_list[0] == "bd":
await self.bb.exec(com_list[1:], message, True)
elif com_list[0] == "help":
await self.show_help(message)
elif com_list[0] == "testScn1":
# テストシナリオ1
test_scenario_1_txt = self.get_test_scenario_1()
for line in test_scenario_1_txt.split('\n'):
# メッセージが / で始まってれう
if(line[0] == '/'):
mes = line.strip('/')
self.api_q.put([message, mes])
else:
await self.common.exec(com_list, message)
# ヘルプ表示
async def show_help(self, message):
await self.bb.show_help(message)
await self.common.show_help(message)
# 初期化
def init(self):
# 接続に必要なオブジェクトを生成
self.common = cm_common.common(self)
self.bb = cm_bb.bb(self)
self.cmd_conv = easy_cmd_convert.easy_cmd_convert()
# コンストラクタ
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# create the background task and run it in the background
self.bg_task = self.loop.create_task(self.my_background_task())
# メインループ開始
def run(self):
super().run(self.TOKEN)
# テストシナリオ1
def get_test_scenario_1(self):
return '''\
/bd addMatch DBG DB
/bb startSeason
/bd setBattingOrder 6 2 23
/bd setBattingOrder 3 9 34
/bd setBattingOrder 7 5 45
/bd setBattingOrder 2 3 56
/bd setBattingOrder 1 4 67
/bd setBattingOrder 8 7 78
/bd setBattingOrder 5 8 89
/bd setBattingOrder 9 1 90
/bd setBattingOrder 4 6 1
/bd setStartingPitcher 90
/bb setBattingOrder 7 7 11
/bb setBattingOrder 6 6 12
/bb setBattingOrder 1 9 14
/bb setBattingOrder 4 3 15
/bb setBattingOrder 2 4 16
/bb setBattingOrder 3 1 17
/bb setBattingOrder 5 8 19
/bb setBattingOrder 8 2 20
/bb setBattingOrder 9 5 21
/bb setStartingPitcher 17
/bb imReady
/bd imReady\
'''
|
[] |
[] |
[
"YOUR_CHANNEL_ACCESS_TOKEN"
] |
[]
|
["YOUR_CHANNEL_ACCESS_TOKEN"]
|
python
| 1 | 0 | |
urlpool/urlpool.go
|
package urlpool
import (
"fmt"
"os"
"strconv"
"strings"
"context"
neturl "net/url"
"github.com/openbiox/butils/stringo"
"github.com/google/go-github/v27/github"
log "github.com/openbiox/butils/log"
"golang.org/x/oauth2"
)
type bgetToolsURLType struct {
Name string
Description string
Versions []string
VersionsAPI string
Tags []string
URL map[string][]string
PostShellCmd []string
}
type bgetFilesURLType struct {
Name string
Description string
URL []string
Versions []string
Tags []string
PostShellCmd []string
}
// BgetToolsPool an object bioinformatics tools URL
var BgetToolsPool = []bgetToolsURLType{}
// BgetFilesPool an object bioinformatics files URL
var BgetFilesPool = []bgetFilesURLType{}
func setOsStr(env *map[string]string) (ostype string) {
if (*env)["osType"] == "linux" {
ostype = "Linux"
} else if (*env)["osType"] == "windows" {
ostype = "windows"
} else {
ostype = "Mac"
}
return ostype
}
func QueryBgetTools(name string, env *map[string]string) (urls, postShellCmd, versions []string) {
ostype := setOsStr(env)
for i := range BgetToolsPool {
if BgetToolsPool[i].Name == name {
tmpUrls := []string{}
for k, v := range *env {
kstr := fmt.Sprintf("{{%s}}", k)
for j, _ := range BgetToolsPool[i].URL[ostype] {
BgetToolsPool[i].URL[ostype][j] = strings.Replace(BgetToolsPool[i].URL[ostype][j], kstr, v, 10000)
}
tmpUrls = BgetToolsPool[i].URL[ostype]
}
urls = append(urls, tmpUrls...)
tmp := ""
for j := range BgetToolsPool[i].PostShellCmd {
for k, v := range *env {
kstr := fmt.Sprintf("{{%s}}", k)
if tmp == "" {
tmp = strings.Replace(BgetToolsPool[i].PostShellCmd[j], kstr, v, 10000)
} else {
tmp = strings.Replace(tmp, kstr, v, 10000)
}
}
postShellCmd = append(postShellCmd, tmp)
}
if BgetToolsPool[i].VersionsAPI != "" && strings.Contains(BgetToolsPool[i].VersionsAPI, "github.com") {
versions = GitHubVersionSpider(BgetToolsPool[i].VersionsAPI)
} else {
versions = BgetToolsPool[i].Versions
}
}
}
return urls, postShellCmd, versions
}
func formatURL(tmp string, key string, rep string, url string) string {
kstr := fmt.Sprintf("{{%s}}", key)
if tmp == "" {
tmp = strings.Replace(url,
kstr, rep, 10000)
} else {
tmp = strings.Replace(tmp,
kstr, rep, 10000)
}
return tmp
}
func formatURLSlice(tmpSlice []string, env *map[string]string) (urls []string) {
chrom := []string{}
for i := 1; i < 23; i++ {
chrom = append(chrom, strconv.Itoa(i))
}
chrom = append(chrom, "X", "Y", "MT")
for _, v := range tmpSlice {
if stringo.StrDetect(v, "{{chrom}}") {
raw := v
for k := range chrom {
v = strings.Replace(raw, "{{chrom}}", chrom[k], 10000)
urls = append(urls, v)
}
}
if !stringo.StrDetect(v, "{{chrom}}") {
urls = append(urls, v)
}
}
return urls
}
func QueryBgetFiles(name string, env *map[string]string) (urls []string, postShellCmd []string, versions []string) {
for f := range BgetFilesPool {
if BgetFilesPool[f].Name == name {
for _, url := range BgetFilesPool[f].URL {
tmp := ""
tmpSlice := []string{}
for k, v := range *env {
if strings.Contains(v, ",") {
v = stringo.StrReplaceAll(v, " ", "")
vSlice := strings.Split(v, ",")
for _, v2 := range vSlice {
tmpSlice = append(tmpSlice, formatURL(tmp, k, v2, url))
}
} else {
tmp = formatURL(tmp, k, v, url)
}
for k2, _ := range tmpSlice {
tmpSlice[k2] = formatURL(tmpSlice[k2], k, v, url)
}
}
if len(tmpSlice) == 0 {
tmpSlice = append(tmpSlice, tmp)
}
urls = append(urls, formatURLSlice(tmpSlice, env)...)
}
for j := range BgetFilesPool[f].PostShellCmd {
tmp := ""
for k, v := range *env {
kstr := fmt.Sprintf("{{%s}}", k)
if tmp == "" {
tmp = strings.Replace(BgetFilesPool[f].PostShellCmd[j],
kstr, v, 10000)
} else {
tmp = strings.Replace(tmp,
kstr, v, 10000)
}
}
postShellCmd = append(postShellCmd, tmp)
}
versions = BgetFilesPool[f].Versions
}
}
return urls, postShellCmd, versions
}
func genomeVersionConvertor(url string, version string) string {
if stringo.StrDetect(url, "http://hgdownload.cse.ucsc.edu/goldenPath") {
if strings.ToLower(version) == "grch38" {
version = "hg38"
} else if strings.ToLower(version) == "grch37" {
version = "hg19"
} else if strings.ToLower(version) == "grcm38" {
version = "mm10"
} else if strings.ToLower(version) == "grcm37" {
version = "mm9"
}
}
return version
}
// GitHubVersionSpider get all tags and branch
func GitHubVersionSpider(url string) (versions []string) {
accessToken := os.Getenv("GITHUB_TOKEN")
if accessToken == "" {
log.Fatal("Please set GITHUB_TOKEN environment variable.")
}
u, err := neturl.Parse(url)
if err != nil {
log.Fatal(err)
}
if u.Host != "github.com" {
return
}
pathStr := strings.Split(u.Path, "/")
user, repo := pathStr[1], pathStr[2]
ctx := context.Background()
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: accessToken},
)
tc := oauth2.NewClient(ctx, ts)
client := github.NewClient(tc)
opt := &github.ListOptions{}
//version, _, _ := client.Repositories.ListTags(ctx, user, repo, opt)
vers, _, err := client.Repositories.ListTags(ctx, user, repo, opt)
if err != nil {
log.Fatal(err)
}
brchs, _, err := client.Repositories.ListBranches(ctx, user, repo, opt)
if err != nil {
log.Fatal(err)
}
for i := range vers {
versions = append(versions, vers[i].GetName())
}
for i := range brchs {
versions = append(versions, brchs[i].GetName())
}
return versions
}
func init() {
BgetToolsPool = append(BgetToolsPool, toolsLinks...)
BgetFilesPool = append(BgetFilesPool, reffaFiles...)
BgetFilesPool = append(BgetFilesPool, githubRepos...)
BgetFilesPool = append(BgetFilesPool, journalsMeta...)
BgetFilesPool = append(BgetFilesPool, annovarLinks...)
BgetFilesPool = append(BgetFilesPool, otherFiles...)
}
|
[
"\"GITHUB_TOKEN\""
] |
[] |
[
"GITHUB_TOKEN"
] |
[]
|
["GITHUB_TOKEN"]
|
go
| 1 | 0 | |
myweight/__init__.py
|
import os
import sys
from flask import Flask
from flask import render_template
from flask_sqlalchemy import SQLAlchemy
from flask_jwt_extended import JWTManager
WIN = sys.platform.startswith('win')
if WIN: # 如果是 Windows 系统,使用三个斜线
prefix = 'sqlite:///'
else: # 否则使用四个斜线
prefix = 'sqlite:////'
app = Flask(__name__,
static_folder = "./dist/static",
template_folder = "./dist")
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
return render_template("index.html")
app.config['SECRET_KEY'] = os.getenv('SECRET_KEY', 'dev')
app.config['SQLALCHEMY_DATABASE_URI'] = prefix + os.path.join(os.path.dirname(app.root_path), os.getenv('DATABASE_FILE', 'data.db'))
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # 关闭对模型修改的监控
app.config['JWT_SECRET_KEY'] = os.getenv('SECRET_KEY', 'my_jwt_token')
# 在扩展类实例化前加载配置
db = SQLAlchemy(app)
jwt = JWTManager(app)
from myweight import models
from myweight import errors, commands, restfulapi
|
[] |
[] |
[
"SECRET_KEY",
"DATABASE_FILE"
] |
[]
|
["SECRET_KEY", "DATABASE_FILE"]
|
python
| 2 | 0 | |
pkg/jsonstore/etcd/etcd.go
|
// Copyright (c) 2021 Terminus, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package etcd 是 jsonstore 使用 etcd 作为 backend 的实现
package etcd
import (
"context"
"crypto/tls"
"math/rand"
"net/url"
"os"
"strings"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/mvcc/mvccpb"
"github.com/coreos/etcd/pkg/transport"
"github.com/pkg/errors"
"github.com/erda-project/erda/pkg/jsonstore/stm"
"github.com/erda-project/erda/pkg/jsonstore/storetypes"
)
const (
	// The short keepalive timeout and interval have been chosen to aggressively
	// detect a failed etcd server without introducing much overhead.
	keepaliveTime    = 30 * time.Second
	keepaliveTimeout = 10 * time.Second
	// Timeout applied to the context of each individual etcd operation.
	parentCtxTimeout = 10 * time.Second
	// How long the client waits for an etcd response on the first attempt.
	firstTryEtcdRequestTimeout = 5 * time.Second
	// How long the client waits for an etcd response on the retry.
	secondTryEtcdRequestTimeout = 3 * time.Second
	// Capacity of the etcd client connection pool channel.
	etcdClientBufferCapacity = 20
	// Number of clients actually created and kept in the pool.
	etcdClientBufferLen = 3
	// Default buffer size of a WatchChan.
	defaultWatchChanBufferSize = 100
)

// Store is the etcd-backed implementation of storetypes.Store.
type Store struct {
	// pool holds pre-built etcd clients; getClient rotates through it.
	pool chan *clientv3.Client
}

// Option carries the configuration consumed by New.
type Option struct {
	Endpoints []string
}

// OpOption mutates an Option; used as a functional option for New.
type OpOption func(*Option)

// WithEndpoints sets the etcd endpoints the store connects to.
func WithEndpoints(endpoints []string) OpOption {
	return func(opt *Option) {
		opt.Endpoints = endpoints
	}
}
// New creates a etcd store with etcd client, be used to access the etcd cluster.
//
// Endpoints are resolved in order: explicit WithEndpoints option, the
// comma-separated ETCD_ENDPOINTS environment variable, then the local
// default http://127.0.0.1:2379. If the first endpoint is https, client
// TLS material is loaded from fixed paths under /certs. A pool of
// etcdClientBufferLen clients is created up front; each client gets an
// independently shuffled copy of the endpoint list to spread load.
func New(ops ...OpOption) (*Store, error) {
	// apply option
	opt := Option{}
	for _, op := range ops {
		op(&opt)
	}
	if len(opt.Endpoints) == 0 {
		env := os.Getenv("ETCD_ENDPOINTS")
		if env == "" {
			opt.Endpoints = []string{"http://127.0.0.1:2379"}
		} else {
			opt.Endpoints = strings.Split(env, ",")
		}
	}
	var tlsConfig *tls.Config
	if len(opt.Endpoints) < 1 {
		return nil, errors.New("Invalid Etcd endpoints")
	}
	// only the scheme of the first endpoint decides whether TLS is used
	url, err := url.Parse(opt.Endpoints[0])
	if err != nil {
		return nil, errors.Wrap(err, "Invalid Etcd endpoints")
	}
	if url.Scheme == "https" {
		tlsInfo := transport.TLSInfo{
			CertFile:      "/certs/etcd-client.pem",
			KeyFile:       "/certs/etcd-client-key.pem",
			TrustedCAFile: "/certs/etcd-ca.pem",
		}
		tlsConfig, err = tlsInfo.ClientConfig()
		if err != nil {
			return nil, errors.Wrap(err, "Invalid Etcd TLS config")
		}
	}
	pool := make(chan *clientv3.Client, etcdClientBufferCapacity)
	for i := 0; i < etcdClientBufferLen; i++ {
		c, err := clientv3.New(clientv3.Config{
			Endpoints:            shuffle(opt.Endpoints),
			DialKeepAliveTime:    keepaliveTime,
			DialKeepAliveTimeout: keepaliveTimeout,
			TLS:                  tlsConfig,
		})
		if err != nil {
			return nil, err
		}
		pool <- c
	}
	store := &Store{
		pool: pool,
	}
	return store, nil
}
// getClient takes a client from the pool and immediately returns it to
// the back, so successive calls rotate round-robin through the pooled
// clients.
func (s *Store) getClient() *clientv3.Client {
	c := <-s.pool
	s.pool <- c
	return c
}
// GetClient returns one of the etcd clients held internally by the Store.
func (s *Store) GetClient() *clientv3.Client {
	return s.getClient()
}
// retry runs do against a pooled client, waiting up to
// firstTryEtcdRequestTimeout for a result; on timeout it switches to
// another pooled client and tries once more, waiting a further
// secondTryEtcdRequestTimeout. The first goroutine may still complete
// during the second wait, so both attempts' channels are selected on.
//
// nolint
func (s *Store) retry(do func(cli *clientv3.Client) (interface{}, error)) (interface{}, error) {
	cli := s.getClient()
	errC1 := make(chan error, 1)
	respC1 := make(chan interface{}, 1)
	go func() {
		resp, err := do(cli)
		if err != nil {
			errC1 <- err
			return
		}
		respC1 <- resp
	}()
	select {
	case err := <-errC1:
		return nil, err
	case resp := <-respC1:
		return resp, nil
	case <-time.After(firstTryEtcdRequestTimeout):
		// timed out: switch to a different client instance for the retry
		cli = s.getClient()
	}
	// second attempt after timeout; note the goroutine above may still
	// be running, hence errC1/respC1 also appear in the select below
	errC2 := make(chan error, 1)
	respC2 := make(chan interface{}, 1)
	go func() {
		resp, err := do(cli)
		if err != nil {
			errC2 <- err
			return
		}
		respC2 <- resp
	}()
	select {
	case err := <-errC1:
		return nil, err
	case err := <-errC2:
		return nil, err
	case resp := <-respC1:
		return resp, nil
	case resp := <-respC2:
		return resp, nil
	case <-time.After(secondTryEtcdRequestTimeout):
		return nil, errors.New("time out")
	}
}
// Put writes the keyvalue pair into etcd.
//
// After a successful write it polls Get up to twice to confirm the key
// is visible, sleeping 1s between "not found" results. NOTE(review):
// this verification is best-effort — a persistent miss or any other
// read error still makes Put return nil; confirm that is intended.
func (s *Store) Put(pctx context.Context, key, value string) error {
	_, err := s.PutWithOption(pctx, key, value, nil)
	if err != nil {
		return err
	}
	// verify that the key has indeed been stored in etcd
	for i := 0; i < 2; i++ {
		if _, err := s.Get(context.Background(), key); err != nil {
			if strings.Contains(err.Error(), "not found") {
				time.Sleep(1 * time.Second)
				continue
			}
		} else {
			return nil
		}
	}
	return nil
}
// PutWithRev writes the kv pair into etcd and returns the revision
// carried in the put response header.
func (s *Store) PutWithRev(ctx context.Context, key, value string) (int64, error) {
	resp, err := s.PutWithOption(ctx, key, value, nil)
	if err != nil {
		return 0, err
	}
	return resp.(*clientv3.PutResponse).Header.GetRevision(), nil
}
// PutWithOption writes a kv pair with extra etcd options. Every element
// of opts must be a clientv3.OpOption (the interface{} slice keeps etcd
// types out of the store-level API); the returned value is a
// *clientv3.PutResponse.
func (s *Store) PutWithOption(ctx context.Context, key, value string, opts []interface{}) (interface{}, error) {
	etcdopts := []clientv3.OpOption{}
	for _, opt := range opts {
		etcdopts = append(etcdopts, opt.(clientv3.OpOption))
	}
	put := func(cli *clientv3.Client) (interface{}, error) {
		// fresh parentCtxTimeout context per attempt; the caller's ctx
		// is not consulted here
		ctx, cancel := context.WithTimeout(context.Background(), parentCtxTimeout)
		defer cancel()
		return cli.Put(ctx, key, value, etcdopts...)
	}
	result, err := s.retry(put)
	if err != nil {
		return nil, err
	}
	resp, ok := result.(*clientv3.PutResponse)
	if !ok {
		return nil, errors.New("invalid response type")
	}
	return resp, nil
}
// Get returns the value of the key.
// The lookup runs with its own parentCtxTimeout context (pctx is not
// consulted); a missing key is reported as a "not found" error.
func (s *Store) Get(pctx context.Context, key string) (storetypes.KeyValue, error) {
	get := func(cli *clientv3.Client) (interface{}, error) {
		ctx, cancel := context.WithTimeout(context.Background(), parentCtxTimeout)
		defer cancel()
		return cli.Get(ctx, key)
	}
	result, err := s.retry(get)
	if err != nil {
		return storetypes.KeyValue{}, err
	}
	resp, ok := result.(*clientv3.GetResponse)
	if !ok {
		return storetypes.KeyValue{}, errors.New("invalid response type")
	}
	if len(resp.Kvs) != 0 {
		return storetypes.KeyValue{
			Key:         resp.Kvs[0].Key,
			Value:       resp.Kvs[0].Value,
			Revision:    resp.Header.GetRevision(),
			ModRevision: resp.Kvs[0].ModRevision,
		}, nil
	}
	return storetypes.KeyValue{}, errors.Errorf("not found")
}
// PrefixGet returns the all key value with specify prefix.
// Each returned kv carries the shared response-header revision plus
// its own ModRevision.
func (s *Store) PrefixGet(pctx context.Context, prefix string) ([]storetypes.KeyValue, error) {
	resp, err := s.prefixGet(pctx, prefix, false)
	if err != nil {
		return nil, err
	}
	kvs := make([]storetypes.KeyValue, len(resp.Kvs))
	for i, kv := range resp.Kvs {
		kvs[i] = storetypes.KeyValue{
			Key:         kv.Key,
			Value:       kv.Value,
			Revision:    resp.Header.GetRevision(),
			ModRevision: kv.ModRevision,
		}
	}
	return kvs, nil
}
// PrefixGetKey fetches only the keys under prefix; values are not
// transferred (etcd keys-only option).
func (s *Store) PrefixGetKey(pctx context.Context, prefix string) ([]storetypes.Key, error) {
	resp, err := s.prefixGet(pctx, prefix, true)
	if err != nil {
		return nil, err
	}
	ks := make([]storetypes.Key, len(resp.Kvs))
	for i, kv := range resp.Kvs {
		ks[i] = storetypes.Key(kv.Key)
	}
	return ks, nil
}
// prefixGet performs the raw prefixed range request; when keyOnly is
// true the response omits values. The passed context is ignored in
// favour of a fresh parentCtxTimeout context per attempt.
func (s *Store) prefixGet(_ context.Context, prefix string, keyOnly bool) (*clientv3.GetResponse, error) {
	prefixGet := func(cli *clientv3.Client) (interface{}, error) {
		ctx, cancel := context.WithTimeout(context.Background(), parentCtxTimeout)
		defer cancel()
		options := []clientv3.OpOption{clientv3.WithPrefix()}
		if keyOnly {
			options = append(options, clientv3.WithKeysOnly())
		}
		return cli.Get(ctx, prefix, options...)
	}
	result, err := s.retry(prefixGet)
	if err != nil {
		return nil, err
	}
	resp, ok := result.(*clientv3.GetResponse)
	if !ok {
		return nil, errors.New("invalid response type")
	}
	return resp, nil
}
// Remove deletes a keyvalue and returns the deleted kv, or nil when the
// key did not exist (no previous kv present in the delete response).
func (s *Store) Remove(pctx context.Context, key string) (*storetypes.KeyValue, error) {
	remove := func(cli *clientv3.Client) (interface{}, error) {
		ctx, cancel := context.WithTimeout(context.Background(), parentCtxTimeout)
		defer cancel()
		// WithPrevKV so the deleted value can be returned to the caller
		return cli.Delete(ctx, key, clientv3.WithPrevKV())
	}
	result, err := s.retry(remove)
	if err != nil {
		return nil, err
	}
	resp, ok := result.(*clientv3.DeleteResponse)
	if !ok {
		return nil, errors.New("invalid response type")
	}
	if len(resp.PrevKvs) == 1 {
		return &storetypes.KeyValue{
			Key:         resp.PrevKvs[0].Key,
			Value:       resp.PrevKvs[0].Value,
			Revision:    resp.Header.GetRevision(),
			ModRevision: resp.PrevKvs[0].ModRevision,
		}, nil
	}
	return nil, nil
}
// prefixRemove performs the raw prefixed delete, requesting previous
// kvs so callers can report what was removed. The passed context is
// ignored in favour of a fresh parentCtxTimeout context per attempt.
func (s *Store) prefixRemove(_ context.Context, prefix string) (*clientv3.DeleteResponse, error) {
	prefixRemove := func(cli *clientv3.Client) (interface{}, error) {
		ctx, cancel := context.WithTimeout(context.Background(), parentCtxTimeout)
		defer cancel()
		options := []clientv3.OpOption{clientv3.WithPrefix(), clientv3.WithPrevKV()}
		return cli.Delete(ctx, prefix, options...)
	}
	result, err := s.retry(prefixRemove)
	if err != nil {
		return nil, err
	}
	resp, ok := result.(*clientv3.DeleteResponse)
	if !ok {
		return nil, errors.New("invalid response type")
	}
	return resp, nil
}
// PrefixRemove deletes every kv whose key starts with prefix and
// returns the deleted pairs.
//
// For consistency with PrefixGet and Remove, each returned kv also
// carries the response-header revision and the entry's ModRevision
// (previously these fields were left at their zero values).
func (s *Store) PrefixRemove(pctx context.Context, prefix string) ([]storetypes.KeyValue, error) {
	resp, err := s.prefixRemove(pctx, prefix)
	if err != nil {
		return nil, err
	}
	kvs := make([]storetypes.KeyValue, len(resp.PrevKvs))
	for i, kv := range resp.PrevKvs {
		kvs[i] = storetypes.KeyValue{
			Key:         kv.Key,
			Value:       kv.Value,
			Revision:    resp.Header.GetRevision(),
			ModRevision: kv.ModRevision,
		}
	}
	return kvs, nil
}
// Watch watches key for changes; when isPrefix is set every key under
// the prefix is watched, and when filterDelete is set delete events are
// suppressed. Raw etcd events are translated into
// storetypes.WatchResponse values on the returned buffered channel; on
// a watch error the error is forwarded once and the channel is closed.
func (s *Store) Watch(ctx context.Context, key string, isPrefix, filterDelete bool) (storetypes.WatchChan, error) {
	op := []clientv3.OpOption{clientv3.WithPrevKV()}
	if isPrefix {
		op = append(op, clientv3.WithPrefix())
	}
	if filterDelete {
		op = append(op, clientv3.WithFilterDelete())
	}
	ch := s.getClient().Watch(ctx, key, op...)
	watchCh := make(chan storetypes.WatchResponse, defaultWatchChanBufferSize)
	go func() {
		for r := range ch {
			if err := r.Err(); err != nil {
				watchCh <- storetypes.WatchResponse{
					Kvs: []storetypes.KeyValueWithChangeType{},
					Err: err,
				}
				close(watchCh)
				return
			}
			kvs := []storetypes.KeyValueWithChangeType{}
			for _, e := range r.Events {
				t := eventType(e)
				value := e.Kv.Value
				// delete events carry an empty current value; fall back
				// to the previous kv so consumers still see what was removed
				if len(value) == 0 && e.PrevKv != nil {
					value = e.PrevKv.Value
				}
				kvs = append(kvs, storetypes.KeyValueWithChangeType{
					KeyValue: storetypes.KeyValue{
						Key:         e.Kv.Key,
						Value:       value,
						Revision:    r.Header.GetRevision(),
						ModRevision: e.Kv.ModRevision,
					},
					T: t,
				})
			}
			watchCh <- storetypes.WatchResponse{Kvs: kvs, Err: nil}
		}
		close(watchCh)
	}()
	return watchCh, nil
}
// eventType maps a raw etcd watch event onto the store-level change
// type: a DELETE becomes Del; a PUT whose create revision equals its
// mod revision is a freshly created key (Add); any other PUT is an
// in-place modification (Update).
func eventType(e *clientv3.Event) storetypes.ChangeType {
	switch {
	case e.Type == mvccpb.DELETE:
		return storetypes.Del
	case e.Type == mvccpb.PUT && e.Kv.CreateRevision == e.Kv.ModRevision:
		return storetypes.Add
	default:
		return storetypes.Update
	}
}
// NewSTM etcd concurrency.NewSTM + json (un)marshal.
// It runs f inside an etcd software-transactional-memory block with
// JSON (de)serialisation handled by the stm package implementation.
func (s *Store) NewSTM(f func(stm stm.JSONStoreSTMOP) error) error {
	impl := stm.NewJSONStoreWithSTMImpl(s.GetClient())
	return impl.NewSTM(f)
}
func shuffle(s []string) []string {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for n := len(s); n > 0; n-- {
randIndex := r.Intn(n)
s[n-1], s[randIndex] = s[randIndex], s[n-1]
}
return s
}
|
[
"\"ETCD_ENDPOINTS\""
] |
[] |
[
"ETCD_ENDPOINTS"
] |
[]
|
["ETCD_ENDPOINTS"]
|
go
| 1 | 0 | |
filesystem/fat32/fat32_test.go
|
package fat32_test
/*
These tests the exported functions
We want to do full-in tests with files
*/
import (
"bytes"
"crypto/rand"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"testing"
"github.com/cusspvz/go-diskfs/filesystem"
"github.com/cusspvz/go-diskfs/filesystem/fat32"
"github.com/cusspvz/go-diskfs/testhelper"
"github.com/cusspvz/go-diskfs/util"
)
var (
intImage = os.Getenv("TEST_IMAGE")
keepTmpFiles = os.Getenv("KEEPTESTFILES")
)
func getOpenMode(mode int) string {
modes := make([]string, 0, 0)
if mode&os.O_CREATE == os.O_CREATE {
modes = append(modes, "CREATE")
}
if mode&os.O_APPEND == os.O_APPEND {
modes = append(modes, "APPEND")
}
if mode&os.O_RDWR == os.O_RDWR {
modes = append(modes, "RDWR")
} else {
modes = append(modes, "RDONLY")
}
return strings.Join(modes, "|")
}
// tmpFat32 creates a temporary file for FAT32 tests. When fill is true
// it copies in the reference image at fat32.Fat32File; otherwise it
// writes an equal-sized run of zeroes. embedPre / embedPost zero bytes
// are written before / after that region so tests can exercise a
// filesystem embedded at an offset inside a larger file. The caller is
// responsible for removing the returned file.
func tmpFat32(fill bool, embedPre, embedPost int64) (*os.File, error) {
	filename := "fat32_test"
	f, err := ioutil.TempFile("", filename)
	if err != nil {
		return nil, fmt.Errorf("Failed to create tempfile %s :%v", filename, err)
	}
	// either copy the contents of the base file over, or make a file of similar size
	b, err := ioutil.ReadFile(fat32.Fat32File)
	if err != nil {
		return nil, fmt.Errorf("Failed to read contents of %s: %v", fat32.Fat32File, err)
	}
	if embedPre > 0 {
		if err := writeZeroes(f, embedPre, "at beginning of", filename); err != nil {
			return nil, err
		}
	}
	if fill {
		written, err := f.Write(b)
		if err != nil {
			return nil, fmt.Errorf("Failed to write contents of %s to %s: %v", fat32.Fat32File, filename, err)
		}
		if written != len(b) {
			return nil, fmt.Errorf("Wrote only %d bytes of %s to %s instead of %d", written, fat32.Fat32File, filename, len(b))
		}
	} else {
		if err := writeZeroes(f, int64(len(b)), "as content of", filename); err != nil {
			return nil, err
		}
	}
	if embedPost > 0 {
		if err := writeZeroes(f, embedPost, "at end of", filename); err != nil {
			return nil, err
		}
	}
	return f, nil
}

// writeZeroes writes count zero bytes to f, returning an error that
// interpolates the location label (e.g. "at beginning of") so the
// messages match the original per-segment wording exactly.
func writeZeroes(f *os.File, count int64, where, filename string) error {
	empty := make([]byte, count)
	written, err := f.Write(empty)
	if err != nil {
		return fmt.Errorf("Failed to write %d zeroes %s %s: %v", count, where, filename, err)
	}
	if written != len(empty) {
		return fmt.Errorf("Wrote only %d zeroes %s %s instead of %d", written, where, filename, len(empty))
	}
	return nil
}
// TestFat32Type verifies that a FileSystem reports filesystem.TypeFat32
// from its Type method.
func TestFat32Type(t *testing.T) {
	fs := &fat32.FileSystem{}
	if got, want := fs.Type(), filesystem.TypeFat32; got != want {
		t.Errorf("Type() returns %v instead of expected %v", got, want)
	}
}
// TestFat32Mkdir exercises Mkdir on filesystems obtained via both Read
// and Create, each at offset 0 ("entire image") and embedded at an
// offset inside a larger file. Each created directory is independently
// verified by running mtools' mdir inside the docker image named by
// TEST_IMAGE; the test is a no-op when TEST_IMAGE is unset.
func TestFat32Mkdir(t *testing.T) {
	// only do this test if os.Getenv("TEST_IMAGE") contains a real image
	if intImage == "" {
		return
	}
	runTest := func(t *testing.T, post, pre int64, fatFunc func(util.File, int64, int64, int64) (*fat32.FileSystem, error)) {
		// create our directories
		tests := []string{
			"/",
			"/foo",
			"/foo/bar",
			"/a/b/c",
		}
		f, err := tmpFat32(true, pre, post)
		if err != nil {
			t.Fatal(err)
		}
		// KEEPTESTFILES leaves the image on disk (and prints its path)
		// for post-mortem inspection
		if keepTmpFiles == "" {
			defer os.Remove(f.Name())
		} else {
			fmt.Println(f.Name())
		}
		fileInfo, err := f.Stat()
		if err != nil {
			t.Fatalf("Error getting file info for tmpfile %s: %v", f.Name(), err)
		}
		fs, err := fatFunc(f, fileInfo.Size()-pre-post, pre, 512)
		if err != nil {
			t.Fatalf("Error reading fat32 filesystem from %s: %v", f.Name(), err)
		}
		for _, p := range tests {
			err := fs.Mkdir(p)
			switch {
			case err != nil:
				t.Errorf("Mkdir(%s): error %v", p, err)
			default:
				// check that the directory actually was created
				output := new(bytes.Buffer)
				mpath := "/file.img"
				mounts := map[string]string{
					f.Name(): mpath,
				}
				// "@@offset" tells mdir where the embedded fs starts
				err := testhelper.DockerRun(nil, output, false, true, mounts, intImage, "mdir", "-i", fmt.Sprintf("%s@@%d", mpath, pre), fmt.Sprintf("::%s", p))
				if err != nil {
					t.Errorf("Mkdir(%s): Unexpected err: %v", p, err)
					t.Log(output.String())
				}
			}
		}
	}
	t.Run("Read to Mkdir", func(t *testing.T) {
		t.Run("entire image", func(t *testing.T) {
			runTest(t, 0, 0, fat32.Read)
		})
		t.Run("embedded filesystem", func(t *testing.T) {
			runTest(t, 500, 1000, fat32.Read)
		})
	})
	t.Run("Create to Mkdir", func(t *testing.T) {
		// This is to enable Create "fit" into the common testing logic
		createShim := func(file util.File, size int64, start int64, blocksize int64) (*fat32.FileSystem, error) {
			return fat32.Create(file, size, start, blocksize, "")
		}
		t.Run("entire image", func(t *testing.T) {
			runTest(t, 0, 0, createShim)
		})
		t.Run("embedded filesystem", func(t *testing.T) {
			runTest(t, 500, 1000, createShim)
		})
	})
}
// TestFat32Create checks Create's validation of blocksize and requested
// size plus the happy path, for a filesystem spanning the whole file
// and one embedded at an offset. Expected errors are matched by prefix
// only, and filesystem contents are not compared.
func TestFat32Create(t *testing.T) {
	tests := []struct {
		blocksize int64
		filesize  int64
		fs        *fat32.FileSystem
		err       error
	}{
		{500, 6000, nil, fmt.Errorf("blocksize for FAT32 must be")},
		{513, 6000, nil, fmt.Errorf("blocksize for FAT32 must be")},
		{512, fat32.Fat32MaxSize + 100000, nil, fmt.Errorf("requested size is larger than maximum allowed FAT32")},
		{512, 0, nil, fmt.Errorf("requested size is smaller than minimum allowed FAT32")},
		{512, 10000000, &fat32.FileSystem{}, nil},
	}
	runTest := func(t *testing.T, pre, post int64) {
		for _, tt := range tests {
			// get a temporary working file
			f, err := tmpFat32(false, pre, post)
			if err != nil {
				t.Fatal(err)
			}
			defer os.Remove(f.Name())
			// create the filesystem
			fs, err := fat32.Create(f, tt.filesize-pre-post, pre, tt.blocksize, "")
			switch {
			case (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())):
				t.Errorf("Create(%s, %d, %d, %d): mismatched errors\nactual %v\nexpected %v", f.Name(), tt.filesize, 0, tt.blocksize, err, tt.err)
			case (fs == nil && tt.fs != nil) || (fs != nil && tt.fs == nil):
				t.Errorf("Create(%s, %d, %d, %d): mismatched fs\nactual %v\nexpected %v", f.Name(), tt.filesize, 0, tt.blocksize, fs, tt.fs)
			}
			// we do not match the filesystems here, only check functional accuracy
		}
	}
	t.Run("entire image", func(t *testing.T) {
		runTest(t, 0, 0)
	})
	t.Run("embedded filesystem", func(t *testing.T) {
		runTest(t, 500, 1000)
	})
}
// TestFat32Read checks Read's validation (blocksize, size limits), its
// rejection of a corrupted FS Information Sector, and the happy path.
// Corruption is injected by overwriting a single byte at tt.bytechange
// (offset-adjusted) with a random value; bytechange < 0 means no
// corruption. Errors are matched by prefix only.
func TestFat32Read(t *testing.T) {
	// test cases:
	// - invalid blocksize
	// - invalid file size (0 and too big)
	// - invalid FSISBootSector
	// - valid file
	tests := []struct {
		blocksize  int64
		filesize   int64
		bytechange int64
		fs         *fat32.FileSystem
		err        error
	}{
		{500, 6000, -1, nil, fmt.Errorf("blocksize for FAT32 must be")},
		{513, 6000, -1, nil, fmt.Errorf("blocksize for FAT32 must be")},
		{512, fat32.Fat32MaxSize + 10000, -1, nil, fmt.Errorf("requested size is larger than maximum allowed FAT32 size")},
		{512, 0, -1, nil, fmt.Errorf("requested size is smaller than minimum allowed FAT32 size")},
		{512, 10000000, 512, nil, fmt.Errorf("Error reading FileSystem Information Sector")},
		{512, 10000000, -1, &fat32.FileSystem{}, nil},
	}
	runTest := func(t *testing.T, pre, post int64) {
		for _, tt := range tests {
			// get a temporary working file
			f, err := tmpFat32(true, pre, post)
			if err != nil {
				t.Fatal(err)
			}
			defer os.Remove(f.Name())
			// make any changes needed to corrupt it
			corrupted := ""
			if tt.bytechange >= 0 {
				// NOTE(review): errors from rand.Read and f.WriteAt are
				// ignored here; the corruption is assumed to succeed
				b := make([]byte, 1, 1)
				rand.Read(b)
				f.WriteAt(b, tt.bytechange+pre)
				corrupted = fmt.Sprintf("corrupted %d", tt.bytechange+pre)
			}
			// create the filesystem
			fs, err := fat32.Read(f, tt.filesize-pre-post, pre, tt.blocksize)
			switch {
			case (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())):
				t.Errorf("Read(%s, %d, %d, %d) %s: mismatched errors, actual %v expected %v", f.Name(), tt.filesize, 0, tt.blocksize, corrupted, err, tt.err)
			case (fs == nil && tt.fs != nil) || (fs != nil && tt.fs == nil):
				t.Errorf("Read(%s, %d, %d, %d) %s: mismatched fs, actual then expected", f.Name(), tt.filesize, 0, tt.blocksize, corrupted)
				t.Logf("%v", fs)
				t.Logf("%v", tt.fs)
			}
			// we do not match the filesystems here, only check functional accuracy
		}
	}
	t.Run("entire image", func(t *testing.T) {
		runTest(t, 0, 0)
	})
	t.Run("embedded filesystem", func(t *testing.T) {
		runTest(t, 500, 1000)
	})
}
// TestFat32ReadDir reads directories from the reference image and
// checks entry counts, the first entry's name and its directory flag,
// plus the error for a non-existent path — both for the image at
// offset 0 and embedded at an offset. Expected entry counts reflect
// the known contents of the checked-in test image.
func TestFat32ReadDir(t *testing.T) {
	runTest := func(t *testing.T, pre, post int64) {
		// get a temporary working file
		f, err := tmpFat32(true, pre, post)
		if err != nil {
			t.Fatal(err)
		}
		// KEEPTESTFILES leaves the image on disk for inspection
		if keepTmpFiles == "" {
			defer os.Remove(f.Name())
		} else {
			fmt.Println(f.Name())
		}
		tests := []struct {
			path  string
			count int
			name  string
			isDir bool
			err   error
		}{
			// should have 4 entries
			//   foo
			//   TERCER~1
			//   CORTO1.TXT
			//   UNARCH~1.DAT
			{"/", 4, "foo", true, nil},
			// should have 80 entries:
			//  dir0-75 = 76 entries
			//  dir = 1 entry
			//  bar = 1 entry
			//  . = 1 entry
			//  .. = 1 entry
			//  total = 80 entries
			{"/foo", 80, ".", true, nil},
			// 0 entries because the directory does not exist
			{"/a/b/c", 0, "", false, fmt.Errorf("Error reading directory /a/b/c")},
		}
		fileInfo, err := f.Stat()
		if err != nil {
			t.Fatalf("Error getting file info for tmpfile %s: %v", f.Name(), err)
		}
		fs, err := fat32.Read(f, fileInfo.Size()-pre-post, pre, 512)
		if err != nil {
			t.Fatalf("Error reading fat32 filesystem from %s: %v", f.Name(), err)
		}
		for _, tt := range tests {
			output, err := fs.ReadDir(tt.path)
			switch {
			case (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())):
				t.Errorf("ReadDir(%s): mismatched errors, actual: %v , expected: %v", tt.path, err, tt.err)
			case output == nil && tt.err == nil:
				t.Errorf("ReadDir(%s): Unexpected nil output", tt.path)
			case len(output) != tt.count:
				t.Errorf("ReadDir(%s): output gave %d entries instead of expected %d", tt.path, len(output), tt.count)
			case output != nil && len(output) > 0 && output[0].IsDir() != tt.isDir:
				t.Errorf("ReadDir(%s): output gave directory %t expected %t", tt.path, output[0].IsDir(), tt.isDir)
			case output != nil && len(output) > 0 && output[0].Name() != tt.name:
				t.Errorf("ReadDir(%s): output gave name %s expected %s", tt.path, output[0].Name(), tt.name)
			}
		}
	}
	t.Run("entire image", func(t *testing.T) {
		runTest(t, 0, 0)
	})
	t.Run("embedded filesystem", func(t *testing.T) {
		runTest(t, 500, 1000)
	})
}
func TestFat32OpenFile(t *testing.T) {
// opening directories and files for reading
t.Run("Read", func(t *testing.T) {
runTest := func(t *testing.T, pre, post int64) {
// get a temporary working file
f, err := tmpFat32(true, pre, post)
if err != nil {
t.Fatal(err)
}
if keepTmpFiles == "" {
defer os.Remove(f.Name())
} else {
fmt.Println(f.Name())
}
tests := []struct {
path string
mode int
expected string
err error
}{
// error opening a directory
{"/", os.O_RDONLY, "", fmt.Errorf("Cannot open directory %s as file", "/")},
{"/", os.O_RDWR, "", fmt.Errorf("Cannot open directory %s as file", "/")},
{"/", os.O_CREATE, "", fmt.Errorf("Cannot open directory %s as file", "/")},
// open non-existent file for read or read write
{"/abcdefg", os.O_RDONLY, "", fmt.Errorf("Target file %s does not exist", "/abcdefg")},
{"/abcdefg", os.O_RDWR, "", fmt.Errorf("Target file %s does not exist", "/abcdefg")},
{"/abcdefg", os.O_APPEND, "", fmt.Errorf("Target file %s does not exist", "/abcdefg")},
// open file for read or read write and check contents
{"/CORTO1.TXT", os.O_RDONLY, "Tenemos un archivo corto\n", nil},
{"/CORTO1.TXT", os.O_RDWR, "Tenemos un archivo corto\n", nil},
// open file for create that already exists
//{"/CORTO1.TXT", os.O_CREATE | os.O_RDWR, "Tenemos un archivo corto\n", nil},
//{"/CORTO1.TXT", os.O_CREATE | os.O_RDONLY, "Tenemos un archivo corto\n", nil},
}
fileInfo, err := f.Stat()
if err != nil {
t.Fatalf("Error getting file info for tmpfile %s: %v", f.Name(), err)
}
fs, err := fat32.Read(f, fileInfo.Size()-pre-post, pre, 512)
if err != nil {
t.Fatalf("Error reading fat32 filesystem from %s: %v", f.Name(), err)
}
for _, tt := range tests {
header := fmt.Sprintf("OpenFile(%s, %s)", tt.path, getOpenMode(tt.mode))
reader, err := fs.OpenFile(tt.path, tt.mode)
switch {
case (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())):
t.Errorf("%s: mismatched errors, actual: %v , expected: %v", header, err, tt.err)
case reader == nil && (tt.err == nil || tt.expected != ""):
t.Errorf("%s: Unexpected nil output", header)
case reader != nil:
b, err := ioutil.ReadAll(reader)
if err != nil {
t.Errorf("%s: ioutil.ReadAll(reader) unexpected error: %v", header, err)
}
if string(b) != tt.expected {
t.Errorf("%s: mismatched contents, actual then expected", header)
t.Log(string(b))
t.Log(tt.expected)
}
}
}
}
t.Run("entire image", func(t *testing.T) {
runTest(t, 0, 0)
})
t.Run("embedded filesystem", func(t *testing.T) {
runTest(t, 500, 1000)
})
})
// write / create-and-write files and check contents
// *** Write - writes right after last write or read
// *** Read - reads right after last write or read
// ** WriteAt - writes at specific location in file
// ** ReadAt - reads at specific location in file
t.Run("Write", func(t *testing.T) {
runTest := func(t *testing.T, pre, post int64) {
tests := []struct {
path string
mode int
beginning bool // true = "Seek() to beginning of file before writing"; false = "read entire file then write"
contents string
expected string
err error
}{
// - open for create file that does not exist (write contents, check that written)
{"/abcdefg", os.O_RDWR | os.O_CREATE, false, "This is a test", "This is a test", nil},
// - open for readwrite file that does exist (write contents, check that overwritten)
{"/CORTO1.TXT", os.O_RDWR, true, "This is a very long replacement string", "This is a very long replacement string", nil},
{"/CORTO1.TXT", os.O_RDWR, true, "Two", "Twoemos un archivo corto\n", nil},
{"/CORTO1.TXT", os.O_RDWR, false, "This is a very long replacement string", "Tenemos un archivo corto\nThis is a very long replacement string", nil},
{"/CORTO1.TXT", os.O_RDWR, false, "Two", "Tenemos un archivo corto\nTwo", nil},
// - open for append file that does exist (write contents, check that appended)
{"/CORTO1.TXT", os.O_APPEND, false, "More", "", fmt.Errorf("Cannot write to file opened read-only")},
{"/CORTO1.TXT", os.O_APPEND | os.O_RDWR, false, "More", "Tenemos un archivo corto\nMore", nil},
{"/CORTO1.TXT", os.O_APPEND, true, "More", "", fmt.Errorf("Cannot write to file opened read-only")},
{"/CORTO1.TXT", os.O_APPEND | os.O_RDWR, true, "More", "Moremos un archivo corto\n", nil},
}
for _, tt := range tests {
header := fmt.Sprintf("OpenFile(%s, %s, %t)", tt.path, getOpenMode(tt.mode), tt.beginning)
// get a temporary working file
f, err := tmpFat32(true, pre, post)
if err != nil {
t.Fatal(err)
}
if keepTmpFiles == "" {
defer os.Remove(f.Name())
} else {
fmt.Println(f.Name())
}
fileInfo, err := f.Stat()
if err != nil {
t.Fatalf("Error getting file info for tmpfile %s: %v", f.Name(), err)
}
fs, err := fat32.Read(f, fileInfo.Size()-pre-post, pre, 512)
if err != nil {
t.Fatalf("Error reading fat32 filesystem from %s: %v", f.Name(), err)
}
readWriter, err := fs.OpenFile(tt.path, tt.mode)
switch {
case err != nil:
t.Errorf("%s: unexpected error: %v", header, err)
case readWriter == nil:
t.Errorf("%s: Unexpected nil output", header)
default:
// write and then read
bWrite := []byte(tt.contents)
if tt.beginning {
offset, err := readWriter.Seek(0, 0)
if err != nil {
t.Errorf("%s: Seek(0,0) unexpected error: %v", header, err)
continue
}
if offset != 0 {
t.Errorf("%s: Seek(0,0) reset to %d instead of %d", header, offset, 0)
continue
}
} else {
b := make([]byte, 512, 512)
_, err := readWriter.Read(b)
if err != nil && err != io.EOF {
t.Errorf("%s: ioutil.ReadAll(readWriter) unexpected error: %v", header, err)
continue
}
}
written, writeErr := readWriter.Write(bWrite)
readWriter.Seek(0, 0)
bRead, readErr := ioutil.ReadAll(readWriter)
switch {
case readErr != nil:
t.Errorf("%s: ioutil.ReadAll() unexpected error: %v", header, readErr)
case (writeErr == nil && tt.err != nil) || (writeErr != nil && tt.err == nil) || (writeErr != nil && tt.err != nil && !strings.HasPrefix(writeErr.Error(), tt.err.Error())):
t.Errorf("%s: readWriter.Write(b) mismatched errors, actual: %v , expected: %v", header, writeErr, tt.err)
case written != len(bWrite) && tt.err == nil:
t.Errorf("%s: readWriter.Write(b) wrote %d bytes instead of expected %d", header, written, len(bWrite))
case string(bRead) != tt.expected && tt.err == nil:
t.Errorf("%s: mismatched contents, actual then expected", header)
t.Log(string(bRead))
t.Log(tt.expected)
}
}
}
}
t.Run("entire image", func(t *testing.T) {
runTest(t, 0, 0)
})
t.Run("embedded filesystem", func(t *testing.T) {
runTest(t, 500, 1000)
})
})
// write many files to exceed the first cluster, then read back
t.Run("Write Many", func(t *testing.T) {
runTest := func(t *testing.T, pre, post int64) {
f, err := tmpFat32(false, pre, post)
if err != nil {
t.Fatal(err)
}
if keepTmpFiles == "" {
defer os.Remove(f.Name())
} else {
fmt.Println(f.Name())
}
fileInfo, err := f.Stat()
if err != nil {
t.Fatalf("Error getting file info for tmpfile %s: %v", f.Name(), err)
}
fs, err := fat32.Create(f, fileInfo.Size()-pre-post, pre, 512, " NO NAME")
if err != nil {
t.Fatalf("Error reading fat32 filesystem from %s: %v", f.Name(), err)
}
pathPrefix := "/f"
fileCount := 32
for fileNumber := 1; fileNumber <= fileCount; fileNumber++ {
fileName := fmt.Sprintf("%s%d", pathPrefix, fileNumber)
fileContent := []byte(fileName)
readWriter, err := fs.OpenFile(fileName, os.O_RDWR|os.O_CREATE)
switch {
case err != nil:
t.Errorf("write many: unexpected error writing %s: %v", fileName, err)
case readWriter == nil:
t.Errorf("write many: unexpected nil output writing %s", fileName)
default:
readWriter.Seek(0, 0)
written, writeErr := readWriter.Write(fileContent)
readWriter.Seek(0, 0)
readFileContent, readErr := ioutil.ReadAll(readWriter)
switch {
case readErr != nil:
t.Errorf("write many: ioutil.ReadAll() unexpected error on %s: %v", fileName, readErr)
case writeErr != nil:
t.Errorf("write many: readWriter.Write(b) error on %s: %v", fileName, writeErr)
case written != len(fileContent):
t.Errorf("write many: readWriter.Write(b) wrote %d bytes instead of expected %d on %s", written, len(fileContent), fileName)
case string(readFileContent) != fileName:
t.Errorf("write many: mismatched contents on %s, expected: %s, got: %s", fileName, fileName, string(readFileContent))
}
}
}
dir, err := fs.ReadDir("/")
if err != nil {
t.Errorf("write many: error reading /: %v", err)
}
if len(dir) != fileCount+1 {
t.Errorf("write many: entry count mismatch on /: expected %d, got %d -- %v", fileCount, len(dir), dir)
}
}
t.Run("entire image", func(t *testing.T) {
runTest(t, 0, 0)
})
t.Run("embedded filesystem", func(t *testing.T) {
runTest(t, 500, 1000)
})
})
// large file should cross multiple clusters
// out cluster size is 512 bytes, so make it 10+ clusters
t.Run("Large File", func(t *testing.T) {
runTest := func(t *testing.T, pre, post int64) {
// get a temporary working file
f, err := tmpFat32(true, pre, post)
if err != nil {
t.Fatal(err)
}
if keepTmpFiles == "" {
defer os.Remove(f.Name())
} else {
fmt.Println(f.Name())
}
fileInfo, err := f.Stat()
if err != nil {
t.Fatalf("Error getting file info for tmpfile %s: %v", f.Name(), err)
}
fs, err := fat32.Read(f, fileInfo.Size()-pre-post, pre, 512)
if err != nil {
t.Fatalf("Error reading fat32 filesystem from %s: %v", f.Name(), err)
}
path := "/abcdefghi"
mode := os.O_RDWR | os.O_CREATE
// each cluster is 512 bytes, so use 10 clusters and a bit of another
size := 10*512 + 22
bWrite := make([]byte, size, size)
header := fmt.Sprintf("OpenFile(%s, %s)", path, getOpenMode(mode))
readWriter, err := fs.OpenFile(path, mode)
switch {
case err != nil:
t.Errorf("%s: unexpected error: %v", header, err)
case readWriter == nil:
t.Errorf("%s: Unexpected nil output", header)
default:
// write and then read
rand.Read(bWrite)
written, writeErr := readWriter.Write(bWrite)
readWriter.Seek(0, 0)
bRead, readErr := ioutil.ReadAll(readWriter)
switch {
case readErr != nil:
t.Errorf("%s: ioutil.ReadAll() unexpected error: %v", header, readErr)
case writeErr != nil:
t.Errorf("%s: readWriter.Write(b) unexpected error: %v", header, writeErr)
case written != len(bWrite):
t.Errorf("%s: readWriter.Write(b) wrote %d bytes instead of expected %d", header, written, len(bWrite))
case bytes.Compare(bWrite, bRead) != 0:
t.Errorf("%s: mismatched contents, read %d expected %d, actual data then expected:", header, len(bRead), len(bWrite))
//t.Log(bRead)
//t.Log(bWrite)
}
}
}
t.Run("entire image", func(t *testing.T) {
runTest(t, 0, 0)
})
t.Run("embedded filesystem", func(t *testing.T) {
runTest(t, 500, 1000)
})
})
// large file should cross multiple clusters
// out cluster size is 512 bytes, so make it 10+ clusters
t.Run("Truncate File", func(t *testing.T) {
// get a temporary working file
f, err := tmpFat32(true, 0, 0)
if err != nil {
t.Fatal(err)
}
if keepTmpFiles == "" {
defer os.Remove(f.Name())
} else {
fmt.Println(f.Name())
}
fileInfo, err := f.Stat()
if err != nil {
t.Fatalf("Error getting file info for tmpfile %s: %v", f.Name(), err)
}
fs, err := fat32.Read(f, fileInfo.Size(), 0, 512)
if err != nil {
t.Fatalf("Error reading fat32 filesystem from %s: %v", f.Name(), err)
}
path := "/abcdefghi"
mode := os.O_RDWR | os.O_CREATE
// each cluster is 512 bytes, so use 10 clusters and a bit of another
size := 10*512 + 22
bWrite := make([]byte, size, size)
header := fmt.Sprintf("OpenFile(%s, %s)", path, getOpenMode(mode))
readWriter, err := fs.OpenFile(path, mode)
switch {
case err != nil:
t.Fatalf("%s: unexpected error: %v", header, err)
case readWriter == nil:
t.Fatalf("%s: Unexpected nil output", header)
default:
// write and then read
rand.Read(bWrite)
written, writeErr := readWriter.Write(bWrite)
readWriter.Seek(0, 0)
switch {
case writeErr != nil:
t.Fatalf("%s: readWriter.Write(b) unexpected error: %v", header, writeErr)
case written != len(bWrite):
t.Fatalf("%s: readWriter.Write(b) wrote %d bytes instead of expected %d", header, written, len(bWrite))
}
}
// we now have written lots of data to the file. Close it, then reopen it to truncate
if err := readWriter.Close(); err != nil {
t.Fatalf("error closing file: %v", err)
}
// and open to truncate
mode = os.O_RDWR | os.O_TRUNC
readWriter, err = fs.OpenFile(path, mode)
if err != nil {
t.Fatalf("could not reopen file: %v", err)
}
// read the data
bRead, readErr := ioutil.ReadAll(readWriter)
switch {
case readErr != nil:
t.Fatalf("%s: ioutil.ReadAll() unexpected error: %v", header, readErr)
case len(bRead) != 0:
t.Fatalf("%s: readWriter.ReadAll(b) read %d bytes after truncate instead of expected %d", header, len(bRead), 0)
}
})
// large files are often written in multiple passes
t.Run("Streaming Large File", func(t *testing.T) {
runTest := func(t *testing.T, pre, post int64) {
// get a temporary working file
f, err := tmpFat32(true, pre, post)
if err != nil {
t.Fatal(err)
}
if keepTmpFiles == "" {
defer os.Remove(f.Name())
} else {
fmt.Println(f.Name())
}
fileInfo, err := f.Stat()
if err != nil {
t.Fatalf("Error getting file info for tmpfile %s: %v", f.Name(), err)
}
fs, err := fat32.Read(f, fileInfo.Size()-pre-post, pre, 512)
if err != nil {
t.Fatalf("Error reading fat32 filesystem from %s: %v", f.Name(), err)
}
path := "/abcdefghi"
mode := os.O_RDWR | os.O_CREATE
// each cluster is 512 bytes, so use 10 clusters and a bit of another
size := 10*512 + 22
bWrite := make([]byte, size, size)
header := fmt.Sprintf("OpenFile(%s, %s)", path, getOpenMode(mode))
readWriter, err := fs.OpenFile(path, mode)
switch {
case err != nil:
t.Errorf("%s: unexpected error: %v", header, err)
case readWriter == nil:
t.Errorf("%s: Unexpected nil output", header)
default:
// success
}
rand.Read(bWrite)
writeSizes := []int{512, 1024, 256}
low := 0
for i := 0; low < len(bWrite); i++ {
high := low + writeSizes[i%len(writeSizes)]
if high > len(bWrite) {
high = len(bWrite)
}
written, err := readWriter.Write(bWrite[low:high])
if err != nil {
t.Errorf("%s: readWriter.Write(b) unexpected error: %v", header, err)
}
if written != high-low {
t.Errorf("%s: readWriter.Write(b) wrote %d bytes instead of expected %d", header, written, high-low)
}
low = high
}
readWriter.Seek(0, 0)
bRead, readErr := ioutil.ReadAll(readWriter)
switch {
case readErr != nil:
t.Errorf("%s: ioutil.ReadAll() unexpected error: %v", header, readErr)
case bytes.Compare(bWrite, bRead) != 0:
t.Errorf("%s: mismatched contents, read %d expected %d, actual data then expected:", header, len(bRead), len(bWrite))
//t.Log(bRead)
//t.Log(bWrite)
}
}
t.Run("entire image", func(t *testing.T) {
runTest(t, 0, 0)
})
t.Run("embedded filesystem", func(t *testing.T) {
runTest(t, 500, 1000)
})
})
}
|
[
"\"TEST_IMAGE\"",
"\"KEEPTESTFILES\"",
"\"TEST_IMAGE\""
] |
[] |
[
"TEST_IMAGE",
"KEEPTESTFILES"
] |
[]
|
["TEST_IMAGE", "KEEPTESTFILES"]
|
go
| 2 | 0 | |
editfile.py
|
#!/usr/bin/env python
# Utility functions to spawn an editor to edit a file
import os
import stat
import tempfile
# Backport shim: Python 2 has no built-in FileNotFoundError.  Derive from
# Exception -- raising classic (old-style) classes is deprecated, and
# exception classes must inherit from Exception to behave consistently
# with `raise Cls("msg")` / `except Cls as e`.
class FileNotFoundError(Exception):
    """Raised when edit_file() is given no usable filename."""
    pass

# Editor used when the EDITOR environment variable is unset or empty.
default_editor = "vi"
def edit_file(filename):
    """Spawn an editor to edit the specified file.

    The editor is taken from the EDITOR environment variable, falling
    back to ``default_editor`` when EDITOR is unset or empty.  Blocks
    until the editor exits and prints a message on non-zero exit status.

    Raises FileNotFoundError if *filename* is empty/None.
    """
    if not filename:
        raise FileNotFoundError("Could not open file (%s)" % filename)
    # os.getenv() returns None for a missing variable -- it never raises
    # KeyError, so the original try/except was dead code.  A single `or`
    # handles both a missing and an empty EDITOR value.
    editor = os.getenv('EDITOR') or default_editor
    # NOTE(review): filename is interpolated into a shell command without
    # quoting; names containing spaces or shell metacharacters will be
    # misinterpreted (pipes.quote / shlex.quote would fix this).
    command = "%s %s" % (editor, filename)
    status = os.system(command)
    if status != 0:
        if os.WIFEXITED(status):
            # Parenthesized single-argument print behaves identically
            # under Python 2 and Python 3.
            print("exit status %d" % os.WEXITSTATUS(status))
if __name__ == "__main__":
filename = tempfile.mktemp()
print "filename:", filename
fh = open(filename, "w")
print >>fh, "Hello, world!"
fh.close()
mtime_before = os.stat(filename)[stat.ST_MTIME]
print "mtime before:", mtime_before
edit_file(filename)
mtime_after = os.stat(filename)[stat.ST_MTIME]
print "mtime after:", mtime_after
|
[] |
[] |
[
"EDITOR"
] |
[]
|
["EDITOR"]
|
python
| 1 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.