filename (stringlengths 4-198) | content (stringlengths 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (stringclasses 1 value) | constargjson (stringlengths 2-3.9k) | lang (stringclasses 3 values) | constargcount (float64 0-129 ⌀) | variableargcount (float64 0-0 ⌀) | sentence (stringclasses 1 value)
---|---|---|---|---|---|---|---|---|---|---|
tsdb/engine/tsm1/cache_test.go | package tsm1
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"math"
"math/rand"
"os"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
"testing"
"github.com/golang/snappy"
)
func TestCache_NewCache(t *testing.T) {
c := NewCache(100, "")
if c == nil {
t.Fatalf("failed to create new cache")
}
if c.MaxSize() != 100 {
t.Fatalf("new cache max size not correct")
}
if c.Size() != 0 {
t.Fatalf("new cache size not correct")
}
if len(c.Keys()) != 0 {
t.Fatalf("new cache keys not correct: %v", c.Keys())
}
}
func TestCache_CacheWrite(t *testing.T) {
v0 := NewValue(1, 1.0)
v1 := NewValue(2, 2.0)
v2 := NewValue(3, 3.0)
values := Values{v0, v1, v2}
valuesSize := uint64(v0.Size() + v1.Size() + v2.Size())
c := NewCache(3*valuesSize, "")
if err := c.Write([]byte("foo"), values); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if err := c.Write([]byte("bar"), values); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
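// Size() counts the key bytes as well as the values; the two 3-byte keys ("foo", "bar") account for the +6.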
if n := c.Size(); n != 2*valuesSize+6 {
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n)
}
if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
}
func TestCache_CacheWrite_TypeConflict(t *testing.T) {
v0 := NewValue(1, 1.0)
v1 := NewValue(2, int(64))
values := Values{v0, v1}
valuesSize := v0.Size() + v1.Size()
c := NewCache(uint64(2*valuesSize), "")
if err := c.Write([]byte("foo"), values[:1]); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if err := c.Write([]byte("foo"), values[1:]); err == nil {
t.Fatalf("expected field type conflict")
}
if exp, got := uint64(v0.Size())+3, c.Size(); exp != got {
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", exp, got)
}
}
func TestCache_CacheWriteMulti(t *testing.T) {
v0 := NewValue(1, 1.0)
v1 := NewValue(2, 2.0)
v2 := NewValue(3, 3.0)
values := Values{v0, v1, v2}
valuesSize := uint64(v0.Size() + v1.Size() + v2.Size())
c := NewCache(30*valuesSize, "")
if err := c.WriteMulti(map[string][]Value{"foo": values, "bar": values}); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if n := c.Size(); n != 2*valuesSize+6 {
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n)
}
if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
}
// Tests that the cache stats and size are correctly maintained during writes.
func TestCache_WriteMulti_Stats(t *testing.T) {
limit := uint64(1)
c := NewCache(limit, "")
ms := NewTestStore()
c.store = ms
// Not enough room in the cache.
v := NewValue(1, 1.0)
values := map[string][]Value{"foo": []Value{v, v}}
if got, exp := c.WriteMulti(values), ErrCacheMemorySizeLimitExceeded(uint64(v.Size()*2), limit); !reflect.DeepEqual(got, exp) {
t.Fatalf("got %q, expected %q", got, exp)
}
// Fail one of the values in the write.
c = NewCache(50, "")
c.init()
c.store = ms
ms.writef = func(key []byte, v Values) (bool, error) {
if bytes.Equal(key, []byte("foo")) {
return false, errors.New("write failed")
}
return true, nil
}
values = map[string][]Value{"foo": []Value{v, v}, "bar": []Value{v}}
if got, exp := c.WriteMulti(values), errors.New("write failed"); !reflect.DeepEqual(got, exp) {
t.Fatalf("got %v, expected %v", got, exp)
}
// Cache size decreased correctly.
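// Only the successful "bar" write remains: one 16-byte value plus the 3-byte key.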
if got, exp := c.Size(), uint64(16)+3; got != exp {
t.Fatalf("got %v, expected %v", got, exp)
}
// Write stats updated
if got, exp := c.stats.WriteDropped, int64(1); got != exp {
t.Fatalf("got %v, expected %v", got, exp)
} else if got, exp := c.stats.WriteErr, int64(1); got != exp {
t.Fatalf("got %v, expected %v", got, exp)
}
}
func TestCache_CacheWriteMulti_TypeConflict(t *testing.T) {
v0 := NewValue(1, 1.0)
v1 := NewValue(2, 2.0)
v2 := NewValue(3, int64(3))
values := Values{v0, v1, v2}
valuesSize := uint64(v0.Size() + v1.Size() + v2.Size())
c := NewCache(3*valuesSize, "")
if err := c.WriteMulti(map[string][]Value{"foo": values[:1], "bar": values[1:]}); err == nil {
t.Fatalf(" expected field type conflict")
}
if exp, got := uint64(v0.Size())+3, c.Size(); exp != got {
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", exp, got)
}
if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
}
func TestCache_Cache_DeleteRange(t *testing.T) {
v0 := NewValue(1, 1.0)
v1 := NewValue(2, 2.0)
v2 := NewValue(3, 3.0)
values := Values{v0, v1, v2}
valuesSize := uint64(v0.Size() + v1.Size() + v2.Size())
c := NewCache(30*valuesSize, "")
if err := c.WriteMulti(map[string][]Value{"foo": values, "bar": values}); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if n := c.Size(); n != 2*valuesSize+6 {
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n)
}
if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
c.DeleteRange([][]byte{[]byte("bar")}, 2, math.MaxInt64)
if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
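// After the delete, "foo" keeps all three values and "bar" keeps only v0 (timestamp 1); the +6 covers the two keys.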
if got, exp := c.Size(), valuesSize+uint64(v0.Size())+6; exp != got {
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", exp, got)
}
if got, exp := len(c.Values([]byte("bar"))), 1; got != exp {
t.Fatalf("cache values mismatch: got %v, exp %v", got, exp)
}
if got, exp := len(c.Values([]byte("foo"))), 3; got != exp {
t.Fatalf("cache values mismatch: got %v, exp %v", got, exp)
}
}
func TestCache_DeleteRange_NoValues(t *testing.T) {
v0 := NewValue(1, 1.0)
v1 := NewValue(2, 2.0)
v2 := NewValue(3, 3.0)
values := Values{v0, v1, v2}
valuesSize := uint64(v0.Size() + v1.Size() + v2.Size())
c := NewCache(3*valuesSize, "")
if err := c.WriteMulti(map[string][]Value{"foo": values}); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if n := c.Size(); n != valuesSize+3 {
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n)
}
if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
c.DeleteRange([][]byte{[]byte("foo")}, math.MinInt64, math.MaxInt64)
if exp, keys := 0, len(c.Keys()); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
if got, exp := c.Size(), uint64(0); exp != got {
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", exp, got)
}
if got, exp := len(c.Values([]byte("foo"))), 0; got != exp {
t.Fatalf("cache values mismatch: got %v, exp %v", got, exp)
}
}
func TestCache_DeleteRange_NotSorted(t *testing.T) {
v0 := NewValue(1, 1.0)
v1 := NewValue(3, 3.0)
v2 := NewValue(2, 2.0)
values := Values{v0, v1, v2}
valuesSize := uint64(v0.Size() + v1.Size() + v2.Size())
c := NewCache(3*valuesSize, "")
if err := c.WriteMulti(map[string][]Value{"foo": values}); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if n := c.Size(); n != valuesSize+3 {
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n)
}
if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
c.DeleteRange([][]byte{[]byte("foo")}, 1, 3)
if exp, keys := 0, len(c.Keys()); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after delete, exp %v, got %v", exp, keys)
}
if got, exp := c.Size(), uint64(0); exp != got {
t.Fatalf("cache size incorrect after delete, exp %d, got %d", exp, got)
}
if got, exp := len(c.Values([]byte("foo"))), 0; got != exp {
t.Fatalf("cache values mismatch: got %v, exp %v", got, exp)
}
}
func TestCache_Cache_Delete(t *testing.T) {
v0 := NewValue(1, 1.0)
v1 := NewValue(2, 2.0)
v2 := NewValue(3, 3.0)
values := Values{v0, v1, v2}
valuesSize := uint64(v0.Size() + v1.Size() + v2.Size())
c := NewCache(30*valuesSize, "")
if err := c.WriteMulti(map[string][]Value{"foo": values, "bar": values}); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if n := c.Size(); n != 2*valuesSize+6 {
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n)
}
if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
c.Delete([][]byte{[]byte("bar")})
if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
if got, exp := c.Size(), valuesSize+3; exp != got {
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", exp, got)
}
if got, exp := len(c.Values([]byte("bar"))), 0; got != exp {
t.Fatalf("cache values mismatch: got %v, exp %v", got, exp)
}
if got, exp := len(c.Values([]byte("foo"))), 3; got != exp {
t.Fatalf("cache values mismatch: got %v, exp %v", got, exp)
}
}
func TestCache_Cache_Delete_NonExistent(t *testing.T) {
c := NewCache(1024, "")
c.Delete([][]byte{[]byte("bar")})
if got, exp := c.Size(), uint64(0); exp != got {
t.Fatalf("cache size incorrect exp %d, got %d", exp, got)
}
}
// This tests writing two batches to the same series. The first batch
// is sorted. The second batch is also sorted but contains duplicates.
func TestCache_CacheWriteMulti_Duplicates(t *testing.T) {
v0 := NewValue(2, 1.0)
v1 := NewValue(3, 1.0)
values0 := Values{v0, v1}
v3 := NewValue(4, 2.0)
v4 := NewValue(5, 3.0)
v5 := NewValue(5, 3.0)
values1 := Values{v3, v4, v5}
c := NewCache(0, "")
if err := c.WriteMulti(map[string][]Value{"foo": values0}); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if err := c.WriteMulti(map[string][]Value{"foo": values1}); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
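// Writes with duplicate timestamps collapse to a single point per timestamp, so only four values remain.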
expAscValues := Values{v0, v1, v3, v5}
if exp, got := len(expAscValues), len(c.Values([]byte("foo"))); exp != got {
t.Fatalf("value count mismatch: exp: %v, got %v", exp, got)
}
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expAscValues, deduped) {
t.Fatalf("deduped ascending values for foo incorrect, exp: %v, got %v", expAscValues, deduped)
}
}
func TestCache_CacheValues(t *testing.T) {
v0 := NewValue(1, 0.0)
v1 := NewValue(2, 2.0)
v2 := NewValue(3, 3.0)
v3 := NewValue(1, 1.0)
v4 := NewValue(4, 4.0)
c := NewCache(512, "")
if deduped := c.Values([]byte("no such key")); deduped != nil {
t.Fatalf("Values returned for no such key")
}
if err := c.Write([]byte("foo"), Values{v0, v1, v2, v3}); err != nil {
t.Fatalf("failed to write 3 values, key foo to cache: %s", err.Error())
}
if err := c.Write([]byte("foo"), Values{v4}); err != nil {
t.Fatalf("failed to write 1 value, key foo to cache: %s", err.Error())
}
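// Reads deduplicate and sort by timestamp: v3 (t=1) replaces v0, followed by v1, v2, v4.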
expAscValues := Values{v3, v1, v2, v4}
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expAscValues, deduped) {
t.Fatalf("deduped ascending values for foo incorrect, exp: %v, got %v", expAscValues, deduped)
}
}
func TestCache_CacheSnapshot(t *testing.T) {
v0 := NewValue(2, 0.0)
v1 := NewValue(3, 2.0)
v2 := NewValue(4, 3.0)
v3 := NewValue(5, 4.0)
v4 := NewValue(6, 5.0)
v5 := NewValue(1, 5.0)
v6 := NewValue(7, 5.0)
v7 := NewValue(2, 5.0)
c := NewCache(512, "")
if err := c.Write([]byte("foo"), Values{v0, v1, v2, v3}); err != nil {
t.Fatalf("failed to write 3 values, key foo to cache: %s", err.Error())
}
// Grab snapshot, and ensure it's as expected.
snapshot, err := c.Snapshot()
if err != nil {
t.Fatalf("failed to snapshot cache: %v", err)
}
expValues := Values{v0, v1, v2, v3}
if deduped := snapshot.values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) {
t.Fatalf("snapshotted values for foo incorrect, exp: %v, got %v", expValues, deduped)
}
// Ensure cache is still as expected.
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) {
t.Fatalf("post-snapshot values for foo incorrect, exp: %v, got %v", expValues, deduped)
}
// Write a new value to the cache.
if err := c.Write([]byte("foo"), Values{v4}); err != nil {
t.Fatalf("failed to write post-snap value, key foo to cache: %s", err.Error())
}
expValues = Values{v0, v1, v2, v3, v4}
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) {
t.Fatalf("post-snapshot write values for foo incorrect, exp: %v, got %v", expValues, deduped)
}
// Write a new, out-of-order, value to the cache.
if err := c.Write([]byte("foo"), Values{v5}); err != nil {
t.Fatalf("failed to write post-snap value, key foo to cache: %s", err.Error())
}
expValues = Values{v5, v0, v1, v2, v3, v4}
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) {
t.Fatalf("post-snapshot out-of-order write values for foo incorrect, exp: %v, got %v", expValues, deduped)
}
// Clear snapshot, ensuring non-snapshot data untouched.
c.ClearSnapshot(true)
expValues = Values{v5, v4}
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) {
t.Fatalf("post-clear values for foo incorrect, exp: %v, got %v", expValues, deduped)
}
// Create another snapshot
_, err = c.Snapshot()
if err != nil {
t.Fatalf("failed to snapshot cache: %v", err)
}
if err := c.Write([]byte("foo"), Values{v4, v5}); err != nil {
t.Fatalf("failed to write post-snap value, key foo to cache: %s", err.Error())
}
c.ClearSnapshot(true)
_, err = c.Snapshot()
if err != nil {
t.Fatalf("failed to snapshot cache: %v", err)
}
if err := c.Write([]byte("foo"), Values{v6, v7}); err != nil {
t.Fatalf("failed to write post-snap value, key foo to cache: %s", err.Error())
}
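// Values are returned in timestamp order: v5 (t=1), v7 (t=2), v4 (t=6), v6 (t=7).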
expValues = Values{v5, v7, v4, v6}
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) {
t.Fatalf("post-snapshot out-of-order write values for foo incorrect, exp: %v, got %v", expValues, deduped)
}
}
// Tests that Snapshot updates statistics correctly.
func TestCache_Snapshot_Stats(t *testing.T) {
limit := uint64(16)
c := NewCache(limit, "")
values := map[string][]Value{"foo": []Value{NewValue(1, 1.0)}}
if err := c.WriteMulti(values); err != nil {
t.Fatal(err)
}
if got, exp := c.stats.MemSizeBytes, int64(16)+3; got != exp {
t.Fatalf("got %v, expected %v", got, exp)
}
_, err := c.Snapshot()
if err != nil {
t.Fatal(err)
}
// Store size should have been reset.
if got, exp := c.Size(), uint64(16)+3; got != exp {
t.Fatalf("got %v, expected %v", got, exp)
}
// Cached bytes should have been increased.
if got, exp := c.stats.CachedBytes, int64(16)+3; got != exp {
t.Fatalf("got %v, expected %v", got, exp)
}
if got, exp := c.stats.MemSizeBytes, int64(16)+3; got != exp {
t.Fatalf("got %v, expected %v", got, exp)
}
}
func TestCache_CacheEmptySnapshot(t *testing.T) {
c := NewCache(512, "")
// Grab snapshot, and ensure it's as expected.
snapshot, err := c.Snapshot()
if err != nil {
t.Fatalf("failed to snapshot cache: %v", err)
}
if deduped := snapshot.values([]byte("foo")); !reflect.DeepEqual(Values(nil), deduped) {
t.Fatalf("snapshotted values for foo incorrect, exp: %v, got %v", nil, deduped)
}
// Ensure cache is still as expected.
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(Values(nil), deduped) {
t.Fatalf("post-snapshotted values for foo incorrect, exp: %v, got %v", Values(nil), deduped)
}
// Clear snapshot.
c.ClearSnapshot(true)
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(Values(nil), deduped) {
t.Fatalf("post-snapshot-clear values for foo incorrect, exp: %v, got %v", Values(nil), deduped)
}
}
func TestCache_CacheWriteMemoryExceeded(t *testing.T) {
v0 := NewValue(1, 1.0)
v1 := NewValue(2, 2.0)
c := NewCache(uint64(v1.Size()), "")
if err := c.Write([]byte("foo"), Values{v0}); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after writes, exp %v, got %v", exp, keys)
}
if err := c.Write([]byte("bar"), Values{v1}); err == nil || !strings.Contains(err.Error(), "cache-max-memory-size") {
t.Fatalf("wrong error writing key bar to cache: %v", err)
}
// Grab snapshot, write should still fail since we're still using the memory.
_, err := c.Snapshot()
if err != nil {
t.Fatalf("failed to snapshot cache: %v", err)
}
if err := c.Write([]byte("bar"), Values{v1}); err == nil || !strings.Contains(err.Error(), "cache-max-memory-size") {
t.Fatalf("wrong error writing key bar to cache: %v", err)
}
// Clear the snapshot and the write should now succeed.
c.ClearSnapshot(true)
if err := c.Write([]byte("bar"), Values{v1}); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
expAscValues := Values{v1}
if deduped := c.Values([]byte("bar")); !reflect.DeepEqual(expAscValues, deduped) {
t.Fatalf("deduped ascending values for bar incorrect, exp: %v, got %v", expAscValues, deduped)
}
}
func TestCache_Deduplicate_Concurrent(t *testing.T) {
if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" {
t.Skip("Skipping test in short, race, appveyor mode.")
}
values := make(map[string][]Value)
for i := 0; i < 1000; i++ {
for j := 0; j < 100; j++ {
values[fmt.Sprintf("cpu%d", i)] = []Value{NewValue(int64(i+j)+int64(rand.Intn(10)), float64(i))}
}
}
wg := sync.WaitGroup{}
c := NewCache(1000000, "")
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 1000; i++ {
c.WriteMulti(values)
}
}()
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 1000; i++ {
c.Deduplicate()
}
}()
wg.Wait()
}
// Ensure the CacheLoader can correctly load from a single segment, even if it's corrupted.
func TestCacheLoader_LoadSingle(t *testing.T) {
// Create a WAL segment.
dir := mustTempDir()
defer os.RemoveAll(dir)
f := mustTempFile(dir)
w := NewWALSegmentWriter(f)
p1 := NewValue(1, 1.1)
p2 := NewValue(1, int64(1))
p3 := NewValue(1, true)
values := map[string][]Value{
"foo": []Value{p1},
"bar": []Value{p2},
"baz": []Value{p3},
}
entry := &WriteWALEntry{
Values: values,
}
if err := w.Write(mustMarshalEntry(entry)); err != nil {
t.Fatal("write points", err)
}
if err := w.Flush(); err != nil {
t.Fatalf("flush error: %v", err)
}
// Load the cache using the segment.
cache := NewCache(1024, "")
loader := NewCacheLoader([]string{f.Name()})
if err := loader.Load(cache); err != nil {
t.Fatalf("failed to load cache: %s", err.Error())
}
// Check the cache.
if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) {
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1})
}
if values := cache.Values([]byte("bar")); !reflect.DeepEqual(values, Values{p2}) {
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p2})
}
if values := cache.Values([]byte("baz")); !reflect.DeepEqual(values, Values{p3}) {
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p3})
}
// Corrupt the WAL segment.
if _, err := f.Write([]byte{1, 4, 0, 0, 0}); err != nil {
t.Fatalf("corrupt WAL segment: %s", err.Error())
}
// Reload the cache using the segment.
cache = NewCache(1024, "")
loader = NewCacheLoader([]string{f.Name()})
if err := loader.Load(cache); err != nil {
t.Fatalf("failed to load cache: %s", err.Error())
}
// Check the cache.
if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) {
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1})
}
if values := cache.Values([]byte("bar")); !reflect.DeepEqual(values, Values{p2}) {
t.Fatalf("cache key bar not as expected, got %v, exp %v", values, Values{p2})
}
if values := cache.Values([]byte("baz")); !reflect.DeepEqual(values, Values{p3}) {
t.Fatalf("cache key baz not as expected, got %v, exp %v", values, Values{p3})
}
}
// Ensure the CacheLoader can correctly load from two segments, even if one is corrupted.
func TestCacheLoader_LoadDouble(t *testing.T) {
// Create a WAL segment.
dir := mustTempDir()
defer os.RemoveAll(dir)
f1, f2 := mustTempFile(dir), mustTempFile(dir)
w1, w2 := NewWALSegmentWriter(f1), NewWALSegmentWriter(f2)
p1 := NewValue(1, 1.1)
p2 := NewValue(1, int64(1))
p3 := NewValue(1, true)
p4 := NewValue(1, "string")
// Write first and second segment.
segmentWrite := func(w *WALSegmentWriter, values map[string][]Value) {
entry := &WriteWALEntry{
Values: values,
}
if err := w1.Write(mustMarshalEntry(entry)); err != nil {
t.Fatal("write points", err)
}
if err := w1.Flush(); err != nil {
t.Fatalf("flush error: %v", err)
}
}
values := map[string][]Value{
"foo": []Value{p1},
"bar": []Value{p2},
}
segmentWrite(w1, values)
values = map[string][]Value{
"baz": []Value{p3},
"qux": []Value{p4},
}
segmentWrite(w2, values)
// Corrupt the first WAL segment.
if _, err := f1.Write([]byte{1, 4, 0, 0, 0}); err != nil {
t.Fatalf("corrupt WAL segment: %s", err.Error())
}
// Load the cache using the segments.
cache := NewCache(1024, "")
loader := NewCacheLoader([]string{f1.Name(), f2.Name()})
if err := loader.Load(cache); err != nil {
t.Fatalf("failed to load cache: %s", err.Error())
}
// Check the cache.
if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) {
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1})
}
if values := cache.Values([]byte("bar")); !reflect.DeepEqual(values, Values{p2}) {
t.Fatalf("cache key bar not as expected, got %v, exp %v", values, Values{p2})
}
if values := cache.Values([]byte("baz")); !reflect.DeepEqual(values, Values{p3}) {
t.Fatalf("cache key baz not as expected, got %v, exp %v", values, Values{p3})
}
if values := cache.Values([]byte("qux")); !reflect.DeepEqual(values, Values{p4}) {
t.Fatalf("cache key qux not as expected, got %v, exp %v", values, Values{p4})
}
}
// Ensure the CacheLoader can load deleted series
func TestCacheLoader_LoadDeleted(t *testing.T) {
// Create a WAL segment.
dir := mustTempDir()
defer os.RemoveAll(dir)
f := mustTempFile(dir)
w := NewWALSegmentWriter(f)
p1 := NewValue(1, 1.0)
p2 := NewValue(2, 2.0)
p3 := NewValue(3, 3.0)
values := map[string][]Value{
"foo": []Value{p1, p2, p3},
}
entry := &WriteWALEntry{
Values: values,
}
if err := w.Write(mustMarshalEntry(entry)); err != nil {
t.Fatal("write points", err)
}
if err := w.Flush(); err != nil {
t.Fatalf("flush error: %v", err)
}
dentry := &DeleteRangeWALEntry{
Keys: [][]byte{[]byte("foo")},
Min: 2,
Max: 3,
}
if err := w.Write(mustMarshalEntry(dentry)); err != nil {
t.Fatal("write points", err)
}
if err := w.Flush(); err != nil {
t.Fatalf("flush error: %v", err)
}
// Load the cache using the segment.
cache := NewCache(1024, "")
loader := NewCacheLoader([]string{f.Name()})
if err := loader.Load(cache); err != nil {
t.Fatalf("failed to load cache: %s", err.Error())
}
// Check the cache.
if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) {
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1})
}
// Reload the cache using the segment.
cache = NewCache(1024, "")
loader = NewCacheLoader([]string{f.Name()})
if err := loader.Load(cache); err != nil {
t.Fatalf("failed to load cache: %s", err.Error())
}
// Check the cache.
if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) {
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1})
}
}
func TestCache_Split(t *testing.T) {
v0 := NewValue(1, 1.0)
v1 := NewValue(2, 2.0)
v2 := NewValue(3, 3.0)
values := Values{v0, v1, v2}
valuesSize := uint64(v0.Size() + v1.Size() + v2.Size())
c := NewCache(0, "")
if err := c.Write([]byte("foo"), values); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if err := c.Write([]byte("bar"), values); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if err := c.Write([]byte("baz"), values); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if n := c.Size(); n != 3*valuesSize+9 {
t.Fatalf("cache size incorrect after 3 writes, exp %d, got %d", 3*valuesSize*9, n)
}
splits := c.Split(3)
keys := make(map[string]int)
for _, s := range splits {
for _, k := range s.Keys() {
keys[string(k)] = s.Values(k).Size()
}
}
for _, key := range []string{"foo", "bar", "baz"} {
if _, ok := keys[key]; !ok {
t.Fatalf("missing key, exp %s, got %v", key, nil)
}
}
}
func mustTempDir() string {
dir, err := ioutil.TempDir("", "tsm1-test")
if err != nil {
panic(fmt.Sprintf("failed to create temp dir: %v", err))
}
return dir
}
func mustTempFile(dir string) *os.File {
f, err := ioutil.TempFile(dir, "tsm1test")
if err != nil {
panic(fmt.Sprintf("failed to create temp file: %v", err))
}
return f
}
func mustMarshalEntry(entry WALEntry) (WalEntryType, []byte) {
bytes := make([]byte, 1024<<2)
b, err := entry.Encode(bytes)
if err != nil {
panic(fmt.Sprintf("error encoding: %v", err))
}
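// WAL entries are stored snappy-compressed, so compress the encoded bytes before handing them to the segment writer.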
return entry.Type(), snappy.Encode(b, b)
}
// TestStore implements the storer interface and can be used to mock out a
// Cache's storer implementation.
type TestStore struct {
entryf func(key []byte) *entry
writef func(key []byte, values Values) (bool, error)
addf func(key []byte, entry *entry)
removef func(key []byte)
keysf func(sorted bool) [][]byte
applyf func(f func([]byte, *entry) error) error
applySerialf func(f func([]byte, *entry) error) error
resetf func()
splitf func(n int) []storer
countf func() int
}
func NewTestStore() *TestStore { return &TestStore{} }
func (s *TestStore) entry(key []byte) *entry { return s.entryf(key) }
func (s *TestStore) write(key []byte, values Values) (bool, error) { return s.writef(key, values) }
func (s *TestStore) add(key []byte, entry *entry) { s.addf(key, entry) }
func (s *TestStore) remove(key []byte) { s.removef(key) }
func (s *TestStore) keys(sorted bool) [][]byte { return s.keysf(sorted) }
func (s *TestStore) apply(f func([]byte, *entry) error) error { return s.applyf(f) }
func (s *TestStore) applySerial(f func([]byte, *entry) error) error { return s.applySerialf(f) }
func (s *TestStore) reset() { s.resetf() }
func (s *TestStore) split(n int) []storer { return s.splitf(n) }
func (s *TestStore) count() int { return s.countf() }
var fvSize = uint64(NewValue(1, float64(1)).Size())
func BenchmarkCacheFloatEntries(b *testing.B) {
cache := NewCache(uint64(b.N)*fvSize, "")
vals := make([][]Value, b.N)
for i := 0; i < b.N; i++ {
vals[i] = []Value{NewValue(1, float64(i))}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := cache.Write([]byte("test"), vals[i]); err != nil {
b.Fatal("err:", err, "i:", i, "N:", b.N)
}
}
}
type points struct {
key []byte
vals []Value
}
func BenchmarkCacheParallelFloatEntries(b *testing.B) {
c := b.N * runtime.GOMAXPROCS(0)
cache := NewCache(uint64(c)*fvSize*10, "")
vals := make([]points, c)
for i := 0; i < c; i++ {
v := make([]Value, 10)
for j := 0; j < 10; j++ {
v[j] = NewValue(1, float64(i+j))
}
vals[i] = points{key: []byte(fmt.Sprintf("cpu%v", rand.Intn(20))), vals: v}
}
i := int32(-1)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
j := atomic.AddInt32(&i, 1)
v := vals[j]
if err := cache.Write(v.key, v.vals); err != nil {
b.Fatal("err:", err, "j:", j, "N:", b.N)
}
}
})
}
func BenchmarkEntry_add(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
b.StopTimer()
values := make([]Value, 10)
for i := 0; i < 10; i++ {
values[i] = NewValue(int64(i+1), float64(i))
}
otherValues := make([]Value, 10)
for i := 0; i < 10; i++ {
otherValues[i] = NewValue(1, float64(i))
}
entry, err := newEntryValues(values)
if err != nil {
b.Fatal(err)
}
b.StartTimer()
if err := entry.add(otherValues); err != nil {
b.Fatal(err)
}
}
})
}
| ["\"GORACE\"", "\"APPVEYOR\""] | [] | ["APPVEYOR", "GORACE"] | [] | ["APPVEYOR", "GORACE"] | go | 2 | 0 | |
tests/utils.py | import binascii
import logging
import os
import re
import sqlite3
import subprocess
import threading
import time
from bitcoin.rpc import RawProxy as BitcoinProxy
BITCOIND_CONFIG = {
"rpcuser": "rpcuser",
"rpcpassword": "rpcpass",
"rpcport": 18332,
}
LIGHTNINGD_CONFIG = {
"bitcoind-poll": "1s",
"log-level": "debug",
"cltv-delta": 6,
"cltv-final": 5,
"locktime-blocks": 5,
}
DEVELOPER = os.getenv("DEVELOPER", "0") == "1"
def write_config(filename, opts):
with open(filename, 'w') as f:
for k, v in opts.items():
f.write("{}={}\n".format(k, v))
class TailableProc(object):
"""A monitorable process that we can start, stop and tail.
This is the base class for the daemons. It allows us to directly
tail the processes and react to their output.
"""
def __init__(self, outputDir=None):
self.logs = []
self.logs_cond = threading.Condition(threading.RLock())
self.cmd_line = None
self.env = os.environ
self.running = False
self.proc = None
self.outputDir = outputDir
self.logsearch_start = 0
def start(self):
"""Start the underlying process and start monitoring it.
"""
logging.debug("Starting '%s'", " ".join(self.cmd_line))
self.proc = subprocess.Popen(self.cmd_line, stdout=subprocess.PIPE, env=self.env)
self.thread = threading.Thread(target=self.tail)
self.thread.daemon = True
self.thread.start()
self.running = True
def save_log(self):
if self.outputDir:
logpath = os.path.join(self.outputDir, 'log')
with open(logpath, 'w') as f:
for l in self.logs:
f.write(l + '\n')
def stop(self, timeout=10):
self.save_log()
self.proc.terminate()
# Now give it some time to react to the signal
rc = self.proc.wait(timeout)
if rc is None:
self.proc.kill()
self.proc.wait()
self.thread.join()
if self.proc.returncode:
raise ValueError("Process '{}' did not cleanly shutdown: return code {}".format(self.proc.pid, rc))
return self.proc.returncode
def kill(self):
"""Kill process without giving it warning."""
self.proc.kill()
self.proc.wait()
self.thread.join()
def tail(self):
"""Tail the stdout of the process and remember it.
Stores the lines of output produced by the process in
self.logs and signals that a new line was read so that it can
be picked up by consumers.
"""
for line in iter(self.proc.stdout.readline, ''):
if len(line) == 0:
break
with self.logs_cond:
self.logs.append(str(line.rstrip()))
logging.debug("%s: %s", self.prefix, line.decode().rstrip())
self.logs_cond.notifyAll()
self.running = False
self.proc.stdout.close()
def is_in_log(self, regex, start=0):
"""Look for `regex` in the logs."""
ex = re.compile(regex)
for l in self.logs[start:]:
if ex.search(l):
logging.debug("Found '%s' in logs", regex)
return True
logging.debug("Did not find '%s' in logs", regex)
return False
def wait_for_logs(self, regexs, timeout=60):
"""Look for `regexs` in the logs.
We tail the stdout of the process and look for each regex in `regexs`,
starting from last of the previous waited-for log entries (if any). We
fail if the timeout is exceeded or if the underlying process
exits before all the `regexs` were found.
If timeout is None, no time-out is applied.
"""
logging.debug("Waiting for {} in the logs".format(regexs))
exs = [re.compile(r) for r in regexs]
start_time = time.time()
pos = self.logsearch_start
while True:
if timeout is not None and time.time() > start_time + timeout:
print("Time-out: can't find {} in logs".format(exs))
for r in exs:
if self.is_in_log(r):
print("({} was previously in logs!)".format(r))
raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
elif not self.running:
raise ValueError('Process died while waiting for logs')
with self.logs_cond:
if pos >= len(self.logs):
self.logs_cond.wait(1)
continue
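# Check the next log line against the remaining patterns; logsearch_start is advanced so later calls resume here.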
for r in exs.copy():
self.logsearch_start = pos + 1
if r.search(self.logs[pos]):
logging.debug("Found '%s' in logs", r)
exs.remove(r)
break
if len(exs) == 0:
return self.logs[pos]
pos += 1
def wait_for_log(self, regex, timeout=60):
"""Look for `regex` in the logs.
Convenience wrapper for the common case of only seeking a single entry.
"""
return self.wait_for_logs([regex], timeout)
class SimpleBitcoinProxy:
"""Wrapper for BitcoinProxy to reconnect.
Long wait times between calls to the Bitcoin RPC could result in
`bitcoind` closing the connection, so here we just create
throwaway connections. This is easier than to reach into the RPC
library to close, reopen and reauth upon failure.
"""
def __init__(self, btc_conf_file, *args, **kwargs):
self.__btc_conf_file__ = btc_conf_file
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
proxy = BitcoinProxy(btc_conf_file=self.__btc_conf_file__)
f = lambda *args: proxy._call(name, *args)
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
class BitcoinD(TailableProc):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=18332):
TailableProc.__init__(self, bitcoin_dir)
self.bitcoin_dir = bitcoin_dir
self.rpcport = rpcport
self.prefix = 'bitcoind'
regtestdir = os.path.join(bitcoin_dir, 'regtest')
if not os.path.exists(regtestdir):
os.makedirs(regtestdir)
self.cmd_line = [
'bitcoind',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-regtest',
'-logtimestamps',
'-nolisten',
]
BITCOIND_CONFIG['rpcport'] = rpcport
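# Write the same config to both the datadir and the regtest subdir so bitcoind and the RPC proxy agree on credentials and port.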
btc_conf_file = os.path.join(regtestdir, 'bitcoin.conf')
write_config(os.path.join(bitcoin_dir, 'bitcoin.conf'), BITCOIND_CONFIG)
write_config(btc_conf_file, BITCOIND_CONFIG)
self.rpc = SimpleBitcoinProxy(btc_conf_file=btc_conf_file)
def start(self):
TailableProc.start(self)
self.wait_for_log("Done loading", timeout=60)
logging.info("BitcoinD started")
def generate_block(self, numblocks=1):
# Newer Bitcoin Core releases removed the generate() RPC; use generatetoaddress instead.
self.rpc.generatetoaddress(numblocks, self.rpc.getnewaddress())
# lightning-1 => 0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518 aka JUNIORBEAM #0266e4
# lightning-2 => 022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59 aka SILENTARTIST #022d22
# lightning-3 => 035d2b1192dfba134e10e540875d366ebc8bc353d5aa766b80c090b39c3a5d885d aka HOPPINGFIRE #035d2b
# lightning-4 => 0382ce59ebf18be7d84677c2e35f23294b9992ceca95491fcf8a56c6cb2d9de199 aka JUNIORFELONY #0382ce
# lightning-5 => 032cf15d1ad9c4a08d26eab1918f732d8ef8fdc6abb9640bf3db174372c491304e aka SOMBERFIRE #032cf1
class LightningD(TailableProc):
def __init__(self, lightning_dir, bitcoin_dir, port=9735, random_hsm=False):
TailableProc.__init__(self, lightning_dir)
self.lightning_dir = lightning_dir
self.port = port
# Derive a deterministic HSM seed from the final path component, zero-padded and truncated to 32 bytes.
seed = (bytes(re.search('([^/]+)/*$', lightning_dir).group(1), encoding='utf-8') + bytes(32))[:32]
self.cmd_line = [
'lightningd/lightningd',
'--bitcoin-datadir={}'.format(bitcoin_dir),
'--lightning-dir={}'.format(lightning_dir),
'--port={}'.format(port),
'--allow-deprecated-apis=false',
'--override-fee-rates=15000/7500/1000',
'--network=regtest',
'--ignore-fee-limits=false'
]
if DEVELOPER:
self.cmd_line += ['--dev-broadcast-interval=1000']
if not random_hsm:
self.cmd_line += ['--dev-hsm-seed={}'.format(binascii.hexlify(seed).decode('ascii'))]
self.cmd_line += ["--{}={}".format(k, v) for k, v in sorted(LIGHTNINGD_CONFIG.items())]
self.prefix = 'lightningd(%d)' % (port)
if not os.path.exists(lightning_dir):
os.makedirs(lightning_dir)
def start(self):
TailableProc.start(self)
self.wait_for_log("Server started with public key")
logging.info("LightningD started")
def wait(self, timeout=10):
"""Wait for the daemon to stop for up to timeout seconds
Returns the returncode of the process, None if the process did
not return before the timeout triggers.
"""
self.proc.wait(timeout)
return self.proc.returncode
class LightningNode(object):
def __init__(self, daemon, rpc, btc, executor, may_fail=False):
self.rpc = rpc
self.daemon = daemon
self.bitcoin = btc
self.executor = executor
self.may_fail = may_fail
# Use batch if you're doing more than one async.
def connect(self, remote_node, capacity, async=False):
# Collect necessary information
addr = self.rpc.newaddr()['address']
txid = self.bitcoin.rpc.sendtoaddress(addr, capacity)
tx = self.bitcoin.rpc.gettransaction(txid)
start_size = self.bitcoin.rpc.getmempoolinfo()['size']
def call_connect():
try:
self.rpc.connect('127.0.0.1', remote_node.daemon.port, tx['hex'], async=False)
except Exception:
pass
t = threading.Thread(target=call_connect)
t.daemon = True
t.start()
def wait_connected():
# Up to 10 seconds to get tx into mempool.
start_time = time.time()
while self.bitcoin.rpc.getmempoolinfo()['size'] == start_size:
if time.time() > start_time + 10:
raise TimeoutError('No new transactions in mempool')
time.sleep(0.1)
self.bitcoin.generate_block(1)
# fut.result(timeout=5)
# Now wait for confirmation
self.daemon.wait_for_log(" to CHANNELD_NORMAL|STATE_NORMAL")
remote_node.daemon.wait_for_log(" to CHANNELD_NORMAL|STATE_NORMAL")
if async:
return self.executor.submit(wait_connected)
else:
return wait_connected()
def openchannel(self, remote_node, capacity, addrtype="p2sh-segwit"):
addr, wallettxid = self.fundwallet(capacity, addrtype)
fundingtx = self.rpc.fundchannel(remote_node.info['id'], capacity)
self.daemon.wait_for_log('sendrawtx exit 0, gave')
self.bitcoin.generate_block(6)
self.daemon.wait_for_log('to CHANNELD_NORMAL|STATE_NORMAL')
return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': fundingtx}
def fundwallet(self, sats, addrtype="p2sh-segwit"):
addr = self.rpc.newaddr(addrtype)['address']
txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**6)
self.bitcoin.generate_block(1)
self.daemon.wait_for_log('Owning output .* txid {}'.format(txid))
return addr, txid
def getactivechannels(self):
return [c for c in self.rpc.listchannels()['channels'] if c['active']]
def db_query(self, query):
from shutil import copyfile
orig = os.path.join(self.daemon.lightning_dir, "lightningd.sqlite3")
copy = os.path.join(self.daemon.lightning_dir, "lightningd-copy.sqlite3")
copyfile(orig, copy)
db = sqlite3.connect(copy)
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
rows = c.fetchall()
result = []
for row in rows:
result.append(dict(zip(row.keys(), row)))
c.close()
db.close()
return result
# Assumes node is stopped!
def db_manip(self, query):
db = sqlite3.connect(os.path.join(self.daemon.lightning_dir, "lightningd.sqlite3"))
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
db.commit()
c.close()
db.close()
def stop(self, timeout=10):
""" Attempt to do a clean shutdown, but kill if it hangs
"""
# Tell the daemon to stop
try:
# May fail if the process already died
self.rpc.stop()
except Exception:
pass
rc = self.daemon.wait(timeout)
# If it did not stop be more insistent
if rc is None:
rc = self.daemon.stop()
self.daemon.save_log()
if rc != 0 and not self.may_fail:
raise ValueError("Node did not exit cleanly, rc={}".format(rc))
else:
return rc
def restart(self, timeout=10, clean=True):
"""Stop and restart the lightning node.
Keyword arguments:
timeout: number of seconds to wait for a shutdown
clean: whether to issue a `stop` RPC command before killing
"""
if clean:
self.stop(timeout)
else:
self.daemon.stop()
self.daemon.start()
| [] | [] | ["DEVELOPER"] | [] | ["DEVELOPER"] | python | 1 | 0 | |
common/src/java/org/apache/hadoop/hive/conf/HiveConf.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.conf;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.classification.InterfaceAudience;
import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.hive.common.type.TimestampTZUtil;
import org.apache.hadoop.hive.common.ZooKeeperHiveHelper;
import org.apache.hadoop.hive.conf.Validator.PatternSet;
import org.apache.hadoop.hive.conf.Validator.RangeValidator;
import org.apache.hadoop.hive.conf.Validator.RatioValidator;
import org.apache.hadoop.hive.conf.Validator.SizeValidator;
import org.apache.hadoop.hive.conf.Validator.StringSet;
import org.apache.hadoop.hive.conf.Validator.TimeValidator;
import org.apache.hadoop.hive.conf.Validator.WritableDirectoryValidator;
import org.apache.hadoop.hive.shims.Utils;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hive.common.HiveCompat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.auth.login.LoginException;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URL;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.time.ZoneId;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Hive Configuration.
*/
public class HiveConf extends Configuration {
protected String hiveJar;
protected Properties origProp;
protected String auxJars;
private static final Logger LOG = LoggerFactory.getLogger(HiveConf.class);
private static boolean loadMetastoreConfig = false;
private static boolean loadHiveServer2Config = false;
private static URL hiveDefaultURL = null;
private static URL hiveSiteURL = null;
private static URL hivemetastoreSiteUrl = null;
private static URL hiveServer2SiteUrl = null;
private static byte[] confVarByteArray = null;
private static final Map<String, ConfVars> vars = new HashMap<String, ConfVars>();
private static final Map<String, ConfVars> metaConfs = new HashMap<String, ConfVars>();
private final List<String> restrictList = new ArrayList<String>();
private final Set<String> hiddenSet = new HashSet<String>();
private final List<String> rscList = new ArrayList<>();
private Pattern modWhiteListPattern = null;
private volatile boolean isSparkConfigUpdated = false;
private static final int LOG_PREFIX_LENGTH = 64;
public boolean getSparkConfigUpdated() {
return isSparkConfigUpdated;
}
public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
this.isSparkConfigUpdated = isSparkConfigUpdated;
}
public interface EncoderDecoder<K, V> {
V encode(K key);
K decode(V value);
}
public static class URLEncoderDecoder implements EncoderDecoder<String, String> {
@Override
public String encode(String key) {
try {
return URLEncoder.encode(key, StandardCharsets.UTF_8.name());
} catch (UnsupportedEncodingException e) {
return key;
}
}
@Override
public String decode(String value) {
try {
return URLDecoder.decode(value, StandardCharsets.UTF_8.name());
} catch (UnsupportedEncodingException e) {
return value;
}
}
}
public static class EncoderDecoderFactory {
public static final URLEncoderDecoder URL_ENCODER_DECODER = new URLEncoderDecoder();
}
static {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
if (classLoader == null) {
classLoader = HiveConf.class.getClassLoader();
}
hiveDefaultURL = classLoader.getResource("hive-default.xml");
// Look for hive-site.xml on the CLASSPATH and log its location if found.
hiveSiteURL = findConfigFile(classLoader, "hive-site.xml", true);
hivemetastoreSiteUrl = findConfigFile(classLoader, "hivemetastore-site.xml", false);
hiveServer2SiteUrl = findConfigFile(classLoader, "hiveserver2-site.xml", false);
for (ConfVars confVar : ConfVars.values()) {
vars.put(confVar.varname, confVar);
}
Set<String> llapDaemonConfVarsSetLocal = new LinkedHashSet<>();
populateLlapDaemonVarsSet(llapDaemonConfVarsSetLocal);
llapDaemonVarsSet = Collections.unmodifiableSet(llapDaemonConfVarsSetLocal);
}
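/**
 * Looks for the named config file on the classpath, then under $HIVE_CONF_DIR,
 * then $HIVE_HOME/conf, and finally in a conf directory relative to the jar location.
 */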
private static URL findConfigFile(ClassLoader classLoader, String name, boolean doLog) {
URL result = classLoader.getResource(name);
if (result == null) {
String confPath = System.getenv("HIVE_CONF_DIR");
result = checkConfigFile(new File(confPath, name));
if (result == null) {
String homePath = System.getenv("HIVE_HOME");
String nameInConf = "conf" + File.separator + name;
result = checkConfigFile(new File(homePath, nameInConf));
if (result == null) {
URI jarUri = null;
try {
// Handle both file:// and jar:<url>!{entry} in the case of shaded hive libs
URL sourceUrl = HiveConf.class.getProtectionDomain().getCodeSource().getLocation();
jarUri = sourceUrl.getProtocol().equalsIgnoreCase("jar") ? new URI(sourceUrl.getPath()) : sourceUrl.toURI();
} catch (Throwable e) {
LOG.info("Cannot get jar URI", e);
System.err.println("Cannot get jar URI: " + e.getMessage());
}
// From the jar file, the parent is /lib folder
File parent = new File(jarUri).getParentFile();
if (parent != null) {
result = checkConfigFile(new File(parent.getParentFile(), nameInConf));
}
}
}
}
if (doLog) {
LOG.info("Found configuration file {}", result);
}
return result;
}
private static URL checkConfigFile(File f) {
try {
return (f.exists() && f.isFile()) ? f.toURI().toURL() : null;
} catch (Throwable e) {
LOG.info("Error looking for config {}", f, e);
System.err.println("Error looking for config " + f + ": " + e.getMessage());
return null;
}
}
@InterfaceAudience.Private
public static final String PREFIX_LLAP = "llap.";
@InterfaceAudience.Private
public static final String PREFIX_HIVE_LLAP = "hive.llap.";
/**
* Metastore related options that the db is initialized against. When a conf
* var in this is list is changed, the metastore instance for the CLI will
* be recreated so that the change will take effect.
*/
public static final HiveConf.ConfVars[] metaVars = {
HiveConf.ConfVars.METASTOREWAREHOUSE,
HiveConf.ConfVars.REPLDIR,
HiveConf.ConfVars.METASTOREURIS,
HiveConf.ConfVars.METASTORESELECTION,
HiveConf.ConfVars.METASTORE_SERVER_PORT,
HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES,
HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES,
HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY,
HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT,
HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME,
HiveConf.ConfVars.METASTOREPWD,
HiveConf.ConfVars.METASTORECONNECTURLHOOK,
HiveConf.ConfVars.METASTORECONNECTURLKEY,
HiveConf.ConfVars.METASTORESERVERMINTHREADS,
HiveConf.ConfVars.METASTORESERVERMAXTHREADS,
HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE,
HiveConf.ConfVars.METASTORE_INT_ORIGINAL,
HiveConf.ConfVars.METASTORE_INT_ARCHIVED,
HiveConf.ConfVars.METASTORE_INT_EXTRACTED,
HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE,
HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL,
HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL,
HiveConf.ConfVars.METASTORE_TOKEN_SIGNATURE,
HiveConf.ConfVars.METASTORE_CACHE_PINOBJTYPES,
HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE,
HiveConf.ConfVars.METASTORE_VALIDATE_TABLES,
HiveConf.ConfVars.METASTORE_DATANUCLEUS_INIT_COL_INFO,
HiveConf.ConfVars.METASTORE_VALIDATE_COLUMNS,
HiveConf.ConfVars.METASTORE_VALIDATE_CONSTRAINTS,
HiveConf.ConfVars.METASTORE_STORE_MANAGER_TYPE,
HiveConf.ConfVars.METASTORE_AUTO_CREATE_ALL,
HiveConf.ConfVars.METASTORE_TRANSACTION_ISOLATION,
HiveConf.ConfVars.METASTORE_CACHE_LEVEL2,
HiveConf.ConfVars.METASTORE_CACHE_LEVEL2_TYPE,
HiveConf.ConfVars.METASTORE_IDENTIFIER_FACTORY,
HiveConf.ConfVars.METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK,
HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS,
HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX,
HiveConf.ConfVars.METASTORE_EVENT_LISTENERS,
HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS,
HiveConf.ConfVars.METASTORE_EVENT_CLEAN_FREQ,
HiveConf.ConfVars.METASTORE_EVENT_EXPIRY_DURATION,
HiveConf.ConfVars.METASTORE_EVENT_MESSAGE_FACTORY,
HiveConf.ConfVars.METASTORE_FILTER_HOOK,
HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL,
HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS,
HiveConf.ConfVars.METASTORE_PART_INHERIT_TBL_PROPS,
HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX,
HiveConf.ConfVars.METASTORE_INIT_HOOKS,
HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS,
HiveConf.ConfVars.HMSHANDLERATTEMPTS,
HiveConf.ConfVars.HMSHANDLERINTERVAL,
HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF,
HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN,
HiveConf.ConfVars.METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS,
HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES,
HiveConf.ConfVars.USERS_IN_ADMIN_ROLE,
HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
HiveConf.ConfVars.HIVE_TXN_MANAGER,
HiveConf.ConfVars.HIVE_TXN_TIMEOUT,
HiveConf.ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES,
HiveConf.ConfVars.HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE,
HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH,
HiveConf.ConfVars.HIVE_TXN_RETRYABLE_SQLEX_REGEX,
HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_TUNER,
HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_ENABLED,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_SIZE,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_FPP,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_TTL,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL,
HiveConf.ConfVars.METASTORE_FASTPATH,
HiveConf.ConfVars.METASTORE_HBASE_FILE_METADATA_THREADS,
HiveConf.ConfVars.METASTORE_WM_DEFAULT_POOL_SIZE
};
/**
* User configurable Metastore vars
*/
public static final HiveConf.ConfVars[] metaConfVars = {
HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL,
HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL_DDL,
HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT,
HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN,
HiveConf.ConfVars.METASTORE_CAPABILITY_CHECK,
HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES
};
static {
for (ConfVars confVar : metaConfVars) {
metaConfs.put(confVar.varname, confVar);
}
}
public static final String HIVE_LLAP_DAEMON_SERVICE_PRINCIPAL_NAME = "hive.llap.daemon.service.principal";
public static final String HIVE_SERVER2_AUTHENTICATION_LDAP_USERMEMBERSHIPKEY_NAME =
"hive.server2.authentication.ldap.userMembershipKey";
public static final String HIVE_SPARK_SUBMIT_CLIENT = "spark-submit";
public static final String HIVE_SPARK_LAUNCHER_CLIENT = "spark-launcher";
/**
* dbVars are the parameters can be set per database. If these
* parameters are set as a database property, when switching to that
* database, the HiveConf variable will be changed. The change of these
* parameters will effectively change the DFS and MapReduce clusters
* for different databases.
*/
public static final HiveConf.ConfVars[] dbVars = {
HiveConf.ConfVars.HADOOPBIN,
HiveConf.ConfVars.METASTOREWAREHOUSE,
HiveConf.ConfVars.SCRATCHDIR
};
/**
* encoded parameter values are ;-) encoded. Use decoder to get ;-) decoded string
*/
public static final HiveConf.ConfVars[] ENCODED_CONF = {
ConfVars.HIVEQUERYSTRING
};
/**
* Variables used by LLAP daemons.
* TODO: Eventually auto-populate this based on prefixes. The conf variables
* will need to be renamed for this.
*/
private static final Set<String> llapDaemonVarsSet;
private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal) {
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_ENABLED.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_MEMORY_MODE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_MIN_ALLOC.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_MAX_ALLOC.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_ARENA_COUNT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_DIRECT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_USE_LRFU.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_LRFU_LAMBDA.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_CACHE_ALLOW_SYNTHETIC_FILEID.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_USE_FILEID_PATH.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_DECODING_METRICS_PERCENTILE_INTERVALS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ORC_ENABLE_TIME_COUNTERS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_THREADPOOL_SIZE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_KERBEROS_PRINCIPAL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_KERBEROS_KEYTAB_FILE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ZKSM_ZK_CONNECTION_STRING.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_SECURITY_ACL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_MANAGEMENT_ACL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_SECURITY_ACL_DENY.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_MANAGEMENT_ACL_DENY.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DELEGATION_TOKEN_LIFETIME.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_MANAGEMENT_RPC_PORT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_WEB_AUTO_AUTH.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_RPC_NUM_HANDLERS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WORK_DIRS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_YARN_SHUFFLE_PORT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_YARN_CONTAINER_MB.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_SHUFFLE_DIR_WATCHER_ENABLED.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_LIVENESS_HEARTBEAT_INTERVAL_MS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_LIVENESS_CONNECTION_TIMEOUT_MS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_LIVENESS_CONNECTION_SLEEP_BETWEEN_RETRIES_MS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_NUM_EXECUTORS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_RPC_PORT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_XMX_HEADROOM.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_VCPUS_PER_INSTANCE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_NUM_FILE_CLEANER_THREADS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_FILE_CLEANUP_DELAY_SECONDS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_SERVICE_REFRESH_INTERVAL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOW_PERMANENT_FNS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_DOWNLOAD_PERMANENT_FNS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_TASK_SCHEDULER_WAIT_QUEUE_SIZE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_TASK_SCHEDULER_ENABLE_PREEMPTION.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_TASK_PREEMPTION_METRICS_INTERVALS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WEB_PORT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WEB_SSL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_CONTAINER_ID.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_VALIDATE_ACLS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_LOGGER.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_USE_FQDN.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_OUTPUT_FORMAT_ARROW.varname);
}
/**
* Get a set containing configuration parameter names used by LLAP Server instances
* @return an unmodifiable set containing llap ConfVars
*/
public static final Set<String> getLlapDaemonConfVars() {
return llapDaemonVarsSet;
}
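// Illustrative usage sketch (hypothetical caller code, not part of this class). The set returned by
// getLlapDaemonConfVars() contains variable *names*; values are looked up on a HiveConf instance
// (HiveConf extends Hadoop's Configuration) in the usual way:
//
//   HiveConf conf = new HiveConf();
//   for (String name : HiveConf.getLlapDaemonConfVars()) {
//     String value = conf.get(name);   // null when the variable has no value set
//     System.out.println(name + " = " + value);
//   }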
/**
* ConfVars.
*
* These are the default configuration properties for Hive. Each HiveConf
* object is initialized as follows:
*
* 1) Hadoop configuration properties are applied.
* 2) ConfVar properties with non-null values are overlayed.
* 3) hive-site.xml properties are overlayed.
* 4) System Properties and Manual Overrides are overlayed.
*
* WARNING: think twice before adding any Hadoop configuration properties
* with non-null values to this list as they will override any values defined
* in the underlying Hadoop configuration.
*/
public static enum ConfVars {
// QL execution stuff
SCRIPTWRAPPER("hive.exec.script.wrapper", null, ""),
PLAN("hive.exec.plan", "", ""),
STAGINGDIR("hive.exec.stagingdir", ".hive-staging",
"Directory name that will be created inside table locations in order to support HDFS encryption. " +
"This replaces ${hive.exec.scratchdir} for query results with the exception of read-only tables. " +
"In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans."),
SCRATCHDIR("hive.exec.scratchdir", "/tmp/hive",
"HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. " +
"For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/<username> is created, " +
"with ${hive.scratch.dir.permission}."),
REPLDIR("hive.repl.rootdir","/user/${system:user.name}/repl/",
"HDFS root dir for all replication dumps."),
REPLCMENABLED("hive.repl.cm.enabled", false,
"Turn on ChangeManager, so delete files will go to cmrootdir."),
REPLCMDIR("hive.repl.cmrootdir","/user/${system:user.name}/cmroot/",
"Root dir for ChangeManager, used for deleted files."),
REPLCMRETIAN("hive.repl.cm.retain","24h",
new TimeValidator(TimeUnit.HOURS),
"Time to retain removed files in cmrootdir."),
REPLCMINTERVAL("hive.repl.cm.interval","3600s",
new TimeValidator(TimeUnit.SECONDS),
"Interval for cmroot cleanup thread."),
REPL_FUNCTIONS_ROOT_DIR("hive.repl.replica.functions.root.dir","/user/${system:user.name}/repl/functions/",
"Root directory on the replica warehouse where the repl sub-system will store jars from the primary warehouse"),
REPL_APPROX_MAX_LOAD_TASKS("hive.repl.approx.max.load.tasks", 10000,
"Provide an approximation of the maximum number of tasks that should be executed before \n"
+ "dynamically generating the next set of tasks. The number is approximate as Hive \n"
+ "will stop at a slightly higher number, the reason being some events might lead to a \n"
+ "task increment that would cross the specified limit."),
REPL_PARTITIONS_DUMP_PARALLELISM("hive.repl.partitions.dump.parallelism",100,
"Number of threads that will be used to dump partition data information during repl dump."),
REPL_DUMPDIR_CLEAN_FREQ("hive.repl.dumpdir.clean.freq", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Frequency at which timer task runs to purge expired dump dirs."),
REPL_DUMPDIR_TTL("hive.repl.dumpdir.ttl", "7d",
new TimeValidator(TimeUnit.DAYS),
"TTL of dump dirs before cleanup."),
REPL_DUMP_METADATA_ONLY("hive.repl.dump.metadata.only", false,
"Indicates whether the replication dump contains only metadata information or data + metadata. \n"
+ "This config makes hive.repl.include.external.tables config ineffective."),
REPL_BOOTSTRAP_ACID_TABLES("hive.repl.bootstrap.acid.tables", false,
"Indicates if repl dump should bootstrap the information about ACID tables along with \n"
+ "incremental dump for replication. It is recommended to keep this config parameter \n"
+ "as false always and should be set to true only via WITH clause of REPL DUMP \n"
+ "command. It should be set to true only once for incremental repl dump on \n"
+ "each of the existing replication policies after enabling acid tables replication."),
REPL_BOOTSTRAP_DUMP_OPEN_TXN_TIMEOUT("hive.repl.bootstrap.dump.open.txn.timeout", "1h",
new TimeValidator(TimeUnit.HOURS),
"Indicates the timeout for all transactions which are opened before triggering bootstrap REPL DUMP. "
+ "If these open transactions are not closed within the timeout value, then REPL DUMP will "
+ "forcefully abort those transactions and continue with bootstrap dump."),
//https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/TransparentEncryption.html#Running_as_the_superuser
REPL_ADD_RAW_RESERVED_NAMESPACE("hive.repl.add.raw.reserved.namespace", false,
"For TDE with same encryption keys on source and target, allow Distcp super user to access \n"
+ "the raw bytes from filesystem without decrypting on source and then encrypting on target."),
REPL_INCLUDE_EXTERNAL_TABLES("hive.repl.include.external.tables", false,
"Indicates if repl dump should include information about external tables. It should be \n"
+ "used in conjunction with 'hive.repl.dump.metadata.only' set to false. If 'hive.repl.dump.metadata.only' \n"
+ " is set to true then this config parameter has no effect as external table metadata is flushed \n"
+ " always by default. If this config parameter is enabled on an on-going replication policy which is in\n"
+ " the incremental phase, then 'hive.repl.bootstrap.external.tables' needs to be set to true for the first \n"
+ " repl dump to bootstrap all external tables."),
REPL_BOOTSTRAP_EXTERNAL_TABLES("hive.repl.bootstrap.external.tables", false,
"Indicates if repl dump should bootstrap the information about external tables along with incremental \n"
+ "dump for replication. It is recommended to keep this config parameter as false always and should be \n"
+ "set to true only via WITH clause of REPL DUMP command. It should be used in conjunction with \n"
+ "'hive.repl.include.external.tables' when set to true. If 'hive.repl.include.external.tables' is \n"
+ "set to false, then this config parameter has no effect. It should be set to true only once for \n"
+ "incremental repl dump on each existing replication policy after enabling external tables replication."),
REPL_ENABLE_MOVE_OPTIMIZATION("hive.repl.enable.move.optimization", false,
"If it is set to true, REPL LOAD copies data files directly to the target table/partition location \n"
+ "instead of copying to a staging directory first and then moving to the target location. This optimizes \n"
+ " the REPL LOAD on object data stores such as S3 or WASB where creating a directory and moving \n"
+ " files are costly operations. In file systems like HDFS where the move operation is atomic, this \n"
+ " optimization should not be enabled as it may lead to inconsistent data reads for non-acid tables."),
REPL_MOVE_OPTIMIZED_FILE_SCHEMES("hive.repl.move.optimized.scheme", "s3a, wasb",
"Comma separated list of schemes for which move optimization will be enabled during repl load. \n"
+ "This configuration overrides the value set using REPL_ENABLE_MOVE_OPTIMIZATION for the given schemes. \n"
+ " Schemes of file systems which do not support atomic move (rename) can be specified here to \n "
+ " speed up the repl load operation. In file systems like HDFS where the move operation is atomic, this \n"
+ " optimization should not be enabled as it may lead to inconsistent data reads for non-acid tables."),
REPL_EXTERNAL_TABLE_BASE_DIR("hive.repl.replica.external.table.base.dir", "/",
"This is the base directory on the target/replica warehouse under which data for "
+ "external tables is stored. This is a relative base path and hence is prefixed to the source "
+ "external table path on the target cluster."),
LOCALSCRATCHDIR("hive.exec.local.scratchdir",
"${system:java.io.tmpdir}" + File.separator + "${system:user.name}",
"Local scratch space for Hive jobs"),
DOWNLOADED_RESOURCES_DIR("hive.downloaded.resources.dir",
"${system:java.io.tmpdir}" + File.separator + "${hive.session.id}_resources",
"Temporary local directory for added resources in the remote file system."),
SCRATCHDIRPERMISSION("hive.scratch.dir.permission", "700",
"The permission for the user specific scratch directories that get created."),
SUBMITVIACHILD("hive.exec.submitviachild", false, ""),
SUBMITLOCALTASKVIACHILD("hive.exec.submit.local.task.via.child", true,
"Determines whether local tasks (typically mapjoin hashtable generation phase) run in \n" +
"a separate JVM (true recommended) or not. \n" +
"Avoids the overhead of spawning new JVM, but can lead to out-of-memory issues."),
SCRIPTERRORLIMIT("hive.exec.script.maxerrsize", 100000,
"Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). \n" +
"This prevents runaway scripts from filling log partitions to capacity"),
ALLOWPARTIALCONSUMP("hive.exec.script.allow.partial.consumption", false,
"When enabled, this option allows a user script to exit successfully without consuming \n" +
"all the data from the standard input."),
STREAMREPORTERPERFIX("stream.stderr.reporter.prefix", "reporter:",
"Streaming jobs that log to standard error with this prefix can log counter or status information."),
STREAMREPORTERENABLED("stream.stderr.reporter.enabled", true,
"Enable consumption of status and counter messages for streaming jobs."),
COMPRESSRESULT("hive.exec.compress.output", false,
"This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) are compressed. \n" +
"The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"),
COMPRESSINTERMEDIATE("hive.exec.compress.intermediate", false,
"This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed. \n" +
"The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"),
COMPRESSINTERMEDIATECODEC("hive.intermediate.compression.codec", "", ""),
COMPRESSINTERMEDIATETYPE("hive.intermediate.compression.type", "", ""),
BYTESPERREDUCER("hive.exec.reducers.bytes.per.reducer", (long) (256 * 1000 * 1000),
"Size per reducer. The default is 256MB, i.e. if the input size is 1GB, it will use 4 reducers."),
MAXREDUCERS("hive.exec.reducers.max", 1009,
"Maximum number of reducers that will be used. If the one specified in the configuration parameter mapred.reduce.tasks is\n" +
"negative, Hive will use this one as the max number of reducers when automatically determining the number of reducers."),
PREEXECHOOKS("hive.exec.pre.hooks", "",
"Comma-separated list of pre-execution hooks to be invoked for each statement. \n" +
"A pre-execution hook is specified as the name of a Java class which implements the \n" +
"org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
POSTEXECHOOKS("hive.exec.post.hooks", "",
"Comma-separated list of post-execution hooks to be invoked for each statement. \n" +
"A post-execution hook is specified as the name of a Java class which implements the \n" +
"org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
ONFAILUREHOOKS("hive.exec.failure.hooks", "",
"Comma-separated list of on-failure hooks to be invoked for each statement. \n" +
"An on-failure hook is specified as the name of a Java class which implements the \n" +
"org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
QUERYREDACTORHOOKS("hive.exec.query.redactor.hooks", "",
"Comma-separated list of hooks to be invoked for each query which can \n" +
"transform the query before it's placed in the job.xml file. Must be a Java class which \n" +
"extends from the org.apache.hadoop.hive.ql.hooks.Redactor abstract class."),
CLIENTSTATSPUBLISHERS("hive.client.stats.publishers", "",
"Comma-separated list of statistics publishers to be invoked on counters on each job. \n" +
"A client stats publisher is specified as the name of a Java class which implements the \n" +
"org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface."),
ATSHOOKQUEUECAPACITY("hive.ats.hook.queue.capacity", 64,
"Queue size for the ATS Hook executor. If the number of outstanding submissions \n" +
"to the ATS executor exceeds this amount, the Hive ATS Hook will not try to log queries to ATS."),
EXECPARALLEL("hive.exec.parallel", false, "Whether to execute jobs in parallel"),
EXECPARALLETHREADNUMBER("hive.exec.parallel.thread.number", 8,
"How many jobs at most can be executed in parallel"),
HIVESPECULATIVEEXECREDUCERS("hive.mapred.reduce.tasks.speculative.execution", true,
"Whether speculative execution for reducers should be turned on. "),
HIVECOUNTERSPULLINTERVAL("hive.exec.counters.pull.interval", 1000L,
"The interval with which to poll the JobTracker for the counters of the running job. \n" +
"The smaller it is, the more load there will be on the JobTracker; the higher it is, the less granular the captured counter data will be."),
DYNAMICPARTITIONING("hive.exec.dynamic.partition", true,
"Whether or not to allow dynamic partitions in DML/DDL."),
DYNAMICPARTITIONINGMODE("hive.exec.dynamic.partition.mode", "strict",
"In strict mode, the user must specify at least one static partition\n" +
"in case the user accidentally overwrites all partitions.\n" +
"In nonstrict mode all partitions are allowed to be dynamic."),
DYNAMICPARTITIONMAXPARTS("hive.exec.max.dynamic.partitions", 1000,
"Maximum number of dynamic partitions allowed to be created in total."),
DYNAMICPARTITIONMAXPARTSPERNODE("hive.exec.max.dynamic.partitions.pernode", 100,
"Maximum number of dynamic partitions allowed to be created in each mapper/reducer node."),
MAXCREATEDFILES("hive.exec.max.created.files", 100000L,
"Maximum number of HDFS files created by all mappers/reducers in a MapReduce job."),
DEFAULTPARTITIONNAME("hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__",
"The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped. \n" +
"This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). \n" +
"The user has to be aware that the dynamic partition value should not contain this value to avoid confusion."),
DEFAULT_ZOOKEEPER_PARTITION_NAME("hive.lockmgr.zookeeper.default.partition.name", "__HIVE_DEFAULT_ZOOKEEPER_PARTITION__", ""),
// Whether to show a link to the most failed task + debugging tips
SHOW_JOB_FAIL_DEBUG_INFO("hive.exec.show.job.failure.debug.info", true,
"If a job fails, whether to provide a link in the CLI to the task with the\n" +
"most failures, along with debugging hints if applicable."),
JOB_DEBUG_CAPTURE_STACKTRACES("hive.exec.job.debug.capture.stacktraces", true,
"Whether or not stack traces parsed from the task logs of a sampled failed task \n" +
"for each failed job should be stored in the SessionState"),
JOB_DEBUG_TIMEOUT("hive.exec.job.debug.timeout", 30000, ""),
TASKLOG_DEBUG_TIMEOUT("hive.exec.tasklog.debug.timeout", 20000, ""),
OUTPUT_FILE_EXTENSION("hive.output.file.extension", null,
"String used as a file extension for output files. \n" +
"If not set, defaults to the codec extension for text files (e.g. \".gz\"), or no extension otherwise."),
HIVE_IN_TEST("hive.in.test", false, "internal usage only, true in test mode", true),
HIVE_IN_TEST_SSL("hive.in.ssl.test", false, "internal usage only, true in SSL test mode", true),
// TODO: this needs to be removed; see TestReplicationScenarios* comments.
HIVE_IN_TEST_REPL("hive.in.repl.test", false, "internal usage only, true in replication test mode", true),
HIVE_IN_TEST_IDE("hive.in.ide.test", false, "internal usage only, true if test running in ide",
true),
HIVE_TESTING_SHORT_LOGS("hive.testing.short.logs", false,
"internal usage only, used only in test mode. If set to true, when requesting the " +
"operation logs, the short version (generated by LogDivertAppenderForTest) will be " +
"returned"),
HIVE_TESTING_REMOVE_LOGS("hive.testing.remove.logs", true,
"internal usage only, used only in test mode. If set false, the operation logs, and the " +
"operation log directory will not be removed, so they can be found after the test runs."),
HIVE_TEST_LOAD_HOSTNAMES("hive.test.load.hostnames", "",
"Specify host names for load testing. (e.g., \"host1,host2,host3\"). Leave it empty if no " +
"load generation is needed (e.g., for production)."),
HIVE_TEST_LOAD_INTERVAL("hive.test.load.interval", "10ms", new TimeValidator(TimeUnit.MILLISECONDS),
"The interval length used for load and idle periods in milliseconds."),
HIVE_TEST_LOAD_UTILIZATION("hive.test.load.utilization", 0.2f,
"Specify processor load utilization between 0.0 (not loaded on all threads) and 1.0 " +
"(fully loaded on all threads). By comparing this with a random value, the load generator creates " +
"active loops or idle periods of hive.test.load.interval length"),
HIVE_IN_TEZ_TEST("hive.in.tez.test", false, "internal use only, true when in testing tez",
true),
HIVE_MAPJOIN_TESTING_NO_HASH_TABLE_LOAD("hive.mapjoin.testing.no.hash.table.load", false, "internal use only, true when in testing map join",
true),
HIVE_ADDITIONAL_PARTIAL_MASKS_PATTERN("hive.qtest.additional.partial.mask.pattern", "",
"internal use only, used only in qtests. Provide additional partial mask patterns " +
"for qtests as a ',' separated list"),
HIVE_ADDITIONAL_PARTIAL_MASKS_REPLACEMENT_TEXT("hive.qtest.additional.partial.mask.replacement.text", "",
"internal use only, used only in qtests. Provide additional partial mask replacement " +
"text for qtests as a ',' separated list"),
HIVE_IN_REPL_TEST_FILES_SORTED("hive.in.repl.test.files.sorted", false,
"internal usage only, set to true if the file listing is required in sorted order during bootstrap load", true),
LOCALMODEAUTO("hive.exec.mode.local.auto", false,
"Let Hive determine whether to run in local mode automatically"),
LOCALMODEMAXBYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L,
"When hive.exec.mode.local.auto is true, input bytes should be less than this for local mode."),
LOCALMODEMAXINPUTFILES("hive.exec.mode.local.auto.input.files.max", 4,
"When hive.exec.mode.local.auto is true, the number of tasks should be less than this for local mode."),
DROPIGNORESNONEXISTENT("hive.exec.drop.ignorenonexistent", true,
"Do not report an error if DROP TABLE/VIEW/INDEX/FUNCTION specifies a non-existent table/view/index/function"),
HIVEIGNOREMAPJOINHINT("hive.ignore.mapjoin.hint", true, "Ignore the mapjoin hint"),
HIVE_FILE_MAX_FOOTER("hive.file.max.footer", 100,
"Maximum number of footer lines a user can define for a table file"),
HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES("hive.resultset.use.unique.column.names", true,
"Make column names unique in the result set by qualifying column names with table alias if needed.\n" +
"Table alias will be added to column names for queries of type \"select *\" or \n" +
"if query explicitly uses table alias \"select r1.x..\"."),
HIVE_PROTO_EVENTS_BASE_PATH("hive.hook.proto.base-directory", "",
"Base directory into which the proto event messages are written by HiveProtoLoggingHook."),
HIVE_PROTO_EVENTS_ROLLOVER_CHECK_INTERVAL("hive.hook.proto.rollover-interval", "600s",
new TimeValidator(TimeUnit.SECONDS, 0L, true, 3600 * 24L, true),
"Frequency at which the file rollover check is triggered."),
HIVE_PROTO_EVENTS_CLEAN_FREQ("hive.hook.proto.events.clean.freq", "1d",
new TimeValidator(TimeUnit.DAYS),
"Frequency at which timer task runs to purge expired proto event files."),
HIVE_PROTO_EVENTS_TTL("hive.hook.proto.events.ttl", "7d",
new TimeValidator(TimeUnit.DAYS),
"Time-To-Live (TTL) of proto event files before cleanup."),
HIVE_PROTO_FILE_PER_EVENT("hive.hook.proto.file.per.event", false,
"Whether each proto event has to be written to a separate file. " +
"(Use this for file systems that do not hflush immediately, like S3A.)"),
// Hadoop Configuration Properties
// Properties with null values are ignored and exist only for the purpose of giving us
// a symbolic name to reference in the Hive source code. Properties with non-null
// values will override any values set in the underlying Hadoop configuration.
HADOOPBIN("hadoop.bin.path", findHadoopBinary(), "", true),
YARNBIN("yarn.bin.path", findYarnBinary(), "", true),
MAPREDBIN("mapred.bin.path", findMapRedBinary(), "", true),
HIVE_FS_HAR_IMPL("fs.har.impl", "org.apache.hadoop.hive.shims.HiveHarFileSystem",
"The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop versions less than 0.20"),
MAPREDMAXSPLITSIZE(FileInputFormat.SPLIT_MAXSIZE, 256000000L, "", true),
MAPREDMINSPLITSIZE(FileInputFormat.SPLIT_MINSIZE, 1L, "", true),
MAPREDMINSPLITSIZEPERNODE(CombineFileInputFormat.SPLIT_MINSIZE_PERNODE, 1L, "", true),
MAPREDMINSPLITSIZEPERRACK(CombineFileInputFormat.SPLIT_MINSIZE_PERRACK, 1L, "", true),
// The number of reduce tasks per job. Hadoop sets this value to 1 by default
// By setting this property to -1, Hive will automatically determine the correct
// number of reducers.
HADOOPNUMREDUCERS("mapreduce.job.reduces", -1, "", true),
// Metastore stuff. Be sure to update HiveConf.metaVars when you add something here!
METASTOREDBTYPE("hive.metastore.db.type", "DERBY", new StringSet("DERBY", "ORACLE", "MYSQL", "MSSQL", "POSTGRES"),
"Type of database used by the metastore. Information schema & JDBCStorageHandler depend on it."),
/**
* @deprecated Use MetastoreConf.WAREHOUSE
*/
@Deprecated
METASTOREWAREHOUSE("hive.metastore.warehouse.dir", "/user/hive/warehouse",
"location of default database for the warehouse"),
HIVE_METASTORE_WAREHOUSE_EXTERNAL("hive.metastore.warehouse.external.dir", null,
"Default location for external tables created in the warehouse. " +
"If not set or null, then the normal warehouse location will be used as the default location."),
/**
* @deprecated Use MetastoreConf.THRIFT_URIS
*/
@Deprecated
METASTOREURIS("hive.metastore.uris", "",
"Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore."),
/**
* @deprecated Use MetastoreConf.THRIFT_URI_SELECTION
*/
@Deprecated
METASTORESELECTION("hive.metastore.uri.selection", "RANDOM",
new StringSet("SEQUENTIAL", "RANDOM"),
"Determines the selection mechanism used by metastore client to connect to remote " +
"metastore. SEQUENTIAL implies that the first valid metastore from the URIs specified " +
"as part of hive.metastore.uris will be picked. RANDOM implies that the metastore " +
"will be picked randomly"),
/**
* @deprecated Use MetastoreConf.CAPABILITY_CHECK
*/
@Deprecated
METASTORE_CAPABILITY_CHECK("hive.metastore.client.capability.check", true,
"Whether to check client capabilities for potentially breaking API usage."),
METASTORE_CLIENT_CACHE_ENABLED("hive.metastore.client.cache.enabled", false,
"Whether to enable metastore client cache"),
METASTORE_CLIENT_CACHE_EXPIRY_TIME("hive.metastore.client.cache.expiry.time", "120s",
new TimeValidator(TimeUnit.SECONDS), "Expiry time for metastore client cache"),
METASTORE_CLIENT_CACHE_INITIAL_CAPACITY("hive.metastore.client.cache.initial.capacity", 50,
"Initial capacity for metastore client cache"),
METASTORE_CLIENT_CACHE_MAX_CAPACITY("hive.metastore.client.cache.max.capacity", 50,
"Max capacity for metastore client cache"),
METASTORE_CLIENT_CACHE_STATS_ENABLED("hive.metastore.client.cache.stats.enabled", false,
"Whether to enable metastore client cache stats"),
METASTORE_FASTPATH("hive.metastore.fastpath", false,
"Used to avoid all of the proxies and object copies in the metastore. Note, if this is " +
"set, you MUST use a local metastore (hive.metastore.uris must be empty) otherwise " +
"undefined and most likely undesired behavior will result"),
/**
* @deprecated Use MetastoreConf.FS_HANDLER_THREADS_COUNT
*/
@Deprecated
METASTORE_FS_HANDLER_THREADS_COUNT("hive.metastore.fshandler.threads", 15,
"Number of threads to be allocated for metastore handler for fs operations."),
/**
* @deprecated Use MetastoreConf.FILE_METADATA_THREADS
*/
@Deprecated
METASTORE_HBASE_FILE_METADATA_THREADS("hive.metastore.hbase.file.metadata.threads", 1,
"Number of threads to use to read file metadata in background to cache it."),
/**
* @deprecated Use MetastoreConf.URI_RESOLVER
*/
@Deprecated
METASTORE_URI_RESOLVER("hive.metastore.uri.resolver", "",
"If set, the fully qualified class name of the resolver for hive metastore URIs"),
/**
* @deprecated Use MetastoreConf.THRIFT_CONNECTION_RETRIES
*/
@Deprecated
METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3,
"Number of retries while opening a connection to metastore"),
/**
* @deprecated Use MetastoreConf.THRIFT_FAILURE_RETRIES
*/
@Deprecated
METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1,
"Number of retries upon failure of Thrift metastore calls"),
/**
* @deprecated Use MetastoreConf.SERVER_PORT
*/
@Deprecated
METASTORE_SERVER_PORT("hive.metastore.port", 9083, "Hive metastore listener port"),
/**
* @deprecated Use MetastoreConf.CLIENT_CONNECT_RETRY_DELAY
*/
@Deprecated
METASTORE_CLIENT_CONNECT_RETRY_DELAY("hive.metastore.client.connect.retry.delay", "1s",
new TimeValidator(TimeUnit.SECONDS),
"Number of seconds for the client to wait between consecutive connection attempts"),
/**
* @deprecated Use MetastoreConf.CLIENT_SOCKET_TIMEOUT
*/
@Deprecated
METASTORE_CLIENT_SOCKET_TIMEOUT("hive.metastore.client.socket.timeout", "600s",
new TimeValidator(TimeUnit.SECONDS),
"MetaStore Client socket timeout in seconds"),
/**
* @deprecated Use MetastoreConf.CLIENT_SOCKET_LIFETIME
*/
@Deprecated
METASTORE_CLIENT_SOCKET_LIFETIME("hive.metastore.client.socket.lifetime", "0s",
new TimeValidator(TimeUnit.SECONDS),
"MetaStore Client socket lifetime in seconds. After this time is exceeded, client\n" +
"reconnects on the next MetaStore operation. A value of 0s means the connection\n" +
"has an infinite lifetime."),
/**
* @deprecated Use MetastoreConf.PWD
*/
@Deprecated
METASTOREPWD("javax.jdo.option.ConnectionPassword", "mine",
"password to use against metastore database"),
/**
* @deprecated Use MetastoreConf.CONNECT_URL_HOOK
*/
@Deprecated
METASTORECONNECTURLHOOK("hive.metastore.ds.connection.url.hook", "",
"Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used"),
/**
* @deprecated Use MetastoreConf.MULTITHREADED
*/
@Deprecated
METASTOREMULTITHREADED("javax.jdo.option.Multithreaded", true,
"Set this to true if multiple threads access metastore through JDO concurrently."),
/**
* @deprecated Use MetastoreConf.CONNECT_URL_KEY
*/
@Deprecated
METASTORECONNECTURLKEY("javax.jdo.option.ConnectionURL",
"jdbc:derby:;databaseName=metastore_db;create=true",
"JDBC connect string for a JDBC metastore.\n" +
"To use SSL to encrypt/authenticate the connection, provide database-specific SSL flag in the connection URL.\n" +
"For example, jdbc:postgresql://myhost/db?ssl=true for postgres database."),
/**
* @deprecated Use MetastoreConf.DBACCESS_SSL_PROPS
*/
@Deprecated
METASTORE_DBACCESS_SSL_PROPS("hive.metastore.dbaccess.ssl.properties", "",
"Comma-separated SSL properties for metastore to access database when JDO connection URL\n" +
"enables SSL access. e.g. javax.net.ssl.trustStore=/tmp/truststore,javax.net.ssl.trustStorePassword=pwd."),
/**
* @deprecated Use MetastoreConf.HMS_HANDLER_ATTEMPTS
*/
@Deprecated
HMSHANDLERATTEMPTS("hive.hmshandler.retry.attempts", 10,
"The number of times to retry an HMSHandler call if there was a connection error."),
/**
* @deprecated Use MetastoreConf.HMS_HANDLER_INTERVAL
*/
@Deprecated
HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", "2000ms",
new TimeValidator(TimeUnit.MILLISECONDS), "The time between HMSHandler retry attempts on failure."),
/**
* @deprecated Use MetastoreConf.HMS_HANDLER_FORCE_RELOAD_CONF
*/
@Deprecated
HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false,
"Whether to force reloading of the HMSHandler configuration (including\n" +
"the connection URL) before the next metastore query that accesses the\n" +
"datastore. Once reloaded, this value is reset to false. Used for\n" +
"testing only."),
/**
* @deprecated Use MetastoreConf.SERVER_MAX_MESSAGE_SIZE
*/
@Deprecated
METASTORESERVERMAXMESSAGESIZE("hive.metastore.server.max.message.size", 100*1024*1024L,
"Maximum message size in bytes the HMS will accept."),
/**
* @deprecated Use MetastoreConf.SERVER_MIN_THREADS
*/
@Deprecated
METASTORESERVERMINTHREADS("hive.metastore.server.min.threads", 200,
"Minimum number of worker threads in the Thrift server's pool."),
/**
* @deprecated Use MetastoreConf.SERVER_MAX_THREADS
*/
@Deprecated
METASTORESERVERMAXTHREADS("hive.metastore.server.max.threads", 1000,
"Maximum number of worker threads in the Thrift server's pool."),
/**
* @deprecated Use MetastoreConf.TCP_KEEP_ALIVE
*/
@Deprecated
METASTORE_TCP_KEEP_ALIVE("hive.metastore.server.tcp.keepalive", true,
"Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections."),
/**
* @deprecated Use MetastoreConf.WM_DEFAULT_POOL_SIZE
*/
@Deprecated
METASTORE_WM_DEFAULT_POOL_SIZE("hive.metastore.wm.default.pool.size", 4,
"The size of a default pool to create when creating an empty resource plan;\n" +
"If not positive, no default pool will be created."),
METASTORE_INT_ORIGINAL("hive.metastore.archive.intermediate.original",
"_INTERMEDIATE_ORIGINAL",
"Intermediate dir suffixes used for archiving. Not important what they\n" +
"are, as long as collisions are avoided"),
METASTORE_INT_ARCHIVED("hive.metastore.archive.intermediate.archived",
"_INTERMEDIATE_ARCHIVED", ""),
METASTORE_INT_EXTRACTED("hive.metastore.archive.intermediate.extracted",
"_INTERMEDIATE_EXTRACTED", ""),
/**
* @deprecated Use MetastoreConf.KERBEROS_KEYTAB_FILE
*/
@Deprecated
METASTORE_KERBEROS_KEYTAB_FILE("hive.metastore.kerberos.keytab.file", "",
"The path to the Kerberos Keytab file containing the metastore Thrift server's service principal."),
/**
* @deprecated Use MetastoreConf.KERBEROS_PRINCIPAL
*/
@Deprecated
METASTORE_KERBEROS_PRINCIPAL("hive.metastore.kerberos.principal",
"hive-metastore/[email protected]",
"The service principal for the metastore Thrift server. \n" +
"The special string _HOST will be replaced automatically with the correct host name."),
/**
* @deprecated Use MetastoreConf.CLIENT_KERBEROS_PRINCIPAL
*/
@Deprecated
METASTORE_CLIENT_KERBEROS_PRINCIPAL("hive.metastore.client.kerberos.principal",
"", // E.g. "hive-metastore/[email protected]".
"The Kerberos principal associated with the HA cluster of hcat_servers."),
/**
* @deprecated Use MetastoreConf.USE_THRIFT_SASL
*/
@Deprecated
METASTORE_USE_THRIFT_SASL("hive.metastore.sasl.enabled", false,
"If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos."),
/**
* @deprecated Use MetastoreConf.USE_THRIFT_FRAMED_TRANSPORT
*/
@Deprecated
METASTORE_USE_THRIFT_FRAMED_TRANSPORT("hive.metastore.thrift.framed.transport.enabled", false,
"If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used."),
/**
* @deprecated Use MetastoreConf.USE_THRIFT_COMPACT_PROTOCOL
*/
@Deprecated
METASTORE_USE_THRIFT_COMPACT_PROTOCOL("hive.metastore.thrift.compact.protocol.enabled", false,
"If true, the metastore Thrift interface will use TCompactProtocol. When false (default) TBinaryProtocol will be used.\n" +
"Setting it to true will break compatibility with older clients running TBinaryProtocol."),
/**
* @deprecated Use MetastoreConf.TOKEN_SIGNATURE
*/
@Deprecated
METASTORE_TOKEN_SIGNATURE("hive.metastore.token.signature", "",
"The delegation token service name to match when selecting a token from the current user's tokens."),
/**
* @deprecated Use MetastoreConf.DELEGATION_TOKEN_STORE_CLS
*/
@Deprecated
METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS("hive.cluster.delegation.token.store.class",
"org.apache.hadoop.hive.thrift.MemoryTokenStore",
"The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster."),
METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_CONNECTSTR(
"hive.cluster.delegation.token.store.zookeeper.connectString", "",
"The ZooKeeper token store connect string. You can re-use the configuration value\n" +
"set in hive.zookeeper.quorum, by leaving this parameter unset."),
METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ZNODE(
"hive.cluster.delegation.token.store.zookeeper.znode", "/hivedelegation",
"The root path for token store data. Note that this is used by both HiveServer2 and\n" +
"MetaStore to store delegation Token. One directory gets created for each of them.\n" +
"The final directory names would have the servername appended to it (HIVESERVER2,\n" +
"METASTORE)."),
METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ACL(
"hive.cluster.delegation.token.store.zookeeper.acl", "",
"ACL for token store entries. Comma separated list of ACL entries. For example:\n" +
"sasl:hive/[email protected]:cdrwa,sasl:hive/[email protected]:cdrwa\n" +
"Defaults to all permissions for the hiveserver2/metastore process user."),
/**
* @deprecated Use MetastoreConf.CACHE_PINOBJTYPES
*/
@Deprecated
METASTORE_CACHE_PINOBJTYPES("hive.metastore.cache.pinobjtypes", "Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order",
"List of comma separated metastore object types that should be pinned in the cache"),
/**
* @deprecated Use MetastoreConf.CONNECTION_POOLING_TYPE
*/
@Deprecated
METASTORE_CONNECTION_POOLING_TYPE("datanucleus.connectionPoolingType", "HikariCP", new StringSet("BONECP", "DBCP",
"HikariCP", "NONE"),
"Specify connection pool library for datanucleus"),
/**
* @deprecated Use MetastoreConf.CONNECTION_POOLING_MAX_CONNECTIONS
*/
@Deprecated
METASTORE_CONNECTION_POOLING_MAX_CONNECTIONS("datanucleus.connectionPool.maxPoolSize", 10,
"Specify the maximum number of connections in the connection pool. Note: The configured size will be used by\n" +
"2 connection pools (TxnHandler and ObjectStore). When configuring the max connection pool size, it is\n" +
"recommended to take into account the number of metastore instances and the number of HiveServer2 instances\n" +
"configured with embedded metastore. To get optimal performance, set config to meet the following condition\n"+
"(2 * pool_size * metastore_instances + 2 * pool_size * HS2_instances_with_embedded_metastore) = \n" +
"(2 * physical_core_count + hard_disk_count)."),
// Workaround for DN bug on Postgres:
// http://www.datanucleus.org/servlet/forum/viewthread_thread,7985_offset
/**
* @deprecated Use MetastoreConf.DATANUCLEUS_INIT_COL_INFO
*/
@Deprecated
METASTORE_DATANUCLEUS_INIT_COL_INFO("datanucleus.rdbms.initializeColumnInfo", "NONE",
"initializeColumnInfo setting for DataNucleus; set to NONE at least on Postgres."),
/**
* @deprecated Use MetastoreConf.VALIDATE_TABLES
*/
@Deprecated
METASTORE_VALIDATE_TABLES("datanucleus.schema.validateTables", false,
"Validates existing schema against code. Turn this on if you want to verify existing schema"),
/**
* @deprecated Use MetastoreConf.VALIDATE_COLUMNS
*/
@Deprecated
METASTORE_VALIDATE_COLUMNS("datanucleus.schema.validateColumns", false,
"Validates existing schema against code. Turn this on if you want to verify existing schema"),
/**
* @deprecated Use MetastoreConf.VALIDATE_CONSTRAINTS
*/
@Deprecated
METASTORE_VALIDATE_CONSTRAINTS("datanucleus.schema.validateConstraints", false,
"Validates existing schema against code. Turn this on if you want to verify existing schema"),
/**
* @deprecated Use MetastoreConf.STORE_MANAGER_TYPE
*/
@Deprecated
METASTORE_STORE_MANAGER_TYPE("datanucleus.storeManagerType", "rdbms", "metadata store type"),
/**
* @deprecated Use MetastoreConf.AUTO_CREATE_ALL
*/
@Deprecated
METASTORE_AUTO_CREATE_ALL("datanucleus.schema.autoCreateAll", false,
"Auto creates the necessary schema on startup if one doesn't exist. Set this to false after creating it once. "
+ "To enable auto create also set hive.metastore.schema.verification=false. Auto creation is not "
+ "recommended for production use cases; run the schematool command instead." ),
/**
* @deprecated Use MetastoreConf.SCHEMA_VERIFICATION
*/
@Deprecated
METASTORE_SCHEMA_VERIFICATION("hive.metastore.schema.verification", true,
"Enforce metastore schema version consistency.\n" +
"True: Verify that version information stored in the metastore is compatible with that from Hive jars. Also disable automatic\n" +
" schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures\n" +
" proper metastore schema migration. (Default)\n" +
"False: Warn if the version information stored in the metastore doesn't match that from the Hive jars."),
/**
* @deprecated Use MetastoreConf.SCHEMA_VERIFICATION_RECORD_VERSION
*/
@Deprecated
METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION("hive.metastore.schema.verification.record.version", false,
"When true the current MS version is recorded in the VERSION table. If this is disabled and verification is\n" +
" enabled the MS will be unusable."),
/**
* @deprecated Use MetastoreConf.SCHEMA_INFO_CLASS
*/
@Deprecated
METASTORE_SCHEMA_INFO_CLASS("hive.metastore.schema.info.class",
"org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo",
"Fully qualified class name for the metastore schema information class \n"
+ "which is used by schematool to fetch the schema information.\n"
+ " This class should implement the IMetaStoreSchemaInfo interface"),
/**
* @deprecated Use MetastoreConf.DATANUCLEUS_TRANSACTION_ISOLATION
*/
@Deprecated
METASTORE_TRANSACTION_ISOLATION("datanucleus.transactionIsolation", "read-committed",
"Default transaction isolation level for identity generation."),
/**
* @deprecated Use MetastoreConf.DATANUCLEUS_CACHE_LEVEL2
*/
@Deprecated
METASTORE_CACHE_LEVEL2("datanucleus.cache.level2", false,
"Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server"),
METASTORE_CACHE_LEVEL2_TYPE("datanucleus.cache.level2.type", "none", ""),
/**
* @deprecated Use MetastoreConf.IDENTIFIER_FACTORY
*/
@Deprecated
METASTORE_IDENTIFIER_FACTORY("datanucleus.identifierFactory", "datanucleus1",
"Name of the identifier factory to use when generating table/column names etc. \n" +
"'datanucleus1' is used for backward compatibility with DataNucleus v1"),
/**
* @deprecated Use MetastoreConf.DATANUCLEUS_USE_LEGACY_VALUE_STRATEGY
*/
@Deprecated
METASTORE_USE_LEGACY_VALUE_STRATEGY("datanucleus.rdbms.useLegacyNativeValueStrategy", true, ""),
/**
* @deprecated Use MetastoreConf.DATANUCLEUS_PLUGIN_REGISTRY_BUNDLE_CHECK
*/
@Deprecated
METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK("datanucleus.plugin.pluginRegistryBundleCheck", "LOG",
"Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]"),
/**
* @deprecated Use MetastoreConf.BATCH_RETRIEVE_MAX
*/
@Deprecated
METASTORE_BATCH_RETRIEVE_MAX("hive.metastore.batch.retrieve.max", 300,
"Maximum number of objects (tables/partitions) that can be retrieved from the metastore in one batch. \n" +
"The higher the number, the fewer round trips are needed to the Hive metastore server, \n" +
"but it may also cause higher memory requirement at the client side."),
/**
* @deprecated Use MetastoreConf.BATCH_RETRIEVE_OBJECTS_MAX
*/
@Deprecated
METASTORE_BATCH_RETRIEVE_OBJECTS_MAX(
"hive.metastore.batch.retrieve.table.partition.max", 1000,
"Maximum number of objects that metastore internally retrieves in one batch."),
/**
* @deprecated Use MetastoreConf.INIT_HOOKS
*/
@Deprecated
METASTORE_INIT_HOOKS("hive.metastore.init.hooks", "",
"A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization. \n" +
"An init hook is specified as the name of a Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener."),
/**
* @deprecated Use MetastoreConf.PRE_EVENT_LISTENERS
*/
@Deprecated
METASTORE_PRE_EVENT_LISTENERS("hive.metastore.pre.event.listeners", "",
"List of comma separated listeners for metastore events."),
/**
* @deprecated Use MetastoreConf.EVENT_LISTENERS
*/
@Deprecated
METASTORE_EVENT_LISTENERS("hive.metastore.event.listeners", "",
"A comma separated list of Java classes that implement the org.apache.hadoop.hive.metastore.MetaStoreEventListener" +
" interface. The metastore event and corresponding listener method will be invoked in separate JDO transactions. " +
"Alternatively, configure hive.metastore.transactional.event.listeners to ensure both are invoked in same JDO transaction."),
/**
* @deprecated Use MetastoreConf.TRANSACTIONAL_EVENT_LISTENERS
*/
@Deprecated
METASTORE_TRANSACTIONAL_EVENT_LISTENERS("hive.metastore.transactional.event.listeners", "",
"A comma separated list of Java classes that implement the org.apache.hadoop.hive.metastore.MetaStoreEventListener" +
" interface. Both the metastore event and corresponding listener method will be invoked in the same JDO transaction."),
/**
* @deprecated Use MetastoreConf.NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES
*/
@Deprecated
NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES("hive.notification.sequence.lock.max.retries", 10,
"Number of retries required to acquire a lock when getting the next notification sequential ID for entries "
+ "in the NOTIFICATION_LOG table."),
/**
* @deprecated Use MetastoreConf.NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL
*/
@Deprecated
NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL("hive.notification.sequence.lock.retry.sleep.interval", 10L,
new TimeValidator(TimeUnit.SECONDS),
"Sleep interval between retries to acquire a notification lock, as described in the property "
+ NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES.name()),
/**
* @deprecated Use MetastoreConf.EVENT_DB_LISTENER_TTL
*/
@Deprecated
METASTORE_EVENT_DB_LISTENER_TTL("hive.metastore.event.db.listener.timetolive", "86400s",
new TimeValidator(TimeUnit.SECONDS),
"time after which events will be removed from the database listener queue"),
/**
* @deprecated Use MetastoreConf.EVENT_DB_NOTIFICATION_API_AUTH
*/
@Deprecated
METASTORE_EVENT_DB_NOTIFICATION_API_AUTH("hive.metastore.event.db.notification.api.auth", true,
"Should metastore do authorization against database notification related APIs such as get_next_notification.\n" +
"If set to true, then only the superusers in proxy settings have the permission"),
/**
* @deprecated Use MetastoreConf.AUTHORIZATION_STORAGE_AUTH_CHECKS
*/
@Deprecated
METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS("hive.metastore.authorization.storage.checks", false,
"Should the metastore do authorization checks against the underlying storage (usually hdfs) \n" +
"for operations like drop-partition (disallow the drop-partition if the user in\n" +
"question doesn't have permissions to delete the corresponding directory\n" +
"on the storage)."),
METASTORE_AUTHORIZATION_EXTERNALTABLE_DROP_CHECK("hive.metastore.authorization.storage.check.externaltable.drop", true,
"Should StorageBasedAuthorization check permission of the storage before dropping an external table.\n" +
"StorageBasedAuthorization already does this check for managed tables. For external tables however,\n" +
"anyone who has read permission on the directory could drop the external table, which is surprising.\n" +
"The flag is set to false by default to maintain backward compatibility."),
/**
* @deprecated Use MetastoreConf.EVENT_CLEAN_FREQ
*/
@Deprecated
METASTORE_EVENT_CLEAN_FREQ("hive.metastore.event.clean.freq", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Frequency at which timer task runs to purge expired events in metastore."),
/**
* @deprecated Use MetastoreConf.EVENT_EXPIRY_DURATION
*/
@Deprecated
METASTORE_EVENT_EXPIRY_DURATION("hive.metastore.event.expiry.duration", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Duration after which events expire from events table"),
/**
* @deprecated Use MetastoreConf.EVENT_MESSAGE_FACTORY
*/
@Deprecated
METASTORE_EVENT_MESSAGE_FACTORY("hive.metastore.event.message.factory",
"org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder",
"Factory class used for encoding and decoding messages in the generated events."),
/**
* @deprecated Use MetastoreConf.EXECUTE_SET_UGI
*/
@Deprecated
METASTORE_EXECUTE_SET_UGI("hive.metastore.execute.setugi", true,
"In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using \n" +
"the client's reported user and group permissions. Note that this property must be set on \n" +
"both the client and server sides. Further note that it is best effort. \n" +
"If the client sets it to true and the server sets it to false, the client setting will be ignored."),
/**
* @deprecated Use MetastoreConf.PARTITION_NAME_WHITELIST_PATTERN
*/
@Deprecated
METASTORE_PARTITION_NAME_WHITELIST_PATTERN("hive.metastore.partition.name.whitelist.pattern", "",
"Partition names will be checked against this regex pattern and rejected if not matched."),
/**
* @deprecated Use MetastoreConf.INTEGER_JDO_PUSHDOWN
*/
@Deprecated
METASTORE_INTEGER_JDO_PUSHDOWN("hive.metastore.integral.jdo.pushdown", false,
"Allow JDO query pushdown for integral partition columns in metastore. Off by default. This\n" +
"improves metastore perf for integral columns, especially if there's a large number of partitions.\n" +
"However, it doesn't work correctly with integral values that are not normalized (e.g. have\n" +
"leading zeroes, like 0012). If metastore direct SQL is enabled and works, this optimization\n" +
"is also irrelevant."),
/**
* @deprecated Use MetastoreConf.TRY_DIRECT_SQL
*/
@Deprecated
METASTORE_TRY_DIRECT_SQL("hive.metastore.try.direct.sql", true,
"Whether the Hive metastore should try to use direct SQL queries instead of the\n" +
"DataNucleus for certain read paths. This can improve metastore performance when\n" +
"fetching many partitions or column statistics by orders of magnitude; however, it\n" +
"is not guaranteed to work on all RDBMS-es and all versions. In case of SQL failures,\n" +
"the metastore will fall back to the DataNucleus, so it's safe even if SQL doesn't\n" +
"work for all queries on your datastore. If all SQL queries fail (for example, your\n" +
"metastore is backed by MongoDB), you might want to disable this to save the\n" +
"try-and-fall-back cost."),
/**
* @deprecated Use MetastoreConf.DIRECT_SQL_PARTITION_BATCH_SIZE
*/
@Deprecated
METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE("hive.metastore.direct.sql.batch.size", 0,
"Batch size for partition and other object retrieval from the underlying DB in direct\n" +
"SQL. For some DBs like Oracle and MSSQL, there are hardcoded or perf-based limitations\n" +
"that necessitate this. For DBs that can handle the queries, this isn't necessary and\n" +
"may impede performance. -1 means no batching, 0 means automatic batching."),
/**
* @deprecated Use MetastoreConf.TRY_DIRECT_SQL_DDL
*/
@Deprecated
METASTORE_TRY_DIRECT_SQL_DDL("hive.metastore.try.direct.sql.ddl", true,
"Same as hive.metastore.try.direct.sql, for read statements within a transaction that\n" +
"modifies metastore data. Due to non-standard behavior in Postgres, if a direct SQL\n" +
"select query has incorrect syntax or something similar inside a transaction, the\n" +
"entire transaction will fail and fall-back to DataNucleus will not be possible. You\n" +
"should disable the usage of direct SQL inside transactions if that happens in your case."),
/**
* @deprecated Use MetastoreConf.DIRECT_SQL_MAX_QUERY_LENGTH
*/
@Deprecated
METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH("hive.direct.sql.max.query.length", 100, "The maximum\n" +
" size of a query string (in KB)."),
/**
* @deprecated Use MetastoreConf.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE
*/
@Deprecated
METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE("hive.direct.sql.max.elements.in.clause", 1000,
"The maximum number of values in an IN clause. Once exceeded, it will be broken into\n" +
" multiple OR separated IN clauses."),
/**
* @deprecated Use MetastoreConf.DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE
*/
@Deprecated
METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE("hive.direct.sql.max.elements.values.clause",
1000, "The maximum number of values in a VALUES clause for INSERT statement."),
/**
* @deprecated Use MetastoreConf.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS
*/
@Deprecated
METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("hive.metastore.orm.retrieveMapNullsAsEmptyStrings",false,
"Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must " +
"either be pruned or converted to empty strings. Some backing dbs such as Oracle persist empty strings " +
"as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " +
"pruning is the correct behaviour"),
/**
* @deprecated Use MetastoreConf.DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES
*/
@Deprecated
METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES(
"hive.metastore.disallow.incompatible.col.type.changes", true,
"If true (default is true), ALTER TABLE operations which change the type of a\n" +
"column (say STRING) to an incompatible type (say MAP) are disallowed.\n" +
"RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the\n" +
"datatypes can be converted from string to any type. The map is also serialized as\n" +
"a string, which can be read as a string as well. However, with any binary\n" +
"serialization, this is not true. Blocking the ALTER TABLE prevents ClassCastExceptions\n" +
"when subsequently trying to access old partitions.\n" +
"\n" +
"Primitive types like INT, STRING, BIGINT, etc., are compatible with each other and are\n" +
"not blocked.\n" +
"\n" +
"See HIVE-4409 for more details."),
/**
* @deprecated Use MetastoreConf.LIMIT_PARTITION_REQUEST
*/
@Deprecated
METASTORE_LIMIT_PARTITION_REQUEST("hive.metastore.limit.partition.request", -1,
"This limits the number of partitions that can be requested from the metastore for a given table.\n" +
"The default value \"-1\" means no limit."),
NEWTABLEDEFAULTPARA("hive.table.parameters.default", "",
"Default property values for newly created tables"),
DDL_CTL_PARAMETERS_WHITELIST("hive.ddl.createtablelike.properties.whitelist", "",
"Table Properties to copy over when executing a Create Table Like."),
/**
* @deprecated Use MetastoreConf.RAW_STORE_IMPL
*/
@Deprecated
METASTORE_RAW_STORE_IMPL("hive.metastore.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore",
"Name of the class that implements the org.apache.hadoop.hive.metastore.rawstore interface. \n" +
"This class is used to store and retrieve raw metadata objects such as tables and databases"),
/**
* @deprecated Use MetastoreConf.TXN_STORE_IMPL
*/
@Deprecated
METASTORE_TXN_STORE_IMPL("hive.metastore.txn.store.impl",
"org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler",
"Name of class that implements org.apache.hadoop.hive.metastore.txn.TxnStore. This " +
"class is used to store and retrieve transactions and locks"),
/**
* @deprecated Use MetastoreConf.CONNECTION_DRIVER
*/
@Deprecated
METASTORE_CONNECTION_DRIVER("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver",
"Driver class name for a JDBC metastore"),
/**
* @deprecated Use MetastoreConf.MANAGER_FACTORY_CLASS
*/
@Deprecated
METASTORE_MANAGER_FACTORY_CLASS("javax.jdo.PersistenceManagerFactoryClass",
"org.datanucleus.api.jdo.JDOPersistenceManagerFactory",
"class implementing the jdo persistence"),
/**
* @deprecated Use MetastoreConf.EXPRESSION_PROXY_CLASS
*/
@Deprecated
METASTORE_EXPRESSION_PROXY_CLASS("hive.metastore.expression.proxy",
"org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore", ""),
/**
* @deprecated Use MetastoreConf.DETACH_ALL_ON_COMMIT
*/
@Deprecated
METASTORE_DETACH_ALL_ON_COMMIT("javax.jdo.option.DetachAllOnCommit", true,
"Detaches all objects from session so that they can be used after transaction is committed"),
/**
* @deprecated Use MetastoreConf.NON_TRANSACTIONAL_READ
*/
@Deprecated
METASTORE_NON_TRANSACTIONAL_READ("javax.jdo.option.NonTransactionalRead", true,
"Reads outside of transactions"),
/**
* @deprecated Use MetastoreConf.CONNECTION_USER_NAME
*/
@Deprecated
METASTORE_CONNECTION_USER_NAME("javax.jdo.option.ConnectionUserName", "APP",
"Username to use against metastore database"),
/**
* @deprecated Use MetastoreConf.END_FUNCTION_LISTENERS
*/
@Deprecated
METASTORE_END_FUNCTION_LISTENERS("hive.metastore.end.function.listeners", "",
"List of comma separated listeners for the end of metastore functions."),
/**
* @deprecated Use MetastoreConf.PART_INHERIT_TBL_PROPS
*/
@Deprecated
METASTORE_PART_INHERIT_TBL_PROPS("hive.metastore.partition.inherit.table.properties", "",
"List of comma separated keys occurring in table properties which will get inherited to newly created partitions. \n" +
"* implies all the keys will get inherited."),
/**
* @deprecated Use MetastoreConf.FILTER_HOOK
*/
@Deprecated
METASTORE_FILTER_HOOK("hive.metastore.filter.hook", "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl",
"Metastore hook class for filtering the metadata read results. If hive.security.authorization.manager "
+ "is set to an instance of HiveAuthorizerFactory, then this value is ignored."),
FIRE_EVENTS_FOR_DML("hive.metastore.dml.events", false, "If true, the metastore will be asked" +
" to fire events for DML operations"),
METASTORE_CLIENT_DROP_PARTITIONS_WITH_EXPRESSIONS("hive.metastore.client.drop.partitions.using.expressions", true,
"Choose whether dropping partitions with HCatClient pushes the partition-predicate to the metastore, " +
"or drops partitions iteratively"),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_ENABLED
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_ENABLED("hive.metastore.aggregate.stats.cache.enabled", false,
"Whether aggregate stats caching is enabled or not."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_SIZE
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_SIZE("hive.metastore.aggregate.stats.cache.size", 10000,
"Maximum number of aggregate stats nodes that we will place in the metastore aggregate stats cache."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_PARTITIONS
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS("hive.metastore.aggregate.stats.cache.max.partitions", 10000,
"Maximum number of partitions that are aggregated per cache node."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_FPP
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_FPP("hive.metastore.aggregate.stats.cache.fpp", (float) 0.01,
"Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%)."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_VARIANCE
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE("hive.metastore.aggregate.stats.cache.max.variance", (float) 0.01,
"Maximum tolerable variance in number of partitions between a cached node and our request (default 1%)."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_TTL
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_TTL("hive.metastore.aggregate.stats.cache.ttl", "600s", new TimeValidator(TimeUnit.SECONDS),
"Number of seconds for a cached node to be active in the cache before it becomes stale."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT("hive.metastore.aggregate.stats.cache.max.writer.wait", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Number of milliseconds a writer will wait to acquire the writelock before giving up."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_READER_WAIT
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT("hive.metastore.aggregate.stats.cache.max.reader.wait", "1000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Number of milliseconds a reader will wait to acquire the readlock before giving up."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_FULL
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL("hive.metastore.aggregate.stats.cache.max.full", (float) 0.9,
"Maximum cache full % after which the cache cleaner thread kicks in."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_CLEAN_UNTIL
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL("hive.metastore.aggregate.stats.cache.clean.until", (float) 0.8,
"The cleaner thread cleans until cache reaches this % full size."),
/**
* @deprecated Use MetastoreConf.METRICS_ENABLED
*/
@Deprecated
METASTORE_METRICS("hive.metastore.metrics.enabled", false, "Enable metrics on the metastore."),
/**
* @deprecated Use MetastoreConf.INIT_METADATA_COUNT_ENABLED
*/
@Deprecated
METASTORE_INIT_METADATA_COUNT_ENABLED("hive.metastore.initial.metadata.count.enabled", true,
"Enable a metadata count at metastore startup for metrics."),
// Metastore SSL settings
/**
* @deprecated Use MetastoreConf.USE_SSL
*/
@Deprecated
HIVE_METASTORE_USE_SSL("hive.metastore.use.SSL", false,
"Set this to true for using SSL encryption in HMS server."),
/**
* @deprecated Use MetastoreConf.SSL_KEYSTORE_PATH
*/
@Deprecated
HIVE_METASTORE_SSL_KEYSTORE_PATH("hive.metastore.keystore.path", "",
"Metastore SSL certificate keystore location."),
/**
* @deprecated Use MetastoreConf.SSL_KEYSTORE_PASSWORD
*/
@Deprecated
HIVE_METASTORE_SSL_KEYSTORE_PASSWORD("hive.metastore.keystore.password", "",
"Metastore SSL certificate keystore password."),
/**
* @deprecated Use MetastoreConf.SSL_TRUSTSTORE_PATH
*/
@Deprecated
HIVE_METASTORE_SSL_TRUSTSTORE_PATH("hive.metastore.truststore.path", "",
"Metastore SSL certificate truststore location."),
/**
* @deprecated Use MetastoreConf.SSL_TRUSTSTORE_PASSWORD
*/
@Deprecated
HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD("hive.metastore.truststore.password", "",
"Metastore SSL certificate truststore password."),
// Parameters for exporting metadata on table drop (requires the use of the
// org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre-event listener)
/**
* @deprecated Use MetastoreConf.METADATA_EXPORT_LOCATION
*/
@Deprecated
METADATA_EXPORT_LOCATION("hive.metadata.export.location", "",
"When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
"it is the location to which the metadata will be exported. The default is an empty string, which results in the \n" +
"metadata being exported to the current user's home directory on HDFS."),
/**
* @deprecated Use MetastoreConf.MOVE_EXPORTED_METADATA_TO_TRASH
*/
@Deprecated
MOVE_EXPORTED_METADATA_TO_TRASH("hive.metadata.move.exported.metadata.to.trash", true,
"When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
"this setting determines if the metadata that is exported will subsequently be moved to the user's trash directory \n" +
"alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data."),
// CLI
CLIIGNOREERRORS("hive.cli.errors.ignore", false, ""),
CLIPRINTCURRENTDB("hive.cli.print.current.db", false,
"Whether to include the current database in the Hive prompt."),
CLIPROMPT("hive.cli.prompt", "hive",
"Command line prompt configuration value. Other hiveconf can be used in this configuration value. \n" +
"Variable substitution will only be invoked at the Hive CLI startup."),
/**
* @deprecated Use MetastoreConf.FS_HANDLER_CLS
*/
@Deprecated
HIVE_METASTORE_FS_HANDLER_CLS("hive.metastore.fs.handler.class", "org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl", ""),
// Things we log in the jobconf
// session identifier
HIVESESSIONID("hive.session.id", "", ""),
// whether session is running in silent mode or not
HIVESESSIONSILENT("hive.session.silent", false, ""),
HIVE_LOCAL_TIME_ZONE("hive.local.time.zone", "LOCAL",
"Sets the time-zone for displaying and interpreting time stamps. If this property value is set to\n" +
"LOCAL, it is not specified, or it is not a correct time-zone, the system default time-zone will be\n " +
"used instead. Time-zone IDs can be specified as region-based zone IDs (based on IANA time-zone data),\n" +
"abbreviated zone IDs, or offset IDs."),
HIVE_SESSION_HISTORY_ENABLED("hive.session.history.enabled", false,
"Whether to log Hive query, query plan, runtime statistics etc."),
HIVEQUERYSTRING("hive.query.string", "",
"Query being executed (might be multiple per a session)"),
HIVEQUERYID("hive.query.id", "",
"ID for query being executed (might be multiple per a session)"),
HIVEQUERYTAG("hive.query.tag", null, "Tag for the queries in the session. User can kill the queries with the tag " +
"in another session. Currently there is no tag duplication check, user need to make sure his tag is unique. " +
"Also 'kill query' needs to be issued to all HiveServer2 instances to proper kill the queries"),
HIVESPARKJOBNAMELENGTH("hive.spark.jobname.length", 100000, "max jobname length for Hive on " +
"Spark queries"),
HIVEJOBNAMELENGTH("hive.jobname.length", 50, "max jobname length"),
// hive jar
HIVEJAR("hive.jar.path", "",
"The location of hive_cli.jar that is used when submitting jobs in a separate jvm."),
HIVEAUXJARS("hive.aux.jars.path", "",
"The location of the plugin jars that contain implementations of user defined functions and serdes."),
// reloadable jars
HIVERELOADABLEJARS("hive.reloadable.aux.jars.path", "",
"The locations of the plugin jars, which can be a comma-separated folders or jars. Jars can be renewed\n"
+ "by executing reload command. And these jars can be "
+ "used as the auxiliary classes like creating a UDF or SerDe."),
// hive added files and jars
HIVEADDEDFILES("hive.added.files.path", "", "This an internal parameter."),
HIVEADDEDJARS("hive.added.jars.path", "", "This an internal parameter."),
HIVEADDEDARCHIVES("hive.added.archives.path", "", "This an internal parameter."),
HIVEADDFILESUSEHDFSLOCATION("hive.resource.use.hdfs.location", true, "Reference HDFS based files/jars directly instead of "
+ "copy to session based HDFS scratch directory, to make distributed cache more useful."),
HIVE_CURRENT_DATABASE("hive.current.database", "", "Database name used by current session. Internal usage only.", true),
// for hive script operator
HIVES_AUTO_PROGRESS_TIMEOUT("hive.auto.progress.timeout", "0s",
new TimeValidator(TimeUnit.SECONDS),
"How long to run autoprogressor for the script/UDTF operators.\n" +
"Set to 0 for forever."),
HIVESCRIPTAUTOPROGRESS("hive.script.auto.progress", false,
"Whether Hive Transform/Map/Reduce Clause should automatically send progress information to TaskTracker \n" +
"to avoid the task getting killed because of inactivity. Hive sends progress information when the script is \n" +
"outputting to stderr. This option removes the need of periodically producing stderr messages, \n" +
"but users should be cautious because this may prevent infinite loops in the scripts to be killed by TaskTracker."),
HIVESCRIPTIDENVVAR("hive.script.operator.id.env.var", "HIVE_SCRIPT_OPERATOR_ID",
"Name of the environment variable that holds the unique script operator ID in the user's \n" +
"transform function (the custom mapper/reducer that the user has specified in the query)"),
HIVESCRIPTTRUNCATEENV("hive.script.operator.truncate.env", false,
"Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits)"),
HIVESCRIPT_ENV_BLACKLIST("hive.script.operator.env.blacklist",
"hive.txn.valid.txns,hive.txn.tables.valid.writeids,hive.txn.valid.writeids,hive.script.operator.env.blacklist,hive.repl.current.table.write.id",
"Comma separated list of keys from the configuration file not to convert to environment " +
"variables when invoking the script operator"),
HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT("hive.strict.checks.orderby.no.limit", false,
"Enabling strict large query checks disallows the following:\n" +
" Orderby without limit.\n" +
"Note that this check currently does not consider data size, only the query pattern."),
HIVE_STRICT_CHECKS_NO_PARTITION_FILTER("hive.strict.checks.no.partition.filter", false,
"Enabling strict large query checks disallows the following:\n" +
" No partition being picked up for a query against partitioned table.\n" +
"Note that this check currently does not consider data size, only the query pattern."),
HIVE_STRICT_CHECKS_TYPE_SAFETY("hive.strict.checks.type.safety", true,
"Enabling strict type safety checks disallows the following:\n" +
" Comparing bigints and strings.\n" +
" Comparing bigints and doubles."),
HIVE_STRICT_CHECKS_CARTESIAN("hive.strict.checks.cartesian.product", false,
"Enabling strict Cartesian join checks disallows the following:\n" +
" Cartesian product (cross join)."),
HIVE_STRICT_CHECKS_BUCKETING("hive.strict.checks.bucketing", true,
"Enabling strict bucketing checks disallows the following:\n" +
" Load into bucketed tables."),
HIVE_LOAD_DATA_OWNER("hive.load.data.owner", "",
"Set the owner of files loaded using load data in managed tables."),
@Deprecated
HIVEMAPREDMODE("hive.mapred.mode", null,
"Deprecated; use hive.strict.checks.* settings instead."),
HIVEALIAS("hive.alias", "", ""),
HIVEMAPSIDEAGGREGATE("hive.map.aggr", true, "Whether to use map-side aggregation in Hive Group By queries"),
HIVEGROUPBYSKEW("hive.groupby.skewindata", false, "Whether there is skew in data to optimize group by queries"),
HIVEJOINEMITINTERVAL("hive.join.emit.interval", 1000,
"How many rows in the right-most join operand Hive should buffer before emitting the join result."),
HIVEJOINCACHESIZE("hive.join.cache.size", 25000,
"How many rows in the joining tables (except the streaming table) should be cached in memory."),
HIVE_PUSH_RESIDUAL_INNER("hive.join.inner.residual", false,
"Whether to push non-equi filter predicates within inner joins. This can improve efficiency in "
+ "the evaluation of certain joins, since we will not be emitting rows which are thrown away by "
+ "a Filter operator straight away. However, currently vectorization does not support them, thus "
+ "enabling it is only recommended when vectorization is disabled."),
HIVE_PTF_RANGECACHE_SIZE("hive.ptf.rangecache.size", 10000,
"Size of the cache used on reducer side, that stores boundaries of ranges within a PTF " +
"partition. Used if a query specifies a RANGE type window including an orderby clause." +
"Set this to 0 to disable this cache."),
// CBO related
HIVE_CBO_ENABLED("hive.cbo.enable", true, "Flag to control enabling Cost Based Optimizations using Calcite framework."),
HIVE_CBO_CNF_NODES_LIMIT("hive.cbo.cnf.maxnodes", -1, "When converting to conjunctive normal form (CNF), fail if " +
"the expression exceeds this threshold; the threshold is expressed in terms of number of nodes (leaves and " +
"interior nodes). -1 to not set up a threshold."),
HIVE_CBO_RETPATH_HIVEOP("hive.cbo.returnpath.hiveop", false, "Flag to control calcite plan to hive operator conversion"),
HIVE_CBO_EXTENDED_COST_MODEL("hive.cbo.costmodel.extended", false, "Flag to control enabling the extended cost model based on "
+ "CPU, IO and cardinality. Otherwise, the cost model is based on cardinality."),
HIVE_CBO_COST_MODEL_CPU("hive.cbo.costmodel.cpu", "0.000001", "Default cost of a comparison"),
HIVE_CBO_COST_MODEL_NET("hive.cbo.costmodel.network", "150.0", "Default cost of transferring a byte over the network;"
+ " expressed as multiple of CPU cost"),
HIVE_CBO_COST_MODEL_LFS_WRITE("hive.cbo.costmodel.local.fs.write", "4.0", "Default cost of writing a byte to local FS;"
+ " expressed as multiple of NETWORK cost"),
HIVE_CBO_COST_MODEL_LFS_READ("hive.cbo.costmodel.local.fs.read", "4.0", "Default cost of reading a byte from local FS;"
+ " expressed as multiple of NETWORK cost"),
HIVE_CBO_COST_MODEL_HDFS_WRITE("hive.cbo.costmodel.hdfs.write", "10.0", "Default cost of writing a byte to HDFS;"
+ " expressed as multiple of Local FS write cost"),
HIVE_CBO_COST_MODEL_HDFS_READ("hive.cbo.costmodel.hdfs.read", "1.5", "Default cost of reading a byte from HDFS;"
+ " expressed as multiple of Local FS read cost"),
HIVE_CBO_SHOW_WARNINGS("hive.cbo.show.warnings", true,
"Toggle display of CBO warnings like missing column stats"),
HIVE_CBO_STATS_CORRELATED_MULTI_KEY_JOINS("hive.cbo.stats.correlated.multi.key.joins", true,
"When CBO estimates output rows for a join involving multiple columns, the default behavior assumes" +
"the columns are independent. Setting this flag to true will cause the estimator to assume" +
"the columns are correlated."),
AGGR_JOIN_TRANSPOSE("hive.transpose.aggr.join", false, "push aggregates through join"),
SEMIJOIN_CONVERSION("hive.optimize.semijoin.conversion", true, "convert group by followed by inner equi join into semijoin"),
HIVE_COLUMN_ALIGNMENT("hive.order.columnalignment", true, "Flag to control whether we want to try to align " +
"columns in operators such as Aggregate or Join so that we try to reduce the number of shuffling stages"),
// materialized views
HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING("hive.materializedview.rewriting", true,
"Whether to try to rewrite queries using the materialized views enabled for rewriting"),
HIVE_MATERIALIZED_VIEW_REWRITING_SELECTION_STRATEGY("hive.materializedview.rewriting.strategy", "heuristic",
new StringSet("heuristic", "costbased"),
"The strategy that should be used to cost and select the materialized view rewriting. \n" +
" heuristic: Always try to select the plan using the materialized view if rewriting produced one," +
"choosing the plan with lower cost among possible plans containing a materialized view\n" +
" costbased: Fully cost-based strategy, always use plan with lower cost, independently on whether " +
"it uses a materialized view or not"),
HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW("hive.materializedview.rewriting.time.window", "0min", new TimeValidator(TimeUnit.MINUTES),
"Time window, specified in seconds, after which outdated materialized views become invalid for automatic query rewriting.\n" +
"For instance, if more time than the value assigned to the property has passed since the materialized view " +
"was created or rebuilt, and one of its source tables has changed since, the materialized view will not be " +
"considered for rewriting. Default value 0 means that the materialized view cannot be " +
"outdated to be used automatically in query rewriting. Value -1 means to skip this check."),
HIVE_MATERIALIZED_VIEW_REWRITING_INCREMENTAL("hive.materializedview.rewriting.incremental", false,
"Whether to try to execute incremental rewritings based on outdated materializations and\n" +
"current content of tables. Default value of true effectively amounts to enabling incremental\n" +
"rebuild for the materializations too."),
HIVE_MATERIALIZED_VIEW_REBUILD_INCREMENTAL("hive.materializedview.rebuild.incremental", true,
"Whether to try to execute incremental rebuild for the materialized views. Incremental rebuild\n" +
"tries to modify the original materialization contents to reflect the latest changes to the\n" +
"materialized view source tables, instead of rebuilding the contents fully. Incremental rebuild\n" +
"is based on the materialized view algebraic incremental rewriting."),
HIVE_MATERIALIZED_VIEW_REBUILD_INCREMENTAL_FACTOR("hive.materializedview.rebuild.incremental.factor", 0.1f,
"The estimated cost of the resulting plan for incremental maintenance of materialization\n" +
"with aggregations will be multiplied by this value. Reducing the value can be useful to\n" +
"favour incremental rebuild over full rebuild."),
HIVE_MATERIALIZED_VIEW_FILE_FORMAT("hive.materializedview.fileformat", "ORC",
new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC"),
"Default file format for CREATE MATERIALIZED VIEW statement"),
HIVE_MATERIALIZED_VIEW_SERDE("hive.materializedview.serde",
"org.apache.hadoop.hive.ql.io.orc.OrcSerde", "Default SerDe used for materialized views"),
HIVE_ENABLE_JDBC_PUSHDOWN("hive.jdbc.pushdown.enable", true,
"Flag to control enabling pushdown of operators into JDBC connection and subsequent SQL generation\n" +
"using Calcite"),
HIVE_ENABLE_JDBC_SAFE_PUSHDOWN("hive.jdbc.pushdown.safe.enable", false,
"Flag to control enabling pushdown of operators using Calcite that prevent splitting results\n" +
"retrieval in the JDBC storage handler"),
// hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.row,
// needs to be removed by Hive 0.13. Also, do not change default (see SMB operator)
HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100, ""),
HIVEMAPJOINUSEOPTIMIZEDTABLE("hive.mapjoin.optimized.hashtable", true,
"Whether Hive should use memory-optimized hash table for MapJoin.\n" +
"Only works on Tez and Spark, because memory-optimized hashtable cannot be serialized."),
HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT("hive.mapjoin.optimized.hashtable.probe.percent",
(float) 0.5, "Probing space percentage of the optimized hashtable"),
HIVEUSEHYBRIDGRACEHASHJOIN("hive.mapjoin.hybridgrace.hashtable", true, "Whether to use hybrid " +
"grace hash join as the join method for mapjoin. Tez only."),
HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ("hive.mapjoin.hybridgrace.memcheckfrequency", 1024, "For " +
"hybrid grace hash join, how often (how many rows apart) we check if memory is full. " +
"This number should be power of 2."),
HIVEHYBRIDGRACEHASHJOINMINWBSIZE("hive.mapjoin.hybridgrace.minwbsize", 524288, "For hybrid grace " +
"hash join, the minimum write buffer size used by optimized hashtable. Default is 512 KB."),
HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS("hive.mapjoin.hybridgrace.minnumpartitions", 16, "For " +
"hybrid grace hash join, the minimum number of partitions to create."),
HIVEHASHTABLEWBSIZE("hive.mapjoin.optimized.hashtable.wbsize", 8 * 1024 * 1024,
"Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to\n" +
"store data. This is one buffer size. HT may be slightly faster if this is larger, but for small\n" +
"joins unnecessary memory will be allocated and then trimmed."),
HIVEHYBRIDGRACEHASHJOINBLOOMFILTER("hive.mapjoin.hybridgrace.bloomfilter", true, "Whether to " +
"use BloomFilter in Hybrid grace hash join to minimize unnecessary spilling."),
HIVEMAPJOINFULLOUTER("hive.mapjoin.full.outer", true,
"Whether to use MapJoin for FULL OUTER JOINs."),
HIVE_TEST_MAPJOINFULLOUTER_OVERRIDE(
"hive.test.mapjoin.full.outer.override",
"none", new StringSet("none", "enable", "disable"),
"internal use only, used to override the hive.mapjoin.full.outer\n" +
"setting. Using enable will force it on and disable will force it off.\n" +
"The default none is do nothing, of course",
true),
HIVESMBJOINCACHEROWS("hive.smbjoin.cache.rows", 10000,
"How many rows with the same key value should be cached in memory per smb joined table."),
HIVEGROUPBYMAPINTERVAL("hive.groupby.mapaggr.checkinterval", 100000,
"Number of rows after which size of the grouping keys/aggregation classes is performed"),
HIVEMAPAGGRHASHMEMORY("hive.map.aggr.hash.percentmemory", (float) 0.5,
"Portion of total memory to be used by map-side group aggregation hash table"),
HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY("hive.mapjoin.followby.map.aggr.hash.percentmemory", (float) 0.3,
"Portion of total memory to be used by map-side group aggregation hash table, when this group by is followed by map join"),
HIVEMAPAGGRMEMORYTHRESHOLD("hive.map.aggr.hash.force.flush.memory.threshold", (float) 0.9,
"The max memory to be used by map-side group aggregation hash table.\n" +
"If the memory usage is higher than this number, force to flush data"),
HIVEMAPAGGRHASHMINREDUCTION("hive.map.aggr.hash.min.reduction", (float) 0.99,
"Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number. \n" +
"Set to 1 to make sure hash aggregation is never turned off."),
HIVEMAPAGGRHASHMINREDUCTIONSTATSADJUST("hive.map.aggr.hash.min.reduction.stats", true,
"Whether the value for hive.map.aggr.hash.min.reduction should be set statically using stats estimates. \n" +
"If this is enabled, the default value for hive.map.aggr.hash.min.reduction is only used as an upper-bound\n" +
"for the value set in the map-side group by operators."),
HIVEMULTIGROUPBYSINGLEREDUCER("hive.multigroupby.singlereducer", true,
"Whether to optimize multi group by query to generate single M/R job plan. If the multi group by query has \n" +
"common group by keys, it will be optimized to generate single M/R job."),
HIVE_MAP_GROUPBY_SORT("hive.map.groupby.sorted", true,
"If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform \n" +
"the group by in the mapper by using BucketizedHiveInputFormat. The only downside to this\n" +
"is that it limits the number of mappers to the number of files."),
HIVE_DEFAULT_NULLS_LAST("hive.default.nulls.last", true,
"Whether to set NULLS LAST as the default null ordering"),
HIVE_GROUPBY_POSITION_ALIAS("hive.groupby.position.alias", false,
"Whether to enable using Column Position Alias in Group By"),
HIVE_ORDERBY_POSITION_ALIAS("hive.orderby.position.alias", true,
"Whether to enable using Column Position Alias in Order By"),
@Deprecated
HIVE_GROUPBY_ORDERBY_POSITION_ALIAS("hive.groupby.orderby.position.alias", false,
"Whether to enable using Column Position Alias in Group By or Order By (deprecated).\n" +
"Use " + HIVE_ORDERBY_POSITION_ALIAS.varname + " or " + HIVE_GROUPBY_POSITION_ALIAS.varname + " instead"),
HIVE_NEW_JOB_GROUPING_SET_CARDINALITY("hive.new.job.grouping.set.cardinality", 30,
"Whether a new map-reduce job should be launched for grouping sets/rollups/cubes.\n" +
"For a query like: select a, b, c, count(1) from T group by a, b, c with rollup;\n" +
"4 rows are created per row: (a, b, c), (a, b, null), (a, null, null), (null, null, null).\n" +
"This can lead to explosion across map-reduce boundary if the cardinality of T is very high,\n" +
"and map-side aggregation does not do a very good job. \n" +
"\n" +
"This parameter decides if Hive should add an additional map-reduce job. If the grouping set\n" +
"cardinality (4 in the example above), is more than this value, a new MR job is added under the\n" +
"assumption that the original group by will reduce the data size."),
HIVE_GROUPBY_LIMIT_EXTRASTEP("hive.groupby.limit.extrastep", true, "This parameter decides if Hive should \n" +
"create new MR job for sorting final output"),
// Max file num and size used to do a single copy (after that, distcp is used)
HIVE_EXEC_COPYFILE_MAXNUMFILES("hive.exec.copyfile.maxnumfiles", 1L,
"Maximum number of files Hive uses to do sequential HDFS copies between directories." +
"Distributed copies (distcp) will be used instead for larger numbers of files so that copies can be done faster."),
HIVE_EXEC_COPYFILE_MAXSIZE("hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/,
"Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories." +
"Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."),
// for hive udtf operator
HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false,
"Whether Hive should automatically send progress information to TaskTracker \n" +
"when using UDTF's to prevent the task getting killed because of inactivity. Users should be cautious \n" +
"because this may prevent TaskTracker from killing tasks with infinite loops."),
HIVEDEFAULTFILEFORMAT("hive.default.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile", "ORC", "parquet"),
"Default file format for CREATE TABLE statement. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]"),
HIVEDEFAULTMANAGEDFILEFORMAT("hive.default.fileformat.managed", "none",
new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC", "parquet"),
"Default file format for CREATE TABLE statement applied to managed tables only. External tables will be \n" +
"created with format specified by hive.default.fileformat. Leaving this null will result in using hive.default.fileformat \n" +
"for all tables."),
HIVEQUERYRESULTFILEFORMAT("hive.query.result.fileformat", "SequenceFile", new StringSet("TextFile", "SequenceFile", "RCfile", "Llap"),
"Default file format for storing result of the query."),
HIVECHECKFILEFORMAT("hive.fileformat.check", true, "Whether to check file format or not when loading data files"),
// default serde for rcfile
HIVEDEFAULTRCFILESERDE("hive.default.rcfile.serde",
"org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe",
"The default SerDe Hive will use for the RCFile format"),
HIVEDEFAULTSERDE("hive.default.serde",
"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
"The default SerDe Hive will use for storage formats that do not specify a SerDe."),
/**
* @deprecated Use MetastoreConf.SERDES_USING_METASTORE_FOR_SCHEMA
*/
@Deprecated
SERDESUSINGMETASTOREFORSCHEMA("hive.serdes.using.metastore.for.schema",
"org.apache.hadoop.hive.ql.io.orc.OrcSerde," +
"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe," +
"org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe," +
"org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe," +
"org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe," +
"org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe," +
"org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe," +
"org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe",
"SerDes retrieving schema from metastore. This is an internal parameter."),
@Deprecated
HIVE_LEGACY_SCHEMA_FOR_ALL_SERDES("hive.legacy.schema.for.all.serdes",
false,
"A backward compatibility setting for external metastore users that do not handle \n" +
SERDESUSINGMETASTOREFORSCHEMA.varname + " correctly. This may be removed at any time."),
HIVEHISTORYFILELOC("hive.querylog.location",
"${system:java.io.tmpdir}" + File.separator + "${system:user.name}",
"Location of Hive run time structured log file"),
HIVE_LOG_INCREMENTAL_PLAN_PROGRESS("hive.querylog.enable.plan.progress", true,
"Whether to log the plan's progress every time a job's progress is checked.\n" +
"These logs are written to the location specified by hive.querylog.location"),
HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL("hive.querylog.plan.progress.interval", "60000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"The interval to wait between logging the plan's progress.\n" +
"If there is a whole number percentage change in the progress of the mappers or the reducers,\n" +
"the progress is logged regardless of this value.\n" +
"The actual interval will be the ceiling of (this value divided by the value of\n" +
"hive.exec.counters.pull.interval) multiplied by the value of hive.exec.counters.pull.interval\n" +
"I.e. if it is not divide evenly by the value of hive.exec.counters.pull.interval it will be\n" +
"logged less frequently than specified.\n" +
"This only has an effect if hive.querylog.enable.plan.progress is set to true."),
HIVESCRIPTSERDE("hive.script.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
"The default SerDe for transmitting input data to and reading output data from the user scripts. "),
HIVESCRIPTRECORDREADER("hive.script.recordreader",
"org.apache.hadoop.hive.ql.exec.TextRecordReader",
"The default record reader for reading data from the user scripts. "),
HIVESCRIPTRECORDWRITER("hive.script.recordwriter",
"org.apache.hadoop.hive.ql.exec.TextRecordWriter",
"The default record writer for writing data to the user scripts. "),
HIVESCRIPTESCAPE("hive.transform.escape.input", false,
"This adds an option to escape special chars (newlines, carriage returns and\n" +
"tabs) when they are passed to the user script. This is useful if the Hive tables\n" +
"can contain data that contains special characters."),
HIVEBINARYRECORDMAX("hive.binary.record.max.length", 1000,
"Read from a binary stream and treat each hive.binary.record.max.length bytes as a record. \n" +
"The last record before the end of stream can have less than hive.binary.record.max.length bytes"),
HIVEHADOOPMAXMEM("hive.mapred.local.mem", 0, "mapper/reducer memory in local mode"),
//small table file size
HIVESMALLTABLESFILESIZE("hive.mapjoin.smalltable.filesize", 25000000L,
"The threshold for the input file size of the small tables; if the file size is smaller \n" +
"than this threshold, it will try to convert the common join into map join"),
HIVE_SCHEMA_EVOLUTION("hive.exec.schema.evolution", true,
"Use schema evolution to convert self-describing file format's data to the schema desired by the reader."),
HIVE_ORC_FORCE_POSITIONAL_SCHEMA_EVOLUTION("orc.force.positional.evolution", true,
"Whether to use column position based schema evolution or not (as opposed to column name based evolution)"),
/** Don't use this directly - use AcidUtils! */
HIVE_TRANSACTIONAL_TABLE_SCAN("hive.transactional.table.scan", false,
"internal usage only -- do transaction (ACID or insert-only) table scan.", true),
HIVE_TRANSACTIONAL_NUM_EVENTS_IN_MEMORY("hive.transactional.events.mem", 10000000,
"Vectorized ACID readers can often load all the delete events from all the delete deltas\n"
+ "into memory to optimize for performance. To prevent out-of-memory errors, this is a rough heuristic\n"
+ "that limits the total number of delete events that can be loaded into memory at once.\n"
+ "Roughly it has been set to 10 million delete events per bucket (~160 MB).\n"),
FILTER_DELETE_EVENTS("hive.txn.filter.delete.events", true,
"If true, VectorizedOrcAcidRowBatchReader will compute min/max " +
"ROW__ID for the split and only load delete events in that range.\n"
),
HIVESAMPLERANDOMNUM("hive.sample.seednumber", 0,
"A number used to percentage sampling. By changing this number, user will change the subsets of data sampled."),
// test mode in hive mode
HIVETESTMODE("hive.test.mode", false,
"Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename.",
false),
HIVEEXIMTESTMODE("hive.exim.test.mode", false,
"The subset of test mode that only enables custom path handling for ExIm.", false),
HIVETESTMODEPREFIX("hive.test.mode.prefix", "test_",
"In test mode, specifies prefixes for the output table", false),
HIVETESTMODESAMPLEFREQ("hive.test.mode.samplefreq", 32,
"In test mode, specifies sampling frequency for table, which is not bucketed,\n" +
"For example, the following query:\n" +
" INSERT OVERWRITE TABLE dest SELECT col1 from src\n" +
"would be converted to\n" +
" INSERT OVERWRITE TABLE test_dest\n" +
" SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1))", false),
HIVETESTMODENOSAMPLE("hive.test.mode.nosamplelist", "",
"In test mode, specifies comma separated table names which would not apply sampling", false),
HIVETESTMODEDUMMYSTATAGGR("hive.test.dummystats.aggregator", "", "internal variable for test", false),
HIVETESTMODEDUMMYSTATPUB("hive.test.dummystats.publisher", "", "internal variable for test", false),
HIVETESTCURRENTTIMESTAMP("hive.test.currenttimestamp", null, "current timestamp for test", false),
HIVETESTMODEROLLBACKTXN("hive.test.rollbacktxn", false, "For testing only. Will mark every ACID transaction aborted", false),
HIVETESTMODEFAILCOMPACTION("hive.test.fail.compaction", false, "For testing only. Will cause CompactorMR to fail.", false),
HIVETESTMODEFAILHEARTBEATER("hive.test.fail.heartbeater", false, "For testing only. Will cause Heartbeater to fail.", false),
TESTMODE_BUCKET_CODEC_VERSION("hive.test.bucketcodec.version", 1,
"For testing only. Will make ACID subsystem write RecordIdentifier.bucketId in specified\n" +
"format", false),
HIVETESTMODEACIDKEYIDXSKIP("hive.test.acid.key.index.skip", false, "For testing only. OrcRecordUpdater will skip "
+ "generation of the hive.acid.key.index", false),
HIVEMERGEMAPFILES("hive.merge.mapfiles", true,
"Merge small files at the end of a map-only job"),
HIVEMERGEMAPREDFILES("hive.merge.mapredfiles", false,
"Merge small files at the end of a map-reduce job"),
HIVEMERGETEZFILES("hive.merge.tezfiles", false, "Merge small files at the end of a Tez DAG"),
HIVEMERGESPARKFILES("hive.merge.sparkfiles", false, "Merge small files at the end of a Spark DAG Transformation"),
HIVEMERGEMAPFILESSIZE("hive.merge.size.per.task", (long) (256 * 1000 * 1000),
"Size of merged files at the end of the job"),
HIVEMERGEMAPFILESAVGSIZE("hive.merge.smallfiles.avgsize", (long) (16 * 1000 * 1000),
"When the average output file size of a job is less than this number, Hive will start an additional \n" +
"map-reduce job to merge the output files into bigger files. This is only done for map-only jobs \n" +
"if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true."),
HIVEMERGERCFILEBLOCKLEVEL("hive.merge.rcfile.block.level", true, ""),
HIVEMERGEORCFILESTRIPELEVEL("hive.merge.orcfile.stripe.level", true,
"When hive.merge.mapfiles, hive.merge.mapredfiles or hive.merge.tezfiles is enabled\n" +
"while writing a table with ORC file format, enabling this config will do stripe-level\n" +
"fast merge for small ORC files. Note that enabling this config will not honor the\n" +
"padding tolerance config (hive.exec.orc.block.padding.tolerance)."),
HIVE_ORC_CODEC_POOL("hive.use.orc.codec.pool", false,
"Whether to use codec pool in ORC. Disable if there are bugs with codec reuse."),
HIVEUSEEXPLICITRCFILEHEADER("hive.exec.rcfile.use.explicit.header", true,
"If this is set the header for RCFiles will simply be RCF. If this is not\n" +
"set the header will be that borrowed from sequence files, e.g. SEQ- followed\n" +
"by the input and output RCFile formats."),
HIVEUSERCFILESYNCCACHE("hive.exec.rcfile.use.sync.cache", true, ""),
HIVE_RCFILE_RECORD_INTERVAL("hive.io.rcfile.record.interval", Integer.MAX_VALUE, ""),
HIVE_RCFILE_COLUMN_NUMBER_CONF("hive.io.rcfile.column.number.conf", 0, ""),
HIVE_RCFILE_TOLERATE_CORRUPTIONS("hive.io.rcfile.tolerate.corruptions", false, ""),
HIVE_RCFILE_RECORD_BUFFER_SIZE("hive.io.rcfile.record.buffer.size", 4194304, ""), // 4M
PARQUET_MEMORY_POOL_RATIO("parquet.memory.pool.ratio", 0.5f,
"Maximum fraction of heap that can be used by Parquet file writers in one task.\n" +
"It is for avoiding OutOfMemory error in tasks. Work with Parquet 1.6.0 and above.\n" +
"This config parameter is defined in Parquet, so that it does not start with 'hive.'."),
HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION("hive.parquet.timestamp.skip.conversion", false,
"Current Hive implementation of parquet stores timestamps to UTC, this flag allows skipping of the conversion" +
"on reading parquet files from other tools"),
HIVE_AVRO_TIMESTAMP_SKIP_CONVERSION("hive.avro.timestamp.skip.conversion", false,
"Some older Hive implementations (pre-3.1) wrote Avro timestamps in a UTC-normalized" +
"manner, while from version 3.1 until now Hive wrote time zone agnostic timestamps. " +
"Setting this flag to true will treat legacy timestamps as time zone agnostic. Setting " +
"it to false will treat legacy timestamps as UTC-normalized. This flag will not affect " +
"timestamps written after this change."),
HIVE_INT_TIMESTAMP_CONVERSION_IN_SECONDS("hive.int.timestamp.conversion.in.seconds", false,
"Boolean/tinyint/smallint/int/bigint value is interpreted as milliseconds during the timestamp conversion.\n" +
"Set this flag to true to interpret the value as seconds to be consistent with float/double." ),
HIVE_ORC_BASE_DELTA_RATIO("hive.exec.orc.base.delta.ratio", 8, "The ratio of base writer and\n" +
"delta writer in terms of STRIPE_SIZE and BUFFER_SIZE."),
HIVE_ORC_DELTA_STREAMING_OPTIMIZATIONS_ENABLED("hive.exec.orc.delta.streaming.optimizations.enabled", false,
"Whether to enable streaming optimizations for ORC delta files. This will disable ORC's internal indexes,\n" +
"disable compression, enable fast encoding and disable dictionary encoding."),
HIVE_ORC_SPLIT_STRATEGY("hive.exec.orc.split.strategy", "HYBRID", new StringSet("HYBRID", "BI", "ETL"),
"This is not a user level config. BI strategy is used when the requirement is to spend less time in split generation" +
" as opposed to query execution (split generation does not read or cache file footers)." +
" ETL strategy is used when spending little more time in split generation is acceptable" +
" (split generation reads and caches file footers). HYBRID chooses between the above strategies" +
" based on heuristics."),
HIVE_ORC_BLOB_STORAGE_SPLIT_SIZE("hive.exec.orc.blob.storage.split.size", 128L * 1024 * 1024,
"When blob storage is used, BI split strategy does not have block locations for splitting orc files.\n" +
"In such cases, split generation will use this config to split orc file"),
HIVE_ORC_WRITER_LLAP_MEMORY_MANAGER_ENABLED("hive.exec.orc.writer.llap.memory.manager.enabled", true,
"Whether orc writers should use llap-aware memory manager. LLAP aware memory manager will use memory\n" +
"per executor instead of entire heap memory when concurrent orc writers are involved. This will let\n" +
"task fragments to use memory within its limit (memory per executor) when performing ETL in LLAP."),
// hive streaming ingest settings
HIVE_STREAMING_AUTO_FLUSH_ENABLED("hive.streaming.auto.flush.enabled", true, "Whether to enable memory \n" +
"monitoring and automatic flushing of open record updaters during streaming ingest. This is an expert level \n" +
"setting and disabling this may have severe performance impact under memory pressure."),
HIVE_HEAP_MEMORY_MONITOR_USAGE_THRESHOLD("hive.heap.memory.monitor.usage.threshold", 0.7f,
"Hive streaming does automatic memory management across all open record writers. This threshold will let the \n" +
"memory monitor take an action (flush open files) when heap memory usage exceeded this threshold."),
HIVE_STREAMING_AUTO_FLUSH_CHECK_INTERVAL_SIZE("hive.streaming.auto.flush.check.interval.size", "100Mb",
new SizeValidator(),
"Hive streaming ingest has auto flush mechanism to flush all open record updaters under memory pressure.\n" +
"When memory usage exceed hive.heap.memory.monitor.default.usage.threshold, the auto-flush mechanism will \n" +
"wait until this size (default 100Mb) of records are ingested before triggering flush."),
HIVE_CLASSLOADER_SHADE_PREFIX("hive.classloader.shade.prefix", "", "During reflective instantiation of a class\n" +
"(input, output formats, serde etc.), when classloader throws ClassNotFoundException, as a fallback this\n" +
"shade prefix will be used before class reference and retried."),
HIVE_ORC_MS_FOOTER_CACHE_ENABLED("hive.orc.splits.ms.footer.cache.enabled", false,
"Whether to enable using file metadata cache in metastore for ORC file footers."),
HIVE_ORC_MS_FOOTER_CACHE_PPD("hive.orc.splits.ms.footer.cache.ppd.enabled", true,
"Whether to enable file footer cache PPD (hive.orc.splits.ms.footer.cache.enabled\n" +
"must also be set to true for this to work)."),
HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS("hive.orc.splits.include.file.footer", false,
"If turned on splits generated by orc will include metadata about the stripes in the file. This\n" +
"data is read remotely (from the client or HS2 machine) and sent to all the tasks."),
HIVE_ORC_SPLIT_DIRECTORY_BATCH_MS("hive.orc.splits.directory.batch.ms", 0,
"How long, in ms, to wait to batch input directories for processing during ORC split\n" +
"generation. 0 means process directories individually. This can increase the number of\n" +
"metastore calls if metastore metadata cache is used."),
HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS("hive.orc.splits.include.fileid", true,
"Include file ID in splits on file systems that support it."),
HIVE_ORC_ALLOW_SYNTHETIC_FILE_ID_IN_SPLITS("hive.orc.splits.allow.synthetic.fileid", true,
"Allow synthetic file ID in splits on file systems that don't have a native one."),
HIVE_ORC_CACHE_STRIPE_DETAILS_MEMORY_SIZE("hive.orc.cache.stripe.details.mem.size", "256Mb",
new SizeValidator(), "Maximum size of orc splits cached in the client."),
HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS("hive.orc.compute.splits.num.threads", 10,
"How many threads orc should use to create splits in parallel."),
HIVE_ORC_CACHE_USE_SOFT_REFERENCES("hive.orc.cache.use.soft.references", false,
"By default, the cache that ORC input format uses to store orc file footer use hard\n" +
"references for the cached object. Setting this to true can help avoid out of memory\n" +
"issues under memory pressure (in some cases) at the cost of slight unpredictability in\n" +
"overall query performance."),
HIVE_IO_SARG_CACHE_MAX_WEIGHT_MB("hive.io.sarg.cache.max.weight.mb", 10,
"The max weight allowed for the SearchArgument Cache. By default, the cache allows a max-weight of 10MB, " +
"after which entries will be evicted."),
HIVE_LAZYSIMPLE_EXTENDED_BOOLEAN_LITERAL("hive.lazysimple.extended_boolean_literal", false,
"LazySimpleSerde uses this property to determine if it treats 'T', 't', 'F', 'f',\n" +
"'1', and '0' as extended, legal boolean literal, in addition to 'TRUE' and 'FALSE'.\n" +
"The default is false, which means only 'TRUE' and 'FALSE' are treated as legal\n" +
"boolean literal."),
HIVESKEWJOIN("hive.optimize.skewjoin", false,
"Whether to enable skew join optimization. \n" +
"The algorithm is as follows: At runtime, detect the keys with a large skew. Instead of\n" +
"processing those keys, store them temporarily in an HDFS directory. In a follow-up map-reduce\n" +
"job, process those skewed keys. The same key need not be skewed for all the tables, and so,\n" +
"the follow-up map-reduce job (for the skewed keys) would be much faster, since it would be a\n" +
"map-join."),
HIVEDYNAMICPARTITIONHASHJOIN("hive.optimize.dynamic.partition.hashjoin", false,
"Whether to enable dynamically partitioned hash join optimization. \n" +
"This setting is also dependent on enabling hive.auto.convert.join"),
HIVECONVERTJOIN("hive.auto.convert.join", true,
"Whether Hive enables the optimization about converting common join into mapjoin based on the input file size"),
HIVECONVERTJOINNOCONDITIONALTASK("hive.auto.convert.join.noconditionaltask", true,
"Whether Hive enables the optimization about converting common join into mapjoin based on the input file size. \n" +
"If this parameter is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the\n" +
"specified size, the join is directly converted to a mapjoin (there is no conditional task)."),
HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD("hive.auto.convert.join.noconditionaltask.size",
10000000L,
"If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" +
"However, if it is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, \n" +
"the join is directly converted to a mapjoin(there is no conditional task). The default is 10MB"),
HIVECONVERTJOINUSENONSTAGED("hive.auto.convert.join.use.nonstaged", false,
"For conditional joins, if input stream from a small alias can be directly applied to join operator without \n" +
"filtering or projection, the alias need not to be pre-staged in distributed cache via mapred local task.\n" +
"Currently, this is not working with vectorization or tez execution engine."),
HIVESKEWJOINKEY("hive.skewjoin.key", 100000,
"Determine if we get a skew key in join. If we see more than the specified number of rows with the same key in join operator,\n" +
"we think the key as a skew join key. "),
HIVESKEWJOINMAPJOINNUMMAPTASK("hive.skewjoin.mapjoin.map.tasks", 10000,
"Determine the number of map task used in the follow up map join job for a skew join.\n" +
"It should be used together with hive.skewjoin.mapjoin.min.split to perform a fine grained control."),
HIVESKEWJOINMAPJOINMINSPLIT("hive.skewjoin.mapjoin.min.split", 33554432L,
"Determine the number of map task at most used in the follow up map join job for a skew join by specifying \n" +
"the minimum split size. It should be used together with hive.skewjoin.mapjoin.map.tasks to perform a fine grained control."),
HIVESENDHEARTBEAT("hive.heartbeat.interval", 1000,
"Send a heartbeat after this interval - used by mapjoin and filter operators"),
HIVELIMITMAXROWSIZE("hive.limit.row.max.size", 100000L,
"When trying a smaller subset of data for simple LIMIT, how much size we need to guarantee each row to have at least."),
HIVELIMITOPTLIMITFILE("hive.limit.optimize.limit.file", 10,
"When trying a smaller subset of data for simple LIMIT, maximum number of files we can sample."),
HIVELIMITOPTENABLE("hive.limit.optimize.enable", false,
"Whether to enable to optimization to trying a smaller subset of data for simple LIMIT first."),
HIVELIMITOPTMAXFETCH("hive.limit.optimize.fetch.max", 50000,
"Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query. \n" +
"Insert queries are not restricted by this limit."),
HIVELIMITPUSHDOWNMEMORYUSAGE("hive.limit.pushdown.memory.usage", 0.1f, new RatioValidator(),
"The fraction of available memory to be used for buffering rows in Reducesink operator for limit pushdown optimization."),
HIVECONVERTJOINMAXENTRIESHASHTABLE("hive.auto.convert.join.hashtable.max.entries", 21000000L,
"If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" +
"However, if it is on, and the predicted number of entries in hashtable for a given join \n" +
"input is larger than this number, the join will not be converted to a mapjoin. \n" +
"The value \"-1\" means no limit."),
XPRODSMALLTABLEROWSTHRESHOLD("hive.xprod.mapjoin.small.table.rows", 1,"Maximum number of rows on build side"
+ " of map join before it switches over to cross product edge"),
HIVECONVERTJOINMAXSHUFFLESIZE("hive.auto.convert.join.shuffle.max.size", 10000000000L,
"If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" +
"However, if it is on, and the predicted size of the larger input for a given join is greater \n" +
"than this number, the join will not be converted to a dynamically partitioned hash join. \n" +
"The value \"-1\" means no limit."),
HIVEHASHTABLEKEYCOUNTADJUSTMENT("hive.hashtable.key.count.adjustment", 0.99f,
"Adjustment to mapjoin hashtable size derived from table and column statistics; the estimate" +
" of the number of keys is divided by this value. If the value is 0, statistics are not used" +
"and hive.hashtable.initialCapacity is used instead."),
HIVEHASHTABLETHRESHOLD("hive.hashtable.initialCapacity", 100000, "Initial capacity of " +
"mapjoin hashtable if statistics are absent, or if hive.hashtable.key.count.adjustment is set to 0"),
HIVEHASHTABLELOADFACTOR("hive.hashtable.loadfactor", (float) 0.75, ""),
HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE("hive.mapjoin.followby.gby.localtask.max.memory.usage", (float) 0.55,
"This number means how much memory the local task can take to hold the key/value into an in-memory hash table \n" +
"when this map join is followed by a group by. If the local task's memory usage is more than this number, \n" +
"the local task will abort by itself. It means the data of the small table is too large " +
"to be held in memory. Does not apply to Hive-on-Spark (replaced by " +
"hive.mapjoin.max.gc.time.percentage)"),
HIVEHASHTABLEMAXMEMORYUSAGE("hive.mapjoin.localtask.max.memory.usage", (float) 0.90,
"This number means how much memory the local task can take to hold the key/value into an in-memory hash table. \n" +
"If the local task's memory usage is more than this number, the local task will abort by itself. \n" +
"It means the data of the small table is too large to be held in memory. Does not apply to " +
"Hive-on-Spark (replaced by hive.mapjoin.max.gc.time.percentage)"),
HIVEHASHTABLESCALE("hive.mapjoin.check.memory.rows", (long)100000,
"The number means after how many rows processed it needs to check the memory usage"),
HIVEHASHTABLEMAXGCTIMEPERCENTAGE("hive.mapjoin.max.gc.time.percentage", (float) 0.60,
new RangeValidator(0.0f, 1.0f), "This number means how much time (what percentage, " +
"0..1, of wallclock time) the JVM is allowed to spend in garbage collection when running " +
"the local task. If GC time percentage exceeds this number, the local task will abort by " +
"itself. Applies to Hive-on-Spark only"),
HIVEDEBUGLOCALTASK("hive.debug.localtask",false, ""),
HIVEINPUTFORMAT("hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat",
"The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat."),
HIVETEZINPUTFORMAT("hive.tez.input.format", "org.apache.hadoop.hive.ql.io.HiveInputFormat",
"The default input format for tez. Tez groups splits in the AM."),
HIVETEZCONTAINERSIZE("hive.tez.container.size", -1,
"By default Tez will spawn containers of the size of a mapper. This can be used to overwrite."),
HIVETEZCPUVCORES("hive.tez.cpu.vcores", -1,
"By default Tez will ask for however many cpus map-reduce is configured to use per container.\n" +
"This can be used to overwrite."),
HIVETEZJAVAOPTS("hive.tez.java.opts", null,
"By default Tez will use the Java options from map tasks. This can be used to overwrite."),
HIVETEZLOGLEVEL("hive.tez.log.level", "INFO",
"The log level to use for tasks executing as part of the DAG.\n" +
"Used only if hive.tez.java.opts is used to configure Java options."),
HIVETEZHS2USERACCESS("hive.tez.hs2.user.access", true,
"Whether to grant access to the hs2/hive user for queries"),
HIVEQUERYNAME ("hive.query.name", null,
"This named is used by Tez to set the dag name. This name in turn will appear on \n" +
"the Tez UI representing the work that was done. Used by Spark to set the query name, will show up in the\n" +
"Spark UI."),
SYSLOG_INPUT_FORMAT_FILE_PRUNING("hive.syslog.input.format.file.pruning", true,
"Whether syslog input format should prune files based on timestamp (ts) column in sys.logs table."),
SYSLOG_INPUT_FORMAT_FILE_TIME_SLICE("hive.syslog.input.format.file.time.slice", "300s",
new TimeValidator(TimeUnit.SECONDS, 0L, false, Long.MAX_VALUE, false),
"Files stored in sys.logs typically are chunked with time interval. For example: depending on the\n" +
"logging library used this represents the flush interval/time slice. \n" +
"If time slice/flust interval is set to 5 minutes, then the expectation is that the filename \n" +
"2019-01-02-10-00_0.log represent time range from 10:00 to 10:05.\n" +
"This time slice should align with the flush interval of the logging library else file pruning may\n" +
"incorrectly prune files leading to incorrect results from sys.logs table."),
HIVEOPTIMIZEBUCKETINGSORTING("hive.optimize.bucketingsorting", true,
"Don't create a reducer for enforcing \n" +
"bucketing/sorting for queries of the form: \n" +
"insert overwrite table T2 select * from T1;\n" +
"where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets."),
HIVEPARTITIONER("hive.mapred.partitioner", "org.apache.hadoop.hive.ql.io.DefaultHivePartitioner", ""),
HIVEENFORCESORTMERGEBUCKETMAPJOIN("hive.enforce.sortmergebucketmapjoin", false,
"If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not ?"),
HIVEENFORCEBUCKETMAPJOIN("hive.enforce.bucketmapjoin", false,
"If the user asked for bucketed map-side join, and it cannot be performed, \n" +
"should the query fail or not ? For example, if the buckets in the tables being joined are\n" +
"not a multiple of each other, bucketed map-side join cannot be performed, and the\n" +
"query will fail if hive.enforce.bucketmapjoin is set to true."),
HIVE_ENFORCE_NOT_NULL_CONSTRAINT("hive.constraint.notnull.enforce", true,
"Should \"IS NOT NULL \" constraint be enforced?"),
HIVE_AUTO_SORTMERGE_JOIN("hive.auto.convert.sortmerge.join", true,
"Will the join be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join."),
HIVE_AUTO_SORTMERGE_JOIN_REDUCE("hive.auto.convert.sortmerge.join.reduce.side", true,
"Whether hive.auto.convert.sortmerge.join (if enabled) should be applied to reduce side."),
HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR(
"hive.auto.convert.sortmerge.join.bigtable.selection.policy",
"org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ",
"The policy to choose the big table for automatic conversion to sort-merge join. \n" +
"By default, the table with the largest partitions is assigned the big table. All policies are:\n" +
". based on position of the table - the leftmost table is selected\n" +
"org.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSMJ.\n" +
". based on total size (all the partitions selected in the query) of the table \n" +
"org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ.\n" +
". based on average size (all the partitions selected in the query) of the table \n" +
"org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.\n" +
"New policies can be added in future."),
HIVE_AUTO_SORTMERGE_JOIN_TOMAPJOIN(
"hive.auto.convert.sortmerge.join.to.mapjoin", false,
"If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join, \n" +
"this parameter decides whether each table should be tried as a big table, and effectively a map-join should be\n" +
"tried. That would create a conditional task with n+1 children for a n-way join (1 child for each table as the\n" +
"big table), and the backup task will be the sort-merge join. In some cases, a map-join would be faster than a\n" +
"sort-merge join, if there is no advantage of having the output bucketed and sorted. For example, if a very big sorted\n" +
"and bucketed table with few files (say 10 files) are being joined with a very small sorter and bucketed table\n" +
"with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster\n" +
"if the complete small table can fit in memory, and a map-join can be performed."),
HIVESCRIPTOPERATORTRUST("hive.exec.script.trust", false, ""),
HIVEROWOFFSET("hive.exec.rowoffset", false,
"Whether to provide the row offset virtual column"),
// Optimizer
HIVEOPTINDEXFILTER("hive.optimize.index.filter", true, "Whether to enable automatic use of indexes"),
HIVEOPTPPD("hive.optimize.ppd", true,
"Whether to enable predicate pushdown"),
HIVEOPTPPD_WINDOWING("hive.optimize.ppd.windowing", true,
"Whether to enable predicate pushdown through windowing"),
HIVEPPDRECOGNIZETRANSITIVITY("hive.ppd.recognizetransivity", true,
"Whether to transitively replicate predicate filters over equijoin conditions."),
HIVEPPDREMOVEDUPLICATEFILTERS("hive.ppd.remove.duplicatefilters", true,
"During query optimization, filters may be pushed down in the operator tree. \n" +
"If this config is true only pushed down filters remain in the operator tree, \n" +
"and the original filter is removed. If this config is false, the original filter \n" +
"is also left in the operator tree at the original place."),
HIVEPOINTLOOKUPOPTIMIZER("hive.optimize.point.lookup", true,
"Whether to transform OR clauses in Filter operators into IN clauses"),
HIVEPOINTLOOKUPOPTIMIZERMIN("hive.optimize.point.lookup.min", 2,
"Minimum number of OR clauses needed to transform into IN clauses"),
HIVECOUNTDISTINCTOPTIMIZER("hive.optimize.countdistinct", true,
"Whether to transform count distinct into two stages"),
HIVEPARTITIONCOLUMNSEPARATOR("hive.optimize.partition.columns.separate", true,
"Extract partition columns from IN clauses"),
// Constant propagation optimizer
HIVEOPTCONSTANTPROPAGATION("hive.optimize.constant.propagation", true, "Whether to enable constant propagation optimizer"),
HIVEIDENTITYPROJECTREMOVER("hive.optimize.remove.identity.project", true, "Removes identity project from operator tree"),
HIVEMETADATAONLYQUERIES("hive.optimize.metadataonly", false,
"Whether to eliminate scans of the tables from which no columns are selected. Note\n" +
"that, when selecting from empty tables with data files, this can produce incorrect\n" +
"results, so it's disabled by default. It works correctly for normal tables."),
HIVENULLSCANOPTIMIZE("hive.optimize.null.scan", true, "Don't scan relations that are guaranteed to not generate any rows"),
HIVEOPTPPD_STORAGE("hive.optimize.ppd.storage", true,
"Whether to push predicates down to storage handlers"),
HIVEOPTGROUPBY("hive.optimize.groupby", true,
"Whether to enable the bucketed group by from bucketed partitions/tables."),
HIVEOPTBUCKETMAPJOIN("hive.optimize.bucketmapjoin", false,
"Whether to try bucket mapjoin"),
HIVEOPTSORTMERGEBUCKETMAPJOIN("hive.optimize.bucketmapjoin.sortedmerge", false,
"Whether to try sorted bucket merge map join"),
HIVEOPTREDUCEDEDUPLICATION("hive.optimize.reducededuplication", true,
"Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again. \n" +
"This should always be set to true. Since it is a new feature, it has been made configurable."),
HIVEOPTREDUCEDEDUPLICATIONMINREDUCER("hive.optimize.reducededuplication.min.reducer", 4,
"Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS. \n" +
"That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.\n" +
"The optimization will be automatically disabled if number of reducers would be less than specified value."),
HIVEOPTJOINREDUCEDEDUPLICATION("hive.optimize.joinreducededuplication", true,
"Remove extra shuffle/sorting operations after join algorithm selection has been executed. \n" +
"Currently it only works with Apache Tez. This should always be set to true. \n" +
"Since it is a new feature, it has been made configurable."),
HIVEOPTSORTDYNAMICPARTITION("hive.optimize.sort.dynamic.partition", false,
"When enabled dynamic partitioning column will be globally sorted.\n" +
"This way we can keep only one record writer open for each partition value\n" +
"in the reducer thereby reducing the memory pressure on reducers."),
HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD("hive.optimize.sort.dynamic.partition.threshold", 0,
"When enabled dynamic partitioning column will be globally sorted.\n" +
"This way we can keep only one record writer open for each partition value\n" +
"in the reducer thereby reducing the memory pressure on reducers.\n" +
"This config has following possible values: \n" +
"\t-1 - This completely disables the optimization. \n" +
"\t1 - This always enable the optimization. \n" +
"\t0 - This makes the optimization a cost based decision. \n" +
"Setting it to any other positive integer will make Hive use this as threshold for number of writers."),
HIVESAMPLINGFORORDERBY("hive.optimize.sampling.orderby", false, "Uses sampling on order-by clause for parallel execution."),
HIVESAMPLINGNUMBERFORORDERBY("hive.optimize.sampling.orderby.number", 1000, "Total number of samples to be obtained."),
HIVESAMPLINGPERCENTFORORDERBY("hive.optimize.sampling.orderby.percent", 0.1f, new RatioValidator(),
"Probability with which a row will be chosen."),
HIVE_REMOVE_ORDERBY_IN_SUBQUERY("hive.remove.orderby.in.subquery", true,
"If set to true, order/sort by without limit in sub queries will be removed."),
HIVEOPTIMIZEDISTINCTREWRITE("hive.optimize.distinct.rewrite", true, "When applicable this "
+ "optimization rewrites distinct aggregates from a single stage to multi-stage "
+ "aggregation. This may not be optimal in all cases. Ideally, whether to trigger it or "
+ "not should be cost based decision. Until Hive formalizes cost model for this, this is config driven."),
// whether to optimize union followed by select followed by filesink
// It creates sub-directories in the final output, so should not be turned on in systems
// where MAPREDUCE-1501 is not present
HIVE_OPTIMIZE_UNION_REMOVE("hive.optimize.union.remove", false,
"Whether to remove the union and push the operators between union and the filesink above union. \n" +
"This avoids an extra scan of the output by union. This is independently useful for union\n" +
"queries, and specially useful when hive.optimize.skewjoin.compiletime is set to true, since an\n" +
"extra union is inserted.\n" +
"\n" +
"The merge is triggered if either of hive.merge.mapfiles or hive.merge.mapredfiles is set to true.\n" +
"If the user has set hive.merge.mapfiles to true and hive.merge.mapredfiles to false, the idea was the\n" +
"number of reducers are few, so the number of files anyway are small. However, with this optimization,\n" +
"we are increasing the number of files possibly by a big margin. So, we merge aggressively."),
HIVEOPTCORRELATION("hive.optimize.correlation", false, "Exploit intra-query correlations."),
HIVE_OPTIMIZE_LIMIT_TRANSPOSE("hive.optimize.limittranspose", false,
"Whether to push a limit through left/right outer join or union. If the value is true and the size of the outer\n" +
"input is reduced enough (as specified in hive.optimize.limittranspose.reduction), the limit is pushed\n" +
"to the outer input or union; to remain semantically correct, the limit is kept on top of the join or the union too."),
HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_PERCENTAGE("hive.optimize.limittranspose.reductionpercentage", 1.0f,
"When hive.optimize.limittranspose is true, this variable specifies the minimal reduction of the\n" +
"size of the outer input of the join or input of the union that we should get in order to apply the rule."),
HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_TUPLES("hive.optimize.limittranspose.reductiontuples", (long) 0,
"When hive.optimize.limittranspose is true, this variable specifies the minimal reduction in the\n" +
"number of tuples of the outer input of the join or the input of the union that you should get in order to apply the rule."),
HIVE_OPTIMIZE_CONSTRAINTS_JOIN("hive.optimize.constraints.join", true, "Whether to use referential constraints\n" +
"to optimize (remove or transform) join operators"),
HIVE_OPTIMIZE_REDUCE_WITH_STATS("hive.optimize.filter.stats.reduction", false, "Whether to simplify comparison\n" +
"expressions in filter operators using column stats"),
HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME("hive.optimize.skewjoin.compiletime", false,
"Whether to create a separate plan for skewed keys for the tables in the join.\n" +
"This is based on the skewed keys stored in the metadata. At compile time, the plan is broken\n" +
"into different joins: one for the skewed keys, and the other for the remaining keys. And then,\n" +
"a union is performed for the 2 joins generated above. So unless the same skewed key is present\n" +
"in both the joined tables, the join for the skewed key will be performed as a map-side join.\n" +
"\n" +
"The main difference between this parameter and hive.optimize.skewjoin is that this parameter\n" +
"uses the skew information stored in the metastore to optimize the plan at compile time itself.\n" +
"If there is no skew information in the metadata, this parameter will not have any affect.\n" +
"Both hive.optimize.skewjoin.compiletime and hive.optimize.skewjoin should be set to true.\n" +
"Ideally, hive.optimize.skewjoin should be renamed as hive.optimize.skewjoin.runtime, but not doing\n" +
"so for backward compatibility.\n" +
"\n" +
"If the skew information is correctly stored in the metadata, hive.optimize.skewjoin.compiletime\n" +
"would change the query plan to take care of it, and hive.optimize.skewjoin will be a no-op."),
HIVE_OPTIMIZE_TOPNKEY("hive.optimize.topnkey", true, "Whether to enable top n key optimizer."),
HIVE_SHARED_WORK_OPTIMIZATION("hive.optimize.shared.work", true,
"Whether to enable shared work optimizer. The optimizer finds scan operator over the same table\n" +
"and follow-up operators in the query plan and merges them if they meet some preconditions. Tez only."),
HIVE_SHARED_WORK_EXTENDED_OPTIMIZATION("hive.optimize.shared.work.extended", true,
"Whether to enable shared work extended optimizer. The optimizer tries to merge equal operators\n" +
"after a work boundary after shared work optimizer has been executed. Requires hive.optimize.shared.work\n" +
"to be set to true. Tez only."),
HIVE_SHARED_WORK_SEMIJOIN_OPTIMIZATION("hive.optimize.shared.work.semijoin", true,
"Whether to enable shared work extended optimizer for semijoins. The optimizer tries to merge\n" +
"scan operators if one of them reads the full table, even if the other one is the target for\n" +
"one or more semijoin edges. Tez only."),
HIVE_SHARED_WORK_REUSE_MAPJOIN_CACHE("hive.optimize.shared.work.mapjoin.cache.reuse", true,
"When shared work optimizer is enabled, whether we should reuse the cache for the broadcast side\n" +
"of mapjoin operators that share same broadcast input. Requires hive.optimize.shared.work\n" +
"to be set to true. Tez only."),
HIVE_COMBINE_EQUIVALENT_WORK_OPTIMIZATION("hive.combine.equivalent.work.optimization", true, "Whether to " +
"combine equivalent work objects during physical optimization.\n This optimization looks for equivalent " +
"work objects and combines them if they meet certain preconditions. Spark only."),
HIVE_REMOVE_SQ_COUNT_CHECK("hive.optimize.remove.sq_count_check", true,
"Whether to remove an extra join with sq_count_check for scalar subqueries "
+ "with constant group by keys."),
HIVE_OPTIMIZE_TABLE_PROPERTIES_FROM_SERDE("hive.optimize.update.table.properties.from.serde", false,
"Whether to update table-properties by initializing tables' SerDe instances during logical-optimization. \n" +
"By doing so, certain SerDe classes (like AvroSerDe) can pre-calculate table-specific information, and \n" +
"store it in table-properties, to be used later in the SerDe, while running the job."),
HIVE_OPTIMIZE_TABLE_PROPERTIES_FROM_SERDE_LIST("hive.optimize.update.table.properties.from.serde.list",
"org.apache.hadoop.hive.serde2.avro.AvroSerDe",
"The comma-separated list of SerDe classes that are considered when enhancing table-properties \n" +
"during logical optimization."),
// CTE
HIVE_CTE_MATERIALIZE_THRESHOLD("hive.optimize.cte.materialize.threshold", -1,
"If the number of references to a CTE clause exceeds this threshold, Hive will materialize it\n" +
"before executing the main query block. -1 will disable this feature."),
// Statistics
HIVE_STATS_ESTIMATE_STATS("hive.stats.estimate", true,
"Estimate statistics in absence of statistics."),
HIVE_STATS_NDV_ESTIMATE_PERC("hive.stats.ndv.estimate.percent", (float)20,
"This many percentage of rows will be estimated as count distinct in absence of statistics."),
HIVE_STATS_NUM_NULLS_ESTIMATE_PERC("hive.stats.num.nulls.estimate.percent", (float)5,
"This many percentage of rows will be estimated as number of nulls in absence of statistics."),
HIVESTATSAUTOGATHER("hive.stats.autogather", true,
"A flag to gather statistics (only basic) automatically during the INSERT OVERWRITE command."),
HIVESTATSCOLAUTOGATHER("hive.stats.column.autogather", true,
"A flag to gather column statistics automatically."),
HIVESTATSDBCLASS("hive.stats.dbclass", "fs", new PatternSet("custom", "fs"),
"The storage that stores temporary Hive statistics. In filesystem based statistics collection ('fs'), \n" +
"each task writes statistics it has collected in a file on the filesystem, which will be aggregated \n" +
"after the job has finished. Supported values are fs (filesystem) and custom as defined in StatsSetupConst.java."), // StatsSetupConst.StatDB
/**
* @deprecated Use MetastoreConf.STATS_DEFAULT_PUBLISHER
*/
@Deprecated
HIVE_STATS_DEFAULT_PUBLISHER("hive.stats.default.publisher", "",
"The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type."),
/**
* @deprecated Use MetastoreConf.STATS_DEFAULT_AGGRETATOR
*/
@Deprecated
HIVE_STATS_DEFAULT_AGGREGATOR("hive.stats.default.aggregator", "",
"The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type."),
CLIENT_STATS_COUNTERS("hive.client.stats.counters", "",
"Subset of counters that should be of interest for hive.client.stats.publishers (when one wants to limit their publishing). \n" +
"Non-display names should be used"),
HIVE_STATS_RELIABLE("hive.stats.reliable", false,
"Whether queries will fail because stats cannot be collected completely accurately. \n" +
"If this is set to true, reading/writing from/into a partition may fail because the stats\n" +
"could not be computed accurately."),
HIVE_STATS_COLLECT_PART_LEVEL_STATS("hive.analyze.stmt.collect.partlevel.stats", true,
"analyze table T compute statistics for columns. Queries like these should compute partition"
+ "level stats for partitioned table even when no part spec is specified."),
HIVE_STATS_GATHER_NUM_THREADS("hive.stats.gather.num.threads", 10,
"Number of threads used by noscan analyze command for partitioned tables.\n" +
"This is applicable only for file formats that implement StatsProvidingRecordReader (like ORC)."),
// Collect table access keys information for operators that can benefit from bucketing
HIVE_STATS_COLLECT_TABLEKEYS("hive.stats.collect.tablekeys", false,
"Whether join and group by keys on tables are derived and maintained in the QueryPlan.\n" +
"This is useful to identify how tables are accessed and to determine if they should be bucketed."),
// Collect column access information
HIVE_STATS_COLLECT_SCANCOLS("hive.stats.collect.scancols", false,
"Whether column accesses are tracked in the QueryPlan.\n" +
"This is useful to identify how tables are accessed and to determine if there are wasted columns that can be trimmed."),
HIVE_STATS_NDV_ALGO("hive.stats.ndv.algo", "hll", new PatternSet("hll", "fm"),
"hll and fm stand for HyperLogLog and FM-sketch, respectively for computing ndv."),
/**
* @deprecated Use MetastoreConf.STATS_FETCH_BITVECTOR
*/
@Deprecated
HIVE_STATS_FETCH_BITVECTOR("hive.stats.fetch.bitvector", false,
"Whether we fetch bitvector when we compute ndv. Users can turn it off if they want to use old schema"),
// standard error allowed for ndv estimates for FM-sketch. A lower value indicates higher accuracy and a
// higher compute cost.
HIVE_STATS_NDV_ERROR("hive.stats.ndv.error", (float)20.0,
"Standard error expressed in percentage. Provides a tradeoff between accuracy and compute cost. \n" +
"A lower value for error indicates higher accuracy and a higher compute cost."),
/**
* @deprecated Use MetastoreConf.STATS_NDV_TUNER
*/
@Deprecated
HIVE_METASTORE_STATS_NDV_TUNER("hive.metastore.stats.ndv.tuner", (float)0.0,
"Provides a tunable parameter between the lower bound and the higher bound of ndv for aggregate ndv across all the partitions. \n" +
"The lower bound is equal to the maximum of ndv of all the partitions. The higher bound is equal to the sum of ndv of all the partitions.\n" +
"Its value should be between 0.0 (i.e., choose lower bound) and 1.0 (i.e., choose higher bound)"),
/**
* @deprecated Use MetastoreConf.STATS_NDV_DENSITY_FUNCTION
*/
@Deprecated
HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION("hive.metastore.stats.ndv.densityfunction", false,
"Whether to use density function to estimate the NDV for the whole table based on the NDV of partitions"),
HIVE_STATS_KEY_PREFIX("hive.stats.key.prefix", "", "", true), // internal usage only
// if length of variable length data type cannot be determined this length will be used.
HIVE_STATS_MAX_VARIABLE_LENGTH("hive.stats.max.variable.length", 100,
"To estimate the size of data flowing through operators in Hive/Tez(for reducer estimation etc.),\n" +
"average row size is multiplied with the total number of rows coming out of each operator.\n" +
"Average row size is computed from average column size of all columns in the row. In the absence\n" +
"of column statistics, for variable length columns (like string, bytes etc.), this value will be\n" +
"used. For fixed length columns their corresponding Java equivalent sizes are used\n" +
"(float - 4 bytes, double - 8 bytes etc.)."),
// if number of elements in list cannot be determined, this value will be used
HIVE_STATS_LIST_NUM_ENTRIES("hive.stats.list.num.entries", 10,
"To estimate the size of data flowing through operators in Hive/Tez(for reducer estimation etc.),\n" +
"average row size is multiplied with the total number of rows coming out of each operator.\n" +
"Average row size is computed from average column size of all columns in the row. In the absence\n" +
"of column statistics and for variable length complex columns like list, the average number of\n" +
"entries/values can be specified using this config."),
// if number of elements in map cannot be determined, this value will be used
HIVE_STATS_MAP_NUM_ENTRIES("hive.stats.map.num.entries", 10,
"To estimate the size of data flowing through operators in Hive/Tez(for reducer estimation etc.),\n" +
"average row size is multiplied with the total number of rows coming out of each operator.\n" +
"Average row size is computed from average column size of all columns in the row. In the absence\n" +
"of column statistics and for variable length complex columns like map, the average number of\n" +
"entries/values can be specified using this config."),
// statistics annotation fetches column statistics for all required columns which can
// be very expensive sometimes
HIVE_STATS_FETCH_COLUMN_STATS("hive.stats.fetch.column.stats", true,
"Annotation of operator tree with statistics information requires column statistics.\n" +
"Column statistics are fetched from metastore. Fetching column statistics for each needed column\n" +
"can be expensive when the number of columns is high. This flag can be used to disable fetching\n" +
"of column statistics from metastore."),
// in the absence of column statistics, the estimated number of rows/data size that will
// be emitted from join operator will depend on this factor
HIVE_STATS_JOIN_FACTOR("hive.stats.join.factor", (float) 1.1,
"Hive/Tez optimizer estimates the data size flowing through each of the operators. JOIN operator\n" +
"uses column statistics to estimate the number of rows flowing out of it and hence the data size.\n" +
"In the absence of column statistics, this factor determines the amount of rows that flows out\n" +
"of JOIN operator."),
HIVE_STATS_CORRELATED_MULTI_KEY_JOINS("hive.stats.correlated.multi.key.joins", true,
"When estimating output rows for a join involving multiple columns, the default behavior assumes" +
"the columns are independent. Setting this flag to true will cause the estimator to assume" +
"the columns are correlated."),
// in the absence of uncompressed/raw data size, total file size will be used for statistics
// annotation. But the file may be compressed, encoded and serialized which may be lesser in size
// than the actual uncompressed/raw data size. This factor will be multiplied to file size to estimate
// the raw data size.
HIVE_STATS_DESERIALIZATION_FACTOR("hive.stats.deserialization.factor", (float) 10.0,
"Hive/Tez optimizer estimates the data size flowing through each of the operators. In the absence\n" +
"of basic statistics like number of rows and data size, file size is used to estimate the number\n" +
"of rows and data size. Since files in tables/partitions are serialized (and optionally\n" +
"compressed) the estimates of number of rows and data size cannot be reliably determined.\n" +
"This factor is multiplied with the file size to account for serialization and compression."),
HIVE_STATS_IN_CLAUSE_FACTOR("hive.stats.filter.in.factor", (float) 1.0,
"Currently column distribution is assumed to be uniform. This can lead to overestimation/underestimation\n" +
"in the number of rows filtered by a certain operator, which in turn might lead to overprovision or\n" +
"underprovision of resources. This factor is applied to the cardinality estimation of IN clauses in\n" +
"filter operators."),
HIVE_STATS_IN_MIN_RATIO("hive.stats.filter.in.min.ratio", (float) 0.0f,
"Output estimation of an IN filter can't be lower than this ratio"),
HIVE_STATS_UDTF_FACTOR("hive.stats.udtf.factor", (float) 1.0,
"UDTFs change the number of rows of the output. A common UDTF is the explode() method that creates\n" +
"multiple rows for each element in the input array. This factor is applied to the number of\n" +
"output rows and output size."),
// Concurrency
HIVE_SUPPORT_CONCURRENCY("hive.support.concurrency", false,
"Whether Hive supports concurrency control or not. \n" +
"A ZooKeeper instance must be up and running when using zookeeper Hive lock manager "),
HIVE_LOCK_MANAGER("hive.lock.manager", "org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager", ""),
HIVE_LOCK_NUMRETRIES("hive.lock.numretries", 100,
"The number of times you want to try to get all the locks"),
HIVE_UNLOCK_NUMRETRIES("hive.unlock.numretries", 10,
"The number of times you want to retry to do one unlock"),
HIVE_LOCK_SLEEP_BETWEEN_RETRIES("hive.lock.sleep.between.retries", "60s",
new TimeValidator(TimeUnit.SECONDS, 0L, false, Long.MAX_VALUE, false),
"The maximum sleep time between various retries"),
HIVE_LOCK_MAPRED_ONLY("hive.lock.mapred.only.operation", false,
"This param is to control whether or not only do lock on queries\n" +
"that need to execute at least one mapred job."),
HIVE_LOCK_QUERY_STRING_MAX_LENGTH("hive.lock.query.string.max.length", 1000000,
"The maximum length of the query string to store in the lock.\n" +
"The default value is 1000000, since the data limit of a znode is 1MB"),
HIVE_MM_ALLOW_ORIGINALS("hive.mm.allow.originals", false,
"Whether to allow original files in MM tables. Conversion to MM may be expensive if\n" +
"this is set to false, however unless MAPREDUCE-7086 fix is present, queries that\n" +
"read MM tables with original files will fail. The default in Hive 3.0 is false."),
// Zookeeper related configs
HIVE_ZOOKEEPER_QUORUM("hive.zookeeper.quorum", "",
"List of ZooKeeper servers to talk to. This is needed for: \n" +
"1. Read/write locks - when hive.lock.manager is set to \n" +
"org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager, \n" +
"2. When HiveServer2 supports service discovery via Zookeeper.\n" +
"3. For delegation token storage if zookeeper store is used, if\n" +
"hive.cluster.delegation.token.store.zookeeper.connectString is not set\n" +
"4. LLAP daemon registry service\n" +
"5. Leader selection for privilege synchronizer"),
HIVE_ZOOKEEPER_CLIENT_PORT("hive.zookeeper.client.port", "2181",
"The port of ZooKeeper servers to talk to.\n" +
"If the list of Zookeeper servers specified in hive.zookeeper.quorum\n" +
"does not contain port numbers, this value is used."),
HIVE_ZOOKEEPER_SESSION_TIMEOUT("hive.zookeeper.session.timeout", "1200000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"ZooKeeper client's session timeout (in milliseconds). The client is disconnected, and as a result, all locks released, \n" +
"if a heartbeat is not sent in the timeout."),
HIVE_ZOOKEEPER_CONNECTION_TIMEOUT("hive.zookeeper.connection.timeout", "15s",
new TimeValidator(TimeUnit.SECONDS),
"ZooKeeper client's connection timeout in seconds. Connection timeout * hive.zookeeper.connection.max.retries\n" +
"with exponential backoff is when curator client deems connection is lost to zookeeper."),
HIVE_ZOOKEEPER_NAMESPACE("hive.zookeeper.namespace", "hive_zookeeper_namespace",
"The parent node under which all ZooKeeper nodes are created."),
HIVE_ZOOKEEPER_CLEAN_EXTRA_NODES("hive.zookeeper.clean.extra.nodes", false,
"Clean extra nodes at the end of the session."),
HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES("hive.zookeeper.connection.max.retries", 3,
"Max number of times to retry when connecting to the ZooKeeper server."),
HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME("hive.zookeeper.connection.basesleeptime", "1000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Initial amount of time (in milliseconds) to wait between retries\n" +
"when connecting to the ZooKeeper server when using ExponentialBackoffRetry policy."),
// Transactions
HIVE_TXN_MANAGER("hive.txn.manager",
"org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager",
"Set to org.apache.hadoop.hive.ql.lockmgr.DbTxnManager as part of turning on Hive\n" +
"transactions, which also requires appropriate settings for hive.compactor.initiator.on,\n" +
"hive.compactor.worker.threads, hive.support.concurrency (true),\n" +
"and hive.exec.dynamic.partition.mode (nonstrict).\n" +
"The default DummyTxnManager replicates pre-Hive-0.13 behavior and provides\n" +
"no transactions."),
HIVE_TXN_STRICT_LOCKING_MODE("hive.txn.strict.locking.mode", true, "In strict mode non-ACID\n" +
"resources use standard R/W lock semantics, e.g. INSERT will acquire exclusive lock.\n" +
"In nonstrict mode, for non-ACID resources, INSERT will only acquire shared lock, which\n" +
"allows two concurrent writes to the same partition but still lets lock manager prevent\n" +
"DROP TABLE etc. when the table is being written to"),
TXN_OVERWRITE_X_LOCK("hive.txn.xlock.iow", true,
"Ensures commands with OVERWRITE (such as INSERT OVERWRITE) acquire Exclusive locks for\n" +
"transactional tables. This ensures that inserts (w/o overwrite) running concurrently\n" +
"are not hidden by the INSERT OVERWRITE."),
HIVE_TXN_STATS_ENABLED("hive.txn.stats.enabled", true,
"Whether Hive supports transactional stats (accurate stats for transactional tables)"),
/**
* @deprecated Use MetastoreConf.TXN_TIMEOUT
*/
@Deprecated
HIVE_TXN_TIMEOUT("hive.txn.timeout", "300s", new TimeValidator(TimeUnit.SECONDS),
"time after which transactions are declared aborted if the client has not sent a heartbeat."),
/**
* @deprecated Use MetastoreConf.TXN_HEARTBEAT_THREADPOOL_SIZE
*/
@Deprecated
HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE("hive.txn.heartbeat.threadpool.size", 5, "The number of " +
"threads to use for heartbeating. For Hive CLI, 1 is enough. For HiveServer2, we need a few"),
TXN_MGR_DUMP_LOCK_STATE_ON_ACQUIRE_TIMEOUT("hive.txn.manager.dump.lock.state.on.acquire.timeout", false,
"Set this to true so that when attempt to acquire a lock on resource times out, the current state" +
" of the lock manager is dumped to log file. This is for debugging. See also " +
"hive.lock.numretries and hive.lock.sleep.between.retries."),
HIVE_TXN_OPERATIONAL_PROPERTIES("hive.txn.operational.properties", 1,
"1: Enable split-update feature found in the newer version of Hive ACID subsystem\n" +
"4: Make the table 'quarter-acid' as it only supports insert. But it doesn't require ORC or bucketing.\n" +
"This is intended to be used as an internal property for future versions of ACID. (See\n" +
"HIVE-14035 for details. User sets it tblproperites via transactional_properties.)", true),
/**
* @deprecated Use MetastoreConf.MAX_OPEN_TXNS
*/
@Deprecated
HIVE_MAX_OPEN_TXNS("hive.max.open.txns", 100000, "Maximum number of open transactions. If \n" +
"current open transactions reach this limit, future open transaction requests will be \n" +
"rejected, until this number goes below the limit."),
/**
* @deprecated Use MetastoreConf.COUNT_OPEN_TXNS_INTERVAL
*/
@Deprecated
HIVE_COUNT_OPEN_TXNS_INTERVAL("hive.count.open.txns.interval", "1s",
new TimeValidator(TimeUnit.SECONDS), "Time in seconds between checks to count open transactions."),
/**
* @deprecated Use MetastoreConf.TXN_MAX_OPEN_BATCH
*/
@Deprecated
HIVE_TXN_MAX_OPEN_BATCH("hive.txn.max.open.batch", 1000,
"Maximum number of transactions that can be fetched in one call to open_txns().\n" +
"This controls how many transactions streaming agents such as Flume or Storm open\n" +
"simultaneously. The streaming agent then writes that number of entries into a single\n" +
"file (per Flume agent or Storm bolt). Thus increasing this value decreases the number\n" +
"of delta files created by streaming agents. But it also increases the number of open\n" +
"transactions that Hive has to track at any given time, which may negatively affect\n" +
"read performance."),
/**
* @deprecated Use MetastoreConf.TXN_RETRYABLE_SQLEX_REGEX
*/
@Deprecated
HIVE_TXN_RETRYABLE_SQLEX_REGEX("hive.txn.retryable.sqlex.regex", "", "Comma separated list\n" +
"of regular expression patterns for SQL state, error code, and error message of\n" +
"retryable SQLExceptions, that's suitable for the metastore DB.\n" +
"For example: Can't serialize.*,40001$,^Deadlock,.*ORA-08176.*\n" +
"The string that the regex will be matched against is of the following form, where ex is a SQLException:\n" +
"ex.getMessage() + \" (SQLState=\" + ex.getSQLState() + \", ErrorCode=\" + ex.getErrorCode() + \")\""),
/**
* @deprecated Use MetastoreConf.COMPACTOR_INITIATOR_ON
*/
@Deprecated
HIVE_COMPACTOR_INITIATOR_ON("hive.compactor.initiator.on", false,
"Whether to run the initiator and cleaner threads on this metastore instance or not.\n" +
"Set this to true on one instance of the Thrift metastore service as part of turning\n" +
"on Hive transactions. For a complete list of parameters required for turning on\n" +
"transactions, see hive.txn.manager."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_WORKER_THREADS
*/
@Deprecated
HIVE_COMPACTOR_WORKER_THREADS("hive.compactor.worker.threads", 0,
"How many compactor worker threads to run on this metastore instance. Set this to a\n" +
"positive number on one or more instances of the Thrift metastore service as part of\n" +
"turning on Hive transactions. For a complete list of parameters required for turning\n" +
"on transactions, see hive.txn.manager.\n" +
"Worker threads spawn MapReduce jobs to do compactions. They do not do the compactions\n" +
"themselves. Increasing the number of worker threads will decrease the time it takes\n" +
"tables or partitions to be compacted once they are determined to need compaction.\n" +
"It will also increase the background load on the Hadoop cluster as more MapReduce jobs\n" +
"will be running in the background."),
HIVE_COMPACTOR_WORKER_TIMEOUT("hive.compactor.worker.timeout", "86400s",
new TimeValidator(TimeUnit.SECONDS),
"Time in seconds after which a compaction job will be declared failed and the\n" +
"compaction re-queued."),
HIVE_COMPACTOR_CHECK_INTERVAL("hive.compactor.check.interval", "300s",
new TimeValidator(TimeUnit.SECONDS),
"Time in seconds between checks to see if any tables or partitions need to be\n" +
"compacted. This should be kept high because each check for compaction requires\n" +
"many calls against the NameNode.\n" +
"Decreasing this value will reduce the time it takes for compaction to be started\n" +
"for a table or partition that requires compaction. However, checking if compaction\n" +
"is needed requires several calls to the NameNode for each table or partition that\n" +
"has had a transaction done on it since the last major compaction. So decreasing this\n" +
"value will increase the load on the NameNode."),
HIVE_COMPACTOR_DELTA_NUM_THRESHOLD("hive.compactor.delta.num.threshold", 10,
"Number of delta directories in a table or partition that will trigger a minor\n" +
"compaction."),
HIVE_COMPACTOR_DELTA_PCT_THRESHOLD("hive.compactor.delta.pct.threshold", 0.1f,
"Percentage (fractional) size of the delta files relative to the base that will trigger\n" +
"a major compaction. (1.0 = 100%, so the default 0.1 = 10%.)"),
COMPACTOR_MAX_NUM_DELTA("hive.compactor.max.num.delta", 500, "Maximum number of delta files that " +
"the compactor will attempt to handle in a single job."),
HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD("hive.compactor.abortedtxn.threshold", 1000,
"Number of aborted transactions involving a given table or partition that will trigger\n" +
"a major compaction."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_INITIATOR_FAILED_THRESHOLD
*/
@Deprecated
COMPACTOR_INITIATOR_FAILED_THRESHOLD("hive.compactor.initiator.failed.compacts.threshold", 2,
new RangeValidator(1, 20), "Number of consecutive compaction failures (per table/partition) " +
"after which automatic compactions will not be scheduled any more. Note that this must be less " +
"than hive.compactor.history.retention.failed."),
HIVE_COMPACTOR_CLEANER_RUN_INTERVAL("hive.compactor.cleaner.run.interval", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS), "Time between runs of the cleaner thread"),
COMPACTOR_JOB_QUEUE("hive.compactor.job.queue", "", "Used to specify name of Hadoop queue to which\n" +
"Compaction jobs will be submitted. Set to empty string to let Hadoop choose the queue."),
TRANSACTIONAL_CONCATENATE_NOBLOCK("hive.transactional.concatenate.noblock", false,
"Will cause 'alter table T concatenate' to be non-blocking"),
HIVE_COMPACTOR_COMPACT_MM("hive.compactor.compact.insert.only", true,
"Whether the compactor should compact insert-only tables. A safety switch."),
COMPACTOR_CRUD_QUERY_BASED("hive.compactor.crud.query.based", false,
"Means Major compaction on full CRUD tables is done as a query, "
+ "and minor compaction will be disabled."),
SPLIT_GROUPING_MODE("hive.split.grouping.mode", "query", new StringSet("query", "compactor"),
"This is set to compactor from within the query based compactor. This enables the Tez SplitGrouper "
+ "to group splits based on their bucket number, so that all rows from different bucket files "
+ " for the same bucket number can end up in the same bucket file after the compaction."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_HISTORY_RETENTION_SUCCEEDED
*/
@Deprecated
COMPACTOR_HISTORY_RETENTION_SUCCEEDED("hive.compactor.history.retention.succeeded", 3,
new RangeValidator(0, 100), "Determines how many successful compaction records will be " +
"retained in compaction history for a given table/partition."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_HISTORY_RETENTION_FAILED
*/
@Deprecated
COMPACTOR_HISTORY_RETENTION_FAILED("hive.compactor.history.retention.failed", 3,
new RangeValidator(0, 100), "Determines how many failed compaction records will be " +
"retained in compaction history for a given table/partition."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_HISTORY_RETENTION_ATTEMPTED
*/
@Deprecated
COMPACTOR_HISTORY_RETENTION_ATTEMPTED("hive.compactor.history.retention.attempted", 2,
new RangeValidator(0, 100), "Determines how many attempted compaction records will be " +
"retained in compaction history for a given table/partition."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_HISTORY_REAPER_INTERVAL
*/
@Deprecated
COMPACTOR_HISTORY_REAPER_INTERVAL("hive.compactor.history.reaper.interval", "2m",
new TimeValidator(TimeUnit.MILLISECONDS), "Determines how often compaction history reaper runs"),
/**
* @deprecated Use MetastoreConf.TIMEDOUT_TXN_REAPER_START
*/
@Deprecated
HIVE_TIMEDOUT_TXN_REAPER_START("hive.timedout.txn.reaper.start", "100s",
new TimeValidator(TimeUnit.MILLISECONDS), "Time delay of 1st reaper run after metastore start"),
/**
* @deprecated Use MetastoreConf.TIMEDOUT_TXN_REAPER_INTERVAL
*/
@Deprecated
HIVE_TIMEDOUT_TXN_REAPER_INTERVAL("hive.timedout.txn.reaper.interval", "180s",
new TimeValidator(TimeUnit.MILLISECONDS), "Time interval describing how often the reaper runs"),
/**
* @deprecated Use MetastoreConf.WRITE_SET_REAPER_INTERVAL
*/
@Deprecated
WRITE_SET_REAPER_INTERVAL("hive.writeset.reaper.interval", "60s",
new TimeValidator(TimeUnit.MILLISECONDS), "Frequency of WriteSet reaper runs"),
MERGE_CARDINALITY_VIOLATION_CHECK("hive.merge.cardinality.check", true,
"Set to true to ensure that each SQL Merge statement ensures that for each row in the target\n" +
"table there is at most 1 matching row in the source table per SQL Specification."),
MERGE_SPLIT_UPDATE("hive.merge.split.update", false,
"If true, SQL Merge statement will handle WHEN MATCHED UPDATE by splitting it into 2\n" +
"branches of a multi-insert, representing delete of existing row and an insert of\n" +
"the new version of the row. Updating bucketing and partitioning columns should\n" +
"only be permitted if this is true."),
OPTIMIZE_ACID_META_COLUMNS("hive.optimize.acid.meta.columns", true,
"If true, don't decode Acid metadata columns from storage unless" +
" they are needed."),
// For Arrow SerDe
HIVE_ARROW_ROOT_ALLOCATOR_LIMIT("hive.arrow.root.allocator.limit", Long.MAX_VALUE,
"Arrow root allocator memory size limitation in bytes."),
HIVE_ARROW_BATCH_ALLOCATOR_LIMIT("hive.arrow.batch.allocator.limit", 10_000_000_000L,
"Max bytes per arrow batch. This is a threshold, the memory is not pre-allocated."),
HIVE_ARROW_BATCH_SIZE("hive.arrow.batch.size", 1000, "The number of rows sent in one Arrow batch."),
// For Druid storage handler
HIVE_DRUID_INDEXING_GRANULARITY("hive.druid.indexer.segments.granularity", "DAY",
new PatternSet("YEAR", "MONTH", "WEEK", "DAY", "HOUR", "MINUTE", "SECOND"),
"Granularity for the segments created by the Druid storage handler"
),
HIVE_DRUID_MAX_PARTITION_SIZE("hive.druid.indexer.partition.size.max", 5000000,
"Maximum number of records per segment partition"
),
HIVE_DRUID_MAX_ROW_IN_MEMORY("hive.druid.indexer.memory.rownum.max", 75000,
"Maximum number of records in memory while storing data in Druid"
),
HIVE_DRUID_BROKER_DEFAULT_ADDRESS("hive.druid.broker.address.default", "localhost:8082",
"Address of the Druid broker. If we are querying Druid from Hive, this address needs to be\n"
+
"declared"
),
HIVE_DRUID_COORDINATOR_DEFAULT_ADDRESS("hive.druid.coordinator.address.default", "localhost:8081",
"Address of the Druid coordinator. It is used to check the load status of newly created segments"
),
HIVE_DRUID_OVERLORD_DEFAULT_ADDRESS("hive.druid.overlord.address.default", "localhost:8090",
"Address of the Druid overlord. It is used to submit indexing tasks to druid."
),
HIVE_DRUID_SELECT_THRESHOLD("hive.druid.select.threshold", 10000,
"Takes only effect when hive.druid.select.distribute is set to false. \n" +
"When we can split a Select query, this is the maximum number of rows that we try to retrieve\n" +
"per query. In order to do that, we obtain the estimated size for the complete result. If the\n" +
"number of records of the query results is larger than this threshold, we split the query in\n" +
"total number of rows/threshold parts across the time dimension. Note that we assume the\n" +
"records to be split uniformly across the time dimension."),
HIVE_DRUID_NUM_HTTP_CONNECTION("hive.druid.http.numConnection", 20, "Number of connections used by\n" +
"the HTTP client."),
HIVE_DRUID_HTTP_READ_TIMEOUT("hive.druid.http.read.timeout", "PT1M", "Read timeout period for the HTTP\n" +
"client in ISO8601 format (for example P2W, P3M, PT1H30M, PT0.750S), default is period of 1 minute."),
HIVE_DRUID_SLEEP_TIME("hive.druid.sleep.time", "PT10S",
"Sleep time between retries in ISO8601 format (for example P2W, P3M, PT1H30M, PT0.750S), default is period of 10 seconds."
),
HIVE_DRUID_BASE_PERSIST_DIRECTORY("hive.druid.basePersistDirectory", "",
"Local temporary directory used to persist intermediate indexing state, will default to JVM system property java.io.tmpdir."
),
HIVE_DRUID_ROLLUP("hive.druid.rollup", true, "Whether to roll up Druid rows or not."),
DRUID_SEGMENT_DIRECTORY("hive.druid.storage.storageDirectory", "/druid/segments",
"Druid deep storage location."),
DRUID_METADATA_BASE("hive.druid.metadata.base", "druid", "Default prefix for metadata tables"),
DRUID_METADATA_DB_TYPE("hive.druid.metadata.db.type", "mysql",
new PatternSet("mysql", "postgresql", "derby"), "Type of the metadata database."
),
DRUID_METADATA_DB_USERNAME("hive.druid.metadata.username", "",
"Username to connect to Type of the metadata DB."
),
DRUID_METADATA_DB_PASSWORD("hive.druid.metadata.password", "",
"Password to connect to Type of the metadata DB."
),
DRUID_METADATA_DB_URI("hive.druid.metadata.uri", "",
"URI to connect to the database (for example jdbc:mysql://hostname:port/DBName)."
),
DRUID_WORKING_DIR("hive.druid.working.directory", "/tmp/workingDirectory",
"Default hdfs working directory used to store some intermediate metadata"
),
HIVE_DRUID_MAX_TRIES("hive.druid.maxTries", 5, "Maximum number of retries before giving up"),
HIVE_DRUID_PASSIVE_WAIT_TIME("hive.druid.passiveWaitTimeMs", 30000L,
"Wait time in ms default to 30 seconds."
),
HIVE_DRUID_BITMAP_FACTORY_TYPE("hive.druid.bitmap.type", "roaring", new PatternSet("roaring", "concise"), "Coding algorithm used to encode the bitmaps"),
// For HBase storage handler
HIVE_HBASE_WAL_ENABLED("hive.hbase.wal.enabled", true,
"Whether writes to HBase should be forced to the write-ahead log. \n" +
"Disabling this improves HBase write performance at the risk of lost writes in case of a crash."),
HIVE_HBASE_GENERATE_HFILES("hive.hbase.generatehfiles", false,
"True when HBaseStorageHandler should generate hfiles instead of operate against the online table."),
HIVE_HBASE_SNAPSHOT_NAME("hive.hbase.snapshot.name", null, "The HBase table snapshot name to use."),
HIVE_HBASE_SNAPSHOT_RESTORE_DIR("hive.hbase.snapshot.restoredir", "/tmp", "The directory in which to " +
"restore the HBase table snapshot."),
// For har files
HIVEARCHIVEENABLED("hive.archive.enabled", false, "Whether archiving operations are permitted"),
HIVEFETCHTASKCONVERSION("hive.fetch.task.conversion", "more", new StringSet("none", "minimal", "more"),
"Some select queries can be converted to single FETCH task minimizing latency.\n" +
"Currently the query should be single sourced not having any subquery and should not have\n" +
"any aggregations or distincts (which incurs RS), lateral views and joins.\n" +
"0. none : disable hive.fetch.task.conversion\n" +
"1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only\n" +
"2. more : SELECT, FILTER, LIMIT only (support TABLESAMPLE and virtual columns)"
),
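// Illustrative only (hypothetical table name 'src'): with
//   SET hive.fetch.task.conversion=more;
// a simple query such as
//   SELECT key, value FROM src WHERE ds='2024-01-01' LIMIT 10;
// can be executed as a single FETCH task instead of launching a job.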
HIVEFETCHTASKCONVERSIONTHRESHOLD("hive.fetch.task.conversion.threshold", 1073741824L,
"Input threshold for applying hive.fetch.task.conversion. If target table is native, input length\n" +
"is calculated by summation of file lengths. If it's not native, storage handler for the table\n" +
"can optionally implement org.apache.hadoop.hive.ql.metadata.InputEstimator interface."),
HIVEFETCHTASKAGGR("hive.fetch.task.aggr", false,
"Aggregation queries with no group-by clause (for example, select count(*) from src) execute\n" +
"final aggregations in single reduce task. If this is set true, Hive delegates final aggregation\n" +
"stage to fetch task, possibly decreasing the query time."),
HIVEOPTIMIZEMETADATAQUERIES("hive.compute.query.using.stats", true,
"When set to true Hive will answer a few queries like count(1) purely using stats\n" +
"stored in metastore. For basic stats collection turn on the config hive.stats.autogather to true.\n" +
"For more advanced stats collection need to run analyze table queries."),
// Serde for FetchTask
HIVEFETCHOUTPUTSERDE("hive.fetch.output.serde", "org.apache.hadoop.hive.serde2.DelimitedJSONSerDe",
"The SerDe used by FetchTask to serialize the fetch output."),
HIVEEXPREVALUATIONCACHE("hive.cache.expr.evaluation", true,
"If true, the evaluation result of a deterministic expression referenced twice or more\n" +
"will be cached.\n" +
"For example, in a filter condition like '.. where key + 10 = 100 or key + 10 = 0'\n" +
"the expression 'key + 10' will be evaluated/cached once and reused for the following\n" +
"expression ('key + 10 = 0'). Currently, this is applied only to expressions in select\n" +
"or filter operators."),
// Hive Variables
HIVEVARIABLESUBSTITUTE("hive.variable.substitute", true,
"This enables substitution using syntax like ${var} ${system:var} and ${env:var}."),
HIVEVARIABLESUBSTITUTEDEPTH("hive.variable.substitute.depth", 40,
"The maximum replacements the substitution engine will do."),
HIVECONFVALIDATION("hive.conf.validation", true,
"Enables type checking for registered Hive configurations"),
SEMANTIC_ANALYZER_HOOK("hive.semantic.analyzer.hook", "", ""),
HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE(
"hive.test.authz.sstd.hs2.mode", false, "test hs2 mode from .q tests", true),
HIVE_AUTHORIZATION_ENABLED("hive.security.authorization.enabled", false,
"enable or disable the Hive client authorization"),
HIVE_AUTHORIZATION_KERBEROS_USE_SHORTNAME("hive.security.authorization.kerberos.use.shortname", true,
"use short name in Kerberos cluster"),
HIVE_AUTHORIZATION_MANAGER("hive.security.authorization.manager",
"org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory",
"The Hive client authorization manager class name. The user defined authorization class should implement \n" +
"interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider."),
HIVE_AUTHENTICATOR_MANAGER("hive.security.authenticator.manager",
"org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator",
"hive client authenticator manager class name. The user defined authenticator should implement \n" +
"interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider."),
HIVE_METASTORE_AUTHORIZATION_MANAGER("hive.security.metastore.authorization.manager",
"org.apache.hadoop.hive.ql.security.authorization.DefaultHiveMetastoreAuthorizationProvider",
"Names of authorization manager classes (comma separated) to be used in the metastore\n" +
"for authorization. The user defined authorization class should implement interface\n" +
"org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.\n" +
"All authorization manager classes have to successfully authorize the metastore API\n" +
"call for the command execution to be allowed."),
HIVE_METASTORE_AUTHORIZATION_AUTH_READS("hive.security.metastore.authorization.auth.reads", true,
"If this is true, metastore authorizer authorizes read actions on database, table"),
HIVE_METASTORE_AUTHENTICATOR_MANAGER("hive.security.metastore.authenticator.manager",
"org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
"authenticator manager class name to be used in the metastore for authentication. \n" +
"The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider."),
HIVE_AUTHORIZATION_TABLE_USER_GRANTS("hive.security.authorization.createtable.user.grants", "",
"the privileges automatically granted to some users whenever a table gets created.\n" +
"An example like \"userX,userY:select;userZ:create\" will grant select privilege to userX and userY,\n" +
"and grant create privilege to userZ whenever a new table created."),
HIVE_AUTHORIZATION_TABLE_GROUP_GRANTS("hive.security.authorization.createtable.group.grants",
"",
"the privileges automatically granted to some groups whenever a table gets created.\n" +
"An example like \"groupX,groupY:select;groupZ:create\" will grant select privilege to groupX and groupY,\n" +
"and grant create privilege to groupZ whenever a new table created."),
HIVE_AUTHORIZATION_TABLE_ROLE_GRANTS("hive.security.authorization.createtable.role.grants", "",
"the privileges automatically granted to some roles whenever a table gets created.\n" +
"An example like \"roleX,roleY:select;roleZ:create\" will grant select privilege to roleX and roleY,\n" +
"and grant create privilege to roleZ whenever a new table created."),
HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS("hive.security.authorization.createtable.owner.grants",
"",
"The privileges automatically granted to the owner whenever a table gets created.\n" +
"An example like \"select,drop\" will grant select and drop privilege to the owner\n" +
"of the table. Note that the default gives the creator of a table no access to the\n" +
"table (but see HIVE-8067)."),
HIVE_AUTHORIZATION_TASK_FACTORY("hive.security.authorization.task.factory",
"org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl",
"Authorization DDL task factory implementation"),
// if this is not set default value is set during config initialization
// Default value can't be set in this constructor as it would refer names in other ConfVars
// whose constructor would not have been called
HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST(
"hive.security.authorization.sqlstd.confwhitelist", "",
"A Java regex. Configurations parameters that match this\n" +
"regex can be modified by user when SQL standard authorization is enabled.\n" +
"To get the default value, use the 'set <param>' command.\n" +
"Note that the hive.conf.restricted.list checks are still enforced after the white list\n" +
"check"),
HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST_APPEND(
"hive.security.authorization.sqlstd.confwhitelist.append", "",
"2nd Java regex that it would match in addition to\n" +
"hive.security.authorization.sqlstd.confwhitelist.\n" +
"Do not include a starting \"|\" in the value. Using this regex instead\n" +
"of updating the original regex means that you can append to the default\n" +
"set by SQL standard authorization instead of replacing it entirely."),
HIVE_CLI_PRINT_HEADER("hive.cli.print.header", false, "Whether to print the names of the columns in query output."),
HIVE_CLI_PRINT_ESCAPE_CRLF("hive.cli.print.escape.crlf", false,
"Whether to print carriage returns and line feeds in row output as escaped \\r and \\n"),
HIVE_CLI_TEZ_SESSION_ASYNC("hive.cli.tez.session.async", true, "Whether to start Tez\n" +
"session in background when running CLI with Tez, allowing CLI to be available earlier."),
HIVE_DISABLE_UNSAFE_EXTERNALTABLE_OPERATIONS("hive.disable.unsafe.external.table.operations", true,
"Whether to disable certain optimizations and operations on external tables," +
" on the assumption that data changes by external applications may have negative effects" +
" on these operations."),
HIVE_STRICT_MANAGED_TABLES("hive.strict.managed.tables", false,
"Whether strict managed tables mode is enabled. With this mode enabled, " +
"only transactional tables (both full and insert-only) are allowed to be created as managed tables"),
HIVE_EXTERNALTABLE_PURGE_DEFAULT("hive.external.table.purge.default", false,
"Set to true to set external.table.purge=true on newly created external tables," +
" which will specify that the table data should be deleted when the table is dropped." +
" Set to false maintain existing behavior that external tables do not delete data" +
" when the table is dropped."),
HIVE_ERROR_ON_EMPTY_PARTITION("hive.error.on.empty.partition", false,
"Whether to throw an exception if dynamic partition insert generates empty results."),
HIVE_EXIM_URI_SCHEME_WL("hive.exim.uri.scheme.whitelist", "hdfs,pfile,file,s3,s3a,gs",
"A comma separated list of acceptable URI schemes for import and export."),
// temporary variable for testing. This is added just to turn off this feature in case of a bug in
// deployment. It has not been documented in hive-default.xml intentionally, this should be removed
// once the feature is stable
HIVE_EXIM_RESTRICT_IMPORTS_INTO_REPLICATED_TABLES("hive.exim.strict.repl.tables",true,
"Parameter that determines if 'regular' (non-replication) export dumps can be\n" +
"imported on to tables that are the target of replication. If this parameter is\n" +
"set, regular imports will check if the destination table(if it exists) has a " +
"'repl.last.id' set on it. If so, it will fail."),
HIVE_REPL_TASK_FACTORY("hive.repl.task.factory",
"org.apache.hive.hcatalog.api.repl.exim.EximReplicationTaskFactory",
"Parameter that can be used to override which ReplicationTaskFactory will be\n" +
"used to instantiate ReplicationTask events. Override for third party repl plugins"),
HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS("hive.mapper.cannot.span.multiple.partitions", false, ""),
HIVE_REWORK_MAPREDWORK("hive.rework.mapredwork", false,
"should rework the mapred work or not.\n" +
"This is first introduced by SymlinkTextInputFormat to replace symlink files with real paths at compile time."),
HIVE_IO_EXCEPTION_HANDLERS("hive.io.exception.handlers", "",
"A list of io exception handler class names. This is used\n" +
"to construct a list exception handlers to handle exceptions thrown\n" +
"by record readers"),
// logging configuration
HIVE_LOG4J_FILE("hive.log4j.file", "",
"Hive log4j configuration file.\n" +
"If the property is not set, then logging will be initialized using hive-log4j2.properties found on the classpath.\n" +
"If the property is set, the value must be a valid URI (java.net.URI, e.g. \"file:///tmp/my-logging.xml\"), \n" +
"which you can then extract a URL from and pass to PropertyConfigurator.configure(URL)."),
HIVE_EXEC_LOG4J_FILE("hive.exec.log4j.file", "",
"Hive log4j configuration file for execution mode(sub command).\n" +
"If the property is not set, then logging will be initialized using hive-exec-log4j2.properties found on the classpath.\n" +
"If the property is set, the value must be a valid URI (java.net.URI, e.g. \"file:///tmp/my-logging.xml\"), \n" +
"which you can then extract a URL from and pass to PropertyConfigurator.configure(URL)."),
HIVE_ASYNC_LOG_ENABLED("hive.async.log.enabled", true,
"Whether to enable Log4j2's asynchronous logging. Asynchronous logging can give\n" +
" significant performance improvement as logging will be handled in separate thread\n" +
" that uses LMAX disruptor queue for buffering log messages.\n" +
" Refer https://logging.apache.org/log4j/2.x/manual/async.html for benefits and\n" +
" drawbacks."),
HIVE_LOG_EXPLAIN_OUTPUT("hive.log.explain.output", false,
"Whether to log explain output for every query.\n"
+ "When enabled, will log EXPLAIN EXTENDED output for the query at INFO log4j log level."),
HIVE_EXPLAIN_USER("hive.explain.user", true,
"Whether to show explain result at user level.\n" +
"When enabled, will log EXPLAIN output for the query at user level. Tez only."),
HIVE_SPARK_EXPLAIN_USER("hive.spark.explain.user", false,
"Whether to show explain result at user level.\n" +
"When enabled, will log EXPLAIN output for the query at user level. Spark only."),
HIVE_SPARK_LOG_EXPLAIN_WEBUI("hive.spark.log.explain.webui", true, "Whether to show the " +
"explain plan in the Spark Web UI. Only shows the regular EXPLAIN plan, and ignores " +
"any extra EXPLAIN configuration (e.g. hive.spark.explain.user, etc.). The explain " +
"plan for each stage is truncated at 100,000 characters."),
// prefix used for auto-generated column aliases (this should start with '_')
HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL("hive.autogen.columnalias.prefix.label", "_c",
"String used as a prefix when auto generating column alias.\n" +
"By default the prefix label will be appended with a column position number to form the column alias. \n" +
"Auto generation would happen if an aggregate function is used in a select clause without an explicit alias."),
HIVE_AUTOGEN_COLUMNALIAS_PREFIX_INCLUDEFUNCNAME(
"hive.autogen.columnalias.prefix.includefuncname", false,
"Whether to include function name in the column alias auto generated by Hive."),
HIVE_METRICS_CLASS("hive.service.metrics.class",
"org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics",
new StringSet(
"org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics",
"org.apache.hadoop.hive.common.metrics.LegacyMetrics"),
"Hive metrics subsystem implementation class."),
HIVE_CODAHALE_METRICS_REPORTER_CLASSES("hive.service.metrics.codahale.reporter.classes",
"org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " +
"org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter",
"Comma separated list of reporter implementation classes for metric class "
+ "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics. Overrides "
+ "HIVE_METRICS_REPORTER conf if present"),
@Deprecated
HIVE_METRICS_REPORTER("hive.service.metrics.reporter", "",
"Reporter implementations for metric class "
+ "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics;" +
"Deprecated, use HIVE_CODAHALE_METRICS_REPORTER_CLASSES instead. This configuraiton will be"
+ " overridden by HIVE_CODAHALE_METRICS_REPORTER_CLASSES if present. " +
"Comma separated list of JMX, CONSOLE, JSON_FILE, HADOOP2"),
HIVE_METRICS_JSON_FILE_LOCATION("hive.service.metrics.file.location", "/tmp/report.json",
"For metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics JSON_FILE reporter, the location of local JSON metrics file. " +
"This file will get overwritten at every interval."),
HIVE_METRICS_JSON_FILE_INTERVAL("hive.service.metrics.file.frequency", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"For metric class org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " +
"the frequency of updating JSON metrics file."),
HIVE_METRICS_HADOOP2_INTERVAL("hive.service.metrics.hadoop2.frequency", "30s",
new TimeValidator(TimeUnit.SECONDS),
"For metric class org.apache.hadoop.hive.common.metrics.metrics2.Metrics2Reporter, " +
"the frequency of updating the HADOOP2 metrics system."),
HIVE_METRICS_HADOOP2_COMPONENT_NAME("hive.service.metrics.hadoop2.component",
"hive",
"Component name to provide to Hadoop2 Metrics system. Ideally 'hivemetastore' for the MetaStore " +
" and and 'hiveserver2' for HiveServer2."
),
HIVE_PERF_LOGGER("hive.exec.perf.logger", "org.apache.hadoop.hive.ql.log.PerfLogger",
"The class responsible for logging client side performance metrics. \n" +
"Must be a subclass of org.apache.hadoop.hive.ql.log.PerfLogger"),
HIVE_START_CLEANUP_SCRATCHDIR("hive.start.cleanup.scratchdir", false,
"To cleanup the Hive scratchdir when starting the Hive Server"),
HIVE_SCRATCH_DIR_LOCK("hive.scratchdir.lock", false,
"To hold a lock file in scratchdir to prevent to be removed by cleardanglingscratchdir"),
HIVE_INSERT_INTO_MULTILEVEL_DIRS("hive.insert.into.multilevel.dirs", false,
"Where to insert into multilevel directories like\n" +
"\"insert directory '/HIVEFT25686/chinna/' from table\""),
HIVE_CTAS_EXTERNAL_TABLES("hive.ctas.external.tables", true,
"whether CTAS for external tables is allowed"),
HIVE_INSERT_INTO_EXTERNAL_TABLES("hive.insert.into.external.tables", true,
"whether insert into external tables is allowed"),
HIVE_TEMPORARY_TABLE_STORAGE(
"hive.exec.temporary.table.storage", "default", new StringSet("memory",
"ssd", "default"), "Define the storage policy for temporary tables." +
"Choices between memory, ssd and default"),
HIVE_QUERY_LIFETIME_HOOKS("hive.query.lifetime.hooks", "",
"A comma separated list of hooks which implement QueryLifeTimeHook. These will be triggered" +
" before/after query compilation and before/after query execution, in the order specified." +
"Implementations of QueryLifeTimeHookWithParseHooks can also be specified in this list. If they are" +
"specified then they will be invoked in the same places as QueryLifeTimeHooks and will be invoked during pre " +
"and post query parsing"),
HIVE_DRIVER_RUN_HOOKS("hive.exec.driver.run.hooks", "",
"A comma separated list of hooks which implement HiveDriverRunHook. Will be run at the beginning " +
"and end of Driver.run, these will be run in the order specified."),
HIVE_DDL_OUTPUT_FORMAT("hive.ddl.output.format", null,
"The data format to use for DDL output. One of \"text\" (for human\n" +
"readable text) or \"json\" (for a json object)."),
HIVE_ENTITY_SEPARATOR("hive.entity.separator", "@",
"Separator used to construct names of tables and partitions. For example, dbname@tablename@partitionname"),
HIVE_CAPTURE_TRANSFORM_ENTITY("hive.entity.capture.transform", false,
"Compiler to capture transform URI referred in the query"),
HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY("hive.display.partition.cols.separately", true,
"In older Hive version (0.10 and earlier) no distinction was made between\n" +
"partition columns or non-partition columns while displaying columns in describe\n" +
"table. From 0.12 onwards, they are displayed separately. This flag will let you\n" +
"get old behavior, if desired. See, test-case in patch for HIVE-6689."),
HIVE_SSL_PROTOCOL_BLACKLIST("hive.ssl.protocol.blacklist", "SSLv2,SSLv3",
"SSL Versions to disable for all Hive Servers"),
HIVE_PRIVILEGE_SYNCHRONIZER("hive.privilege.synchronizer", true,
"Whether to synchronize privileges from external authorizer periodically in HS2"),
HIVE_PRIVILEGE_SYNCHRONIZER_INTERVAL("hive.privilege.synchronizer.interval",
"1800s", new TimeValidator(TimeUnit.SECONDS),
"Interval to synchronize privileges from external authorizer periodically in HS2"),
// HiveServer2 specific configs
HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR("hive.server2.clear.dangling.scratchdir", false,
"Clear dangling scratch dir periodically in HS2"),
HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR_INTERVAL("hive.server2.clear.dangling.scratchdir.interval",
"1800s", new TimeValidator(TimeUnit.SECONDS),
"Interval to clear dangling scratch dir periodically in HS2"),
HIVE_SERVER2_SLEEP_INTERVAL_BETWEEN_START_ATTEMPTS("hive.server2.sleep.interval.between.start.attempts",
"60s", new TimeValidator(TimeUnit.MILLISECONDS, 0l, true, Long.MAX_VALUE, true),
"Amount of time to sleep between HiveServer2 start attempts. Primarily meant for tests"),
HIVE_SERVER2_MAX_START_ATTEMPTS("hive.server2.max.start.attempts", 30L, new RangeValidator(0L, null),
"Number of times HiveServer2 will attempt to start before exiting. The sleep interval between retries" +
" is determined by " + ConfVars.HIVE_SERVER2_SLEEP_INTERVAL_BETWEEN_START_ATTEMPTS.varname +
"\n The default of 30 will keep trying for 30 minutes."),
HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY("hive.server2.support.dynamic.service.discovery", false,
"Whether HiveServer2 supports dynamic service discovery for its clients. " +
"To support this, each instance of HiveServer2 currently uses ZooKeeper to register itself, " +
"when it is brought up. JDBC/ODBC clients should use the ZooKeeper ensemble: " +
"hive.zookeeper.quorum in their connection string."),
HIVE_SERVER2_ZOOKEEPER_NAMESPACE("hive.server2.zookeeper.namespace", "hiveserver2",
"The parent node in ZooKeeper used by HiveServer2 when supporting dynamic service discovery."),
HIVE_SERVER2_ZOOKEEPER_PUBLISH_CONFIGS("hive.server2.zookeeper.publish.configs", true,
"Whether we should publish HiveServer2's configs to ZooKeeper."),
// HiveServer2 global init file location
HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION("hive.server2.global.init.file.location", "${env:HIVE_CONF_DIR}",
"Either the location of a HS2 global init file or a directory containing a .hiverc file. If the \n" +
"property is set, the value must be a valid path to an init file or directory where the init file is located."),
HIVE_SERVER2_TRANSPORT_MODE("hive.server2.transport.mode", "binary", new StringSet("binary", "http"),
"Transport mode of HiveServer2."),
HIVE_SERVER2_THRIFT_BIND_HOST("hive.server2.thrift.bind.host", "",
"Bind host on which to run the HiveServer2 Thrift service."),
HIVE_SERVER2_PARALLEL_COMPILATION("hive.driver.parallel.compilation", false, "Whether to\n" +
"enable parallel compilation of the queries between sessions and within the same session on HiveServer2. The default is false."),
HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT("hive.driver.parallel.compilation.global.limit", -1, "Determines the " +
"degree of parallelism for queries compilation between sessions on HiveServer2. The default is -1."),
HIVE_SERVER2_COMPILE_LOCK_TIMEOUT("hive.server2.compile.lock.timeout", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Number of seconds a request will wait to acquire the compile lock before giving up. " +
"Setting it to 0s disables the timeout."),
HIVE_SERVER2_PARALLEL_OPS_IN_SESSION("hive.server2.parallel.ops.in.session", true,
"Whether to allow several parallel operations (such as SQL statements) in one session."),
HIVE_SERVER2_MATERIALIZED_VIEWS_REGISTRY_IMPL("hive.server2.materializedviews.registry.impl", "DEFAULT",
new StringSet("DEFAULT", "DUMMY"),
"The implementation that we should use for the materialized views registry. \n" +
" DEFAULT: Default cache for materialized views\n" +
" DUMMY: Do not cache materialized views and hence forward requests to metastore"),
// HiveServer2 WebUI
HIVE_SERVER2_WEBUI_BIND_HOST("hive.server2.webui.host", "0.0.0.0", "The host address the HiveServer2 WebUI will listen on"),
HIVE_SERVER2_WEBUI_PORT("hive.server2.webui.port", 10002, "The port the HiveServer2 WebUI will listen on. This can be"
+ "set to 0 or a negative integer to disable the web UI"),
HIVE_SERVER2_WEBUI_MAX_THREADS("hive.server2.webui.max.threads", 50, "The max HiveServer2 WebUI threads"),
HIVE_SERVER2_WEBUI_USE_SSL("hive.server2.webui.use.ssl", false,
"Set this to true for using SSL encryption for HiveServer2 WebUI."),
HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PATH("hive.server2.webui.keystore.path", "",
"SSL certificate keystore location for HiveServer2 WebUI."),
HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PASSWORD("hive.server2.webui.keystore.password", "",
"SSL certificate keystore password for HiveServer2 WebUI."),
HIVE_SERVER2_WEBUI_USE_SPNEGO("hive.server2.webui.use.spnego", false,
"If true, the HiveServer2 WebUI will be secured with SPNEGO. Clients must authenticate with Kerberos."),
HIVE_SERVER2_WEBUI_SPNEGO_KEYTAB("hive.server2.webui.spnego.keytab", "",
"The path to the Kerberos Keytab file containing the HiveServer2 WebUI SPNEGO service principal."),
HIVE_SERVER2_WEBUI_SPNEGO_PRINCIPAL("hive.server2.webui.spnego.principal",
"HTTP/[email protected]", "The HiveServer2 WebUI SPNEGO service principal.\n" +
"The special string _HOST will be replaced automatically with \n" +
"the value of hive.server2.webui.host or the correct host name."),
HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES("hive.server2.webui.max.historic.queries", 25,
"The maximum number of past queries to show in HiverSever2 WebUI."),
HIVE_SERVER2_WEBUI_USE_PAM("hive.server2.webui.use.pam", false,
"If true, the HiveServer2 WebUI will be secured with PAM."),
HIVE_SERVER2_WEBUI_EXPLAIN_OUTPUT("hive.server2.webui.explain.output", false,
"When set to true, the EXPLAIN output for every query is displayed"
+ " in the HS2 WebUI / Drilldown / Query Plan tab.\n"),
HIVE_SERVER2_WEBUI_SHOW_GRAPH("hive.server2.webui.show.graph", false,
"Set this to true to to display query plan as a graph instead of text in the WebUI. " +
"Only works with hive.server2.webui.explain.output set to true."),
HIVE_SERVER2_WEBUI_MAX_GRAPH_SIZE("hive.server2.webui.max.graph.size", 25,
"Max number of stages graph can display. If number of stages exceeds this, no query" +
"plan will be shown. Only works when hive.server2.webui.show.graph and " +
"hive.server2.webui.explain.output set to true."),
HIVE_SERVER2_WEBUI_SHOW_STATS("hive.server2.webui.show.stats", false,
"Set this to true to to display statistics for MapReduce tasks in the WebUI. " +
"Only works when hive.server2.webui.show.graph and hive.server2.webui.explain.output " +
"set to true."),
HIVE_SERVER2_WEBUI_ENABLE_CORS("hive.server2.webui.enable.cors", false,
"Whether to enable cross origin requests (CORS)\n"),
HIVE_SERVER2_WEBUI_CORS_ALLOWED_ORIGINS("hive.server2.webui.cors.allowed.origins", "*",
"Comma separated list of origins that are allowed when CORS is enabled.\n"),
HIVE_SERVER2_WEBUI_CORS_ALLOWED_METHODS("hive.server2.webui.cors.allowed.methods", "GET,POST,DELETE,HEAD",
"Comma separated list of http methods that are allowed when CORS is enabled.\n"),
HIVE_SERVER2_WEBUI_CORS_ALLOWED_HEADERS("hive.server2.webui.cors.allowed.headers",
"X-Requested-With,Content-Type,Accept,Origin",
"Comma separated list of http headers that are allowed when CORS is enabled.\n"),
// Tez session settings
HIVE_SERVER2_ACTIVE_PASSIVE_HA_ENABLE("hive.server2.active.passive.ha.enable", false,
"Whether HiveServer2 Active/Passive High Availability be enabled when Hive Interactive sessions are enabled." +
"This will also require hive.server2.support.dynamic.service.discovery to be enabled."),
HIVE_SERVER2_ACTIVE_PASSIVE_HA_REGISTRY_NAMESPACE("hive.server2.active.passive.ha.registry.namespace",
"hs2ActivePassiveHA",
"When HiveServer2 Active/Passive High Availability is enabled, uses this namespace for registering HS2\n" +
"instances with zookeeper"),
HIVE_SERVER2_TEZ_INTERACTIVE_QUEUE("hive.server2.tez.interactive.queue", "",
"A single YARN queues to use for Hive Interactive sessions. When this is specified,\n" +
"workload management is enabled and used for these sessions."),
HIVE_SERVER2_WM_NAMESPACE("hive.server2.wm.namespace", "default",
"The WM namespace to use when one metastore is used by multiple compute clusters each \n" +
"with their own workload management. The special value 'default' (the default) will \n" +
"also include any resource plans created before the namespaces were introduced."),
HIVE_SERVER2_WM_WORKER_THREADS("hive.server2.wm.worker.threads", 4,
"Number of worker threads to use to perform the synchronous operations with Tez\n" +
"sessions for workload management (e.g. opening, closing, etc.)"),
HIVE_SERVER2_WM_ALLOW_ANY_POOL_VIA_JDBC("hive.server2.wm.allow.any.pool.via.jdbc", false,
"Applies when a user specifies a target WM pool in the JDBC connection string. If\n" +
"false, the user can only specify a pool he is mapped to (e.g. make a choice among\n" +
"multiple group mappings); if true, the user can specify any existing pool."),
HIVE_SERVER2_WM_POOL_METRICS("hive.server2.wm.pool.metrics", true,
"Whether per-pool WM metrics should be enabled."),
HIVE_SERVER2_TEZ_WM_AM_REGISTRY_TIMEOUT("hive.server2.tez.wm.am.registry.timeout", "30s",
new TimeValidator(TimeUnit.SECONDS),
"The timeout for AM registry registration, after which (on attempting to use the\n" +
"session), we kill it and try to get another one."),
HIVE_SERVER2_TEZ_DEFAULT_QUEUES("hive.server2.tez.default.queues", "",
"A list of comma separated values corresponding to YARN queues of the same name.\n" +
"When HiveServer2 is launched in Tez mode, this configuration needs to be set\n" +
"for multiple Tez sessions to run in parallel on the cluster."),
HIVE_SERVER2_TEZ_SESSIONS_PER_DEFAULT_QUEUE("hive.server2.tez.sessions.per.default.queue", 1,
"A positive integer that determines the number of Tez sessions that should be\n" +
"launched on each of the queues specified by \"hive.server2.tez.default.queues\".\n" +
"Determines the parallelism on each queue."),
HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS("hive.server2.tez.initialize.default.sessions",
false,
"This flag is used in HiveServer2 to enable a user to use HiveServer2 without\n" +
"turning on Tez for HiveServer2. The user could potentially want to run queries\n" +
"over Tez without the pool of sessions."),
HIVE_SERVER2_TEZ_QUEUE_ACCESS_CHECK("hive.server2.tez.queue.access.check", false,
"Whether to check user access to explicitly specified YARN queues. " +
"yarn.resourcemanager.webapp.address must be configured to use this."),
HIVE_SERVER2_TEZ_SESSION_LIFETIME("hive.server2.tez.session.lifetime", "162h",
new TimeValidator(TimeUnit.HOURS),
"The lifetime of the Tez sessions launched by HS2 when default sessions are enabled.\n" +
"Set to 0 to disable session expiration."),
HIVE_SERVER2_TEZ_SESSION_LIFETIME_JITTER("hive.server2.tez.session.lifetime.jitter", "3h",
new TimeValidator(TimeUnit.HOURS),
"The jitter for Tez session lifetime; prevents all the sessions from restarting at once."),
HIVE_SERVER2_TEZ_SESSION_MAX_INIT_THREADS("hive.server2.tez.sessions.init.threads", 16,
"If hive.server2.tez.initialize.default.sessions is enabled, the maximum number of\n" +
"threads to use to initialize the default sessions."),
HIVE_SERVER2_TEZ_SESSION_RESTRICTED_CONFIGS("hive.server2.tez.sessions.restricted.configs", "",
"The configuration settings that cannot be set when submitting jobs to HiveServer2. If\n" +
"any of these are set to values different from those in the server configuration, an\n" +
"exception will be thrown."),
HIVE_SERVER2_TEZ_SESSION_CUSTOM_QUEUE_ALLOWED("hive.server2.tez.sessions.custom.queue.allowed",
"true", new StringSet("true", "false", "ignore"),
"Whether Tez session pool should allow submitting queries to custom queues. The options\n" +
"are true, false (error out), ignore (accept the query but ignore the queue setting)."),
// Operation log configuration
HIVE_SERVER2_LOGGING_OPERATION_ENABLED("hive.server2.logging.operation.enabled", true,
"When true, HS2 will save operation logs and make them available for clients"),
HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION("hive.server2.logging.operation.log.location",
"${system:java.io.tmpdir}" + File.separator + "${system:user.name}" + File.separator +
"operation_logs",
"Top level directory where operation logs are stored if logging functionality is enabled"),
HIVE_SERVER2_LOGGING_OPERATION_LEVEL("hive.server2.logging.operation.level", "EXECUTION",
new StringSet("NONE", "EXECUTION", "PERFORMANCE", "VERBOSE"),
"HS2 operation logging mode available to clients to be set at session level.\n" +
"For this to work, hive.server2.logging.operation.enabled should be set to true.\n" +
" NONE: Ignore any logging\n" +
" EXECUTION: Log completion of tasks\n" +
" PERFORMANCE: Execution + Performance logs \n" +
" VERBOSE: All logs" ),
HIVE_SERVER2_OPERATION_LOG_CLEANUP_DELAY("hive.server2.operation.log.cleanup.delay", "300s",
new TimeValidator(TimeUnit.SECONDS), "When a query is cancelled (via kill query, query timeout or triggers),\n" +
" operation logs gets cleaned up after this delay"),
// HS2 connections guard rails
HIVE_SERVER2_LIMIT_CONNECTIONS_PER_USER("hive.server2.limit.connections.per.user", 0,
"Maximum hive server2 connections per user. Any user exceeding this limit will not be allowed to connect. " +
"Default=0 does not enforce limits."),
HIVE_SERVER2_LIMIT_CONNECTIONS_PER_IPADDRESS("hive.server2.limit.connections.per.ipaddress", 0,
"Maximum hive server2 connections per ipaddress. Any ipaddress exceeding this limit will not be allowed " +
"to connect. Default=0 does not enforce limits."),
HIVE_SERVER2_LIMIT_CONNECTIONS_PER_USER_IPADDRESS("hive.server2.limit.connections.per.user.ipaddress", 0,
"Maximum hive server2 connections per user:ipaddress combination. Any user-ipaddress exceeding this limit will " +
"not be allowed to connect. Default=0 does not enforce limits."),
// Enable metric collection for HiveServer2
HIVE_SERVER2_METRICS_ENABLED("hive.server2.metrics.enabled", false, "Enable metrics on the HiveServer2."),
// http (over thrift) transport settings
HIVE_SERVER2_THRIFT_HTTP_PORT("hive.server2.thrift.http.port", 10001,
"Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'http'."),
HIVE_SERVER2_THRIFT_HTTP_PATH("hive.server2.thrift.http.path", "cliservice",
"Path component of URL endpoint when in HTTP mode."),
HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE("hive.server2.thrift.max.message.size", 100*1024*1024,
"Maximum message size in bytes a HS2 server will accept."),
HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME("hive.server2.thrift.http.max.idle.time", "1800s",
new TimeValidator(TimeUnit.MILLISECONDS),
"Maximum idle time for a connection on the server when in HTTP mode."),
HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME("hive.server2.thrift.http.worker.keepalive.time", "60s",
new TimeValidator(TimeUnit.SECONDS),
"Keepalive time for an idle http worker thread. When the number of workers exceeds min workers, " +
"excessive threads are killed after this time interval."),
HIVE_SERVER2_THRIFT_HTTP_REQUEST_HEADER_SIZE("hive.server2.thrift.http.request.header.size", 6*1024,
"Request header size in bytes, when using HTTP transport mode. Jetty defaults used."),
HIVE_SERVER2_THRIFT_HTTP_RESPONSE_HEADER_SIZE("hive.server2.thrift.http.response.header.size", 6*1024,
"Response header size in bytes, when using HTTP transport mode. Jetty defaults used."),
HIVE_SERVER2_THRIFT_HTTP_COMPRESSION_ENABLED("hive.server2.thrift.http.compression.enabled", true,
"Enable thrift http compression via Jetty compression support"),
// Cookie based authentication when using HTTP Transport
HIVE_SERVER2_THRIFT_HTTP_COOKIE_AUTH_ENABLED("hive.server2.thrift.http.cookie.auth.enabled", true,
"When true, HiveServer2 in HTTP transport mode, will use cookie based authentication mechanism."),
HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE("hive.server2.thrift.http.cookie.max.age", "86400s",
new TimeValidator(TimeUnit.SECONDS),
"Maximum age in seconds for server side cookie used by HS2 in HTTP mode."),
HIVE_SERVER2_THRIFT_HTTP_COOKIE_DOMAIN("hive.server2.thrift.http.cookie.domain", null,
"Domain for the HS2 generated cookies"),
HIVE_SERVER2_THRIFT_HTTP_COOKIE_PATH("hive.server2.thrift.http.cookie.path", null,
"Path for the HS2 generated cookies"),
@Deprecated
HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_SECURE("hive.server2.thrift.http.cookie.is.secure", true,
"Deprecated: Secure attribute of the HS2 generated cookie (this is automatically enabled for SSL enabled HiveServer2)."),
HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_HTTPONLY("hive.server2.thrift.http.cookie.is.httponly", true,
"HttpOnly attribute of the HS2 generated cookie."),
// binary transport settings
HIVE_SERVER2_THRIFT_PORT("hive.server2.thrift.port", 10000,
"Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'binary'."),
HIVE_SERVER2_THRIFT_SASL_QOP("hive.server2.thrift.sasl.qop", "auth",
new StringSet("auth", "auth-int", "auth-conf"),
"Sasl QOP value; set it to one of following values to enable higher levels of\n" +
"protection for HiveServer2 communication with clients.\n" +
"Setting hadoop.rpc.protection to a higher level than HiveServer2 does not\n" +
"make sense in most situations. HiveServer2 ignores hadoop.rpc.protection in favor\n" +
"of hive.server2.thrift.sasl.qop.\n" +
" \"auth\" - authentication only (default)\n" +
" \"auth-int\" - authentication plus integrity protection\n" +
" \"auth-conf\" - authentication plus integrity and confidentiality protection\n" +
"This is applicable only if HiveServer2 is configured to use Kerberos authentication."),
HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS("hive.server2.thrift.min.worker.threads", 5,
"Minimum number of Thrift worker threads"),
HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS("hive.server2.thrift.max.worker.threads", 500,
"Maximum number of Thrift worker threads"),
HIVE_SERVER2_THRIFT_LOGIN_BEBACKOFF_SLOT_LENGTH(
"hive.server2.thrift.exponential.backoff.slot.length", "100ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Binary exponential backoff slot time for Thrift clients during login to HiveServer2,\n" +
"for retries until hitting Thrift client timeout"),
HIVE_SERVER2_THRIFT_LOGIN_TIMEOUT("hive.server2.thrift.login.timeout", "20s",
new TimeValidator(TimeUnit.SECONDS), "Timeout for Thrift clients during login to HiveServer2"),
HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME("hive.server2.thrift.worker.keepalive.time", "60s",
new TimeValidator(TimeUnit.SECONDS),
"Keepalive time (in seconds) for an idle worker thread. When the number of workers exceeds min workers, " +
"excessive threads are killed after this time interval."),
// Configuration for async thread pool in SessionManager
HIVE_SERVER2_ASYNC_EXEC_THREADS("hive.server2.async.exec.threads", 100,
"Number of threads in the async thread pool for HiveServer2"),
HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT("hive.server2.async.exec.shutdown.timeout", "10s",
new TimeValidator(TimeUnit.SECONDS),
"How long HiveServer2 shutdown will wait for async threads to terminate."),
HIVE_SERVER2_ASYNC_EXEC_WAIT_QUEUE_SIZE("hive.server2.async.exec.wait.queue.size", 100,
"Size of the wait queue for async thread pool in HiveServer2.\n" +
"After hitting this limit, the async thread pool will reject new requests."),
HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME("hive.server2.async.exec.keepalive.time", "10s",
new TimeValidator(TimeUnit.SECONDS),
"Time that an idle HiveServer2 async thread (from the thread pool) will wait for a new task\n" +
"to arrive before terminating"),
HIVE_SERVER2_ASYNC_EXEC_ASYNC_COMPILE("hive.server2.async.exec.async.compile", false,
"Whether to enable compiling async query asynchronously. If enabled, it is unknown if the query will have any resultset before compilation completed."),
HIVE_SERVER2_LONG_POLLING_TIMEOUT("hive.server2.long.polling.timeout", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Time that HiveServer2 will wait before responding to asynchronous calls that use long polling"),
HIVE_SESSION_IMPL_CLASSNAME("hive.session.impl.classname", null, "Classname for custom implementation of hive session"),
HIVE_SESSION_IMPL_WITH_UGI_CLASSNAME("hive.session.impl.withugi.classname", null, "Classname for custom implementation of hive session with UGI"),
// HiveServer2 auth configuration
HIVE_SERVER2_AUTHENTICATION("hive.server2.authentication", "NONE",
new StringSet("NOSASL", "NONE", "LDAP", "KERBEROS", "PAM", "CUSTOM"),
"Client authentication types.\n" +
" NONE: no authentication check\n" +
" LDAP: LDAP/AD based authentication\n" +
" KERBEROS: Kerberos/GSSAPI authentication\n" +
" CUSTOM: Custom authentication provider\n" +
" (Use with property hive.server2.custom.authentication.class)\n" +
" PAM: Pluggable authentication module\n" +
" NOSASL: Raw transport"),
HIVE_SERVER2_TRUSTED_DOMAIN("hive.server2.trusted.domain", "",
"Specifies the host or a domain to trust connections from. Authentication is skipped " +
"for any connection coming from a host whose hostname ends with the value of this" +
" property. If authentication is expected to be skipped for connections from " +
"only a given host, fully qualified hostname of that host should be specified. By default" +
" it is empty, which means that all the connections to HiveServer2 are authenticated. " +
"When it is non-empty, the client has to provide a Hive user name. Any password, if " +
"provided, will not be used when authentication is skipped."),
HIVE_SERVER2_ALLOW_USER_SUBSTITUTION("hive.server2.allow.user.substitution", true,
"Allow alternate user to be specified as part of HiveServer2 open connection request."),
HIVE_SERVER2_KERBEROS_KEYTAB("hive.server2.authentication.kerberos.keytab", "",
"Kerberos keytab file for server principal"),
HIVE_SERVER2_KERBEROS_PRINCIPAL("hive.server2.authentication.kerberos.principal", "",
"Kerberos server principal"),
HIVE_SERVER2_CLIENT_KERBEROS_PRINCIPAL("hive.server2.authentication.client.kerberos.principal", "",
"Kerberos principal used by the HA hive_server2s."),
HIVE_SERVER2_SPNEGO_KEYTAB("hive.server2.authentication.spnego.keytab", "",
"keytab file for SPNego principal, optional,\n" +
"typical value would look like /etc/security/keytabs/spnego.service.keytab,\n" +
"This keytab would be used by HiveServer2 when Kerberos security is enabled and \n" +
"HTTP transport mode is used.\n" +
"This needs to be set only if SPNEGO is to be used in authentication.\n" +
"SPNego authentication would be honored only if valid\n" +
" hive.server2.authentication.spnego.principal\n" +
"and\n" +
" hive.server2.authentication.spnego.keytab\n" +
"are specified."),
HIVE_SERVER2_SPNEGO_PRINCIPAL("hive.server2.authentication.spnego.principal", "",
"SPNego service principal, optional,\n" +
"typical value would look like HTTP/[email protected]\n" +
"SPNego service principal would be used by HiveServer2 when Kerberos security is enabled\n" +
"and HTTP transport mode is used.\n" +
"This needs to be set only if SPNEGO is to be used in authentication."),
HIVE_SERVER2_PLAIN_LDAP_URL("hive.server2.authentication.ldap.url", null,
"LDAP connection URL(s),\n" +
"this value could contain URLs to multiple LDAP servers instances for HA,\n" +
"each LDAP URL is separated by a SPACE character. URLs are used in the \n" +
" order specified until a connection is successful."),
HIVE_SERVER2_PLAIN_LDAP_BASEDN("hive.server2.authentication.ldap.baseDN", null, "LDAP base DN"),
HIVE_SERVER2_PLAIN_LDAP_DOMAIN("hive.server2.authentication.ldap.Domain", null, ""),
HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN("hive.server2.authentication.ldap.groupDNPattern", null,
"COLON-separated list of patterns to use to find DNs for group entities in this directory.\n" +
"Use %s where the actual group name is to be substituted for.\n" +
"For example: CN=%s,CN=Groups,DC=subdomain,DC=domain,DC=com."),
HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER("hive.server2.authentication.ldap.groupFilter", null,
"COMMA-separated list of LDAP Group names (short name not full DNs).\n" +
"For example: HiveAdmins,HadoopAdmins,Administrators"),
HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN("hive.server2.authentication.ldap.userDNPattern", null,
"COLON-separated list of patterns to use to find DNs for users in this directory.\n" +
"Use %s where the actual group name is to be substituted for.\n" +
"For example: CN=%s,CN=Users,DC=subdomain,DC=domain,DC=com."),
HIVE_SERVER2_PLAIN_LDAP_USERFILTER("hive.server2.authentication.ldap.userFilter", null,
"COMMA-separated list of LDAP usernames (just short names, not full DNs).\n" +
"For example: hiveuser,impalauser,hiveadmin,hadoopadmin"),
HIVE_SERVER2_PLAIN_LDAP_GUIDKEY("hive.server2.authentication.ldap.guidKey", "uid",
"LDAP attribute name whose values are unique in this LDAP server.\n" +
"For example: uid or CN."),
HIVE_SERVER2_PLAIN_LDAP_GROUPMEMBERSHIP_KEY("hive.server2.authentication.ldap.groupMembershipKey", "member",
"LDAP attribute name on the group object that contains the list of distinguished names\n" +
"for the user, group, and contact objects that are members of the group.\n" +
"For example: member, uniqueMember or memberUid"),
HIVE_SERVER2_PLAIN_LDAP_USERMEMBERSHIP_KEY(HIVE_SERVER2_AUTHENTICATION_LDAP_USERMEMBERSHIPKEY_NAME, null,
"LDAP attribute name on the user object that contains groups of which the user is\n" +
"a direct member, except for the primary group, which is represented by the\n" +
"primaryGroupId.\n" +
"For example: memberOf"),
HIVE_SERVER2_PLAIN_LDAP_GROUPCLASS_KEY("hive.server2.authentication.ldap.groupClassKey", "groupOfNames",
"LDAP attribute name on the group entry that is to be used in LDAP group searches.\n" +
"For example: group, groupOfNames or groupOfUniqueNames."),
HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY("hive.server2.authentication.ldap.customLDAPQuery", null,
"A full LDAP query that LDAP Atn provider uses to execute against LDAP Server.\n" +
"If this query returns a null resultset, the LDAP Provider fails the Authentication\n" +
"request, succeeds if the user is part of the resultset." +
"For example: (&(objectClass=group)(objectClass=top)(instanceType=4)(cn=Domain*)) \n" +
"(&(objectClass=person)(|(sAMAccountName=admin)(|(memberOf=CN=Domain Admins,CN=Users,DC=domain,DC=com)" +
"(memberOf=CN=Administrators,CN=Builtin,DC=domain,DC=com))))"),
HIVE_SERVER2_PLAIN_LDAP_BIND_USER("hive.server2.authentication.ldap.binddn", null,
"The user with which to bind to the LDAP server, and search for the full domain name " +
"of the user being authenticated.\n" +
"This should be the full domain name of the user, and should have search access across all " +
"users in the LDAP tree.\n" +
"If not specified, then the user being authenticated will be used as the bind user.\n" +
"For example: CN=bindUser,CN=Users,DC=subdomain,DC=domain,DC=com"),
HIVE_SERVER2_PLAIN_LDAP_BIND_PASSWORD("hive.server2.authentication.ldap.bindpw", null,
"The password for the bind user, to be used to search for the full name of the user being authenticated.\n" +
"If the username is specified, this parameter must also be specified."),
HIVE_SERVER2_CUSTOM_AUTHENTICATION_CLASS("hive.server2.custom.authentication.class", null,
"Custom authentication class. Used when property\n" +
"'hive.server2.authentication' is set to 'CUSTOM'. Provided class\n" +
"must be a proper implementation of the interface\n" +
"org.apache.hive.service.auth.PasswdAuthenticationProvider. HiveServer2\n" +
"will call its Authenticate(user, passed) method to authenticate requests.\n" +
"The implementation may optionally implement Hadoop's\n" +
"org.apache.hadoop.conf.Configurable class to grab Hive's Configuration object."),
HIVE_SERVER2_PAM_SERVICES("hive.server2.authentication.pam.services", null,
"List of the underlying pam services that should be used when auth type is PAM\n" +
"A file with the same name must exist in /etc/pam.d"),
HIVE_SERVER2_ENABLE_DOAS("hive.server2.enable.doAs", true,
"Setting this property to true will have HiveServer2 execute\n" +
"Hive operations as the user making the calls to it."),
HIVE_DISTCP_DOAS_USER("hive.distcp.privileged.doAs","hive",
"This property allows privileged distcp executions done by hive\n" +
"to run as this user."),
HIVE_SERVER2_TABLE_TYPE_MAPPING("hive.server2.table.type.mapping", "CLASSIC", new StringSet("CLASSIC", "HIVE"),
"This setting reflects how HiveServer2 will report the table types for JDBC and other\n" +
"client implementations that retrieve the available tables and supported table types\n" +
" HIVE : Exposes Hive's native table types like MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW\n" +
" CLASSIC : More generic types like TABLE and VIEW"),
HIVE_SERVER2_SESSION_HOOK("hive.server2.session.hook", "", ""),
// SSL settings
HIVE_SERVER2_USE_SSL("hive.server2.use.SSL", false,
"Set this to true for using SSL encryption in HiveServer2."),
HIVE_SERVER2_SSL_KEYSTORE_PATH("hive.server2.keystore.path", "",
"SSL certificate keystore location."),
HIVE_SERVER2_SSL_KEYSTORE_PASSWORD("hive.server2.keystore.password", "",
"SSL certificate keystore password."),
HIVE_SERVER2_MAP_FAIR_SCHEDULER_QUEUE("hive.server2.map.fair.scheduler.queue", true,
"If the YARN fair scheduler is configured and HiveServer2 is running in non-impersonation mode,\n" +
"this setting determines the user for fair scheduler queue mapping.\n" +
"If set to true (default), the logged-in user determines the fair scheduler queue\n" +
"for submitted jobs, so that map reduce resource usage can be tracked by user.\n" +
"If set to false, all Hive jobs go to the 'hive' user's queue."),
HIVE_SERVER2_BUILTIN_UDF_WHITELIST("hive.server2.builtin.udf.whitelist", "",
"Comma separated list of builtin udf names allowed in queries.\n" +
"An empty whitelist allows all builtin udfs to be executed. " +
" The udf black list takes precedence over udf white list"),
HIVE_SERVER2_BUILTIN_UDF_BLACKLIST("hive.server2.builtin.udf.blacklist", "",
"Comma separated list of udfs names. These udfs will not be allowed in queries." +
" The udf black list takes precedence over udf white list"),
HIVE_ALLOW_UDF_LOAD_ON_DEMAND("hive.allow.udf.load.on.demand", false,
"Whether enable loading UDFs from metastore on demand; this is mostly relevant for\n" +
"HS2 and was the default behavior before Hive 1.2. Off by default."),
HIVE_SERVER2_SESSION_CHECK_INTERVAL("hive.server2.session.check.interval", "6h",
new TimeValidator(TimeUnit.MILLISECONDS, 3000l, true, null, false),
"The check interval for session/operation timeout, which can be disabled by setting to zero or negative value."),
HIVE_SERVER2_CLOSE_SESSION_ON_DISCONNECT("hive.server2.close.session.on.disconnect", true,
"Session will be closed when connection is closed. Set this to false to have session outlive its parent connection."),
HIVE_SERVER2_IDLE_SESSION_TIMEOUT("hive.server2.idle.session.timeout", "7d",
new TimeValidator(TimeUnit.MILLISECONDS),
"Session will be closed when it's not accessed for this duration, which can be disabled by setting to zero or negative value."),
HIVE_SERVER2_IDLE_OPERATION_TIMEOUT("hive.server2.idle.operation.timeout", "5d",
new TimeValidator(TimeUnit.MILLISECONDS),
"Operation will be closed when it's not accessed for this duration of time, which can be disabled by setting to zero value.\n" +
" With positive value, it's checked for operations in terminal state only (FINISHED, CANCELED, CLOSED, ERROR).\n" +
" With negative value, it's checked for all of the operations regardless of state."),
HIVE_SERVER2_IDLE_SESSION_CHECK_OPERATION("hive.server2.idle.session.check.operation", true,
"Session will be considered to be idle only if there is no activity, and there is no pending operation.\n" +
" This setting takes effect only if session idle timeout (hive.server2.idle.session.timeout) and checking\n" +
"(hive.server2.session.check.interval) are enabled."),
HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT("hive.server2.thrift.client.retry.limit", 1,"Number of retries upon " +
"failure of Thrift HiveServer2 calls"),
HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT("hive.server2.thrift.client.connect.retry.limit", 1,"Number of " +
"retries while opening a connection to HiveServe2"),
HIVE_SERVER2_THRIFT_CLIENT_RETRY_DELAY_SECONDS("hive.server2.thrift.client.retry.delay.seconds", "1s",
new TimeValidator(TimeUnit.SECONDS), "Number of seconds for the HiveServer2 thrift client to wait between " +
"consecutive connection attempts. Also specifies the time to wait between retrying thrift calls upon failures"),
HIVE_SERVER2_THRIFT_CLIENT_USER("hive.server2.thrift.client.user", "anonymous","Username to use against thrift" +
" client"),
HIVE_SERVER2_THRIFT_CLIENT_PASSWORD("hive.server2.thrift.client.password", "anonymous","Password to use against " +
"thrift client"),
// ResultSet serialization settings
HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS("hive.server2.thrift.resultset.serialize.in.tasks", false,
"Whether we should serialize the Thrift structures used in JDBC ResultSet RPC in task nodes.\n " +
"We use SequenceFile and ThriftJDBCBinarySerDe to read and write the final results if this is true."),
// TODO: Make use of this config to configure fetch size
HIVE_SERVER2_THRIFT_RESULTSET_MAX_FETCH_SIZE("hive.server2.thrift.resultset.max.fetch.size",
10000, "Max number of rows sent in one Fetch RPC call by the server to the client."),
HIVE_SERVER2_THRIFT_RESULTSET_DEFAULT_FETCH_SIZE("hive.server2.thrift.resultset.default.fetch.size", 1000,
"The number of rows sent in one Fetch RPC call by the server to the client, if not\n" +
"specified by the client."),
HIVE_SERVER2_XSRF_FILTER_ENABLED("hive.server2.xsrf.filter.enabled",false,
"If enabled, HiveServer2 will block any requests made to it over http " +
"if an X-XSRF-HEADER header is not present"),
HIVE_SECURITY_COMMAND_WHITELIST("hive.security.command.whitelist",
"set,reset,dfs,add,list,delete,reload,compile,llap",
"Comma separated list of non-SQL Hive commands users are authorized to execute"),
HIVE_SERVER2_JOB_CREDENTIAL_PROVIDER_PATH("hive.server2.job.credential.provider.path", "",
"If set, this configuration property should provide a comma-separated list of URLs that indicates the type and " +
"location of providers to be used by hadoop credential provider API. It provides HiveServer2 the ability to provide job-specific " +
"credential providers for jobs run using MR and Spark execution engines. This functionality has not been tested against Tez."),
HIVE_MOVE_FILES_THREAD_COUNT("hive.mv.files.thread", 15, new SizeValidator(0L, true, 1024L, true), "Number of threads"
+ " used to move files in move task. Set it to 0 to disable multi-threaded file moves. This parameter is also used by"
+ " MSCK to check tables."),
HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT("hive.load.dynamic.partitions.thread", 15,
new SizeValidator(1L, true, 1024L, true),
"Number of threads used to load dynamic partitions."),
// If this is set all move tasks at the end of a multi-insert query will only begin once all
// outputs are ready
HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES(
"hive.multi.insert.move.tasks.share.dependencies", false,
"If this is set all move tasks for tables/partitions (not directories) at the end of a\n" +
"multi-insert query will only begin once the dependencies for all these move tasks have been\n" +
"met.\n" +
"Advantages: If concurrency is enabled, the locks will only be released once the query has\n" +
" finished, so with this config enabled, the time when the table/partition is\n" +
" generated will be much closer to when the lock on it is released.\n" +
"Disadvantages: If concurrency is not enabled, with this disabled, the tables/partitions which\n" +
" are produced by this query and finish earlier will be available for querying\n" +
" much earlier. Since the locks are only released once the query finishes, this\n" +
" does not apply if concurrency is enabled."),
HIVE_INFER_BUCKET_SORT("hive.exec.infer.bucket.sort", false,
"If this is set, when writing partitions, the metadata will include the bucketing/sorting\n" +
"properties with which the data was written if any (this will not overwrite the metadata\n" +
"inherited from the table if the table is bucketed/sorted)"),
HIVE_INFER_BUCKET_SORT_NUM_BUCKETS_POWER_TWO(
"hive.exec.infer.bucket.sort.num.buckets.power.two", false,
"If this is set, when setting the number of reducers for the map reduce task which writes the\n" +
"final output files, it will choose a number which is a power of two, unless the user specifies\n" +
"the number of reducers to use using mapred.reduce.tasks. The number of reducers\n" +
"may be set to a power of two, only to be followed by a merge task meaning preventing\n" +
"anything from being inferred.\n" +
"With hive.exec.infer.bucket.sort set to true:\n" +
"Advantages: If this is not set, the number of buckets for partitions will seem arbitrary,\n" +
" which means that the number of mappers used for optimized joins, for example, will\n" +
" be very low. With this set, since the number of buckets used for any partition is\n" +
" a power of two, the number of mappers used for optimized joins will be the least\n" +
" number of buckets used by any partition being joined.\n" +
"Disadvantages: This may mean a much larger or much smaller number of reducers being used in the\n" +
" final map reduce job, e.g. if a job was originally going to take 257 reducers,\n" +
" it will now take 512 reducers, similarly if the max number of reducers is 511,\n" +
" and a job was going to use this many, it will now use 256 reducers."),
HIVEOPTLISTBUCKETING("hive.optimize.listbucketing", false,
"Enable list bucketing optimizer. Default value is false so that we disable it by default."),
// Allow TCP Keep alive socket option for HiveServer or a maximum timeout for the socket.
SERVER_READ_SOCKET_TIMEOUT("hive.server.read.socket.timeout", "10s",
new TimeValidator(TimeUnit.SECONDS),
"Timeout for the HiveServer to close the connection if no response from the client. By default, 10 seconds."),
SERVER_TCP_KEEP_ALIVE("hive.server.tcp.keepalive", true,
"Whether to enable TCP keepalive for the Hive Server. Keepalive will prevent accumulation of half-open connections."),
HIVE_DECODE_PARTITION_NAME("hive.decode.partition.name", false,
"Whether to show the unquoted partition names in query results."),
HIVE_EXECUTION_ENGINE("hive.execution.engine", "mr", new StringSet(true, "mr", "tez", "spark"),
"Chooses execution engine. Options are: mr (Map reduce, default), tez, spark. While MR\n" +
"remains the default engine for historical reasons, it is itself a historical engine\n" +
"and is deprecated in Hive 2 line. It may be removed without further warning."),
HIVE_EXECUTION_MODE("hive.execution.mode", "container", new StringSet("container", "llap"),
"Chooses whether query fragments will run in container or in llap"),
HIVE_JAR_DIRECTORY("hive.jar.directory", null,
"This is the location hive in tez mode will look for to find a site wide \n" +
"installed hive instance."),
HIVE_USER_INSTALL_DIR("hive.user.install.directory", "/user/",
"If hive (in tez mode only) cannot find a usable hive jar in \"hive.jar.directory\", \n" +
"it will upload the hive jar to \"hive.user.install.directory/user.name\"\n" +
"and use it to run queries."),
// Vectorization enabled
HIVE_VECTORIZATION_ENABLED("hive.vectorized.execution.enabled", true,
"This flag should be set to true to enable vectorized mode of query execution.\n" +
"The default value is true to reflect that our most expected Hive deployment will be using vectorization."),
HIVE_VECTORIZATION_REDUCE_ENABLED("hive.vectorized.execution.reduce.enabled", true,
"This flag should be set to true to enable vectorized mode of the reduce-side of query execution.\n" +
"The default value is true."),
HIVE_VECTORIZATION_REDUCE_GROUPBY_ENABLED("hive.vectorized.execution.reduce.groupby.enabled", true,
"This flag should be set to true to enable vectorized mode of the reduce-side GROUP BY query execution.\n" +
"The default value is true."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED("hive.vectorized.execution.mapjoin.native.enabled", true,
"This flag should be set to true to enable native (i.e. non-pass through) vectorization\n" +
"of queries using MapJoin.\n" +
"The default value is true."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_MULTIKEY_ONLY_ENABLED("hive.vectorized.execution.mapjoin.native.multikey.only.enabled", false,
"This flag should be set to true to restrict use of native vector map join hash tables to\n" +
"the MultiKey in queries using MapJoin.\n" +
"The default value is false."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_MINMAX_ENABLED("hive.vectorized.execution.mapjoin.minmax.enabled", false,
"This flag should be set to true to enable vector map join hash tables to\n" +
"use max / max filtering for integer join queries using MapJoin.\n" +
"The default value is false."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_OVERFLOW_REPEATED_THRESHOLD("hive.vectorized.execution.mapjoin.overflow.repeated.threshold", -1,
"The number of small table rows for a match in vector map join hash tables\n" +
"where we use the repeated field optimization in overflow vectorized row batch for join queries using MapJoin.\n" +
"A value of -1 means do use the join result optimization. Otherwise, threshold value can be 0 to maximum integer."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED("hive.vectorized.execution.mapjoin.native.fast.hashtable.enabled", false,
"This flag should be set to true to enable use of native fast vector map join hash tables in\n" +
"queries using MapJoin.\n" +
"The default value is false."),
HIVE_VECTORIZATION_GROUPBY_CHECKINTERVAL("hive.vectorized.groupby.checkinterval", 100000,
"Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed."),
HIVE_VECTORIZATION_GROUPBY_MAXENTRIES("hive.vectorized.groupby.maxentries", 1000000,
"Max number of entries in the vector group by aggregation hashtables. \n" +
"Exceeding this will trigger a flush irrelevant of memory pressure condition."),
HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT("hive.vectorized.groupby.flush.percent", (float) 0.1,
"Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded."),
HIVE_VECTORIZATION_REDUCESINK_NEW_ENABLED("hive.vectorized.execution.reducesink.new.enabled", true,
"This flag should be set to true to enable the new vectorization\n" +
"of queries using ReduceSink.\ni" +
"The default value is true."),
HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT("hive.vectorized.use.vectorized.input.format", true,
"This flag should be set to true to enable vectorizing with vectorized input file format capable SerDe.\n" +
"The default value is true."),
HIVE_VECTORIZATION_VECTORIZED_INPUT_FILE_FORMAT_EXCLUDES("hive.vectorized.input.format.excludes","",
"This configuration should be set to fully described input format class names for which \n"
+ " vectorized input format should not be used for vectorized execution."),
HIVE_VECTORIZATION_USE_VECTOR_DESERIALIZE("hive.vectorized.use.vector.serde.deserialize", true,
"This flag should be set to true to enable vectorizing rows using vector deserialize.\n" +
"The default value is true."),
HIVE_VECTORIZATION_USE_ROW_DESERIALIZE("hive.vectorized.use.row.serde.deserialize", true,
"This flag should be set to true to enable vectorizing using row deserialize.\n" +
"The default value is false."),
HIVE_VECTORIZATION_ROW_DESERIALIZE_INPUTFORMAT_EXCLUDES(
"hive.vectorized.row.serde.inputformat.excludes",
"org.apache.parquet.hadoop.ParquetInputFormat,org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat",
"The input formats not supported by row deserialize vectorization."),
HIVE_VECTOR_ADAPTOR_USAGE_MODE("hive.vectorized.adaptor.usage.mode", "all", new StringSet("none", "chosen", "all"),
"Specifies the extent to which the VectorUDFAdaptor will be used for UDFs that do not have a corresponding vectorized class.\n" +
"0. none : disable any usage of VectorUDFAdaptor\n" +
"1. chosen : use VectorUDFAdaptor for a small set of UDFs that were chosen for good performance\n" +
"2. all : use VectorUDFAdaptor for all UDFs"
),
HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE("hive.test.vectorized.adaptor.override", false,
"internal use only, used to force always using the VectorUDFAdaptor.\n" +
"The default is false, of course",
true),
HIVE_VECTORIZATION_PTF_ENABLED("hive.vectorized.execution.ptf.enabled", true,
"This flag should be set to true to enable vectorized mode of the PTF of query execution.\n" +
"The default value is true."),
HIVE_VECTORIZATION_PTF_MAX_MEMORY_BUFFERING_BATCH_COUNT("hive.vectorized.ptf.max.memory.buffering.batch.count", 25,
"Maximum number of vectorized row batches to buffer in memory for PTF\n" +
"The default value is 25"),
HIVE_VECTORIZATION_TESTING_REDUCER_BATCH_SIZE("hive.vectorized.testing.reducer.batch.size", -1,
"internal use only, used for creating small group key vectorized row batches to exercise more logic\n" +
"The default value is -1 which means don't restrict for testing",
true),
HIVE_VECTORIZATION_TESTING_REUSE_SCRATCH_COLUMNS("hive.vectorized.reuse.scratch.columns", true,
"internal use only. Disable this to debug scratch column state issues",
true),
HIVE_VECTORIZATION_COMPLEX_TYPES_ENABLED("hive.vectorized.complex.types.enabled", true,
"This flag should be set to true to enable vectorization\n" +
"of expressions with complex types.\n" +
"The default value is true."),
HIVE_VECTORIZATION_GROUPBY_COMPLEX_TYPES_ENABLED("hive.vectorized.groupby.complex.types.enabled", true,
"This flag should be set to true to enable group by vectorization\n" +
"of aggregations that use complex types.\n",
"For example, AVG uses a complex type (STRUCT) for partial aggregation results" +
"The default value is true."),
HIVE_VECTORIZATION_ROW_IDENTIFIER_ENABLED("hive.vectorized.row.identifier.enabled", true,
"This flag should be set to true to enable vectorization of ROW__ID."),
HIVE_VECTORIZATION_USE_CHECKED_EXPRESSIONS("hive.vectorized.use.checked.expressions", false,
"This flag should be set to true to use overflow checked vector expressions when available.\n" +
"For example, arithmetic expressions which can overflow the output data type can be evaluated using\n" +
" checked vector expressions so that they produce same result as non-vectorized evaluation."),
HIVE_VECTORIZED_ADAPTOR_SUPPRESS_EVALUATE_EXCEPTIONS(
"hive.vectorized.adaptor.suppress.evaluate.exceptions", false,
"This flag should be set to true to suppress HiveException from the generic UDF function\n" +
"evaluate call and turn them into NULLs. Assume, by default, this is not needed"),
HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED(
"hive.vectorized.input.format.supports.enabled",
"decimal_64",
"Which vectorized input format support features are enabled for vectorization.\n" +
"That is, if a VectorizedInputFormat input format does support \"decimal_64\" for example\n" +
"this variable must enable that to be used in vectorization"),
HIVE_VECTORIZED_IF_EXPR_MODE("hive.vectorized.if.expr.mode", "better", new StringSet("adaptor", "good", "better"),
"Specifies the extent to which SQL IF statements will be vectorized.\n" +
"0. adaptor: only use the VectorUDFAdaptor to vectorize IF statements\n" +
"1. good : use regular vectorized IF expression classes that get good performance\n" +
"2. better : use vectorized IF expression classes that conditionally execute THEN/ELSE\n" +
" expressions for better performance.\n"),
HIVE_TEST_VECTORIZATION_ENABLED_OVERRIDE("hive.test.vectorized.execution.enabled.override",
"none", new StringSet("none", "enable", "disable"),
"internal use only, used to override the hive.vectorized.execution.enabled setting and\n" +
"turn off vectorization. The default is false, of course",
true),
HIVE_TEST_VECTORIZATION_SUPPRESS_EXPLAIN_EXECUTION_MODE(
"hive.test.vectorization.suppress.explain.execution.mode", false,
"internal use only, used to suppress \"Execution mode: vectorized\" EXPLAIN display.\n" +
"The default is false, of course",
true),
HIVE_TEST_VECTORIZER_SUPPRESS_FATAL_EXCEPTIONS(
"hive.test.vectorizer.suppress.fatal.exceptions", true,
"internal use only. When false, don't suppress fatal exceptions like\n" +
"NullPointerException, etc so the query will fail and assure it will be noticed",
true),
HIVE_VECTORIZATION_FILESINK_ARROW_NATIVE_ENABLED(
"hive.vectorized.execution.filesink.arrow.native.enabled", false,
"This flag should be set to true to enable the native vectorization\n" +
"of queries using the Arrow SerDe and FileSink.\n" +
"The default value is false."),
HIVE_TYPE_CHECK_ON_INSERT("hive.typecheck.on.insert", true, "This property has been extended to control "
+ "whether to check, convert, and normalize partition value to conform to its column type in "
+ "partition operations including but not limited to insert, such as alter, describe etc."),
HIVE_HADOOP_CLASSPATH("hive.hadoop.classpath", null,
"For Windows OS, we need to pass HIVE_HADOOP_CLASSPATH Java parameter while starting HiveServer2 \n" +
"using \"-hiveconf hive.hadoop.classpath=%HIVE_LIB%\"."),
HIVE_RPC_QUERY_PLAN("hive.rpc.query.plan", false,
"Whether to send the query plan via local resource or RPC"),
HIVE_AM_SPLIT_GENERATION("hive.compute.splits.in.am", true,
"Whether to generate the splits locally or in the AM (tez only)"),
HIVE_TEZ_GENERATE_CONSISTENT_SPLITS("hive.tez.input.generate.consistent.splits", true,
"Whether to generate consistent split locations when generating splits in the AM"),
HIVE_PREWARM_ENABLED("hive.prewarm.enabled", false, "Enables container prewarm for Tez/Spark (Hadoop 2 only)"),
HIVE_PREWARM_NUM_CONTAINERS("hive.prewarm.numcontainers", 10, "Controls the number of containers to prewarm for Tez/Spark (Hadoop 2 only)"),
HIVE_PREWARM_SPARK_TIMEOUT("hive.prewarm.spark.timeout", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Time to wait to finish prewarming spark executors"),
HIVESTAGEIDREARRANGE("hive.stageid.rearrange", "none", new StringSet("none", "idonly", "traverse", "execution"), ""),
HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES("hive.explain.dependency.append.tasktype", false, ""),
HIVECOUNTERGROUP("hive.counters.group.name", "HIVE",
"The name of counter group for internal Hive variables (CREATED_FILE, FATAL_ERROR, etc.)"),
HIVE_QUOTEDID_SUPPORT("hive.support.quoted.identifiers", "column",
new StringSet("none", "column"),
"Whether to use quoted identifier. 'none' or 'column' can be used. \n" +
" none: default(past) behavior. Implies only alphaNumeric and underscore are valid characters in identifiers.\n" +
" column: implies column names can contain any character."
),
/**
* @deprecated Use MetastoreConf.SUPPORT_SPECIAL_CHARACTERS_IN_TABLE_NAMES
*/
@Deprecated
HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES("hive.support.special.characters.tablename", true,
"This flag should be set to true to enable support for special characters in table names.\n"
+ "When it is set to false, only [a-zA-Z_0-9]+ are supported.\n"
+ "The only supported special character right now is '/'. This flag applies only to quoted table names.\n"
+ "The default value is true."),
HIVE_CREATE_TABLES_AS_INSERT_ONLY("hive.create.as.insert.only", false,
"Whether the eligible tables should be created as ACID insert-only by default. Does \n" +
"not apply to external tables, the ones using storage handlers, etc."),
// role names are case-insensitive
USERS_IN_ADMIN_ROLE("hive.users.in.admin.role", "", false,
"Comma separated list of users who are in admin role for bootstrapping.\n" +
"More users can be added in ADMIN role later."),
HIVE_COMPAT("hive.compat", HiveCompat.DEFAULT_COMPAT_LEVEL,
"Enable (configurable) deprecated behaviors by setting desired level of backward compatibility.\n" +
"Setting to 0.12:\n" +
" Maintains division behavior: int / int = double"),
HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ("hive.convert.join.bucket.mapjoin.tez", true,
"Whether joins can be automatically converted to bucket map joins in hive \n" +
"when tez is used as the execution engine."),
HIVE_TEZ_BMJ_USE_SUBCACHE("hive.tez.bmj.use.subcache", true,
"Use subcache to reuse hashtable across multiple tasks"),
HIVE_CHECK_CROSS_PRODUCT("hive.exec.check.crossproducts", true,
"Check if a plan contains a Cross Product. If there is one, output a warning to the Session's console."),
HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL("hive.localize.resource.wait.interval", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Time to wait for another thread to localize the same resource for hive-tez."),
HIVE_LOCALIZE_RESOURCE_NUM_WAIT_ATTEMPTS("hive.localize.resource.num.wait.attempts", 5,
"The number of attempts waiting for localizing a resource in hive-tez."),
TEZ_AUTO_REDUCER_PARALLELISM("hive.tez.auto.reducer.parallelism", false,
"Turn on Tez' auto reducer parallelism feature. When enabled, Hive will still estimate data sizes\n" +
"and set parallelism estimates. Tez will sample source vertices' output sizes and adjust the estimates at runtime as\n" +
"necessary."),
TEZ_LLAP_MIN_REDUCER_PER_EXECUTOR("hive.tez.llap.min.reducer.per.executor", 0.33f,
"If above 0, the min number of reducers for auto-parallelism for LLAP scheduling will\n" +
"be set to this fraction of the number of executors."),
TEZ_MAX_PARTITION_FACTOR("hive.tez.max.partition.factor", 2f,
"When auto reducer parallelism is enabled this factor will be used to over-partition data in shuffle edges."),
TEZ_MIN_PARTITION_FACTOR("hive.tez.min.partition.factor", 0.25f,
"When auto reducer parallelism is enabled this factor will be used to put a lower limit to the number\n" +
"of reducers that tez specifies."),
TEZ_OPTIMIZE_BUCKET_PRUNING(
"hive.tez.bucket.pruning", false,
"When pruning is enabled, filters on bucket columns will be processed by \n" +
"filtering the splits against a bitset of included buckets. This needs predicates \n"+
"produced by hive.optimize.ppd and hive.optimize.index.filters."),
TEZ_OPTIMIZE_BUCKET_PRUNING_COMPAT(
"hive.tez.bucket.pruning.compat", true,
"When pruning is enabled, handle possibly broken inserts due to negative hashcodes.\n" +
"This occasionally doubles the data scan cost, but is default enabled for safety"),
TEZ_DYNAMIC_PARTITION_PRUNING(
"hive.tez.dynamic.partition.pruning", true,
"When dynamic pruning is enabled, joins on partition keys will be processed by sending\n" +
"events from the processing vertices to the Tez application master. These events will be\n" +
"used to prune unnecessary partitions."),
TEZ_DYNAMIC_PARTITION_PRUNING_EXTENDED("hive.tez.dynamic.partition.pruning.extended", true,
"Whether we should try to create additional opportunities for dynamic pruning, e.g., considering\n" +
"siblings that may not be created by normal dynamic pruning logic.\n" +
"Only works when dynamic pruning is enabled."),
TEZ_DYNAMIC_PARTITION_PRUNING_MAX_EVENT_SIZE("hive.tez.dynamic.partition.pruning.max.event.size", 1*1024*1024L,
"Maximum size of events sent by processors in dynamic pruning. If this size is crossed no pruning will take place."),
TEZ_DYNAMIC_PARTITION_PRUNING_MAX_DATA_SIZE("hive.tez.dynamic.partition.pruning.max.data.size", 100*1024*1024L,
"Maximum total data size of events in dynamic pruning."),
TEZ_DYNAMIC_SEMIJOIN_REDUCTION("hive.tez.dynamic.semijoin.reduction", true,
"When dynamic semijoin is enabled, shuffle joins will perform a leaky semijoin before shuffle. This " +
"requires hive.tez.dynamic.partition.pruning to be enabled."),
TEZ_MIN_BLOOM_FILTER_ENTRIES("hive.tez.min.bloom.filter.entries", 1000000L,
"Bloom filter should be of at min certain size to be effective"),
TEZ_MAX_BLOOM_FILTER_ENTRIES("hive.tez.max.bloom.filter.entries", 100000000L,
"Bloom filter should be of at max certain size to be effective"),
TEZ_BLOOM_FILTER_FACTOR("hive.tez.bloom.filter.factor", (float) 1.0,
"Bloom filter should be a multiple of this factor with nDV"),
TEZ_BIGTABLE_MIN_SIZE_SEMIJOIN_REDUCTION("hive.tez.bigtable.minsize.semijoin.reduction", 100000000L,
"Big table for runtime filteting should be of atleast this size"),
TEZ_DYNAMIC_SEMIJOIN_REDUCTION_THRESHOLD("hive.tez.dynamic.semijoin.reduction.threshold", (float) 0.50,
"Only perform semijoin optimization if the estimated benefit at or above this fraction of the target table"),
TEZ_DYNAMIC_SEMIJOIN_REDUCTION_FOR_MAPJOIN("hive.tez.dynamic.semijoin.reduction.for.mapjoin", false,
"Use a semi-join branch for map-joins. This may not make it faster, but is helpful in certain join patterns."),
TEZ_DYNAMIC_SEMIJOIN_REDUCTION_FOR_DPP_FACTOR("hive.tez.dynamic.semijoin.reduction.for.dpp.factor",
(float) 1.0,
"The factor to decide if semijoin branch feeds into a TableScan\n" +
"which has an outgoing Dynamic Partition Pruning (DPP) branch based on number of distinct values."),
TEZ_SMB_NUMBER_WAVES(
"hive.tez.smb.number.waves",
(float) 0.5,
"The number of waves in which to run the SMB join. Account for cluster being occupied. Ideally should be 1 wave."),
TEZ_EXEC_SUMMARY(
"hive.tez.exec.print.summary",
false,
"Display breakdown of execution steps, for every query executed by the shell."),
TEZ_SESSION_EVENTS_SUMMARY(
"hive.tez.session.events.print.summary",
"none", new StringSet("none", "text", "json"),
"Display summary of all tez sessions related events in text or json format"),
TEZ_EXEC_INPLACE_PROGRESS(
"hive.tez.exec.inplace.progress",
true,
"Updates tez job execution progress in-place in the terminal when hive-cli is used."),
HIVE_SERVER2_INPLACE_PROGRESS(
"hive.server2.in.place.progress",
true,
"Allows hive server 2 to send progress bar update information. This is currently available"
+ " only if the execution engine is tez or Spark."),
TEZ_DAG_STATUS_CHECK_INTERVAL("hive.tez.dag.status.check.interval", "500ms",
new TimeValidator(TimeUnit.MILLISECONDS), "Interval between subsequent DAG status invocation."),
SPARK_EXEC_INPLACE_PROGRESS("hive.spark.exec.inplace.progress", true,
"Updates spark job execution progress in-place in the terminal."),
TEZ_CONTAINER_MAX_JAVA_HEAP_FRACTION("hive.tez.container.max.java.heap.fraction", 0.8f,
"This is to override the tez setting with the same name"),
TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION_MIN("hive.tez.task.scale.memory.reserve-fraction.min",
0.3f, "This is to override the tez setting tez.task.scale.memory.reserve-fraction"),
TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION_MAX("hive.tez.task.scale.memory.reserve.fraction.max",
0.5f, "The maximum fraction of JVM memory which Tez will reserve for the processor"),
TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION("hive.tez.task.scale.memory.reserve.fraction",
-1f, "The customized fraction of JVM memory which Tez will reserve for the processor"),
TEZ_CARTESIAN_PRODUCT_EDGE_ENABLED("hive.tez.cartesian-product.enabled",
false, "Use Tez cartesian product edge to speed up cross product"),
TEZ_SIMPLE_CUSTOM_EDGE_TINY_BUFFER_SIZE_MB("hive.tez.unordered.output.buffer.size.mb", -1,
"When we have an operation that does not need a large buffer, we use this buffer size for simple custom edge.\n" +
"Value is an integer. Default value is -1, which means that we will estimate this value from operators in the plan."),
// The default is different on the client and server, so it's null here.
LLAP_IO_ENABLED("hive.llap.io.enabled", null, "Whether the LLAP IO layer is enabled."),
LLAP_IO_ROW_WRAPPER_ENABLED("hive.llap.io.row.wrapper.enabled", true, "Whether the LLAP IO row wrapper is enabled for non-vectorized queries."),
LLAP_IO_ACID_ENABLED("hive.llap.io.acid", true, "Whether the LLAP IO layer is enabled for ACID."),
LLAP_IO_TRACE_SIZE("hive.llap.io.trace.size", "2Mb",
new SizeValidator(0L, true, (long)Integer.MAX_VALUE, false),
"The buffer size for a per-fragment LLAP debug trace. 0 to disable."),
LLAP_IO_TRACE_ALWAYS_DUMP("hive.llap.io.trace.always.dump", false,
"Whether to always dump the LLAP IO trace (if enabled); the default is on error."),
LLAP_IO_NONVECTOR_WRAPPER_ENABLED("hive.llap.io.nonvector.wrapper.enabled", true,
"Whether the LLAP IO layer is enabled for non-vectorized queries that read inputs\n" +
"that can be vectorized"),
LLAP_IO_MEMORY_MODE("hive.llap.io.memory.mode", "cache",
new StringSet("cache", "none"),
"LLAP IO memory usage; 'cache' (the default) uses data and metadata cache with a\n" +
"custom off-heap allocator, 'none' doesn't use either (this mode may result in\n" +
"significant performance degradation)"),
LLAP_ALLOCATOR_MIN_ALLOC("hive.llap.io.allocator.alloc.min", "4Kb", new SizeValidator(),
"Minimum allocation possible from LLAP buddy allocator. Allocations below that are\n" +
"padded to minimum allocation. For ORC, should generally be the same as the expected\n" +
"compression buffer size, or next lowest power of 2. Must be a power of 2."),
LLAP_ALLOCATOR_MAX_ALLOC("hive.llap.io.allocator.alloc.max", "16Mb", new SizeValidator(),
"Maximum allocation possible from LLAP buddy allocator. For ORC, should be as large as\n" +
"the largest expected ORC compression buffer size. Must be a power of 2."),
LLAP_ALLOCATOR_ARENA_COUNT("hive.llap.io.allocator.arena.count", 8,
"Arena count for LLAP low-level cache; cache will be allocated in the steps of\n" +
"(size/arena_count) bytes. This size must be <= 1Gb and >= max allocation; if it is\n" +
"not the case, an adjusted size will be used. Using powers of 2 is recommended."),
LLAP_IO_MEMORY_MAX_SIZE("hive.llap.io.memory.size", "1Gb", new SizeValidator(),
"Maximum size for IO allocator or ORC low-level cache.", "hive.llap.io.cache.orc.size"),
LLAP_ALLOCATOR_DIRECT("hive.llap.io.allocator.direct", true,
"Whether ORC low-level cache should use direct allocation."),
LLAP_ALLOCATOR_PREALLOCATE("hive.llap.io.allocator.preallocate", true,
"Whether to preallocate the entire IO memory at init time."),
LLAP_ALLOCATOR_MAPPED("hive.llap.io.allocator.mmap", false,
"Whether ORC low-level cache should use memory mapped allocation (direct I/O). \n" +
"This is recommended to be used along-side NVDIMM (DAX) or NVMe flash storage."),
LLAP_ALLOCATOR_MAPPED_PATH("hive.llap.io.allocator.mmap.path", "/tmp",
new WritableDirectoryValidator(),
"The directory location for mapping NVDIMM/NVMe flash storage into the ORC low-level cache."),
LLAP_ALLOCATOR_DISCARD_METHOD("hive.llap.io.allocator.discard.method", "both",
new StringSet("freelist", "brute", "both"),
"Which method to use to force-evict blocks to deal with fragmentation:\n" +
"freelist - use half-size free list (discards less, but also less reliable); brute -\n" +
"brute force, discard whatever we can; both - first try free list, then brute force."),
LLAP_ALLOCATOR_DEFRAG_HEADROOM("hive.llap.io.allocator.defrag.headroom", "1Mb",
"How much of a headroom to leave to allow allocator more flexibility to defragment.\n" +
"The allocator would further cap it to a fraction of total memory."),
LLAP_ALLOCATOR_MAX_FORCE_EVICTED("hive.llap.io.allocator.max.force.eviction", "16Mb",
"Fragmentation can lead to some cases where more eviction has to happen to accommodate allocations\n" +
" This configuration puts a limit on how many bytes to force evict before using Allocator Discard method."
+ " Higher values will allow allocator more flexibility and will lead to better caching."),
LLAP_TRACK_CACHE_USAGE("hive.llap.io.track.cache.usage", true,
"Whether to tag LLAP cache contents, mapping them to Hive entities (paths for\n" +
"partitions and tables) for reporting."),
LLAP_USE_LRFU("hive.llap.io.use.lrfu", true,
"Whether ORC low-level cache should use LRFU cache policy instead of default (FIFO)."),
LLAP_LRFU_LAMBDA("hive.llap.io.lrfu.lambda", 0.000001f,
"Lambda for ORC low-level cache LRFU cache policy. Must be in [0, 1]. 0 makes LRFU\n" +
"behave like LFU, 1 makes it behave like LRU, values in between balance accordingly.\n" +
"The meaning of this parameter is the inverse of the number of time ticks (cache\n" +
" operations, currently) that cause the combined recency-frequency of a block in cache\n" +
" to be halved."),
LLAP_CACHE_ALLOW_SYNTHETIC_FILEID("hive.llap.cache.allow.synthetic.fileid", true,
"Whether LLAP cache should use synthetic file ID if real one is not available. Systems\n" +
"like HDFS, Isilon, etc. provide a unique file/inode ID. On other FSes (e.g. local\n" +
"FS), the cache would not work by default because LLAP is unable to uniquely track the\n" +
"files; enabling this setting allows LLAP to generate file ID from the path, size and\n" +
"modification time, which is almost certain to identify file uniquely. However, if you\n" +
"use a FS without file IDs and rewrite files a lot (or are paranoid), you might want\n" +
"to avoid this setting."),
LLAP_CACHE_DEFAULT_FS_FILE_ID("hive.llap.cache.defaultfs.only.native.fileid", true,
"Whether LLAP cache should use native file IDs from the default FS only. This is to\n" +
"avoid file ID collisions when several different DFS instances are in use at the same\n" +
"time. Disable this check to allow native file IDs from non-default DFS."),
LLAP_CACHE_ENABLE_ORC_GAP_CACHE("hive.llap.orc.gap.cache", true,
"Whether LLAP cache for ORC should remember gaps in ORC compression buffer read\n" +
"estimates, to avoid re-reading the data that was read once and discarded because it\n" +
"is unneeded. This is only necessary for ORC files written before HIVE-9660."),
LLAP_IO_USE_FILEID_PATH("hive.llap.io.use.fileid.path", true,
"Whether LLAP should use fileId (inode)-based path to ensure better consistency for the\n" +
"cases of file overwrites. This is supported on HDFS. Disabling this also turns off any\n" +
"cache consistency checks based on fileid comparisons."),
// Restricted to text for now as this is a new feature; only text files can be sliced.
LLAP_IO_ENCODE_ENABLED("hive.llap.io.encode.enabled", true,
"Whether LLAP should try to re-encode and cache data for non-ORC formats. This is used\n" +
"on LLAP Server side to determine if the infrastructure for that is initialized."),
LLAP_IO_ENCODE_FORMATS("hive.llap.io.encode.formats",
"org.apache.hadoop.mapred.TextInputFormat,",
"The table input formats for which LLAP IO should re-encode and cache data.\n" +
"Comma-separated list."),
LLAP_IO_ENCODE_ALLOC_SIZE("hive.llap.io.encode.alloc.size", "256Kb", new SizeValidator(),
"Allocation size for the buffers used to cache encoded data from non-ORC files. Must\n" +
"be a power of two between " + LLAP_ALLOCATOR_MIN_ALLOC + " and\n" +
LLAP_ALLOCATOR_MAX_ALLOC + "."),
LLAP_IO_ENCODE_VECTOR_SERDE_ENABLED("hive.llap.io.encode.vector.serde.enabled", true,
"Whether LLAP should use vectorized SerDe reader to read text data when re-encoding."),
LLAP_IO_ENCODE_VECTOR_SERDE_ASYNC_ENABLED("hive.llap.io.encode.vector.serde.async.enabled",
true,
"Whether LLAP should use async mode in vectorized SerDe reader to read text data."),
LLAP_IO_ENCODE_SLICE_ROW_COUNT("hive.llap.io.encode.slice.row.count", 100000,
"Row count to use to separate cache slices when reading encoded data from row-based\n" +
"inputs into LLAP cache, if this feature is enabled."),
LLAP_IO_ENCODE_SLICE_LRR("hive.llap.io.encode.slice.lrr", true,
"Whether to separate cache slices when reading encoded data from text inputs via MR\n" +
"MR LineRecordRedader into LLAP cache, if this feature is enabled. Safety flag."),
LLAP_ORC_ENABLE_TIME_COUNTERS("hive.llap.io.orc.time.counters", true,
"Whether to enable time counters for LLAP IO layer (time spent in HDFS, etc.)"),
LLAP_IO_VRB_QUEUE_LIMIT_BASE("hive.llap.io.vrb.queue.limit.base", 50000,
"The default queue size for VRBs produced by a LLAP IO thread when the processing is\n" +
"slower than the IO. The actual queue size is set per fragment, and is adjusted down\n" +
"from the base, depending on the schema."),
LLAP_IO_VRB_QUEUE_LIMIT_MIN("hive.llap.io.vrb.queue.limit.min", 10,
"The minimum queue size for VRBs produced by a LLAP IO thread when the processing is\n" +
"slower than the IO (used when determining the size from base size)."),
LLAP_IO_SHARE_OBJECT_POOLS("hive.llap.io.share.object.pools", false,
"Whether to used shared object pools in LLAP IO. A safety flag."),
LLAP_AUTO_ALLOW_UBER("hive.llap.auto.allow.uber", false,
"Whether or not to allow the planner to run vertices in the AM."),
LLAP_AUTO_ENFORCE_TREE("hive.llap.auto.enforce.tree", true,
"Enforce that all parents are in llap, before considering vertex"),
LLAP_AUTO_ENFORCE_VECTORIZED("hive.llap.auto.enforce.vectorized", true,
"Enforce that inputs are vectorized, before considering vertex"),
LLAP_AUTO_ENFORCE_STATS("hive.llap.auto.enforce.stats", true,
"Enforce that col stats are available, before considering vertex"),
LLAP_AUTO_MAX_INPUT("hive.llap.auto.max.input.size", 10*1024*1024*1024L,
"Check input size, before considering vertex (-1 disables check)"),
LLAP_AUTO_MAX_OUTPUT("hive.llap.auto.max.output.size", 1*1024*1024*1024L,
"Check output size, before considering vertex (-1 disables check)"),
LLAP_SKIP_COMPILE_UDF_CHECK("hive.llap.skip.compile.udf.check", false,
"Whether to skip the compile-time check for non-built-in UDFs when deciding whether to\n" +
"execute tasks in LLAP. Skipping the check allows executing UDFs from pre-localized\n" +
"jars in LLAP; if the jars are not pre-localized, the UDFs will simply fail to load."),
LLAP_ALLOW_PERMANENT_FNS("hive.llap.allow.permanent.fns", true,
"Whether LLAP decider should allow permanent UDFs."),
LLAP_EXECUTION_MODE("hive.llap.execution.mode", "none",
new StringSet("auto", "none", "all", "map", "only"),
"Chooses whether query fragments will run in container or in llap"),
LLAP_IO_ETL_SKIP_FORMAT("hive.llap.io.etl.skip.format", "encode", new StringSet("none", "encode", "all"),
"For ETL queries, determines whether to skip llap io cache. By default, hive.llap.io.encode.enabled " +
"will be set to false which disables LLAP IO for text formats. Setting it to 'all' will disable LLAP IO for all" +
" formats. 'none' will not disable LLAP IO for any formats."),
LLAP_OBJECT_CACHE_ENABLED("hive.llap.object.cache.enabled", true,
"Cache objects (plans, hashtables, etc) in llap"),
LLAP_IO_DECODING_METRICS_PERCENTILE_INTERVALS("hive.llap.io.decoding.metrics.percentiles.intervals", "30",
"Comma-delimited set of integers denoting the desired rollover intervals (in seconds)\n" +
"for percentile latency metrics on the LLAP daemon IO decoding time.\n" +
"hive.llap.queue.metrics.percentiles.intervals"),
LLAP_IO_THREADPOOL_SIZE("hive.llap.io.threadpool.size", 10,
"Specify the number of threads to use for low-level IO thread pool."),
LLAP_KERBEROS_PRINCIPAL(HIVE_LLAP_DAEMON_SERVICE_PRINCIPAL_NAME, "",
"The name of the LLAP daemon's service principal."),
LLAP_KERBEROS_KEYTAB_FILE("hive.llap.daemon.keytab.file", "",
"The path to the Kerberos Keytab file containing the LLAP daemon's service principal."),
LLAP_WEBUI_SPNEGO_KEYTAB_FILE("hive.llap.webui.spnego.keytab", "",
"The path to the Kerberos Keytab file containing the LLAP WebUI SPNEGO principal.\n" +
"Typical value would look like /etc/security/keytabs/spnego.service.keytab."),
LLAP_WEBUI_SPNEGO_PRINCIPAL("hive.llap.webui.spnego.principal", "",
"The LLAP WebUI SPNEGO service principal. Configured similarly to\n" +
"hive.server2.webui.spnego.principal"),
LLAP_FS_KERBEROS_PRINCIPAL("hive.llap.task.principal", "",
"The name of the principal to use to run tasks. By default, the clients are required\n" +
"to provide tokens to access HDFS/etc."),
LLAP_FS_KERBEROS_KEYTAB_FILE("hive.llap.task.keytab.file", "",
"The path to the Kerberos Keytab file containing the principal to use to run tasks.\n" +
"By default, the clients are required to provide tokens to access HDFS/etc."),
LLAP_ZKSM_ZK_CONNECTION_STRING("hive.llap.zk.sm.connectionString", "",
"ZooKeeper connection string for ZooKeeper SecretManager."),
LLAP_ZKSM_ZK_SESSION_TIMEOUT("hive.llap.zk.sm.session.timeout", "40s", new TimeValidator(
TimeUnit.MILLISECONDS), "ZooKeeper session timeout for ZK SecretManager."),
LLAP_ZK_REGISTRY_USER("hive.llap.zk.registry.user", "",
"In the LLAP ZooKeeper-based registry, specifies the username in the Zookeeper path.\n" +
"This should be the hive user or whichever user is running the LLAP daemon."),
LLAP_ZK_REGISTRY_NAMESPACE("hive.llap.zk.registry.namespace", null,
"In the LLAP ZooKeeper-based registry, overrides the ZK path namespace. Note that\n" +
"using this makes the path management (e.g. setting correct ACLs) your responsibility."),
// Note: do not rename to ..service.acl; Hadoop generates .hosts setting name from this,
// resulting in a collision with existing hive.llap.daemon.service.hosts and bizarre errors.
// These are read by Hadoop IPC, so you should check the usage and naming conventions (e.g.
// ".blocked" is a string hardcoded by Hadoop, and defaults are enforced elsewhere in Hive)
// before making changes or copy-pasting these.
LLAP_SECURITY_ACL("hive.llap.daemon.acl", "*", "The ACL for LLAP daemon."),
LLAP_SECURITY_ACL_DENY("hive.llap.daemon.acl.blocked", "", "The deny ACL for LLAP daemon."),
LLAP_MANAGEMENT_ACL("hive.llap.management.acl", "*", "The ACL for LLAP daemon management."),
LLAP_MANAGEMENT_ACL_DENY("hive.llap.management.acl.blocked", "",
"The deny ACL for LLAP daemon management."),
LLAP_PLUGIN_ACL("hive.llap.plugin.acl", "*", "The ACL for LLAP plugin AM endpoint."),
LLAP_PLUGIN_ACL_DENY("hive.llap.plugin.acl.blocked", "",
"The deny ACL for LLAP plugin AM endpoint."),
LLAP_REMOTE_TOKEN_REQUIRES_SIGNING("hive.llap.remote.token.requires.signing", "true",
new StringSet("false", "except_llap_owner", "true"),
"Whether the token returned from LLAP management API should require fragment signing.\n" +
"True by default; can be disabled to allow CLI to get tokens from LLAP in a secure\n" +
"cluster by setting it to true or 'except_llap_owner' (the latter returns such tokens\n" +
"to everyone except the user LLAP cluster is authenticating under)."),
// Hadoop DelegationTokenManager default is 1 week.
LLAP_DELEGATION_TOKEN_LIFETIME("hive.llap.daemon.delegation.token.lifetime", "14d",
new TimeValidator(TimeUnit.SECONDS),
"LLAP delegation token lifetime, in seconds if specified without a unit."),
LLAP_MANAGEMENT_RPC_PORT("hive.llap.management.rpc.port", 15004,
"RPC port for LLAP daemon management service."),
LLAP_WEB_AUTO_AUTH("hive.llap.auto.auth", false,
"Whether or not to set Hadoop configs to enable auth in LLAP web app."),
LLAP_DAEMON_RPC_NUM_HANDLERS("hive.llap.daemon.rpc.num.handlers", 5,
"Number of RPC handlers for LLAP daemon.", "llap.daemon.rpc.num.handlers"),
LLAP_PLUGIN_RPC_PORT("hive.llap.plugin.rpc.port", 0,
"Port to use for LLAP plugin rpc server"),
LLAP_PLUGIN_RPC_NUM_HANDLERS("hive.llap.plugin.rpc.num.handlers", 1,
"Number of RPC handlers for AM LLAP plugin endpoint."),
LLAP_DAEMON_WORK_DIRS("hive.llap.daemon.work.dirs", "",
"Working directories for the daemon. This should not be set if running as a YARN\n" +
"Service. It must be set when not running on YARN. If the value is set when\n" +
"running as a YARN Service, the specified value will be used.",
"llap.daemon.work.dirs"),
LLAP_DAEMON_YARN_SHUFFLE_PORT("hive.llap.daemon.yarn.shuffle.port", 15551,
"YARN shuffle port for LLAP-daemon-hosted shuffle.", "llap.daemon.yarn.shuffle.port"),
LLAP_DAEMON_YARN_CONTAINER_MB("hive.llap.daemon.yarn.container.mb", -1,
"llap server yarn container size in MB. Used in LlapServiceDriver and package.py", "llap.daemon.yarn.container.mb"),
LLAP_DAEMON_QUEUE_NAME("hive.llap.daemon.queue.name", null,
"Queue name within which the llap application will run." +
" Used in LlapServiceDriver and package.py"),
// TODO Move the following 2 properties out of Configuration to a constant.
LLAP_DAEMON_CONTAINER_ID("hive.llap.daemon.container.id", null,
"ContainerId of a running LlapDaemon. Used to publish to the registry"),
LLAP_DAEMON_NM_ADDRESS("hive.llap.daemon.nm.address", null,
"NM Address host:rpcPort for the NodeManager on which the instance of the daemon is running.\n" +
"Published to the llap registry. Should never be set by users"),
LLAP_DAEMON_SHUFFLE_DIR_WATCHER_ENABLED("hive.llap.daemon.shuffle.dir.watcher.enabled", false,
"TODO doc", "llap.daemon.shuffle.dir-watcher.enabled"),
LLAP_DAEMON_AM_LIVENESS_HEARTBEAT_INTERVAL_MS(
"hive.llap.daemon.am.liveness.heartbeat.interval.ms", "10000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Tez AM-LLAP heartbeat interval (milliseconds). This needs to be below the task timeout\n" +
"interval, but otherwise as high as possible to avoid unnecessary traffic.",
"llap.daemon.am.liveness.heartbeat.interval-ms"),
LLAP_DAEMON_AM_LIVENESS_CONNECTION_TIMEOUT_MS(
"hive.llap.am.liveness.connection.timeout.ms", "10000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Amount of time to wait on connection failures to the AM from an LLAP daemon before\n" +
"considering the AM to be dead.", "llap.am.liveness.connection.timeout-millis"),
LLAP_DAEMON_AM_USE_FQDN("hive.llap.am.use.fqdn", true,
"Whether to use FQDN of the AM machine when submitting work to LLAP."),
// Not used yet - since the Writable RPC engine does not support this policy.
LLAP_DAEMON_AM_LIVENESS_CONNECTION_SLEEP_BETWEEN_RETRIES_MS(
"hive.llap.am.liveness.connection.sleep.between.retries.ms", "2000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Sleep duration while waiting to retry connection failures to the AM from the daemon for\n" +
"the general keep-alive thread (milliseconds).",
"llap.am.liveness.connection.sleep-between-retries-millis"),
LLAP_DAEMON_TASK_SCHEDULER_TIMEOUT_SECONDS(
"hive.llap.task.scheduler.timeout.seconds", "60s",
new TimeValidator(TimeUnit.SECONDS),
"Amount of time to wait before failing the query when there are no llap daemons running\n" +
"(alive) in the cluster.", "llap.daemon.scheduler.timeout.seconds"),
LLAP_DAEMON_NUM_EXECUTORS("hive.llap.daemon.num.executors", 4,
"Number of executors to use in LLAP daemon; essentially, the number of tasks that can be\n" +
"executed in parallel.", "llap.daemon.num.executors"),
LLAP_MAPJOIN_MEMORY_OVERSUBSCRIBE_FACTOR("hive.llap.mapjoin.memory.oversubscribe.factor", 0.2f,
"Fraction of memory from hive.auto.convert.join.noconditionaltask.size that can be over subscribed\n" +
"by queries running in LLAP mode. This factor has to be from 0.0 to 1.0. Default is 20% over subscription.\n"),
LLAP_MEMORY_OVERSUBSCRIPTION_MAX_EXECUTORS_PER_QUERY("hive.llap.memory.oversubscription.max.executors.per.query",
-1,
"Used along with hive.llap.mapjoin.memory.oversubscribe.factor to limit the number of executors from\n" +
"which memory for mapjoin can be borrowed. Default 3 (from 3 other executors\n" +
"hive.llap.mapjoin.memory.oversubscribe.factor amount of memory can be borrowed based on which mapjoin\n" +
"conversion decision will be made). This is only an upper bound. Lower bound is determined by number of\n" +
"executors and configured max concurrency."),
LLAP_MAPJOIN_MEMORY_MONITOR_CHECK_INTERVAL("hive.llap.mapjoin.memory.monitor.check.interval", 100000L,
"Check memory usage of mapjoin hash tables after every interval of this many rows. If map join hash table\n" +
"memory usage exceeds (hive.auto.convert.join.noconditionaltask.size * hive.hash.table.inflation.factor)\n" +
"when running in LLAP, tasks will get killed and not retried. Set the value to 0 to disable this feature."),
LLAP_DAEMON_AM_REPORTER_MAX_THREADS("hive.llap.daemon.am-reporter.max.threads", 4,
"Maximum number of threads to be used for AM reporter. If this is lower than number of\n" +
"executors in llap daemon, it would be set to number of executors at runtime.",
"llap.daemon.am-reporter.max.threads"),
LLAP_DAEMON_RPC_PORT("hive.llap.daemon.rpc.port", 0,
"The LLAP daemon RPC port. A value of 0 indicates a dynamic port.", "llap.daemon.rpc.port"),
LLAP_DAEMON_MEMORY_PER_INSTANCE_MB("hive.llap.daemon.memory.per.instance.mb", 4096,
"The total amount of memory to use for the executors inside LLAP (in megabytes).",
"llap.daemon.memory.per.instance.mb"),
LLAP_DAEMON_XMX_HEADROOM("hive.llap.daemon.xmx.headroom", "5%",
"The total amount of heap memory set aside by LLAP and not used by the executors. Can\n" +
"be specified as size (e.g. '512Mb'), or percentage (e.g. '5%'). Note that the latter is\n" +
"derived from the total daemon XMX, which can be different from the total executor\n" +
"memory if the cache is on-heap; although that's not the default configuration."),
LLAP_DAEMON_VCPUS_PER_INSTANCE("hive.llap.daemon.vcpus.per.instance", 4,
"The total number of vcpus to use for the executors inside LLAP.",
"llap.daemon.vcpus.per.instance"),
LLAP_DAEMON_NUM_FILE_CLEANER_THREADS("hive.llap.daemon.num.file.cleaner.threads", 1,
"Number of file cleaner threads in LLAP.", "llap.daemon.num.file.cleaner.threads"),
LLAP_FILE_CLEANUP_DELAY_SECONDS("hive.llap.file.cleanup.delay.seconds", "300s",
new TimeValidator(TimeUnit.SECONDS),
"How long to delay before cleaning up query files in LLAP (in seconds, for debugging).",
"llap.file.cleanup.delay-seconds"),
LLAP_DAEMON_SERVICE_HOSTS("hive.llap.daemon.service.hosts", null,
"Explicitly specified hosts to use for LLAP scheduling. Useful for testing. By default,\n" +
"YARN registry is used.", "llap.daemon.service.hosts"),
LLAP_DAEMON_SERVICE_REFRESH_INTERVAL("hive.llap.daemon.service.refresh.interval.sec", "60s",
new TimeValidator(TimeUnit.SECONDS),
"LLAP YARN registry service list refresh delay, in seconds.",
"llap.daemon.service.refresh.interval"),
LLAP_DAEMON_COMMUNICATOR_NUM_THREADS("hive.llap.daemon.communicator.num.threads", 10,
"Number of threads to use in LLAP task communicator in Tez AM.",
"llap.daemon.communicator.num.threads"),
LLAP_PLUGIN_CLIENT_NUM_THREADS("hive.llap.plugin.client.num.threads", 10,
"Number of threads to use in LLAP task plugin client."),
LLAP_DAEMON_DOWNLOAD_PERMANENT_FNS("hive.llap.daemon.download.permanent.fns", false,
"Whether LLAP daemon should localize the resources for permanent UDFs."),
LLAP_TASK_SCHEDULER_AM_REGISTRY_NAME("hive.llap.task.scheduler.am.registry", "llap",
"AM registry name for LLAP task scheduler plugin to register with."),
LLAP_TASK_SCHEDULER_AM_REGISTRY_PRINCIPAL("hive.llap.task.scheduler.am.registry.principal", "",
"The name of the principal used to access ZK AM registry securely."),
LLAP_TASK_SCHEDULER_AM_REGISTRY_KEYTAB_FILE("hive.llap.task.scheduler.am.registry.keytab.file", "",
"The path to the Kerberos keytab file used to access ZK AM registry securely."),
LLAP_TASK_SCHEDULER_NODE_REENABLE_MIN_TIMEOUT_MS(
"hive.llap.task.scheduler.node.reenable.min.timeout.ms", "200ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Minimum time after which a previously disabled node will be re-enabled for scheduling,\n" +
"in milliseconds. This may be modified by an exponential back-off if failures persist.",
"llap.task.scheduler.node.re-enable.min.timeout.ms"),
LLAP_TASK_SCHEDULER_NODE_REENABLE_MAX_TIMEOUT_MS(
"hive.llap.task.scheduler.node.reenable.max.timeout.ms", "10000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Maximum time after which a previously disabled node will be re-enabled for scheduling,\n" +
"in milliseconds. This may be modified by an exponential back-off if failures persist.",
"llap.task.scheduler.node.re-enable.max.timeout.ms"),
LLAP_TASK_SCHEDULER_NODE_DISABLE_BACK_OFF_FACTOR(
"hive.llap.task.scheduler.node.disable.backoff.factor", 1.5f,
"Backoff factor on successive blacklists of a node due to some failures. Blacklist times\n" +
"start at the min timeout and go up to the max timeout based on this backoff factor.",
"llap.task.scheduler.node.disable.backoff.factor"),
LLAP_TASK_SCHEDULER_PREEMPT_INDEPENDENT("hive.llap.task.scheduler.preempt.independent", false,
"Whether the AM LLAP scheduler should preempt a lower priority task for a higher pri one\n" +
"even if the former doesn't depend on the latter (e.g. for two parallel sides of a union)."),
LLAP_TASK_SCHEDULER_NUM_SCHEDULABLE_TASKS_PER_NODE(
"hive.llap.task.scheduler.num.schedulable.tasks.per.node", 0,
"The number of tasks the AM TaskScheduler will try allocating per node. 0 indicates that\n" +
"this should be picked up from the Registry. -1 indicates unlimited capacity; positive\n" +
"values indicate a specific bound.", "llap.task.scheduler.num.schedulable.tasks.per.node"),
LLAP_TASK_SCHEDULER_LOCALITY_DELAY(
"hive.llap.task.scheduler.locality.delay", "0ms",
new TimeValidator(TimeUnit.MILLISECONDS, -1l, true, Long.MAX_VALUE, true),
"Amount of time to wait before allocating a request which contains location information," +
" to a location other than the ones requested. Set to -1 for an infinite delay, 0" +
"for no delay."
),
LLAP_DAEMON_TASK_PREEMPTION_METRICS_INTERVALS(
"hive.llap.daemon.task.preemption.metrics.intervals", "30,60,300",
"Comma-delimited set of integers denoting the desired rollover intervals (in seconds)\n" +
" for percentile latency metrics. Used by LLAP daemon task scheduler metrics for\n" +
" time taken to kill task (due to pre-emption) and useful time wasted by the task that\n" +
" is about to be preempted."
),
LLAP_DAEMON_TASK_SCHEDULER_WAIT_QUEUE_SIZE("hive.llap.daemon.task.scheduler.wait.queue.size",
10, "LLAP scheduler maximum queue size.", "llap.daemon.task.scheduler.wait.queue.size"),
LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME(
"hive.llap.daemon.wait.queue.comparator.class.name",
"org.apache.hadoop.hive.llap.daemon.impl.comparator.ShortestJobFirstComparator",
"The priority comparator to use for LLAP scheduler priority queue. The built-in options\n" +
"are org.apache.hadoop.hive.llap.daemon.impl.comparator.ShortestJobFirstComparator and\n" +
".....FirstInFirstOutComparator", "llap.daemon.wait.queue.comparator.class.name"),
LLAP_DAEMON_TASK_SCHEDULER_ENABLE_PREEMPTION(
"hive.llap.daemon.task.scheduler.enable.preemption", true,
"Whether non-finishable running tasks (e.g. a reducer waiting for inputs) should be\n" +
"preempted by finishable tasks inside LLAP scheduler.",
"llap.daemon.task.scheduler.enable.preemption"),
LLAP_TASK_COMMUNICATOR_CONNECTION_TIMEOUT_MS(
"hive.llap.task.communicator.connection.timeout.ms", "16000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Connection timeout (in milliseconds) before a failure to an LLAP daemon from Tez AM.",
"llap.task.communicator.connection.timeout-millis"),
LLAP_TASK_COMMUNICATOR_LISTENER_THREAD_COUNT(
"hive.llap.task.communicator.listener.thread-count", 30,
"The number of task communicator listener threads."),
LLAP_TASK_COMMUNICATOR_CONNECTION_SLEEP_BETWEEN_RETRIES_MS(
"hive.llap.task.communicator.connection.sleep.between.retries.ms", "2000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Sleep duration (in milliseconds) to wait before retrying on error when obtaining a\n" +
"connection to LLAP daemon from Tez AM.",
"llap.task.communicator.connection.sleep-between-retries-millis"),
LLAP_TASK_UMBILICAL_SERVER_PORT("hive.llap.daemon.umbilical.port", 0,
"LLAP task umbilical server RPC port"),
LLAP_DAEMON_WEB_PORT("hive.llap.daemon.web.port", 15002, "LLAP daemon web UI port.",
"llap.daemon.service.port"),
LLAP_DAEMON_WEB_SSL("hive.llap.daemon.web.ssl", false,
"Whether LLAP daemon web UI should use SSL.", "llap.daemon.service.ssl"),
LLAP_CLIENT_CONSISTENT_SPLITS("hive.llap.client.consistent.splits", true,
"Whether to setup split locations to match nodes on which llap daemons are running, " +
"preferring one of the locations provided by the split itself. If there is no llap daemon " +
"running on any of those locations (or on the cloud), fall back to a cache affinity to" +
" an LLAP node. This is effective only if hive.execution.mode is llap."),
LLAP_VALIDATE_ACLS("hive.llap.validate.acls", true,
"Whether LLAP should reject permissive ACLs in some cases (e.g. its own management\n" +
"protocol or ZK paths), similar to how ssh refuses a key with bad access permissions."),
LLAP_DAEMON_OUTPUT_SERVICE_PORT("hive.llap.daemon.output.service.port", 15003,
"LLAP daemon output service port"),
LLAP_DAEMON_OUTPUT_STREAM_TIMEOUT("hive.llap.daemon.output.stream.timeout", "120s",
new TimeValidator(TimeUnit.SECONDS),
"The timeout for the client to connect to LLAP output service and start the fragment\n" +
"output after sending the fragment. The fragment will fail if its output is not claimed."),
LLAP_DAEMON_OUTPUT_SERVICE_SEND_BUFFER_SIZE("hive.llap.daemon.output.service.send.buffer.size",
128 * 1024, "Send buffer size to be used by LLAP daemon output service"),
LLAP_DAEMON_OUTPUT_SERVICE_MAX_PENDING_WRITES("hive.llap.daemon.output.service.max.pending.writes",
8, "Maximum number of queued writes allowed per connection when sending data\n" +
" via the LLAP output service to external clients."),
LLAP_EXTERNAL_SPLITS_TEMP_TABLE_STORAGE_FORMAT("hive.llap.external.splits.temp.table.storage.format",
"orc", new StringSet("default", "text", "orc"),
"Storage format for temp tables created using LLAP external client"),
LLAP_EXTERNAL_SPLITS_ORDER_BY_FORCE_SINGLE_SPLIT("hive.llap.external.splits.order.by.force.single.split",
true,
"If LLAP external clients submits ORDER BY queries, force return a single split to guarantee reading\n" +
"data out in ordered way. Setting this to false will let external clients read data out in parallel\n" +
"losing the ordering (external clients are responsible for guaranteeing the ordering)"),
LLAP_ENABLE_GRACE_JOIN_IN_LLAP("hive.llap.enable.grace.join.in.llap", false,
"Override if grace join should be allowed to run in llap."),
LLAP_HS2_ENABLE_COORDINATOR("hive.llap.hs2.coordinator.enabled", true,
"Whether to create the LLAP coordinator; since execution engine and container vs llap\n" +
"settings are both coming from job configs, we don't know at start whether this should\n" +
"be created. Default true."),
LLAP_DAEMON_LOGGER("hive.llap.daemon.logger", Constants.LLAP_LOGGER_NAME_QUERY_ROUTING,
new StringSet(Constants.LLAP_LOGGER_NAME_QUERY_ROUTING,
Constants.LLAP_LOGGER_NAME_RFA,
Constants.LLAP_LOGGER_NAME_CONSOLE),
"logger used for llap-daemons."),
LLAP_OUTPUT_FORMAT_ARROW("hive.llap.output.format.arrow", true,
"Whether LLapOutputFormatService should output arrow batches"),
LLAP_COLLECT_LOCK_METRICS("hive.llap.lockmetrics.collect", false,
"Whether lock metrics (wait times, counts) are collected for LLAP "
+ "related locks"),
LLAP_TASK_TIME_SUMMARY(
"hive.llap.task.time.print.summary", false,
"Display queue and runtime of tasks by host for every query executed by the shell."),
HIVE_TRIGGER_VALIDATION_INTERVAL("hive.trigger.validation.interval", "500ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Interval for validating triggers during execution of a query. Triggers defined in resource plan will get\n" +
"validated for all SQL operations after every defined interval (default: 500ms) and corresponding action\n" +
"defined in the trigger will be taken"),
SPARK_USE_OP_STATS("hive.spark.use.op.stats", true,
"Whether to use operator stats to determine reducer parallelism for Hive on Spark.\n" +
"If this is false, Hive will use source table stats to determine reducer\n" +
"parallelism for all first level reduce tasks, and the maximum reducer parallelism\n" +
"from all parents for all the rest (second level and onward) reducer tasks."),
SPARK_USE_TS_STATS_FOR_MAPJOIN("hive.spark.use.ts.stats.for.mapjoin", false,
"If this is set to true, mapjoin optimization in Hive/Spark will use statistics from\n" +
"TableScan operators at the root of operator tree, instead of parent ReduceSink\n" +
"operators of the Join operator."),
SPARK_OPTIMIZE_SHUFFLE_SERDE("hive.spark.optimize.shuffle.serde", true,
"If this is set to true, Hive on Spark will register custom serializers for data types\n" +
"in shuffle. This should result in less shuffled data."),
SPARK_CLIENT_FUTURE_TIMEOUT("hive.spark.client.future.timeout",
"60s", new TimeValidator(TimeUnit.SECONDS),
"Timeout for requests between Hive client and remote Spark driver."),
SPARK_JOB_MONITOR_TIMEOUT("hive.spark.job.monitor.timeout",
"60s", new TimeValidator(TimeUnit.SECONDS),
"Timeout for job monitor to get Spark job state."),
SPARK_RPC_CLIENT_CONNECT_TIMEOUT("hive.spark.client.connect.timeout",
"1000ms", new TimeValidator(TimeUnit.MILLISECONDS),
"Timeout for remote Spark driver in connecting back to Hive client."),
SPARK_RPC_CLIENT_HANDSHAKE_TIMEOUT("hive.spark.client.server.connect.timeout",
"90000ms", new TimeValidator(TimeUnit.MILLISECONDS),
"Timeout for handshake between Hive client and remote Spark driver. Checked by both processes."),
SPARK_RPC_SECRET_RANDOM_BITS("hive.spark.client.secret.bits", "256",
"Number of bits of randomness in the generated secret for communication between Hive client and remote Spark driver. " +
"Rounded down to the nearest multiple of 8."),
SPARK_RPC_MAX_THREADS("hive.spark.client.rpc.threads", 8,
"Maximum number of threads for remote Spark driver's RPC event loop."),
SPARK_RPC_MAX_MESSAGE_SIZE("hive.spark.client.rpc.max.size", 50 * 1024 * 1024,
"Maximum message size in bytes for communication between Hive client and remote Spark driver. Default is 50MB."),
SPARK_RPC_CHANNEL_LOG_LEVEL("hive.spark.client.channel.log.level", null,
"Channel logging level for remote Spark driver. One of {DEBUG, ERROR, INFO, TRACE, WARN}."),
SPARK_RPC_SASL_MECHANISM("hive.spark.client.rpc.sasl.mechanisms", "DIGEST-MD5",
"Name of the SASL mechanism to use for authentication."),
SPARK_RPC_SERVER_ADDRESS("hive.spark.client.rpc.server.address", "",
"The server address of HiverServer2 host to be used for communication between Hive client and remote Spark driver. " +
"Default is empty, which means the address will be determined in the same way as for hive.server2.thrift.bind.host." +
"This is only necessary if the host has multiple network addresses and if a different network address other than " +
"hive.server2.thrift.bind.host is to be used."),
SPARK_RPC_SERVER_PORT("hive.spark.client.rpc.server.port", "", "A list of port ranges which can be used by the RPC server, " +
"in the format 49152-49222,49228; a random port is selected from the list. Default is empty, which randomly " +
"selects one port from all available ones."),
SPARK_DYNAMIC_PARTITION_PRUNING(
"hive.spark.dynamic.partition.pruning", false,
"When dynamic pruning is enabled, joins on partition keys will be processed by writing\n" +
"to a temporary HDFS file, and read later for removing unnecessary partitions."),
SPARK_DYNAMIC_PARTITION_PRUNING_MAX_DATA_SIZE(
"hive.spark.dynamic.partition.pruning.max.data.size", 100*1024*1024L,
"Maximum total data size in dynamic pruning."),
SPARK_DYNAMIC_PARTITION_PRUNING_MAP_JOIN_ONLY(
"hive.spark.dynamic.partition.pruning.map.join.only", false,
"Turn on dynamic partition pruning only for map joins.\n" +
"If hive.spark.dynamic.partition.pruning is set to true, this parameter value is ignored."),
SPARK_USE_GROUPBY_SHUFFLE(
"hive.spark.use.groupby.shuffle", true,
"Spark groupByKey transformation has better performance but uses unbounded memory." +
"Turn this off when there is a memory issue."),
SPARK_JOB_MAX_TASKS("hive.spark.job.max.tasks", -1, "The maximum number of tasks a Spark job may have.\n" +
"If a Spark job contains more tasks than the maximum, it will be cancelled. A value of -1 means no limit."),
SPARK_STAGE_MAX_TASKS("hive.spark.stage.max.tasks", -1, "The maximum number of tasks a stage in a Spark job may have.\n" +
"If a Spark job stage contains more tasks than the maximum, the job will be cancelled. A value of -1 means no limit."),
SPARK_CLIENT_TYPE("hive.spark.client.type", HIVE_SPARK_SUBMIT_CLIENT,
"Controls how the Spark application is launched. If " + HIVE_SPARK_SUBMIT_CLIENT + " is " +
"specified (default) then the spark-submit shell script is used to launch the Spark " +
"app. If " + HIVE_SPARK_LAUNCHER_CLIENT + " is specified then Spark's " +
"InProcessLauncher is used to programmatically launch the app."),
SPARK_SESSION_TIMEOUT("hive.spark.session.timeout", "30m", new TimeValidator(TimeUnit.MINUTES,
30L, true, null, true), "Amount of time the Spark Remote Driver should wait for " +
" a Spark job to be submitted before shutting down. Minimum value is 30 minutes"),
SPARK_SESSION_TIMEOUT_PERIOD("hive.spark.session.timeout.period", "60s",
new TimeValidator(TimeUnit.SECONDS, 60L, true, null, true),
"How frequently to check for idle Spark sessions. Minimum value is 60 seconds."),
NWAYJOINREORDER("hive.reorder.nway.joins", true,
"Runs reordering of tables within single n-way join (i.e.: picks streamtable)"),
HIVE_MERGE_NWAY_JOINS("hive.merge.nway.joins", false,
"Merge adjacent joins into a single n-way join"),
HIVE_LOG_N_RECORDS("hive.log.every.n.records", 0L, new RangeValidator(0L, null),
"If value is greater than 0 logs in fixed intervals of size n rather than exponentially."),
/**
* @deprecated Use MetastoreConf.MSCK_PATH_VALIDATION
*/
@Deprecated
HIVE_MSCK_PATH_VALIDATION("hive.msck.path.validation", "throw",
new StringSet("throw", "skip", "ignore"), "The approach msck should take with HDFS " +
"directories that are partition-like but contain unsupported characters. 'throw' (an " +
"exception) is the default; 'skip' will skip the invalid directories and still repair the" +
" others; 'ignore' will skip the validation (legacy behavior, causes bugs in many cases)"),
/**
* @deprecated Use MetastoreConf.MSCK_REPAIR_BATCH_SIZE
*/
@Deprecated
HIVE_MSCK_REPAIR_BATCH_SIZE(
"hive.msck.repair.batch.size", 3000,
"Batch size for the msck repair command. If the value is greater than zero,\n "
+ "it will execute batch wise with the configured batch size. In case of errors while\n"
+ "adding unknown partitions the batch size is automatically reduced by half in the subsequent\n"
+ "retry attempt. The default value is 3000 which means it will execute in the batches of 3000."),
/**
* @deprecated Use MetastoreConf.MSCK_REPAIR_BATCH_MAX_RETRIES
*/
@Deprecated
HIVE_MSCK_REPAIR_BATCH_MAX_RETRIES("hive.msck.repair.batch.max.retries", 4,
"Maximum number of retries for the msck repair command when adding unknown partitions.\n "
+ "If the value is greater than zero it will retry adding unknown partitions until the maximum\n"
+ "number of attempts is reached or batch size is reduced to 0, whichever is earlier.\n"
+ "In each retry attempt it will reduce the batch size by a factor of 2 until it reaches zero.\n"
+ "If the value is set to zero it will retry until the batch size becomes zero as described above."),
HIVE_SERVER2_LLAP_CONCURRENT_QUERIES("hive.server2.llap.concurrent.queries", -1,
"The number of queries allowed in parallel via llap. Negative number implies 'infinite'."),
HIVE_TEZ_ENABLE_MEMORY_MANAGER("hive.tez.enable.memory.manager", true,
"Enable memory manager for tez"),
HIVE_HASH_TABLE_INFLATION_FACTOR("hive.hash.table.inflation.factor", (float) 2.0,
"Expected inflation factor between disk/in memory representation of hash tables"),
HIVE_LOG_TRACE_ID("hive.log.trace.id", "",
"Log tracing id that can be used by upstream clients for tracking respective logs. " +
"Truncated to " + LOG_PREFIX_LENGTH + " characters. Defaults to use auto-generated session id."),
HIVE_MM_AVOID_GLOBSTATUS_ON_S3("hive.mm.avoid.s3.globstatus", true,
"Whether to use listFiles (optimized on S3) instead of globStatus when on S3."),
// If a parameter is added to the restricted list, add a test in TestRestrictedList.Java
HIVE_CONF_RESTRICTED_LIST("hive.conf.restricted.list",
"hive.security.authenticator.manager,hive.security.authorization.manager," +
"hive.security.metastore.authorization.manager,hive.security.metastore.authenticator.manager," +
"hive.users.in.admin.role,hive.server2.xsrf.filter.enabled,hive.security.authorization.enabled," +
"hive.distcp.privileged.doAs," +
"hive.server2.authentication.ldap.baseDN," +
"hive.server2.authentication.ldap.url," +
"hive.server2.authentication.ldap.Domain," +
"hive.server2.authentication.ldap.groupDNPattern," +
"hive.server2.authentication.ldap.groupFilter," +
"hive.server2.authentication.ldap.userDNPattern," +
"hive.server2.authentication.ldap.userFilter," +
"hive.server2.authentication.ldap.groupMembershipKey," +
"hive.server2.authentication.ldap.userMembershipKey," +
"hive.server2.authentication.ldap.groupClassKey," +
"hive.server2.authentication.ldap.customLDAPQuery," +
"hive.privilege.synchronizer," +
"hive.privilege.synchronizer.interval," +
"hive.spark.client.connect.timeout," +
"hive.spark.client.server.connect.timeout," +
"hive.spark.client.channel.log.level," +
"hive.spark.client.rpc.max.size," +
"hive.spark.client.rpc.threads," +
"hive.spark.client.secret.bits," +
"hive.spark.client.rpc.server.address," +
"hive.spark.client.rpc.server.port," +
"hive.spark.client.rpc.sasl.mechanisms," +
"bonecp.,"+
"hive.druid.broker.address.default,"+
"hive.druid.coordinator.address.default,"+
"hikaricp.,"+
"hadoop.bin.path,"+
"yarn.bin.path,"+
"spark.home,"+
"hive.driver.parallel.compilation.global.limit",
"Comma separated list of configuration options which are immutable at runtime"),
HIVE_CONF_HIDDEN_LIST("hive.conf.hidden.list",
METASTOREPWD.varname + "," + HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname
+ "," + DRUID_METADATA_DB_PASSWORD.varname
// Adding the S3 credentials from Hadoop config to be hidden
+ ",fs.s3.awsAccessKeyId"
+ ",fs.s3.awsSecretAccessKey"
+ ",fs.s3n.awsAccessKeyId"
+ ",fs.s3n.awsSecretAccessKey"
+ ",fs.s3a.access.key"
+ ",fs.s3a.secret.key"
+ ",fs.s3a.proxy.password"
+ ",dfs.adls.oauth2.credential"
+ ",fs.adl.oauth2.credential",
"Comma separated list of configuration options which should not be read by normal user like passwords"),
HIVE_CONF_INTERNAL_VARIABLE_LIST("hive.conf.internal.variable.list",
"hive.added.files.path,hive.added.jars.path,hive.added.archives.path",
"Comma separated list of variables which are used internally and should not be configurable."),
HIVE_SPARK_RSC_CONF_LIST("hive.spark.rsc.conf.list",
SPARK_OPTIMIZE_SHUFFLE_SERDE.varname + "," +
SPARK_CLIENT_FUTURE_TIMEOUT.varname + "," +
SPARK_CLIENT_TYPE.varname,
"Comma separated list of variables which are related to remote spark context.\n" +
"Changing these variables will result in re-creating the spark session."),
HIVE_QUERY_TIMEOUT_SECONDS("hive.query.timeout.seconds", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Timeout for Running Query in seconds. A nonpositive value means infinite. " +
"If the query timeout is also set by thrift API call, the smaller one will be taken."),
HIVE_EXEC_INPUT_LISTING_MAX_THREADS("hive.exec.input.listing.max.threads", 0, new SizeValidator(0L, true, 1024L, true),
"Maximum number of threads that Hive uses to list file information from file systems (recommended > 1 for blobstore)."),
HIVE_QUERY_REEXECUTION_ENABLED("hive.query.reexecution.enabled", true,
"Enable query reexecutions"),
HIVE_QUERY_REEXECUTION_STRATEGIES("hive.query.reexecution.strategies", "overlay,reoptimize",
"comma separated list of plugin can be used:\n"
+ " overlay: hiveconf subtree 'reexec.overlay' is used as an overlay in case of an execution errors out\n"
+ " reoptimize: collects operator statistics during execution and recompile the query after a failure"),
HIVE_QUERY_REEXECUTION_STATS_PERSISTENCE("hive.query.reexecution.stats.persist.scope", "query",
new StringSet("query", "hiveserver", "metastore"),
"Sets the persistence scope of runtime statistics\n"
+ " query: runtime statistics are only used during re-execution\n"
+ " hiveserver: runtime statistics are persisted in the hiveserver - all sessions share it\n"
+ " metastore: runtime statistics are persisted in the metastore as well"),
HIVE_QUERY_MAX_REEXECUTION_COUNT("hive.query.reexecution.max.count", 1,
"Maximum number of re-executions for a single query."),
HIVE_QUERY_REEXECUTION_ALWAYS_COLLECT_OPERATOR_STATS("hive.query.reexecution.always.collect.operator.stats", false,
"If sessionstats are enabled; this option can be used to collect statistics all the time"),
HIVE_QUERY_REEXECUTION_STATS_CACHE_BATCH_SIZE("hive.query.reexecution.stats.cache.batch.size", -1,
"If runtime stats are stored in metastore; the maximal batch size per round during load."),
HIVE_QUERY_REEXECUTION_STATS_CACHE_SIZE("hive.query.reexecution.stats.cache.size", 100_000,
"Size of the runtime statistics cache. Unit is: OperatorStat entry; a query plan consist ~100."),
HIVE_QUERY_PLANMAPPER_LINK_RELNODES("hive.query.planmapper.link.relnodes", true,
"Wether to link Calcite nodes to runtime statistics."),
HIVE_QUERY_RESULTS_CACHE_ENABLED("hive.query.results.cache.enabled", true,
"If the query results cache is enabled. This will keep results of previously executed queries " +
"to be reused if the same query is executed again."),
HIVE_QUERY_RESULTS_CACHE_NONTRANSACTIONAL_TABLES_ENABLED("hive.query.results.cache.nontransactional.tables.enabled", false,
"If the query results cache is enabled for queries involving non-transactional tables." +
"Users who enable this setting should be willing to tolerate some amount of stale results in the cache."),
HIVE_QUERY_RESULTS_CACHE_WAIT_FOR_PENDING_RESULTS("hive.query.results.cache.wait.for.pending.results", true,
"Should a query wait for the pending results of an already running query, " +
"in order to use the cached result when it becomes ready"),
HIVE_QUERY_RESULTS_CACHE_DIRECTORY("hive.query.results.cache.directory",
"/tmp/hive/_resultscache_",
"Location of the query results cache directory. Temporary results from queries " +
"will be moved to this location."),
HIVE_QUERY_RESULTS_CACHE_MAX_ENTRY_LIFETIME("hive.query.results.cache.max.entry.lifetime", "3600s",
new TimeValidator(TimeUnit.SECONDS),
"Maximum lifetime in seconds for an entry in the query results cache. A nonpositive value means infinite."),
HIVE_QUERY_RESULTS_CACHE_MAX_SIZE("hive.query.results.cache.max.size",
(long) 2 * 1024 * 1024 * 1024,
"Maximum total size in bytes that the query results cache directory is allowed to use on the filesystem."),
HIVE_QUERY_RESULTS_CACHE_MAX_ENTRY_SIZE("hive.query.results.cache.max.entry.size",
(long) 10 * 1024 * 1024,
"Maximum size in bytes that a single query result is allowed to use in the results cache directory"),
HIVE_NOTFICATION_EVENT_POLL_INTERVAL("hive.notification.event.poll.interval", "60s",
new TimeValidator(TimeUnit.SECONDS),
"How often the notification log is polled for new NotificationEvents from the metastore." +
"A nonpositive value means the notification log is never polled."),
HIVE_NOTFICATION_EVENT_CONSUMERS("hive.notification.event.consumers",
"org.apache.hadoop.hive.ql.cache.results.QueryResultsCache$InvalidationEventConsumer",
"Comma-separated list of class names extending EventConsumer," +
"to handle the NotificationEvents retreived by the notification event poll."),
/* BLOBSTORE section */
HIVE_BLOBSTORE_SUPPORTED_SCHEMES("hive.blobstore.supported.schemes", "s3,s3a,s3n",
"Comma-separated list of supported blobstore schemes."),
HIVE_BLOBSTORE_USE_BLOBSTORE_AS_SCRATCHDIR("hive.blobstore.use.blobstore.as.scratchdir", false,
"Enable the use of scratch directories directly on blob storage systems (it may cause performance penalties)."),
HIVE_BLOBSTORE_OPTIMIZATIONS_ENABLED("hive.blobstore.optimizations.enabled", true,
"This parameter enables a number of optimizations when running on blobstores:\n" +
"(1) If hive.blobstore.use.blobstore.as.scratchdir is false, force the last Hive job to write to the blobstore.\n" +
"This is a performance optimization that forces the final FileSinkOperator to write to the blobstore.\n" +
"See HIVE-15121 for details."),
HIVE_ADDITIONAL_CONFIG_FILES("hive.additional.config.files", "",
"The names of additional config files, such as ldap-site.xml," +
"spark-site.xml, etc in comma separated list.");
public final String varname;
public final String altName;
private final String defaultExpr;
public final String defaultStrVal;
public final int defaultIntVal;
public final long defaultLongVal;
public final float defaultFloatVal;
public final boolean defaultBoolVal;
private final Class<?> valClass;
private final VarType valType;
private final Validator validator;
private final String description;
private final boolean excluded;
private final boolean caseSensitive;
ConfVars(String varname, Object defaultVal, String description) {
this(varname, defaultVal, null, description, true, false, null);
}
ConfVars(String varname, Object defaultVal, String description, String altName) {
this(varname, defaultVal, null, description, true, false, altName);
}
ConfVars(String varname, Object defaultVal, Validator validator, String description,
String altName) {
this(varname, defaultVal, validator, description, true, false, altName);
}
ConfVars(String varname, Object defaultVal, String description, boolean excluded) {
this(varname, defaultVal, null, description, true, excluded, null);
}
ConfVars(String varname, String defaultVal, boolean caseSensitive, String description) {
this(varname, defaultVal, null, description, caseSensitive, false, null);
}
ConfVars(String varname, Object defaultVal, Validator validator, String description) {
this(varname, defaultVal, validator, description, true, false, null);
}
ConfVars(String varname, Object defaultVal, Validator validator, String description, boolean excluded) {
this(varname, defaultVal, validator, description, true, excluded, null);
}
ConfVars(String varname, Object defaultVal, Validator validator, String description,
boolean caseSensitive, boolean excluded, String altName) {
this.varname = varname;
this.validator = validator;
this.description = description;
this.defaultExpr = defaultVal == null ? null : String.valueOf(defaultVal);
this.excluded = excluded;
this.caseSensitive = caseSensitive;
this.altName = altName;
if (defaultVal == null || defaultVal instanceof String) {
this.valClass = String.class;
this.valType = VarType.STRING;
this.defaultStrVal = SystemVariables.substitute((String)defaultVal);
this.defaultIntVal = -1;
this.defaultLongVal = -1;
this.defaultFloatVal = -1;
this.defaultBoolVal = false;
} else if (defaultVal instanceof Integer) {
this.valClass = Integer.class;
this.valType = VarType.INT;
this.defaultStrVal = null;
this.defaultIntVal = (Integer)defaultVal;
this.defaultLongVal = -1;
this.defaultFloatVal = -1;
this.defaultBoolVal = false;
} else if (defaultVal instanceof Long) {
this.valClass = Long.class;
this.valType = VarType.LONG;
this.defaultStrVal = null;
this.defaultIntVal = -1;
this.defaultLongVal = (Long)defaultVal;
this.defaultFloatVal = -1;
this.defaultBoolVal = false;
} else if (defaultVal instanceof Float) {
this.valClass = Float.class;
this.valType = VarType.FLOAT;
this.defaultStrVal = null;
this.defaultIntVal = -1;
this.defaultLongVal = -1;
this.defaultFloatVal = (Float)defaultVal;
this.defaultBoolVal = false;
} else if (defaultVal instanceof Boolean) {
this.valClass = Boolean.class;
this.valType = VarType.BOOLEAN;
this.defaultStrVal = null;
this.defaultIntVal = -1;
this.defaultLongVal = -1;
this.defaultFloatVal = -1;
this.defaultBoolVal = (Boolean)defaultVal;
} else {
throw new IllegalArgumentException("Not supported type value " + defaultVal.getClass() +
" for name " + varname);
}
}
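// Illustrative examples (hypothetical entries, not real configuration keys) of how the constructor
// above infers the value type from the runtime class of the default value:
//   CONF_A("hive.example.flag", true, "doc")  -> valClass = Boolean.class, valType = BOOLEAN
//   CONF_B("hive.example.size", 1024, "doc")  -> valClass = Integer.class, valType = INT
//   CONF_C("hive.example.name", "x", "doc")   -> valClass = String.class,  valType = STRING
// Any other default type (e.g. a Double) is rejected with an IllegalArgumentException.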
public boolean isType(String value) {
return valType.isType(value);
}
public Validator getValidator() {
return validator;
}
public String validate(String value) {
return validator == null ? null : validator.validate(value);
}
public String validatorDescription() {
return validator == null ? null : validator.toDescription();
}
public String typeString() {
String type = valType.typeString();
if (valType == VarType.STRING && validator != null) {
if (validator instanceof TimeValidator) {
type += "(TIME)";
}
}
return type;
}
public String getRawDescription() {
return description;
}
public String getDescription() {
String validator = validatorDescription();
if (validator != null) {
return validator + ".\n" + description;
}
return description;
}
public boolean isExcluded() {
return excluded;
}
public boolean isCaseSensitive() {
return caseSensitive;
}
@Override
public String toString() {
return varname;
}
private static String findHadoopBinary() {
String val = findHadoopHome();
// if can't find hadoop home we can at least try /usr/bin/hadoop
val = (val == null ? File.separator + "usr" : val)
+ File.separator + "bin" + File.separator + "hadoop";
return val;
}
private static String findYarnBinary() {
String val = findHadoopHome();
val = (val == null ? "yarn" : val + File.separator + "bin" + File.separator + "yarn");
return val;
}
private static String findMapRedBinary() {
String val = findHadoopHome();
val = (val == null ? "mapred" : val + File.separator + "bin" + File.separator + "mapred");
return val;
}
private static String findHadoopHome() {
String val = System.getenv("HADOOP_HOME");
// Fall back to HADOOP_PREFIX, which some Hadoop deployments set instead of HADOOP_HOME
if (val == null) {
val = System.getenv("HADOOP_PREFIX");
}
return val;
}
public String getDefaultValue() {
return valType.defaultValueString(this);
}
public String getDefaultExpr() {
return defaultExpr;
}
private Set<String> getValidStringValues() {
if (validator == null || !(validator instanceof StringSet)) {
throw new RuntimeException(varname + " does not specify a list of valid values");
}
return ((StringSet)validator).getExpected();
}
enum VarType {
STRING {
@Override
void checkType(String value) throws Exception { }
@Override
String defaultValueString(ConfVars confVar) { return confVar.defaultStrVal; }
},
INT {
@Override
void checkType(String value) throws Exception { Integer.valueOf(value); }
},
LONG {
@Override
void checkType(String value) throws Exception { Long.valueOf(value); }
},
FLOAT {
@Override
void checkType(String value) throws Exception { Float.valueOf(value); }
},
BOOLEAN {
@Override
void checkType(String value) throws Exception { Boolean.valueOf(value); }
};
boolean isType(String value) {
try { checkType(value); } catch (Exception e) { return false; }
return true;
}
String typeString() { return name().toUpperCase();}
String defaultValueString(ConfVars confVar) { return confVar.defaultExpr; }
abstract void checkType(String value) throws Exception;
}
}
/**
* Writes the default ConfVars out to a byte array and returns an input
* stream wrapping that byte array.
*
* We need this in order to initialize the ConfVar properties
* in the underlying Configuration object using the addResource(InputStream)
* method.
*
* It is important to use a LoopingByteArrayInputStream because it turns out
* addResource(InputStream) is broken since Configuration tries to read the
* entire contents of the same InputStream repeatedly without resetting it.
* LoopingByteArrayInputStream has special logic to handle this.
*/
private static synchronized InputStream getConfVarInputStream() {
if (confVarByteArray == null) {
try {
// Create a Hadoop configuration without inheriting default settings.
Configuration conf = new Configuration(false);
applyDefaultNonNullConfVars(conf);
ByteArrayOutputStream confVarBaos = new ByteArrayOutputStream();
conf.writeXml(confVarBaos);
confVarByteArray = confVarBaos.toByteArray();
} catch (Exception e) {
// We're pretty screwed if we can't load the default conf vars
throw new RuntimeException("Failed to initialize default Hive configuration variables!", e);
}
}
return new LoopingByteArrayInputStream(confVarByteArray);
}
public void verifyAndSet(String name, String value) throws IllegalArgumentException {
if (modWhiteListPattern != null) {
Matcher wlMatcher = modWhiteListPattern.matcher(name);
if (!wlMatcher.matches()) {
throw new IllegalArgumentException("Cannot modify " + name + " at runtime. "
+ "It is not in the list of parameters that are allowed to be modified at runtime");
}
}
if (Iterables.any(restrictList,
restrictedVar -> name != null && name.startsWith(restrictedVar))) {
throw new IllegalArgumentException("Cannot modify " + name + " at runtime. It is in the list"
+ " of parameters that can't be modified at runtime or is prefixed by a restricted variable");
}
String oldValue = name != null ? get(name) : null;
if (name == null || value == null || !value.equals(oldValue)) {
// When either name or value is null, the set method below will fail,
// and throw IllegalArgumentException
set(name, value);
if (isSparkRelatedConfig(name)) {
isSparkConfigUpdated = true;
}
}
}
public boolean isHiddenConfig(String name) {
return Iterables.any(hiddenSet, hiddenVar -> name.startsWith(hiddenVar));
}
public static boolean isEncodedPar(String name) {
for (ConfVars confVar : HiveConf.ENCODED_CONF) {
ConfVars confVar1 = confVar;
if (confVar1.varname.equals(name)) {
return true;
}
}
return false;
}
/**
* Checks whether a Spark-related property is being updated. This covers Spark configurations,
* RSC (Remote Spark Context) configurations, and YARN configuration in Spark on YARN mode.
* @param name the name of the property being set
* @return true if changing this property should mark the Spark configuration as updated
*/
private boolean isSparkRelatedConfig(String name) {
boolean result = false;
if (name.startsWith("spark")) { // Spark property.
// for now we don't support changing spark app name on the fly
result = !name.equals("spark.app.name");
} else if (name.startsWith("yarn")) { // YARN property in Spark on YARN mode.
String sparkMaster = get("spark.master");
if (sparkMaster != null && sparkMaster.startsWith("yarn")) {
result = true;
}
} else if (rscList.stream().anyMatch(rscVar -> rscVar.equals(name))) { // Remote Spark Context property.
result = true;
} else if (name.equals("mapreduce.job.queuename")) {
// a special mapreduce-prefixed property whose changes we also want to take effect
result = true;
}
return result;
}
public static int getIntVar(Configuration conf, ConfVars var) {
assert (var.valClass == Integer.class) : var.varname;
if (var.altName != null) {
return conf.getInt(var.varname, conf.getInt(var.altName, var.defaultIntVal));
}
return conf.getInt(var.varname, var.defaultIntVal);
}
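// Lookup order used by this getter (and the long/float/boolean/String variants below):
// the primary name (var.varname) wins; if it is unset, the deprecated alternate name
// (var.altName) is consulted; only when both are missing does the compiled-in default apply.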
public static void setIntVar(Configuration conf, ConfVars var, int val) {
assert (var.valClass == Integer.class) : var.varname;
conf.setInt(var.varname, val);
}
public int getIntVar(ConfVars var) {
return getIntVar(this, var);
}
public void setIntVar(ConfVars var, int val) {
setIntVar(this, var, val);
}
public static long getTimeVar(Configuration conf, ConfVars var, TimeUnit outUnit) {
return toTime(getVar(conf, var), getDefaultTimeUnit(var), outUnit);
}
public static void setTimeVar(Configuration conf, ConfVars var, long time, TimeUnit timeunit) {
assert (var.valClass == String.class) : var.varname;
conf.set(var.varname, time + stringFor(timeunit));
}
public long getTimeVar(ConfVars var, TimeUnit outUnit) {
return getTimeVar(this, var, outUnit);
}
public void setTimeVar(ConfVars var, long time, TimeUnit outUnit) {
setTimeVar(this, var, time, outUnit);
}
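// Rough round-trip sketch for the time helpers, assuming `var` is a time-valued
// (String-typed) ConfVars entry; the concrete variable is illustrative only:
//   setTimeVar(conf, var, 30, TimeUnit.SECONDS);    // stores "30sec" (see stringFor)
//   getTimeVar(conf, var, TimeUnit.MILLISECONDS);   // parses "30sec" -> 30000L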
public static long getSizeVar(Configuration conf, ConfVars var) {
return toSizeBytes(getVar(conf, var));
}
public long getSizeVar(ConfVars var) {
return getSizeVar(this, var);
}
public static TimeUnit getDefaultTimeUnit(ConfVars var) {
TimeUnit inputUnit = null;
if (var.validator instanceof TimeValidator) {
inputUnit = ((TimeValidator)var.validator).getTimeUnit();
}
return inputUnit;
}
public static long toTime(String value, TimeUnit inputUnit, TimeUnit outUnit) {
String[] parsed = parseNumberFollowedByUnit(value.trim());
return outUnit.convert(Long.parseLong(parsed[0].trim()), unitFor(parsed[1].trim(), inputUnit));
}
public static long toSizeBytes(String value) {
String[] parsed = parseNumberFollowedByUnit(value.trim());
return Long.parseLong(parsed[0].trim()) * multiplierFor(parsed[1].trim());
}
private static String[] parseNumberFollowedByUnit(String value) {
char[] chars = value.toCharArray();
int i = 0;
for (; i < chars.length && (chars[i] == '-' || Character.isDigit(chars[i])); i++) {
}
return new String[] {value.substring(0, i), value.substring(i)};
}
private static Set<String> daysSet = ImmutableSet.of("d", "D", "day", "DAY", "days", "DAYS");
private static Set<String> hoursSet = ImmutableSet.of("h", "H", "hour", "HOUR", "hours", "HOURS");
private static Set<String> minutesSet = ImmutableSet.of("m", "M", "min", "MIN", "mins", "MINS",
"minute", "MINUTE", "minutes", "MINUTES");
private static Set<String> secondsSet = ImmutableSet.of("s", "S", "sec", "SEC", "secs", "SECS",
"second", "SECOND", "seconds", "SECONDS");
private static Set<String> millisSet = ImmutableSet.of("ms", "MS", "msec", "MSEC", "msecs", "MSECS",
"millisecond", "MILLISECOND", "milliseconds", "MILLISECONDS");
private static Set<String> microsSet = ImmutableSet.of("us", "US", "usec", "USEC", "usecs", "USECS",
"microsecond", "MICROSECOND", "microseconds", "MICROSECONDS");
private static Set<String> nanosSet = ImmutableSet.of("ns", "NS", "nsec", "NSEC", "nsecs", "NSECS",
"nanosecond", "NANOSECOND", "nanoseconds", "NANOSECONDS");
public static TimeUnit unitFor(String unit, TimeUnit defaultUnit) {
unit = unit.trim().toLowerCase();
if (unit.isEmpty() || unit.equals("l")) {
if (defaultUnit == null) {
throw new IllegalArgumentException("Time unit is not specified");
}
return defaultUnit;
} else if (daysSet.contains(unit)) {
return TimeUnit.DAYS;
} else if (hoursSet.contains(unit)) {
return TimeUnit.HOURS;
} else if (minutesSet.contains(unit)) {
return TimeUnit.MINUTES;
} else if (secondsSet.contains(unit)) {
return TimeUnit.SECONDS;
} else if (millisSet.contains(unit)) {
return TimeUnit.MILLISECONDS;
} else if (microsSet.contains(unit)) {
return TimeUnit.MICROSECONDS;
} else if (nanosSet.contains(unit)) {
return TimeUnit.NANOSECONDS;
}
throw new IllegalArgumentException("Invalid time unit " + unit);
}
public static long multiplierFor(String unit) {
unit = unit.trim().toLowerCase();
if (unit.isEmpty() || unit.equals("b") || unit.equals("bytes")) {
return 1;
} else if (unit.equals("kb")) {
return 1024;
} else if (unit.equals("mb")) {
return 1024*1024;
} else if (unit.equals("gb")) {
return 1024*1024*1024;
} else if (unit.equals("tb")) {
return 1024L*1024*1024*1024;
} else if (unit.equals("pb")) {
return 1024L*1024*1024*1024*1024;
}
throw new IllegalArgumentException("Invalid size unit " + unit);
}
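// Worked examples for the parsing helpers above (values follow from unitFor/multiplierFor):
//   toTime("30s", null, TimeUnit.MILLISECONDS)       -> 30000L
//   toTime("2", TimeUnit.MINUTES, TimeUnit.SECONDS)  -> 120L   (bare number uses the default unit)
//   toSizeBytes("64mb") -> 67108864L   (64 * 1024 * 1024)
//   toSizeBytes("512")  -> 512L        (no suffix means plain bytes)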
public static String stringFor(TimeUnit timeunit) {
switch (timeunit) {
case DAYS: return "day";
case HOURS: return "hour";
case MINUTES: return "min";
case SECONDS: return "sec";
case MILLISECONDS: return "msec";
case MICROSECONDS: return "usec";
case NANOSECONDS: return "nsec";
}
throw new IllegalArgumentException("Invalid timeunit " + timeunit);
}
public static long getLongVar(Configuration conf, ConfVars var) {
assert (var.valClass == Long.class) : var.varname;
if (var.altName != null) {
return conf.getLong(var.varname, conf.getLong(var.altName, var.defaultLongVal));
}
return conf.getLong(var.varname, var.defaultLongVal);
}
public static long getLongVar(Configuration conf, ConfVars var, long defaultVal) {
if (var.altName != null) {
return conf.getLong(var.varname, conf.getLong(var.altName, defaultVal));
}
return conf.getLong(var.varname, defaultVal);
}
public static void setLongVar(Configuration conf, ConfVars var, long val) {
assert (var.valClass == Long.class) : var.varname;
conf.setLong(var.varname, val);
}
public long getLongVar(ConfVars var) {
return getLongVar(this, var);
}
public void setLongVar(ConfVars var, long val) {
setLongVar(this, var, val);
}
public static float getFloatVar(Configuration conf, ConfVars var) {
assert (var.valClass == Float.class) : var.varname;
if (var.altName != null) {
return conf.getFloat(var.varname, conf.getFloat(var.altName, var.defaultFloatVal));
}
return conf.getFloat(var.varname, var.defaultFloatVal);
}
public static float getFloatVar(Configuration conf, ConfVars var, float defaultVal) {
if (var.altName != null) {
return conf.getFloat(var.varname, conf.getFloat(var.altName, defaultVal));
}
return conf.getFloat(var.varname, defaultVal);
}
public static void setFloatVar(Configuration conf, ConfVars var, float val) {
assert (var.valClass == Float.class) : var.varname;
conf.setFloat(var.varname, val);
}
public float getFloatVar(ConfVars var) {
return getFloatVar(this, var);
}
public void setFloatVar(ConfVars var, float val) {
setFloatVar(this, var, val);
}
public static boolean getBoolVar(Configuration conf, ConfVars var) {
assert (var.valClass == Boolean.class) : var.varname;
if (var.altName != null) {
return conf.getBoolean(var.varname, conf.getBoolean(var.altName, var.defaultBoolVal));
}
return conf.getBoolean(var.varname, var.defaultBoolVal);
}
public static boolean getBoolVar(Configuration conf, ConfVars var, boolean defaultVal) {
if (var.altName != null) {
return conf.getBoolean(var.varname, conf.getBoolean(var.altName, defaultVal));
}
return conf.getBoolean(var.varname, defaultVal);
}
public static void setBoolVar(Configuration conf, ConfVars var, boolean val) {
assert (var.valClass == Boolean.class) : var.varname;
conf.setBoolean(var.varname, val);
}
/* Dynamic partition pruning is enabled in some or all cases if either
* hive.spark.dynamic.partition.pruning is true or
* hive.spark.dynamic.partition.pruning.map.join.only is true
*/
public static boolean isSparkDPPAny(Configuration conf) {
return (conf.getBoolean(ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING.varname,
ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING.defaultBoolVal) ||
conf.getBoolean(ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING_MAP_JOIN_ONLY.varname,
ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING_MAP_JOIN_ONLY.defaultBoolVal));
}
public boolean getBoolVar(ConfVars var) {
return getBoolVar(this, var);
}
public void setBoolVar(ConfVars var, boolean val) {
setBoolVar(this, var, val);
}
public static String getVar(Configuration conf, ConfVars var) {
assert (var.valClass == String.class) : var.varname;
return var.altName != null ? conf.get(var.varname, conf.get(var.altName, var.defaultStrVal))
: conf.get(var.varname, var.defaultStrVal);
}
public static String getVarWithoutType(Configuration conf, ConfVars var) {
return var.altName != null ? conf.get(var.varname, conf.get(var.altName, var.defaultExpr))
: conf.get(var.varname, var.defaultExpr);
}
public static String getTrimmedVar(Configuration conf, ConfVars var) {
assert (var.valClass == String.class) : var.varname;
if (var.altName != null) {
return conf.getTrimmed(var.varname, conf.getTrimmed(var.altName, var.defaultStrVal));
}
return conf.getTrimmed(var.varname, var.defaultStrVal);
}
public static String[] getTrimmedStringsVar(Configuration conf, ConfVars var) {
assert (var.valClass == String.class) : var.varname;
String[] result = conf.getTrimmedStrings(var.varname, (String[])null);
if (result != null) {
return result;
}
if (var.altName != null) {
result = conf.getTrimmedStrings(var.altName, (String[])null);
if (result != null) {
return result;
}
}
return org.apache.hadoop.util.StringUtils.getTrimmedStrings(var.defaultStrVal);
}
public static String getVar(Configuration conf, ConfVars var, String defaultVal) {
String ret = var.altName != null ? conf.get(var.varname, conf.get(var.altName, defaultVal))
: conf.get(var.varname, defaultVal);
return ret;
}
public static String getVar(Configuration conf, ConfVars var, EncoderDecoder<String, String> encoderDecoder) {
return encoderDecoder.decode(getVar(conf, var));
}
public String getLogIdVar(String defaultValue) {
String retval = getVar(ConfVars.HIVE_LOG_TRACE_ID);
if (StringUtils.EMPTY.equals(retval)) {
LOG.info("Using the default value passed in for log id: {}", defaultValue);
retval = defaultValue;
}
if (retval.length() > LOG_PREFIX_LENGTH) {
LOG.warn("The original log id prefix is {} has been truncated to {}", retval,
retval.substring(0, LOG_PREFIX_LENGTH - 1));
retval = retval.substring(0, LOG_PREFIX_LENGTH - 1);
}
return retval;
}
public static void setVar(Configuration conf, ConfVars var, String val) {
assert (var.valClass == String.class) : var.varname;
conf.set(var.varname, val);
}
public static void setVar(Configuration conf, ConfVars var, String val,
EncoderDecoder<String, String> encoderDecoder) {
setVar(conf, var, encoderDecoder.encode(val));
}
public static ConfVars getConfVars(String name) {
return vars.get(name);
}
public static ConfVars getMetaConf(String name) {
return metaConfs.get(name);
}
public String getVar(ConfVars var) {
return getVar(this, var);
}
public void setVar(ConfVars var, String val) {
setVar(this, var, val);
}
public String getQueryString() {
return getQueryString(this);
}
public static String getQueryString(Configuration conf) {
return getVar(conf, ConfVars.HIVEQUERYSTRING, EncoderDecoderFactory.URL_ENCODER_DECODER);
}
public void setQueryString(String query) {
setQueryString(this, query);
}
public static void setQueryString(Configuration conf, String query) {
setVar(conf, ConfVars.HIVEQUERYSTRING, query, EncoderDecoderFactory.URL_ENCODER_DECODER);
}
public void logVars(PrintStream ps) {
for (ConfVars one : ConfVars.values()) {
ps.println(one.varname + "=" + ((get(one.varname) != null) ? get(one.varname) : ""));
}
}
/**
* @return a ZooKeeperHiveHelper instance containing the ZooKeeper specifications from this
* HiveConf.
*/
public ZooKeeperHiveHelper getZKConfig() {
return new ZooKeeperHiveHelper(getVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_QUORUM),
getVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CLIENT_PORT),
getVar(HiveConf.ConfVars.HIVE_SERVER2_ZOOKEEPER_NAMESPACE),
(int) getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT,
TimeUnit.MILLISECONDS),
(int) getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME,
TimeUnit.MILLISECONDS),
getIntVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES));
}
public HiveConf() {
super();
initialize(this.getClass());
}
public HiveConf(Class<?> cls) {
super();
initialize(cls);
}
public HiveConf(Configuration other, Class<?> cls) {
super(other);
initialize(cls);
}
/**
* Copy constructor
*/
public HiveConf(HiveConf other) {
super(other);
hiveJar = other.hiveJar;
auxJars = other.auxJars;
isSparkConfigUpdated = other.isSparkConfigUpdated;
origProp = (Properties)other.origProp.clone();
restrictList.addAll(other.restrictList);
hiddenSet.addAll(other.hiddenSet);
modWhiteListPattern = other.modWhiteListPattern;
}
public Properties getAllProperties() {
return getProperties(this);
}
public static Properties getProperties(Configuration conf) {
Iterator<Map.Entry<String, String>> iter = conf.iterator();
Properties p = new Properties();
while (iter.hasNext()) {
Map.Entry<String, String> e = iter.next();
p.setProperty(e.getKey(), e.getValue());
}
return p;
}
private void initialize(Class<?> cls) {
hiveJar = (new JobConf(cls)).getJar();
// preserve the original configuration
origProp = getAllProperties();
// Overlay the ConfVars. Note that this ignores ConfVars with null values
addResource(getConfVarInputStream());
// Overlay hive-site.xml if it exists
if (hiveSiteURL != null) {
addResource(hiveSiteURL);
}
// If the config so far points at an embedded metastore, this process is treated
// like the metastore server case.
String msUri = this.getVar(HiveConf.ConfVars.METASTOREURIS);
// This is hackery, but having hive-common depend on standalone-metastore is really bad
// because it will pull all of the metastore code into every module. We need to check that
// we aren't using the standalone metastore. If we are, we should treat it the same as a
// remote metastore situation.
if (msUri == null || msUri.isEmpty()) {
msUri = this.get("metastore.thrift.uris");
}
LOG.debug("Found metastore URI of " + msUri);
if (HiveConfUtil.isEmbeddedMetaStore(msUri)) {
setLoadMetastoreConfig(true);
}
// load hivemetastore-site.xml if this is metastore and file exists
if (isLoadMetastoreConfig() && hivemetastoreSiteUrl != null) {
addResource(hivemetastoreSiteUrl);
}
// load hiveserver2-site.xml if this is hiveserver2 and file exists
// metastore can be embedded within hiveserver2, in such cases
// the conf params in hiveserver2-site.xml will override what's defined
// in hivemetastore-site.xml
if (isLoadHiveServer2Config() && hiveServer2SiteUrl != null) {
addResource(hiveServer2SiteUrl);
}
String val = this.getVar(HiveConf.ConfVars.HIVE_ADDITIONAL_CONFIG_FILES);
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
if (val != null && !val.isEmpty()) {
String[] configFiles = val.split(",");
for (String config : configFiles) {
URL configURL = findConfigFile(classLoader, config, true);
if (configURL != null) {
addResource(configURL);
}
}
}
// Overlay the values of any system properties and manual overrides
applySystemProperties();
if ((this.get("hive.metastore.ds.retry.attempts") != null) ||
this.get("hive.metastore.ds.retry.interval") != null) {
LOG.warn("DEPRECATED: hive.metastore.ds.retry.* no longer has any effect. " +
"Use hive.hmshandler.retry.* instead");
}
// if the running class was loaded directly (through eclipse) rather than through a
// jar then this would be needed
if (hiveJar == null) {
hiveJar = this.get(ConfVars.HIVEJAR.varname);
}
if (auxJars == null) {
auxJars = StringUtils.join(FileUtils.getJarFilesByPath(this.get(ConfVars.HIVEAUXJARS.varname), this), ',');
}
if (getBoolVar(ConfVars.METASTORE_SCHEMA_VERIFICATION)) {
setBoolVar(ConfVars.METASTORE_AUTO_CREATE_ALL, false);
}
if (getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) {
List<String> trimmed = new ArrayList<String>();
for (Map.Entry<String,String> entry : this) {
String key = entry.getKey();
if (key == null || !key.startsWith("hive.")) {
continue;
}
ConfVars var = HiveConf.getConfVars(key);
if (var == null) {
var = HiveConf.getConfVars(key.trim());
if (var != null) {
trimmed.add(key);
}
}
if (var == null) {
LOG.warn("HiveConf of name {} does not exist", key);
} else if (!var.isType(entry.getValue())) {
LOG.warn("HiveConf {} expects {} type value", var.varname, var.typeString());
}
}
for (String key : trimmed) {
set(key.trim(), getRaw(key));
unset(key);
}
}
setupSQLStdAuthWhiteList();
// setup list of conf vars that are not allowed to change runtime
setupRestrictList();
hiddenSet.clear();
hiddenSet.addAll(HiveConfUtil.getHiddenSet(this));
setupRSCList();
}
/**
* If the config whitelist param for sql standard authorization is not set, set it up here.
*/
private void setupSQLStdAuthWhiteList() {
String whiteListParamsStr = getVar(ConfVars.HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST);
if (whiteListParamsStr == null || whiteListParamsStr.trim().isEmpty()) {
// set the default configs in whitelist
whiteListParamsStr = getSQLStdAuthDefaultWhiteListPattern();
}
setVar(ConfVars.HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST, whiteListParamsStr);
}
private static String getSQLStdAuthDefaultWhiteListPattern() {
// create the default white list from list of safe config params
// and regex list
String confVarPatternStr = Joiner.on("|").join(convertVarsToRegex(sqlStdAuthSafeVarNames));
String regexPatternStr = Joiner.on("|").join(sqlStdAuthSafeVarNameRegexes);
return regexPatternStr + "|" + confVarPatternStr;
}
/**
* Obtains the local time-zone ID.
*/
public ZoneId getLocalTimeZone() {
String timeZoneStr = getVar(ConfVars.HIVE_LOCAL_TIME_ZONE);
return TimestampTZUtil.parseTimeZone(timeZoneStr);
}
/**
* @param paramList list of parameter strings
* @return list of parameter strings with "." replaced by "\."
*/
private static String[] convertVarsToRegex(String[] paramList) {
String[] regexes = new String[paramList.length];
for(int i=0; i<paramList.length; i++) {
regexes[i] = paramList[i].replace(".", "\\." );
}
return regexes;
}
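// Example of the escaping performed above: convertVarsToRegex(new String[] {"a.b.c"})
// yields {"a\\.b\\.c"}, i.e. the regex a\.b\.c, so dots in variable names are matched
// literally when the names are OR-ed together into the whitelist pattern.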
/**
* Default list of modifiable config parameters for sql standard authorization.
* For internal use only.
*/
private static final String [] sqlStdAuthSafeVarNames = new String [] {
ConfVars.AGGR_JOIN_TRANSPOSE.varname,
ConfVars.BYTESPERREDUCER.varname,
ConfVars.CLIENT_STATS_COUNTERS.varname,
ConfVars.DEFAULTPARTITIONNAME.varname,
ConfVars.DROPIGNORESNONEXISTENT.varname,
ConfVars.HIVECOUNTERGROUP.varname,
ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT.varname,
ConfVars.HIVEENFORCEBUCKETMAPJOIN.varname,
ConfVars.HIVEENFORCESORTMERGEBUCKETMAPJOIN.varname,
ConfVars.HIVEEXPREVALUATIONCACHE.varname,
ConfVars.HIVEQUERYRESULTFILEFORMAT.varname,
ConfVars.HIVEHASHTABLELOADFACTOR.varname,
ConfVars.HIVEHASHTABLETHRESHOLD.varname,
ConfVars.HIVEIGNOREMAPJOINHINT.varname,
ConfVars.HIVELIMITMAXROWSIZE.varname,
ConfVars.HIVEMAPREDMODE.varname,
ConfVars.HIVEMAPSIDEAGGREGATE.varname,
ConfVars.HIVEOPTIMIZEMETADATAQUERIES.varname,
ConfVars.HIVEROWOFFSET.varname,
ConfVars.HIVEVARIABLESUBSTITUTE.varname,
ConfVars.HIVEVARIABLESUBSTITUTEDEPTH.varname,
ConfVars.HIVE_AUTOGEN_COLUMNALIAS_PREFIX_INCLUDEFUNCNAME.varname,
ConfVars.HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL.varname,
ConfVars.HIVE_CHECK_CROSS_PRODUCT.varname,
ConfVars.HIVE_CLI_TEZ_SESSION_ASYNC.varname,
ConfVars.HIVE_COMPAT.varname,
ConfVars.HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY.varname,
ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION.varname,
ConfVars.HIVE_EXECUTION_ENGINE.varname,
ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE.varname,
ConfVars.HIVE_EXIM_URI_SCHEME_WL.varname,
ConfVars.HIVE_FILE_MAX_FOOTER.varname,
ConfVars.HIVE_INSERT_INTO_MULTILEVEL_DIRS.varname,
ConfVars.HIVE_LOCALIZE_RESOURCE_NUM_WAIT_ATTEMPTS.varname,
ConfVars.HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES.varname,
ConfVars.HIVE_QUERY_RESULTS_CACHE_ENABLED.varname,
ConfVars.HIVE_QUERY_RESULTS_CACHE_WAIT_FOR_PENDING_RESULTS.varname,
ConfVars.HIVE_QUOTEDID_SUPPORT.varname,
ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES.varname,
ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS.varname,
ConfVars.HIVE_SCHEMA_EVOLUTION.varname,
ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname,
ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS.varname,
ConfVars.HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES.varname,
ConfVars.JOB_DEBUG_CAPTURE_STACKTRACES.varname,
ConfVars.JOB_DEBUG_TIMEOUT.varname,
ConfVars.LLAP_IO_ENABLED.varname,
ConfVars.LLAP_IO_USE_FILEID_PATH.varname,
ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname,
ConfVars.LLAP_EXECUTION_MODE.varname,
ConfVars.LLAP_AUTO_ALLOW_UBER.varname,
ConfVars.LLAP_AUTO_ENFORCE_TREE.varname,
ConfVars.LLAP_AUTO_ENFORCE_VECTORIZED.varname,
ConfVars.LLAP_AUTO_ENFORCE_STATS.varname,
ConfVars.LLAP_AUTO_MAX_INPUT.varname,
ConfVars.LLAP_AUTO_MAX_OUTPUT.varname,
ConfVars.LLAP_SKIP_COMPILE_UDF_CHECK.varname,
ConfVars.LLAP_CLIENT_CONSISTENT_SPLITS.varname,
ConfVars.LLAP_ENABLE_GRACE_JOIN_IN_LLAP.varname,
ConfVars.LLAP_ALLOW_PERMANENT_FNS.varname,
ConfVars.MAXCREATEDFILES.varname,
ConfVars.MAXREDUCERS.varname,
ConfVars.NWAYJOINREORDER.varname,
ConfVars.OUTPUT_FILE_EXTENSION.varname,
ConfVars.SHOW_JOB_FAIL_DEBUG_INFO.varname,
ConfVars.TASKLOG_DEBUG_TIMEOUT.varname,
ConfVars.HIVEQUERYID.varname,
ConfVars.HIVEQUERYTAG.varname,
};
/**
* Default list of regexes for config parameters that are modifiable with
* sql standard authorization enabled
*/
static final String [] sqlStdAuthSafeVarNameRegexes = new String [] {
"hive\\.auto\\..*",
"hive\\.cbo\\..*",
"hive\\.convert\\..*",
"hive\\.druid\\..*",
"hive\\.exec\\.dynamic\\.partition.*",
"hive\\.exec\\.max\\.dynamic\\.partitions.*",
"hive\\.exec\\.compress\\..*",
"hive\\.exec\\.infer\\..*",
"hive\\.exec\\.mode.local\\..*",
"hive\\.exec\\.orc\\..*",
"hive\\.exec\\.parallel.*",
"hive\\.explain\\..*",
"hive\\.fetch.task\\..*",
"hive\\.groupby\\..*",
"hive\\.hbase\\..*",
"hive\\.index\\..*",
"hive\\.index\\..*",
"hive\\.intermediate\\..*",
"hive\\.jdbc\\..*",
"hive\\.join\\..*",
"hive\\.limit\\..*",
"hive\\.log\\..*",
"hive\\.mapjoin\\..*",
"hive\\.merge\\..*",
"hive\\.optimize\\..*",
"hive\\.materializedview\\..*",
"hive\\.orc\\..*",
"hive\\.outerjoin\\..*",
"hive\\.parquet\\..*",
"hive\\.ppd\\..*",
"hive\\.prewarm\\..*",
"hive\\.server2\\.thrift\\.resultset\\.default\\.fetch\\.size",
"hive\\.server2\\.proxy\\.user",
"hive\\.skewjoin\\..*",
"hive\\.smbjoin\\..*",
"hive\\.stats\\..*",
"hive\\.strict\\..*",
"hive\\.tez\\..*",
"hive\\.vectorized\\..*",
"fs\\.defaultFS",
"ssl\\.client\\.truststore\\.location",
"distcp\\.atomic",
"distcp\\.ignore\\.failures",
"distcp\\.preserve\\.status",
"distcp\\.preserve\\.rawxattrs",
"distcp\\.sync\\.folders",
"distcp\\.delete\\.missing\\.source",
"distcp\\.keystore\\.resource",
"distcp\\.liststatus\\.threads",
"distcp\\.max\\.maps",
"distcp\\.copy\\.strategy",
"distcp\\.skip\\.crc",
"distcp\\.copy\\.overwrite",
"distcp\\.copy\\.append",
"distcp\\.map\\.bandwidth\\.mb",
"distcp\\.dynamic\\..*",
"distcp\\.meta\\.folder",
"distcp\\.copy\\.listing\\.class",
"distcp\\.filters\\.class",
"distcp\\.options\\.skipcrccheck",
"distcp\\.options\\.m",
"distcp\\.options\\.numListstatusThreads",
"distcp\\.options\\.mapredSslConf",
"distcp\\.options\\.bandwidth",
"distcp\\.options\\.overwrite",
"distcp\\.options\\.strategy",
"distcp\\.options\\.i",
"distcp\\.options\\.p.*",
"distcp\\.options\\.update",
"distcp\\.options\\.delete",
"mapred\\.map\\..*",
"mapred\\.reduce\\..*",
"mapred\\.output\\.compression\\.codec",
"mapred\\.job\\.queue\\.name",
"mapred\\.output\\.compression\\.type",
"mapred\\.min\\.split\\.size",
"mapreduce\\.job\\.reduce\\.slowstart\\.completedmaps",
"mapreduce\\.job\\.queuename",
"mapreduce\\.job\\.tags",
"mapreduce\\.input\\.fileinputformat\\.split\\.minsize",
"mapreduce\\.map\\..*",
"mapreduce\\.reduce\\..*",
"mapreduce\\.output\\.fileoutputformat\\.compress\\.codec",
"mapreduce\\.output\\.fileoutputformat\\.compress\\.type",
"oozie\\..*",
"tez\\.am\\..*",
"tez\\.task\\..*",
"tez\\.runtime\\..*",
"tez\\.queue\\.name",
};
// Take care of conf overrides.
// Includes values in ConfVars as well as underlying configuration properties (i.e., Hadoop).
public static final Map<String, String> overrides = new HashMap<String, String>();
/**
* Apply system properties to this object if the property name is defined in ConfVars
* and the value is non-null and not an empty string.
*/
private void applySystemProperties() {
Map<String, String> systemProperties = getConfSystemProperties();
for (Entry<String, String> systemProperty : systemProperties.entrySet()) {
this.set(systemProperty.getKey(), systemProperty.getValue());
}
}
/**
* This method returns a mapping from config variable name to its value for all config variables
* which have been set using System properties
*/
public static Map<String, String> getConfSystemProperties() {
Map<String, String> systemProperties = new HashMap<String, String>();
for (ConfVars oneVar : ConfVars.values()) {
if (System.getProperty(oneVar.varname) != null) {
if (System.getProperty(oneVar.varname).length() > 0) {
systemProperties.put(oneVar.varname, System.getProperty(oneVar.varname));
}
}
}
for (Map.Entry<String, String> oneVar : overrides.entrySet()) {
if (overrides.get(oneVar.getKey()) != null) {
if (overrides.get(oneVar.getKey()).length() > 0) {
systemProperties.put(oneVar.getKey(), oneVar.getValue());
}
}
}
return systemProperties;
}
/**
* Overlays ConfVar properties with non-null values
*/
private static void applyDefaultNonNullConfVars(Configuration conf) {
for (ConfVars var : ConfVars.values()) {
String defaultValue = var.getDefaultValue();
if (defaultValue == null) {
// Don't override ConfVars with null values
continue;
}
conf.set(var.varname, defaultValue);
}
}
public Properties getChangedProperties() {
Properties ret = new Properties();
Properties newProp = getAllProperties();
for (Object one : newProp.keySet()) {
String oneProp = (String) one;
String oldValue = origProp.getProperty(oneProp);
if (!StringUtils.equals(oldValue, newProp.getProperty(oneProp))) {
ret.setProperty(oneProp, newProp.getProperty(oneProp));
}
}
return (ret);
}
public String getJar() {
return hiveJar;
}
/**
* @return the auxJars
*/
public String getAuxJars() {
return auxJars;
}
/**
* Set the auxiliary jars. Used for unit tests only.
* @param auxJars the auxJars to set.
*/
public void setAuxJars(String auxJars) {
this.auxJars = auxJars;
setVar(this, ConfVars.HIVEAUXJARS, auxJars);
}
public URL getHiveDefaultLocation() {
return hiveDefaultURL;
}
public static void setHiveSiteLocation(URL location) {
hiveSiteURL = location;
}
public static void setHivemetastoreSiteUrl(URL location) {
hivemetastoreSiteUrl = location;
}
public static URL getHiveSiteLocation() {
return hiveSiteURL;
}
public static URL getMetastoreSiteLocation() {
return hivemetastoreSiteUrl;
}
public static URL getHiveServer2SiteLocation() {
return hiveServer2SiteUrl;
}
/**
* @return the user name set in hadoop.job.ugi param or the current user from System
* @throws IOException
*/
public String getUser() throws IOException {
try {
UserGroupInformation ugi = Utils.getUGI();
return ugi.getUserName();
} catch (LoginException le) {
throw new IOException(le);
}
}
public static String getColumnInternalName(int pos) {
return "_col" + pos;
}
public static int getPositionFromInternalName(String internalName) {
Pattern internalPattern = Pattern.compile("_col([0-9]+)");
Matcher m = internalPattern.matcher(internalName);
if (!m.matches()){
return -1;
} else {
return Integer.parseInt(m.group(1));
}
}
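// Internal column naming round-trip, as implied by the two helpers above:
//   getColumnInternalName(3)             -> "_col3"
//   getPositionFromInternalName("_col3") -> 3
//   getPositionFromInternalName("name")  -> -1   (does not match _col<digits>)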
/**
* Append a comma-separated list of config vars to the restrict list.
* @param restrictListStr
*/
public void addToRestrictList(String restrictListStr) {
if (restrictListStr == null) {
return;
}
String oldList = this.getVar(ConfVars.HIVE_CONF_RESTRICTED_LIST);
if (oldList == null || oldList.isEmpty()) {
this.setVar(ConfVars.HIVE_CONF_RESTRICTED_LIST, restrictListStr);
} else {
this.setVar(ConfVars.HIVE_CONF_RESTRICTED_LIST, oldList + "," + restrictListStr);
}
setupRestrictList();
}
/**
* Set the white list of parameters that are allowed to be modified at runtime.
*
* @param paramNameRegex regex matching the names of parameters that may be modified
*/
@LimitedPrivate(value = { "Currently only for use by HiveAuthorizer" })
public void setModifiableWhiteListRegex(String paramNameRegex) {
if (paramNameRegex == null) {
return;
}
modWhiteListPattern = Pattern.compile(paramNameRegex);
}
/**
* Add the HIVE_CONF_RESTRICTED_LIST values to restrictList,
* including HIVE_CONF_RESTRICTED_LIST itself
*/
private void setupRestrictList() {
String restrictListStr = this.getVar(ConfVars.HIVE_CONF_RESTRICTED_LIST);
restrictList.clear();
if (restrictListStr != null) {
for (String entry : restrictListStr.split(",")) {
restrictList.add(entry.trim());
}
}
String internalVariableListStr = this.getVar(ConfVars.HIVE_CONF_INTERNAL_VARIABLE_LIST);
if (internalVariableListStr != null) {
for (String entry : internalVariableListStr.split(",")) {
restrictList.add(entry.trim());
}
}
restrictList.add(ConfVars.HIVE_IN_TEST.varname);
restrictList.add(ConfVars.HIVE_CONF_RESTRICTED_LIST.varname);
restrictList.add(ConfVars.HIVE_CONF_HIDDEN_LIST.varname);
restrictList.add(ConfVars.HIVE_CONF_INTERNAL_VARIABLE_LIST.varname);
restrictList.add(ConfVars.HIVE_SPARK_RSC_CONF_LIST.varname);
}
private void setupRSCList() {
rscList.clear();
String vars = this.getVar(ConfVars.HIVE_SPARK_RSC_CONF_LIST);
if (vars != null) {
for (String var : vars.split(",")) {
rscList.add(var.trim());
}
}
}
/**
* Strips hidden config entries from configuration
*/
public void stripHiddenConfigurations(Configuration conf) {
HiveConfUtil.stripConfigurations(conf, hiddenSet);
}
/**
* @return true if HS2 webui is enabled
*/
public boolean isWebUiEnabled() {
return this.getIntVar(ConfVars.HIVE_SERVER2_WEBUI_PORT) != 0;
}
/**
* @return true if HS2 webui query-info cache is enabled
*/
public boolean isWebUiQueryInfoCacheEnabled() {
return isWebUiEnabled() && this.getIntVar(ConfVars.HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES) > 0;
}
/* Dynamic partition pruning is enabled in some or all cases
*/
public boolean isSparkDPPAny() {
return isSparkDPPAny(this);
}
/* Dynamic partition pruning is enabled only for map join when
* hive.spark.dynamic.partition.pruning is false and
* hive.spark.dynamic.partition.pruning.map.join.only is true
*/
public boolean isSparkDPPOnlyMapjoin() {
return (!this.getBoolVar(ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING) &&
this.getBoolVar(ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING_MAP_JOIN_ONLY));
}
public static boolean isLoadMetastoreConfig() {
return loadMetastoreConfig;
}
public static void setLoadMetastoreConfig(boolean loadMetastoreConfig) {
HiveConf.loadMetastoreConfig = loadMetastoreConfig;
}
public static boolean isLoadHiveServer2Config() {
return loadHiveServer2Config;
}
public static void setLoadHiveServer2Config(boolean loadHiveServer2Config) {
HiveConf.loadHiveServer2Config = loadHiveServer2Config;
}
public static class StrictChecks {
private static final String NO_LIMIT_MSG = makeMessage(
"Order by-s without limit", ConfVars.HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT);
public static final String NO_PARTITIONLESS_MSG = makeMessage(
"Queries against partitioned tables without a partition filter",
ConfVars.HIVE_STRICT_CHECKS_NO_PARTITION_FILTER);
private static final String NO_COMPARES_MSG = makeMessage(
"Unsafe compares between different types", ConfVars.HIVE_STRICT_CHECKS_TYPE_SAFETY);
private static final String NO_CARTESIAN_MSG = makeMessage(
"Cartesian products", ConfVars.HIVE_STRICT_CHECKS_CARTESIAN);
private static final String NO_BUCKETING_MSG = makeMessage(
"Load into bucketed tables", ConfVars.HIVE_STRICT_CHECKS_BUCKETING);
private static String makeMessage(String what, ConfVars setting) {
return what + " are disabled for safety reasons. If you know what you are doing, please set "
+ setting.varname + " to false and make sure that " + ConfVars.HIVEMAPREDMODE.varname +
" is not set to 'strict' to proceed. Note that you may get errors or incorrect " +
"results if you make a mistake while using some of the unsafe features.";
}
public static String checkNoLimit(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT) ? null : NO_LIMIT_MSG;
}
public static String checkNoPartitionFilter(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_NO_PARTITION_FILTER)
? null : NO_PARTITIONLESS_MSG;
}
public static String checkTypeSafety(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_TYPE_SAFETY) ? null : NO_COMPARES_MSG;
}
public static String checkCartesian(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_CARTESIAN) ? null : NO_CARTESIAN_MSG;
}
public static String checkBucketing(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_BUCKETING) ? null : NO_BUCKETING_MSG;
}
private static boolean isAllowed(Configuration conf, ConfVars setting) {
String mode = HiveConf.getVar(conf, ConfVars.HIVEMAPREDMODE, (String)null);
return (mode != null) ? !"strict".equals(mode) : !HiveConf.getBoolVar(conf, setting);
}
}
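// How StrictChecks.isAllowed() resolves the two layers of configuration:
//   - if ConfVars.HIVEMAPREDMODE is set to "strict", every check above reports its message,
//     regardless of the individual hive.strict.checks.* settings;
//   - if it is set to anything else, all checks pass;
//   - if it is unset, each check passes only when its own ConfVars flag
//     (e.g. ConfVars.HIVE_STRICT_CHECKS_CARTESIAN) is false.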
public static String getNonMrEngines() {
String result = StringUtils.EMPTY;
for (String s : ConfVars.HIVE_EXECUTION_ENGINE.getValidStringValues()) {
if ("mr".equals(s)) {
continue;
}
if (!result.isEmpty()) {
result += ", ";
}
result += s;
}
return result;
}
public static String generateMrDeprecationWarning() {
return "Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. "
+ "Consider using a different execution engine (i.e. " + HiveConf.getNonMrEngines()
+ ") or using Hive 1.X releases.";
}
private static final Object reverseMapLock = new Object();
private static HashMap<String, ConfVars> reverseMap = null;
public static HashMap<String, ConfVars> getOrCreateReverseMap() {
// This should be called rarely enough; for now it's ok to just lock every time.
synchronized (reverseMapLock) {
if (reverseMap != null) {
return reverseMap;
}
}
HashMap<String, ConfVars> vars = new HashMap<>();
for (ConfVars val : ConfVars.values()) {
vars.put(val.varname.toLowerCase(), val);
if (val.altName != null && !val.altName.isEmpty()) {
vars.put(val.altName.toLowerCase(), val);
}
}
synchronized (reverseMapLock) {
if (reverseMap != null) {
return reverseMap;
}
reverseMap = vars;
return reverseMap;
}
}
public void verifyAndSetAll(Map<String, String> overlay) {
for (Entry<String, String> entry : overlay.entrySet()) {
verifyAndSet(entry.getKey(), entry.getValue());
}
}
public Map<String, String> subtree(String string) {
Map<String, String> ret = new HashMap<>();
for (Entry<Object, Object> entry : getProps().entrySet()) {
String key = (String) entry.getKey();
String value = (String) entry.getValue();
if (key.startsWith(string)) {
ret.put(key.substring(string.length() + 1), value);
}
}
return ret;
}
}
| ["\"HIVE_CONF_DIR\"", "\"HIVE_HOME\"", "\"HADOOP_HOME\"", "\"HADOOP_PREFIX\""] | [] | ["HADOOP_PREFIX", "HADOOP_HOME", "HIVE_CONF_DIR", "HIVE_HOME"] | [] | ["HADOOP_PREFIX", "HADOOP_HOME", "HIVE_CONF_DIR", "HIVE_HOME"] | java | 4 | 0 | |
vendor/github.com/kubedb/redis/pkg/validator/validate.go | package validator
import (
"fmt"
"github.com/appscode/go/types"
meta_util "github.com/appscode/kutil/meta"
api "github.com/kubedb/apimachinery/apis/kubedb/v1alpha1"
cs "github.com/kubedb/apimachinery/client/clientset/versioned/typed/kubedb/v1alpha1"
amv "github.com/kubedb/apimachinery/pkg/validator"
"github.com/pkg/errors"
kerr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
)
var (
redisVersions = sets.NewString("4", "4.0", "4.0.6")
)
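// ValidateRedis checks that the Redis object names a supported version, requests at most one
// replica, is consistent with any DormantDatabase left over from a previous instance, and has
// valid storage and monitoring sections.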
func ValidateRedis(client kubernetes.Interface, extClient cs.KubedbV1alpha1Interface, redis *api.Redis) error {
if redis.Spec.Version == "" {
return fmt.Errorf(`object 'Version' is missing in '%v'`, redis.Spec)
}
// check that the Redis version is supported
if !redisVersions.Has(string(redis.Spec.Version)) {
return fmt.Errorf(`KubeDB doesn't support Redis version: %s`, string(redis.Spec.Version))
}
if redis.Spec.Replicas != nil {
replicas := types.Int32(redis.Spec.Replicas)
if replicas != 1 {
return fmt.Errorf(`spec.replicas "%d" invalid. Value must be one`, replicas)
}
}
if err := matchWithDormantDatabase(extClient, redis); err != nil {
return err
}
if redis.Spec.Storage != nil {
var err error
if err = amv.ValidateStorage(client, redis.Spec.Storage); err != nil {
return err
}
}
monitorSpec := redis.Spec.Monitor
if monitorSpec != nil {
if err := amv.ValidateMonitorSpec(monitorSpec); err != nil {
return err
}
}
return nil
}
func matchWithDormantDatabase(extClient cs.KubedbV1alpha1Interface, redis *api.Redis) error {
// Check if DormantDatabase exists or not
dormantDb, err := extClient.DormantDatabases(redis.Namespace).Get(redis.Name, metav1.GetOptions{})
if err != nil {
if !kerr.IsNotFound(err) {
return err
}
return nil
}
// Check DatabaseKind
if dormantDb.Labels[api.LabelDatabaseKind] != api.ResourceKindRedis {
return fmt.Errorf(`invalid Redis: "%v". A DormantDatabase "%v" of a different Kind exists`, redis.Name, dormantDb.Name)
}
// Check Origin Spec
drmnOriginSpec := dormantDb.Spec.Origin.Spec.Redis
originalSpec := redis.Spec
// Skip checking doNotPause
drmnOriginSpec.DoNotPause = originalSpec.DoNotPause
if !meta_util.Equal(drmnOriginSpec, &originalSpec) {
return errors.New("redis spec mismatches with OriginSpec in DormantDatabases")
}
return nil
}
| [] | [] | [] | [] | [] | go | null | null | null |
test/e2e/autoscaling/cluster_size_autoscaling.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"bytes"
"fmt"
"io/ioutil"
"math"
"net/http"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"time"
"k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
schedulerapi "k8s.io/api/scheduling/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
defaultTimeout = 3 * time.Minute
resizeTimeout = 5 * time.Minute
manualResizeTimeout = 6 * time.Minute
scaleUpTimeout = 5 * time.Minute
scaleUpTriggerTimeout = 2 * time.Minute
scaleDownTimeout = 20 * time.Minute
podTimeout = 2 * time.Minute
nodesRecoverTimeout = 5 * time.Minute
rcCreationRetryTimeout = 4 * time.Minute
rcCreationRetryDelay = 20 * time.Second
makeSchedulableTimeout = 10 * time.Minute
makeSchedulableDelay = 20 * time.Second
freshStatusLimit = 20 * time.Second
gkeEndpoint = "https://test-container.sandbox.googleapis.com"
gkeUpdateTimeout = 15 * time.Minute
gkeNodepoolNameKey = "cloud.google.com/gke-nodepool"
disabledTaint = "DisabledForAutoscalingTest"
criticalAddonsOnlyTaint = "CriticalAddonsOnly"
newNodesForScaledownTests = 2
unhealthyClusterThreshold = 4
caNoScaleUpStatus = "NoActivity"
caOngoingScaleUpStatus = "InProgress"
timestampFormat = "2006-01-02 15:04:05 -0700 MST"
expendablePriorityClassName = "expendable-priority"
highPriorityClassName = "high-priority"
gpuLabel = "cloud.google.com/gke-accelerator"
)
var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
f := framework.NewDefaultFramework("autoscaling")
var c clientset.Interface
var nodeCount int
var coreCount int64
var memAllocatableMb int
var originalSizes map[string]int
BeforeEach(func() {
c = f.ClientSet
framework.SkipUnlessProviderIs("gce", "gke")
originalSizes = make(map[string]int)
sum := 0
for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
size, err := framework.GroupSize(mig)
framework.ExpectNoError(err)
By(fmt.Sprintf("Initial size of %s: %d", mig, size))
originalSizes[mig] = size
sum += size
}
// Give instances time to spin up
framework.ExpectNoError(framework.WaitForReadyNodes(c, sum, scaleUpTimeout))
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)
coreCount = 0
for _, node := range nodes.Items {
quantity := node.Status.Capacity[v1.ResourceCPU]
coreCount += quantity.Value()
}
By(fmt.Sprintf("Initial number of schedulable nodes: %v", nodeCount))
Expect(nodeCount).NotTo(BeZero())
mem := nodes.Items[0].Status.Allocatable[v1.ResourceMemory]
memAllocatableMb = int((&mem).Value() / 1024 / 1024)
Expect(nodeCount).Should(Equal(sum))
if framework.ProviderIs("gke") {
val, err := isAutoscalerEnabled(5)
framework.ExpectNoError(err)
if !val {
err = enableAutoscaler("default-pool", 3, 5)
framework.ExpectNoError(err)
}
}
})
AfterEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes)
expectedNodes := 0
for _, size := range originalSizes {
expectedNodes += size
}
framework.ExpectNoError(framework.WaitForReadyNodes(c, expectedNodes, scaleDownTimeout))
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
s := time.Now()
makeSchedulableLoop:
for start := time.Now(); time.Since(start) < makeSchedulableTimeout; time.Sleep(makeSchedulableDelay) {
for _, n := range nodes.Items {
err = makeNodeSchedulable(c, &n, true)
switch err.(type) {
case CriticalAddonsOnlyError:
continue makeSchedulableLoop
default:
framework.ExpectNoError(err)
}
}
break
}
glog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
})
It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
By("Creating unschedulable pod")
ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
By("Waiting for scale up hoping it won't happen")
// Verify that the appropriate event was generated
eventFound := false
EventsLoop:
for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) {
By("Waiting for NotTriggerScaleUp event")
events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{})
framework.ExpectNoError(err)
for _, e := range events.Items {
if e.InvolvedObject.Kind == "Pod" && e.Reason == "NotTriggerScaleUp" && strings.Contains(e.Message, "it wouldn't fit if a new node is added") {
By("NotTriggerScaleUp event found")
eventFound = true
break EventsLoop
}
}
}
Expect(eventFound).Should(Equal(true))
// Verify that cluster size is not changed
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size <= nodeCount }, time.Second))
})
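// simpleScaleUpTest reserves roughly the cluster's total allocatable memory across many small
// pods so that they cannot all fit on the existing nodes, then expects the autoscaler to grow
// the cluster by at least one node, tolerating `unready` nodes that are not yet ready.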
simpleScaleUpTest := func(unready int) {
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout, unready))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
}
It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]",
func() { simpleScaleUpTest(0) })
gpuType := os.Getenv("TESTED_GPU_TYPE")
It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Schedule a pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
})
It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
defer disableAutoscaler(gpuPoolName, 0, 2)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Scale GPU deployment")
framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(2))
})
It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
// Expect gpu pool to stay intact
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Remove the only POD requiring GPU")
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]",
func() {
framework.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleUpTest(1) })
})
It("shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]", func() {
// Wait for the situation to stabilize - CA should be running and have up-to-date node readiness info.
status, err := waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
return s.ready == s.target && s.ready <= nodeCount
}, scaleUpTriggerTimeout)
framework.ExpectNoError(err)
unmanagedNodes := nodeCount - status.ready
By("Schedule more pods than can fit and wait for cluster to scale-up")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
return s.status == caOngoingScaleUpStatus
}, scaleUpTriggerTimeout)
framework.ExpectNoError(err)
target := status.target
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
By("Expect no more scale-up to be happening after all pods are scheduled")
status, err = getScaleUpStatus(c)
framework.ExpectNoError(err)
if status.target != target {
glog.Warningf("Final number of nodes (%v) does not match initial scale-up target (%v).", status.target, target)
}
Expect(status.timestamp.Add(freshStatusLimit).Before(time.Now())).Should(Equal(false))
Expect(status.status).Should(Equal(caNoScaleUpStatus))
Expect(status.ready).Should(Equal(status.target))
Expect(len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items)).Should(Equal(status.target + unmanagedNodes))
})
It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessProviderIs("gke")
By("Creating new node-pool with n1-standard-4 machines")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
By("Getting memory available on new nodes, so we can account for it when creating RC")
nodes := getPoolNodes(f, extraPoolName)
Expect(len(nodes)).Should(Equal(extraNodes))
extraMemMb := 0
for _, node := range nodes {
mem := node.Status.Capacity[v1.ResourceMemory]
extraMemMb += int((&mem).Value() / 1024 / 1024)
}
By("Reserving 0.1x more memory than the cluster holds to trigger scale up")
totalMemoryReservation := int(1.1 * float64(nodeCount*memAllocatableMb+extraMemMb))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
ReserveMemory(f, "memory-reservation", 100, totalMemoryReservation, false, defaultTimeout)
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+extraNodes+1 }, scaleUpTimeout))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
})
It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessProviderIs("gke")
By("Creating new node-pool with n1-standard-4 machines")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2))
})
It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "host-port")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
})
It("should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingScaleUp]", func() {
pods := nodeCount
newPods := 2
labels := map[string]string{
"anti-affinity": "yes",
}
By("starting a pod with anti-affinity on each node")
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
By("scheduling extra pods with anti-affinity to existing ones")
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
By("creating pods")
pods := nodeCount
newPods := 1
labels := map[string]string{
"anti-affinity": "yes",
}
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
By("waiting for all pods before triggering scale up")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
By("creating a pod requesting EmptyDir")
framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels, emptyDirVolumes))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessProviderIs("gce", "gke")
volumeLabels := labels.Set{
framework.VolumeSelectorKey: f.Namespace.Name,
}
selector := metav1.SetAsLabelSelector(volumeLabels)
By("creating volume & pvc")
diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err)
pvConfig := framework.PersistentVolumeConfig{
NamePrefix: "gce-",
Labels: volumeLabels,
PVSource: v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: diskName,
FSType: "ext3",
ReadOnly: false,
},
},
Prebind: nil,
}
emptyStorageClass := ""
pvcConfig := framework.PersistentVolumeClaimConfig{
Selector: selector,
StorageClassName: &emptyStorageClass,
}
pv, pvc, err := framework.CreatePVPVC(c, pvConfig, pvcConfig, f.Namespace.Name, false)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitOnPVandPVC(c, f.Namespace.Name, pv, pvc))
defer func() {
errs := framework.PVPVCCleanup(c, f.Namespace.Name, pv, pvc)
if len(errs) > 0 {
framework.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pv, pvc = nil, nil
if diskName != "" {
framework.ExpectNoError(framework.DeletePDWithRetry(diskName))
}
}()
By("creating pods")
pods := nodeCount
labels := map[string]string{
"anti-affinity": "yes",
}
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer func() {
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
glog.Infof("RC and pods not using volume deleted")
}()
By("waiting for all pods before triggering scale up")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
By("creating a pod requesting PVC")
pvcPodName := "pvc-pod"
newPods := 1
volumes := buildVolumes(pv, pvc)
framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, pvcPodName, labels, labels, volumes))
defer func() {
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, pvcPodName)
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
}()
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
labelKey := "cluster-autoscaling-test.special-node"
labelValue := "true"
By("Finding the smallest MIG")
minMig := ""
minSize := nodeCount
for mig, size := range originalSizes {
if size <= minSize {
minMig = mig
minSize = size
}
}
if minSize == 0 {
newSizes := make(map[string]int)
for mig, size := range originalSizes {
newSizes[mig] = size
}
newSizes[minMig] = 1
setMigSizes(newSizes)
}
removeLabels := func(nodesToClean sets.String) {
By("Removing labels from nodes")
for node := range nodesToClean {
framework.RemoveLabelOffNode(c, node, labelKey)
}
}
nodes, err := framework.GetGroupNodes(minMig)
framework.ExpectNoError(err)
nodesSet := sets.NewString(nodes...)
defer removeLabels(nodesSet)
By(fmt.Sprintf("Annotating nodes of the smallest MIG(%s): %v", minMig, nodes))
for node := range nodesSet {
framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
}
scheduling.CreateNodeSelectorPods(f, "node-selector", minSize+1, map[string]string{labelKey: labelValue}, false)
By("Waiting for new node to appear and annotating it")
framework.WaitForGroupSize(minMig, int32(minSize+1))
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
newNodes, err := framework.GetGroupNodes(minMig)
framework.ExpectNoError(err)
newNodesSet := sets.NewString(newNodes...)
newNodesSet.Delete(nodes...)
if len(newNodesSet) > 1 {
By(fmt.Sprintf("Spotted following new nodes in %s: %v", minMig, newNodesSet))
glog.Infof("Usually only 1 new node is expected, investigating")
glog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
if output, err := exec.Command("gcloud", "compute", "instances", "list",
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil {
glog.Infof("Gcloud compute instances list: %s", output)
} else {
glog.Errorf("Failed to get instances list: %v", err)
}
for newNode := range newNodesSet {
if output, err := execCmd("gcloud", "compute", "instances", "describe",
newNode,
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil {
glog.Infof("Gcloud compute instances describe: %s", output)
} else {
glog.Errorf("Failed to get instances describe: %v", err)
}
}
// TODO: possibly remove broken node from newNodesSet to prevent removeLabel from crashing.
// However at this moment we DO WANT it to crash so that we don't check all test runs for the
// rare behavior, but only the broken ones.
}
By(fmt.Sprintf("New nodes: %v\n", newNodesSet))
registeredNodes := sets.NewString()
for nodeName := range newNodesSet {
node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err == nil && node != nil {
registeredNodes.Insert(nodeName)
} else {
glog.Errorf("Failed to get node %v: %v", nodeName, err)
}
}
By(fmt.Sprintf("Setting labels for registered new nodes: %v", registeredNodes.List()))
for node := range registeredNodes {
framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
}
defer removeLabels(registeredNodes)
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "node-selector"))
})
It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessProviderIs("gke")
By("Creating new node-pool with n1-standard-4 machines")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
defer disableAutoscaler(extraPoolName, 1, 2)
extraPods := extraNodes + 1
totalMemoryReservation := int(float64(extraPods) * 1.5 * float64(memAllocatableMb))
By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
ReserveMemory(f, "memory-reservation", extraPods, totalMemoryReservation, false, defaultTimeout)
// Apparently the GKE master is restarted a couple of minutes after the node pool is added,
// resetting all the timers in the scale-down code. Adding 5 extra minutes to work around
// this issue.
// TODO: Remove the extra time when GKE restart is fixed.
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes+1, scaleUpTimeout+5*time.Minute))
})
simpleScaleDownTest := func(unready int) {
cleanup, err := addKubeSystemPdbs(f)
defer cleanup()
framework.ExpectNoError(err)
By("Manually increase cluster size")
increasedSize := 0
newSizes := make(map[string]int)
for key, val := range originalSizes {
newSizes[key] = val + 2 + unready
increasedSize += val + 2 + unready
}
setMigSizes(newSizes)
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
func(size int) bool { return size >= increasedSize }, manualResizeTimeout, unready))
By("Some node should be removed")
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
func(size int) bool { return size < increasedSize }, scaleDownTimeout, unready))
}
It("should correctly scale down after a node is not needed [Feature:ClusterSizeAutoscalingScaleDown]",
func() { simpleScaleDownTest(0) })
It("should correctly scale down after a node is not needed and one node is broken [Feature:ClusterSizeAutoscalingScaleDown]",
func() {
framework.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleDownTest(1) })
})
It("should correctly scale down after a node is not needed when there is non autoscaled pool[Feature:ClusterSizeAutoscalingScaleDown]", func() {
framework.SkipUnlessProviderIs("gke")
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-1", 3)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= increasedSize+extraNodes }, scaleUpTimeout))
By("Some node should be removed")
// Apparently the GKE master is restarted a couple of minutes after the node pool is added,
// resetting all the timers in the scale-down code. Adding 10 extra minutes to work around
// this issue.
// TODO: Remove the extra time when GKE restart is fixed.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < increasedSize+extraNodes }, scaleDownTimeout+10*time.Minute))
})
It("should be able to scale down when rescheduling a pod is required and pdb allows for it[Feature:ClusterSizeAutoscalingScaleDown]", func() {
runDrainTest(f, originalSizes, f.Namespace.Name, 1, 1, func(increasedSize int) {
By("Some node should be removed")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < increasedSize }, scaleDownTimeout))
})
})
It("shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]", func() {
runDrainTest(f, originalSizes, f.Namespace.Name, 1, 0, func(increasedSize int) {
By("No nodes should be removed")
time.Sleep(scaleDownTimeout)
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodes.Items)).Should(Equal(increasedSize))
})
})
It("should be able to scale down by draining multiple pods one by one as dictated by pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
runDrainTest(f, originalSizes, f.Namespace.Name, 2, 1, func(increasedSize int) {
By("Some node should be removed")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < increasedSize }, scaleDownTimeout))
})
})
It("should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
runDrainTest(f, originalSizes, "kube-system", 2, 1, func(increasedSize int) {
By("Some node should be removed")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < increasedSize }, scaleDownTimeout))
})
})
It("Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp]", func() {
// Provider-specific setup
if framework.ProviderIs("gke") {
// GKE-specific setup
By("Add a new node pool with 0 nodes and min size 0")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 0)
defer deleteNodePool(extraPoolName)
framework.ExpectNoError(enableAutoscaler(extraPoolName, 0, 1))
defer disableAutoscaler(extraPoolName, 0, 1)
} else {
// on GCE, run only if there are already at least 2 node groups
framework.SkipUnlessAtLeast(len(originalSizes), 2, "At least 2 node groups are needed for scale-to-0 tests")
By("Manually scale smallest node group to 0")
minMig := ""
minSize := nodeCount
for mig, size := range originalSizes {
if size <= minSize {
minMig = mig
minSize = size
}
}
framework.ExpectNoError(framework.ResizeGroup(minMig, int32(0)))
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount-minSize, resizeTimeout))
}
By("Make remaining nodes unschedulable")
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
framework.ExpectNoError(err)
for _, node := range nodes.Items {
err = makeNodeUnschedulable(f.ClientSet, &node)
defer func(n v1.Node) {
makeNodeSchedulable(f.ClientSet, &n, false)
}(node)
framework.ExpectNoError(err)
}
By("Run a scale-up test")
ReserveMemory(f, "memory-reservation", 1, 100, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= len(nodes.Items)+1 }, scaleUpTimeout))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
})
// The scale-to-0 test is split into two functions (one for GKE, one for GCE)
// because the scenario is exactly the same, but setup & verification use different APIs.
//
// Scenario:
// (GKE only) add an extra node pool with size 1 & enable autoscaling for it
// (GCE only) find the smallest MIG & resize it to 1
// manually drain the single node from this node pool/MIG
// wait for cluster size to decrease
// verify the targeted node pool/MIG is of size 0
gkeScaleToZero := func() {
// GKE-specific setup
By("Add a new node pool with size 1 and min size 0")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 0, 1))
defer disableAutoscaler(extraPoolName, 0, 1)
ngNodes := getPoolNodes(f, extraPoolName)
Expect(len(ngNodes)).To(Equal(extraNodes))
for _, node := range ngNodes {
By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
}
for _, node := range ngNodes {
drainNode(f, node)
}
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size <= nodeCount }, scaleDownTimeout))
// GKE-specific check
newSize := getPoolSize(f, extraPoolName)
Expect(newSize).Should(Equal(0))
}
gceScaleToZero := func() {
// non-GKE only
By("Find smallest node group and manually scale it to a single node")
minMig := ""
minSize := nodeCount
for mig, size := range originalSizes {
if size <= minSize {
minMig = mig
minSize = size
}
}
framework.ExpectNoError(framework.ResizeGroup(minMig, int32(1)))
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount-minSize+1, resizeTimeout))
ngNodes, err := framework.GetGroupNodes(minMig)
framework.ExpectNoError(err)
Expect(len(ngNodes) == 1).To(BeTrue())
node, err := f.ClientSet.CoreV1().Nodes().Get(ngNodes[0], metav1.GetOptions{})
By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
framework.ExpectNoError(err)
// this part is identical
drainNode(f, node)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < nodeCount-minSize+1 }, scaleDownTimeout))
// non-GKE only
newSize, err := framework.GroupSize(minMig)
framework.ExpectNoError(err)
Expect(newSize).Should(Equal(0))
}
It("Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown]", func() {
if framework.ProviderIs("gke") { // In GKE, we can just add a node pool
gkeScaleToZero()
} else if len(originalSizes) >= 2 {
gceScaleToZero()
} else {
framework.Skipf("At least 2 node groups are needed for scale-to-0 tests")
}
})
It("Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken[Feature:ClusterSizeAutoscalingScaleUp]", func() {
clusterSize := nodeCount
for clusterSize < unhealthyClusterThreshold+1 {
clusterSize = manuallyIncreaseClusterSize(f, originalSizes)
}
By("Block network connectivity to some nodes to simulate unhealthy cluster")
nodesToBreakCount := int(math.Ceil(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize))))
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
framework.ExpectNoError(err)
Expect(nodesToBreakCount <= len(nodes.Items)).To(BeTrue())
nodesToBreak := nodes.Items[:nodesToBreakCount]
// TestUnderTemporaryNetworkFailure cuts connectivity to a single node at a time and accepts a func() callback,
// so the loop over nodes is expressed as a recursive call to avoid duplicating
// TestUnderTemporaryNetworkFailure.
var testFunction func()
testFunction = func() {
if len(nodesToBreak) > 0 {
ntb := &nodesToBreak[0]
nodesToBreak = nodesToBreak[1:]
framework.TestUnderTemporaryNetworkFailure(c, "default", ntb, testFunction)
} else {
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
time.Sleep(scaleUpTimeout)
currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
framework.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
Expect(len(currentNodes.Items)).Should(Equal(len(nodes.Items) - nodesToBreakCount))
status, err := getClusterwideStatus(c)
framework.Logf("Clusterwide status: %v", status)
framework.ExpectNoError(err)
Expect(status).Should(Equal("Unhealthy"))
}
}
testFunction()
// Give nodes time to recover from network failure
framework.ExpectNoError(framework.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout))
})
It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
defer createPriorityClasses(f)()
// Create nodeCount+1 pods, each requesting 0.7 of a node's allocatable memory, so one more node would have to be created to fit them.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName)
defer cleanupFunc()
By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String()))
time.Sleep(scaleUpTimeout)
// Verify that cluster size is not changed
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, time.Second))
})
It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
defer createPriorityClasses(f)()
// Create nodeCount+1 pods, each requesting 0.7 of a node's allocatable memory, so one more node would have to be created to fit them.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
defer cleanupFunc()
// Verify that the cluster size increased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size > nodeCount }, time.Second))
})
It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func() {
defer createPriorityClasses(f)()
// Create nodeCount pods, one per node, each requesting 0.7 of the node's allocatable memory.
cleanupFunc1 := ReserveMemoryWithPriority(f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName)
defer cleanupFunc1()
// Create nodeCount more pods, one per node, each requesting 0.7 of the node's allocatable memory. Pods created here should preempt the pods created above.
cleanupFunc2 := ReserveMemoryWithPriority(f, "memory-reservation2", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, highPriorityClassName)
defer cleanupFunc2()
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, time.Second))
})
It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
defer createPriorityClasses(f)()
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, expendablePriorityClassName)
defer cleanupFunc()
By("Waiting for scale down")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
})
It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
defer createPriorityClasses(f)()
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
defer cleanupFunc()
By(fmt.Sprintf("Waiting for scale down hoping it won't happen, sleep for %s", scaleDownTimeout.String()))
time.Sleep(scaleDownTimeout)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == increasedSize }, time.Second))
})
})
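// installNvidiaDriversDaemonSet applies the DaemonSet that installs NVIDIA GPU drivers on the cluster's GPU nodes.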
func installNvidiaDriversDaemonSet() {
By("Add daemonset which installs nvidia drivers")
// The link differs from the one in the GKE documentation; per discussion with @mindprince, this one should be used.
framework.RunKubectlOrDie("apply", "-f", "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml")
}
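// execCmd logs the full command line and returns the corresponding exec.Cmd without running it.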
func execCmd(args ...string) *exec.Cmd {
glog.Infof("Executing: %s", strings.Join(args, " "))
return exec.Command(args[0], args[1:]...)
}
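// runDrainTest manually increases the cluster size, runs podsPerNode replicated pods on every node in the given
// namespace, protects them with a PodDisruptionBudget that leaves room for pdbSize disruptions, and then calls
// verifyFunction with the increased cluster size.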
func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace string, podsPerNode, pdbSize int, verifyFunction func(int)) {
increasedSize := manuallyIncreaseClusterSize(f, migSizes)
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
framework.ExpectNoError(err)
numPods := len(nodes.Items) * podsPerNode
testID := string(uuid.NewUUID()) // So that we can label and find pods
labelMap := map[string]string{"test_id": testID}
framework.ExpectNoError(runReplicatedPodOnEachNode(f, nodes.Items, namespace, podsPerNode, "reschedulable-pods", labelMap, 0))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, "reschedulable-pods")
By("Create a PodDisruptionBudget")
minAvailable := intstr.FromInt(numPods - pdbSize)
pdb := &policy.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: "test_pdb",
Namespace: namespace,
},
Spec: policy.PodDisruptionBudgetSpec{
Selector: &metav1.LabelSelector{MatchLabels: labelMap},
MinAvailable: &minAvailable,
},
}
_, err = f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(pdb)
defer func() {
f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Delete(pdb.Name, &metav1.DeleteOptions{})
}()
framework.ExpectNoError(err)
verifyFunction(increasedSize)
}
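// getGKEURL builds a GKE API URL for the given API version and resource suffix, authenticated with a fresh gcloud access token.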
func getGKEURL(apiVersion string, suffix string) string {
out, err := execCmd("gcloud", "auth", "print-access-token").Output()
framework.ExpectNoError(err)
token := strings.Replace(string(out), "\n", "", -1)
return fmt.Sprintf("%s/%s/%s?access_token=%s",
gkeEndpoint,
apiVersion,
suffix,
token)
}
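// getGKEClusterURL returns the GKE API URL of the test cluster, using the locations-based path for regional clusters and the zones-based path otherwise.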
func getGKEClusterURL(apiVersion string) string {
if isRegionalCluster() {
// TODO(bskiba): Use locations API for all clusters once it's graduated to v1.
return getGKEURL(apiVersion, fmt.Sprintf("projects/%s/locations/%s/clusters/%s",
framework.TestContext.CloudConfig.ProjectID,
framework.TestContext.CloudConfig.Region,
framework.TestContext.CloudConfig.Cluster))
} else {
return getGKEURL(apiVersion, fmt.Sprintf("projects/%s/zones/%s/clusters/%s",
framework.TestContext.CloudConfig.ProjectID,
framework.TestContext.CloudConfig.Zone,
framework.TestContext.CloudConfig.Cluster))
}
}
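// getCluster fetches the raw JSON description of the test cluster from the GKE API.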
func getCluster(apiVersion string) (string, error) {
resp, err := http.Get(getGKEClusterURL(apiVersion))
if err != nil {
return "", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("error: %s %s", resp.Status, body)
}
return string(body), nil
}
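// isAutoscalerEnabled reports whether the cluster description contains the expected maxNodeCount, i.e. whether autoscaling was enabled with that limit.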
func isAutoscalerEnabled(expectedMaxNodeCountInTargetPool int) (bool, error) {
apiVersion := "v1"
if isRegionalCluster() {
apiVersion = "v1beta1"
}
strBody, err := getCluster(apiVersion)
if err != nil {
return false, err
}
if strings.Contains(strBody, "\"maxNodeCount\": "+strconv.Itoa(expectedMaxNodeCountInTargetPool)) {
return true, nil
}
return false, nil
}
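// getClusterLocation returns the gcloud location flag for the test cluster: --region for regional clusters, --zone otherwise.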
func getClusterLocation() string {
if isRegionalCluster() {
return "--region=" + framework.TestContext.CloudConfig.Region
} else {
return "--zone=" + framework.TestContext.CloudConfig.Zone
}
}
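// getGcloudCommandFromTrack assembles a gcloud command line on the given release track (alpha/beta/GA), appending the cluster location and project flags.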
func getGcloudCommandFromTrack(commandTrack string, args []string) []string {
command := []string{"gcloud"}
if commandTrack == "beta" || commandTrack == "alpha" {
command = append(command, commandTrack)
}
command = append(command, args...)
command = append(command, getClusterLocation())
command = append(command, "--project="+framework.TestContext.CloudConfig.ProjectID)
return command
}
func getGcloudCommand(args []string) []string {
track := ""
if isRegionalCluster() {
track = "beta"
}
return getGcloudCommandFromTrack(track, args)
}
func isRegionalCluster() bool {
// TODO(bskiba): Use an appropriate indicator that the cluster is regional.
return framework.TestContext.CloudConfig.MultiZone
}
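// enableAutoscaler enables autoscaling for the given node pool via gcloud and polls until the new max node count shows up in the cluster description.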
func enableAutoscaler(nodePool string, minCount, maxCount int) error {
glog.Infof("Using gcloud to enable autoscaling for pool %s", nodePool)
args := []string{"container", "clusters", "update", framework.TestContext.CloudConfig.Cluster,
"--enable-autoscaling",
"--min-nodes=" + strconv.Itoa(minCount),
"--max-nodes=" + strconv.Itoa(maxCount),
"--node-pool=" + nodePool}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
glog.Errorf("Failed config update result: %s", output)
return fmt.Errorf("Failed to enable autoscaling: %v", err)
}
glog.Infof("Config update result: %s", output)
var finalErr error
for startTime := time.Now(); startTime.Add(gkeUpdateTimeout).After(time.Now()); time.Sleep(30 * time.Second) {
val, err := isAutoscalerEnabled(maxCount)
if err == nil && val {
return nil
}
finalErr = err
}
return fmt.Errorf("autoscaler not enabled, last error: %v", finalErr)
}
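// disableAutoscaler disables autoscaling for the given node pool via gcloud and polls until the change is reflected in the cluster description.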
func disableAutoscaler(nodePool string, minCount, maxCount int) error {
glog.Infof("Using gcloud to disable autoscaling for pool %s", nodePool)
args := []string{"container", "clusters", "update", framework.TestContext.CloudConfig.Cluster,
"--no-enable-autoscaling",
"--node-pool=" + nodePool}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
glog.Errorf("Failed config update result: %s", output)
return fmt.Errorf("Failed to disable autoscaling: %v", err)
}
glog.Infof("Config update result: %s", output)
var finalErr error
for startTime := time.Now(); startTime.Add(gkeUpdateTimeout).After(time.Now()); time.Sleep(30 * time.Second) {
val, err := isAutoscalerEnabled(maxCount)
if err == nil && !val {
return nil
}
finalErr = err
}
return fmt.Errorf("autoscaler still enabled, last error: %v", finalErr)
}
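// executeHTTPRequest sends an HTTP request with the given method, URL and body and returns the response body, failing on non-200 status codes.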
func executeHTTPRequest(method string, url string, body string) (string, error) {
client := &http.Client{}
req, err := http.NewRequest(method, url, strings.NewReader(body))
if err != nil {
By(fmt.Sprintf("Can't create request: %s", err.Error()))
return "", err
}
resp, err := client.Do(req)
if err != nil {
	return "", err
}
defer resp.Body.Close()
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
	return "", err
}
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("error: %s %s", resp.Status, string(respBody))
}
return string(respBody), nil
}
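// addNodePool creates a GKE node pool with the given name, machine type and number of nodes.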
func addNodePool(name string, machineType string, numNodes int) {
args := []string{"container", "node-pools", "create", name, "--quiet",
"--machine-type=" + machineType,
"--num-nodes=" + strconv.Itoa(numNodes),
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
glog.Infof("Creating node-pool %s: %s", name, output)
framework.ExpectNoError(err, string(output))
}
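// addGpuNodePool creates a GKE node pool whose nodes each have the requested number of GPUs of the given type attached.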
func addGpuNodePool(name string, gpuType string, gpuCount int, numNodes int) {
args := []string{"beta", "container", "node-pools", "create", name, "--quiet",
"--accelerator", "type=" + gpuType + ",count=" + strconv.Itoa(gpuCount),
"--num-nodes=" + strconv.Itoa(numNodes),
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
glog.Infof("Creating node-pool %s: %s", name, output)
framework.ExpectNoError(err, string(output))
}
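// deleteNodePool deletes the given GKE node pool, retrying with exponential backoff since deletion can fail transiently.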
func deleteNodePool(name string) {
glog.Infof("Deleting node pool %s", name)
args := []string{"container", "node-pools", "delete", name, "--quiet",
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
err := wait.ExponentialBackoff(
wait.Backoff{Duration: 1 * time.Minute, Factor: float64(3), Steps: 3},
func() (bool, error) {
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
glog.Warningf("Error deleting nodegroup - error:%v, output: %s", err, output)
return false, nil
}
glog.Infof("Node-pool deletion output: %s", output)
return true, nil
})
framework.ExpectNoError(err)
}
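// getPoolNodes returns pointers to all ready nodes (including tainted ones) that belong to the given node pool.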
func getPoolNodes(f *framework.Framework, poolName string) []*v1.Node {
nodes := make([]*v1.Node, 0, 1)
nodeList := framework.GetReadyNodesIncludingTaintedOrDie(f.ClientSet)
for i := range nodeList.Items {
	node := &nodeList.Items[i] // take the address of the slice element, not the loop variable
	if node.Labels[gkeNodepoolNameKey] == poolName {
		nodes = append(nodes, node)
	}
}
return nodes
}
// getPoolInitialSize returns the initial size of the node pool taking into
// account that it may span multiple zones. In that case, node pool consists of
// multiple migs all containing initialNodeCount nodes.
func getPoolInitialSize(poolName string) int {
// get initial node count
args := []string{"container", "node-pools", "describe", poolName, "--quiet",
"--cluster=" + framework.TestContext.CloudConfig.Cluster,
"--format=value(initialNodeCount)"}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
glog.Infof("Node-pool initial size: %s", output)
framework.ExpectNoError(err, string(output))
fields := strings.Fields(string(output))
Expect(len(fields)).Should(Equal(1))
size, err := strconv.ParseInt(fields[0], 10, 64)
framework.ExpectNoError(err)
// get number of node pools
args = []string{"container", "node-pools", "describe", poolName, "--quiet",
"--cluster=" + framework.TestContext.CloudConfig.Cluster,
"--format=value(instanceGroupUrls)"}
output, err = execCmd(getGcloudCommand(args)...).CombinedOutput()
framework.ExpectNoError(err, string(output))
nodeGroupCount := len(strings.Split(string(output), ";"))
return int(size) * nodeGroupCount
}
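// getPoolSize counts the ready schedulable nodes that belong to the given node pool.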
func getPoolSize(f *framework.Framework, poolName string) int {
size := 0
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
for _, node := range nodeList.Items {
if node.Labels[gkeNodepoolNameKey] == poolName {
size++
}
}
return size
}
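// doPut issues an HTTP PUT with the given JSON content and returns the response body.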
func doPut(url, content string) (string, error) {
req, err := http.NewRequest("PUT", url, bytes.NewBuffer([]byte(content)))
if err != nil {
return "", err
}
req.Header.Set("Content-Type", "application/json")
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
strBody := string(body)
return strBody, nil
}
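// reserveMemory creates a replication controller whose pods together request the given amount of memory (split evenly
// across replicas), retrying RC creation for a while, and returns a function that deletes the RC.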
func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration, priorityClassName string) func() error {
By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
request := int64(1024 * 1024 * megabytes / replicas)
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: timeout,
Image: imageutils.GetPauseImageName(),
Replicas: replicas,
MemRequest: request,
NodeSelector: selector,
Tolerations: tolerations,
PriorityClassName: priorityClassName,
}
for start := time.Now(); time.Since(start) < rcCreationRetryTimeout; time.Sleep(rcCreationRetryDelay) {
err := framework.RunRC(*config)
if err != nil && strings.Contains(err.Error(), "Error creating replication controller") {
glog.Warningf("Failed to create memory reservation: %v", err)
continue
}
if expectRunning {
framework.ExpectNoError(err)
}
return func() error {
return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
}
}
framework.Failf("Failed to reserve memory within timeout")
return nil
}
// ReserveMemoryWithPriority creates a replication controller with pods with priority that, in summation,
// request the specified amount of memory.
func ReserveMemoryWithPriority(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, priorityClassName string) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, priorityClassName)
}
// ReserveMemoryWithSelectorAndTolerations creates a replication controller with pods with node selector and
// tolerations that, in summation, request the specified amount of memory.
func ReserveMemoryWithSelectorAndTolerations(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, selector, tolerations, "")
}
// ReserveMemory creates a replication controller with pods that, in summation,
// request the specified amount of memory.
func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, "")
}
// WaitForClusterSizeFunc waits until the cluster size matches the given function.
func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error {
return WaitForClusterSizeFuncWithUnready(c, sizeFunc, timeout, 0)
}
// WaitForClusterSizeFuncWithUnready waits until the cluster size matches the given function and assumes some unready nodes.
func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration, expectedUnready int) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
glog.Warningf("Failed to list nodes: %v", err)
continue
}
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
framework.FilterNodes(nodes, func(node v1.Node) bool {
return framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
if numNodes == numReady+expectedUnready && sizeFunc(numNodes) {
glog.Infof("Cluster has reached the desired size")
return nil
}
glog.Infof("Waiting for cluster with func, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
}
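// waitForCaPodsReadyInNamespace waits until the pods in the test namespace are ready, tolerating at most
// tolerateUnreadyCount pods that are not ready (failed pods are logged but not counted).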
func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface, tolerateUnreadyCount int) error {
var notready []string
for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) {
pods, err := c.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return fmt.Errorf("failed to get pods: %v", err)
}
notready = make([]string, 0)
for _, pod := range pods.Items {
ready := false
for _, c := range pod.Status.Conditions {
if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
ready = true
}
}
// Failed pods in this context generally mean that they have been
// double scheduled onto a node, but then failed a constraint check.
if pod.Status.Phase == v1.PodFailed {
glog.Warningf("Pod has failed: %v", pod)
}
if !ready && pod.Status.Phase != v1.PodFailed {
notready = append(notready, pod.Name)
}
}
if len(notready) <= tolerateUnreadyCount {
glog.Infof("sufficient number of pods ready. Tolerating %d unready", tolerateUnreadyCount)
return nil
}
glog.Infof("Too many pods are not ready yet: %v", notready)
}
glog.Info("Timeout on waiting for pods being ready")
glog.Info(framework.RunKubectlOrDie("get", "pods", "-o", "json", "--all-namespaces"))
glog.Info(framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
// Some pods are still not running.
return fmt.Errorf("Too many pods are still not running: %v", notready)
}
func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface) error {
return waitForCaPodsReadyInNamespace(f, c, 0)
}
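// getAnyNode returns an arbitrary schedulable node, or nil if the node list cannot be retrieved or is empty.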
func getAnyNode(c clientset.Interface) *v1.Node {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
glog.Errorf("Failed to get node list: %v", err)
return nil
}
if len(nodes.Items) == 0 {
glog.Errorf("No nodes")
return nil
}
return &nodes.Items[0]
}
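// setMigSizes resizes each managed instance group to the requested size and reports whether any resize was actually issued.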
func setMigSizes(sizes map[string]int) bool {
madeChanges := false
for mig, desiredSize := range sizes {
currentSize, err := framework.GroupSize(mig)
framework.ExpectNoError(err)
if desiredSize != currentSize {
By(fmt.Sprintf("Setting size of %s to %d", mig, desiredSize))
err = framework.ResizeGroup(mig, int32(desiredSize))
framework.ExpectNoError(err)
madeChanges = true
}
}
return madeChanges
}
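// drainNode marks the node unschedulable and force-deletes every pod running on it.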
func drainNode(f *framework.Framework, node *v1.Node) {
By("Make the single node unschedulable")
makeNodeUnschedulable(f.ClientSet, node)
By("Manually drain the single node")
podOpts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
framework.ExpectNoError(err)
for _, pod := range pods.Items {
err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
}
}
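// makeNodeUnschedulable adds the test NoSchedule taint to the node (if not already present), retrying up to three times on update conflicts.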
func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error {
By(fmt.Sprintf("Taint node %s", node.Name))
for j := 0; j < 3; j++ {
freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil {
return err
}
for _, taint := range freshNode.Spec.Taints {
if taint.Key == disabledTaint {
return nil
}
}
freshNode.Spec.Taints = append(freshNode.Spec.Taints, v1.Taint{
Key: disabledTaint,
Value: "DisabledForTest",
Effect: v1.TaintEffectNoSchedule,
})
_, err = c.CoreV1().Nodes().Update(freshNode)
if err == nil {
return nil
}
if !errors.IsConflict(err) {
return err
}
glog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j)
}
return fmt.Errorf("Failed to taint node in allowed number of retries")
}
// CriticalAddonsOnlyError implements the `error` interface, and signifies the
// presence of the `CriticalAddonsOnly` taint on the node.
type CriticalAddonsOnlyError struct{}
func (CriticalAddonsOnlyError) Error() string {
return fmt.Sprintf("CriticalAddonsOnly taint found on node")
}
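// makeNodeSchedulable removes the test taint from the node, retrying up to three times on update conflicts.
// When failOnCriticalAddonsOnly is true, it returns CriticalAddonsOnlyError if the CriticalAddonsOnly taint is present.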
func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAddonsOnly bool) error {
By(fmt.Sprintf("Remove taint from node %s", node.Name))
for j := 0; j < 3; j++ {
freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil {
return err
}
var newTaints []v1.Taint
for _, taint := range freshNode.Spec.Taints {
if failOnCriticalAddonsOnly && taint.Key == criticalAddonsOnlyTaint {
return CriticalAddonsOnlyError{}
}
if taint.Key != disabledTaint {
newTaints = append(newTaints, taint)
}
}
if len(newTaints) == len(freshNode.Spec.Taints) {
return nil
}
freshNode.Spec.Taints = newTaints
_, err = c.CoreV1().Nodes().Update(freshNode)
if err == nil {
return nil
}
if !errors.IsConflict(err) {
return err
}
glog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j)
}
return fmt.Errorf("Failed to remove taint from node in allowed number of retries")
}
// ScheduleAnySingleGpuPod schedules a pod which requires single GPU of any type
func ScheduleAnySingleGpuPod(f *framework.Framework, id string) error {
return ScheduleGpuPod(f, id, "", 1)
}
// ScheduleGpuPod schedules a pod which requires a given number of gpus of given type
func ScheduleGpuPod(f *framework.Framework, id string, gpuType string, gpuLimit int64) error {
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: 3 * scaleUpTimeout, // spinning up GPU node is slow
Image: imageutils.GetPauseImageName(),
Replicas: 1,
GpuLimit: gpuLimit,
Labels: map[string]string{"requires-gpu": "yes"},
}
if gpuType != "" {
config.NodeSelector = map[string]string{gpuLabel: gpuType}
}
err := framework.RunRC(*config)
if err != nil {
return err
}
return nil
}
// Create an RC running a given number of pods with anti-affinity
func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id string, podLabels, antiAffinityLabels map[string]string) error {
config := &testutils.RCConfig{
Affinity: buildAntiAffinity(antiAffinityLabels),
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: namespace,
Timeout: scaleUpTimeout,
Image: imageutils.GetPauseImageName(),
Replicas: pods,
Labels: podLabels,
}
err := framework.RunRC(*config)
if err != nil {
return err
}
_, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil {
return err
}
return nil
}
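// runVolumeAntiAffinityPods creates an RC of pods with anti-affinity to the given labels, each pod mounting the provided volumes.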
func runVolumeAntiAffinityPods(f *framework.Framework, namespace string, pods int, id string, podLabels, antiAffinityLabels map[string]string, volumes []v1.Volume) error {
config := &testutils.RCConfig{
Affinity: buildAntiAffinity(antiAffinityLabels),
Volumes: volumes,
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: namespace,
Timeout: scaleUpTimeout,
Image: imageutils.GetPauseImageName(),
Replicas: pods,
Labels: podLabels,
}
err := framework.RunRC(*config)
if err != nil {
return err
}
_, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil {
return err
}
return nil
}
var emptyDirVolumes = []v1.Volume{
{
Name: "empty-volume",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
}
func buildVolumes(pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) []v1.Volume {
return []v1.Volume{
{
Name: pv.Name,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: false,
},
},
},
}
}
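// buildAntiAffinity returns a pod anti-affinity rule that keeps pods matching the given labels on separate nodes (per hostname).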
func buildAntiAffinity(labels map[string]string) *v1.Affinity {
return &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchLabels: labels,
},
TopologyKey: "kubernetes.io/hostname",
},
},
},
}
}
// Create an RC running a given number of pods on each node without adding any constraint forcing
// such pod distribution. This is meant to create a bunch of underutilized (but not unused) nodes
// with pods that can be rescheduled on different nodes.
// This is achieved using the following method:
// 1. disable scheduling on each node
// 2. create an empty RC
// 3. for each node:
// 3a. enable scheduling on that node
// 3b. increase number of replicas in RC by podsPerNode
func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespace string, podsPerNode int, id string, labels map[string]string, memRequest int64) error {
By("Run a pod on each node")
for _, node := range nodes {
err := makeNodeUnschedulable(f.ClientSet, &node)
defer func(n v1.Node) {
makeNodeSchedulable(f.ClientSet, &n, false)
}(node)
if err != nil {
return err
}
}
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: namespace,
Timeout: defaultTimeout,
Image: imageutils.GetPauseImageName(),
Replicas: 0,
Labels: labels,
MemRequest: memRequest,
}
err := framework.RunRC(*config)
if err != nil {
return err
}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil {
return err
}
for i, node := range nodes {
err = makeNodeSchedulable(f.ClientSet, &node, false)
if err != nil {
return err
}
// Update the replica count to create new pods that will be scheduled onto this node
// (409 conflicts are retried in case the RC reference got out of sync).
for j := 0; j < 3; j++ {
*rc.Spec.Replicas = int32((i + 1) * podsPerNode)
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Update(rc)
if err == nil {
break
}
if !errors.IsConflict(err) {
return err
}
glog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j)
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil {
return err
}
}
err = wait.PollImmediate(5*time.Second, podTimeout, func() (bool, error) {
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil || rc.Status.ReadyReplicas < int32((i+1)*podsPerNode) {
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("failed to coerce RC into spawning a pod on node %s within timeout", node.Name)
}
err = makeNodeUnschedulable(f.ClientSet, &node)
if err != nil {
return err
}
}
return nil
}
// wrap runReplicatedPodOnEachNode to return cleanup
func runReplicatedPodOnEachNodeWithCleanup(f *framework.Framework, nodes []v1.Node, namespace string, podsPerNode int, id string, labels map[string]string, memRequest int64) (func(), error) {
err := runReplicatedPodOnEachNode(f, nodes, namespace, podsPerNode, id, labels, memRequest)
return func() {
framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, id)
}, err
}
// Increase cluster size by newNodesForScaledownTests to create some unused nodes
// that can be later removed by cluster autoscaler.
func manuallyIncreaseClusterSize(f *framework.Framework, originalSizes map[string]int) int {
By("Manually increase cluster size")
increasedSize := 0
newSizes := make(map[string]int)
for key, val := range originalSizes {
newSizes[key] = val + newNodesForScaledownTests
increasedSize += val + newNodesForScaledownTests
}
setMigSizes(newSizes)
checkClusterSize := func(size int) bool {
if size >= increasedSize {
return true
}
resized := setMigSizes(newSizes)
if resized {
glog.Warning("Unexpected node group size while waiting for cluster resize. Setting size to target again.")
}
return false
}
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, checkClusterSize, manualResizeTimeout))
return increasedSize
}
// Try to get clusterwide health from CA status configmap.
// Status configmap is not parsing-friendly, so evil regexpery follows.
func getClusterwideStatus(c clientset.Interface) (string, error) {
configMap, err := c.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
if err != nil {
return "", err
}
status, ok := configMap.Data["status"]
if !ok {
return "", fmt.Errorf("Status information not found in configmap")
}
matcher, err := regexp.Compile("Cluster-wide:\\s*\n\\s*Health:\\s*([A-Za-z]+)")
if err != nil {
return "", err
}
result := matcher.FindStringSubmatch(status)
if len(result) < 2 {
return "", fmt.Errorf("Failed to parse CA status configmap, raw status: %v", status)
}
return result[1], nil
}
type scaleUpStatus struct {
status string
ready int
target int
timestamp time.Time
}
// Try to get timestamp from status.
// Status configmap is not parsing-friendly, so evil regexpery follows.
func getStatusTimestamp(status string) (time.Time, error) {
timestampMatcher, err := regexp.Compile("Cluster-autoscaler status at \\s*([0-9\\-]+ [0-9]+:[0-9]+:[0-9]+\\.[0-9]+ \\+[0-9]+ [A-Za-z]+)")
if err != nil {
return time.Time{}, err
}
timestampMatch := timestampMatcher.FindStringSubmatch(status)
if len(timestampMatch) < 2 {
return time.Time{}, fmt.Errorf("Failed to parse CA status timestamp, raw status: %v", status)
}
timestamp, err := time.Parse(timestampFormat, timestampMatch[1])
if err != nil {
return time.Time{}, err
}
return timestamp, nil
}
// Try to get scaleup statuses of all node groups.
// Status configmap is not parsing-friendly, so evil regexpery follows.
func getScaleUpStatus(c clientset.Interface) (*scaleUpStatus, error) {
configMap, err := c.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
if err != nil {
return nil, err
}
status, ok := configMap.Data["status"]
if !ok {
return nil, fmt.Errorf("Status information not found in configmap")
}
timestamp, err := getStatusTimestamp(status)
if err != nil {
return nil, err
}
matcher, err := regexp.Compile("s*ScaleUp:\\s*([A-Za-z]+)\\s*\\(ready=([0-9]+)\\s*cloudProviderTarget=([0-9]+)\\s*\\)")
if err != nil {
return nil, err
}
matches := matcher.FindAllStringSubmatch(status, -1)
if len(matches) < 1 {
return nil, fmt.Errorf("Failed to parse CA status configmap, raw status: %v", status)
}
result := scaleUpStatus{
status: caNoScaleUpStatus,
ready: 0,
target: 0,
timestamp: timestamp,
}
for _, match := range matches {
if match[1] == caOngoingScaleUpStatus {
result.status = caOngoingScaleUpStatus
}
newReady, err := strconv.Atoi(match[2])
if err != nil {
return nil, err
}
result.ready += newReady
newTarget, err := strconv.Atoi(match[3])
if err != nil {
return nil, err
}
result.target += newTarget
}
glog.Infof("Cluster-Autoscaler scale-up status: %v (%v, %v)", result.status, result.ready, result.target)
return &result, nil
}
func waitForScaleUpStatus(c clientset.Interface, cond func(s *scaleUpStatus) bool, timeout time.Duration) (*scaleUpStatus, error) {
var finalErr error
var status *scaleUpStatus
err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
status, finalErr = getScaleUpStatus(c)
if finalErr != nil {
return false, nil
}
if status.timestamp.Add(freshStatusLimit).Before(time.Now()) {
// stale status
finalErr = fmt.Errorf("Status too old")
return false, nil
}
return cond(status), nil
})
if err != nil {
err = fmt.Errorf("Failed to find expected scale up status: %v, last status: %v, final err: %v", err, status, finalErr)
}
return status, err
}
// This is a temporary fix to allow CA to migrate some kube-system pods
// TODO: Remove this when the PDB is added for some of those components
func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
By("Create PodDisruptionBudgets for kube-system components, so they can be migrated if required")
var newPdbs []string
cleanup := func() {
var finalErr error
for _, newPdbName := range newPdbs {
By(fmt.Sprintf("Delete PodDisruptionBudget %v", newPdbName))
err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Delete(newPdbName, &metav1.DeleteOptions{})
if err != nil {
// log error, but attempt to remove other pdbs
glog.Errorf("Failed to delete PodDisruptionBudget %v, err: %v", newPdbName, err)
finalErr = err
}
}
if finalErr != nil {
framework.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr)
}
}
type pdbInfo struct {
label string
minAvailable int
}
pdbsToAdd := []pdbInfo{
{label: "kube-dns", minAvailable: 1},
{label: "kube-dns-autoscaler", minAvailable: 0},
{label: "metrics-server", minAvailable: 0},
{label: "kubernetes-dashboard", minAvailable: 0},
{label: "glbc", minAvailable: 0},
}
for _, pdbData := range pdbsToAdd {
By(fmt.Sprintf("Create PodDisruptionBudget for %v", pdbData.label))
labelMap := map[string]string{"k8s-app": pdbData.label}
pdbName := fmt.Sprintf("test-pdb-for-%v", pdbData.label)
minAvailable := intstr.FromInt(pdbData.minAvailable)
pdb := &policy.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: pdbName,
Namespace: "kube-system",
},
Spec: policy.PodDisruptionBudgetSpec{
Selector: &metav1.LabelSelector{MatchLabels: labelMap},
MinAvailable: &minAvailable,
},
}
_, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Create(pdb)
newPdbs = append(newPdbs, pdbName)
if err != nil {
return cleanup, err
}
}
return cleanup, nil
}
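// createPriorityClasses creates the expendable (negative priority) and high priority classes used by the preemption tests
// and returns a cleanup function that deletes them.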
func createPriorityClasses(f *framework.Framework) func() {
priorityClasses := map[string]int32{
expendablePriorityClassName: -15,
highPriorityClassName: 1000,
}
for className, priority := range priorityClasses {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority})
if err != nil {
glog.Errorf("Error creating priority class: %v", err)
}
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
}
return func() {
for className := range priorityClasses {
err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(className, nil)
if err != nil {
glog.Errorf("Error deleting priority class: %v", err)
}
}
}
}
| ["\"TESTED_GPU_TYPE\""] | [] | ["TESTED_GPU_TYPE"] | [] | ["TESTED_GPU_TYPE"] | go | 1 | 0 | |
web_scrape_data_science_listings.py | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
import requests
import pandas as pd
import numpy as np
import sys
import re
import os
import json
import time
# Assumption: flatten() used below comes from the flatten_json package; any equivalent dict-flattening helper would do.
from flatten_json import flatten
def initiate_scraper_job_listings(title, location):
'''Function to initiate a selenium webdriver and input initial job search term and job location'''
searchbar_job = title
searchbar_loc = location
'''Set global URL variables, to be used in every scrape'''
chromedriver = "/Applications/chromedriver"
os.environ["webdriver.chrome.driver"] = chromedriver
baseurl = 'https://www.glassdoor.com'
searchbar_link = 'https://www.glassdoor.com/sitedirectory/title-jobs.htm'
'''Initialize Chrome Driver'''
main = webdriver.Chrome(chromedriver)
main.get(searchbar_link)
'''Prepare for searchbar actions'''
searchBar = main.find_element_by_name("clickSource")
searchJob = main.find_element_by_name("sc.keyword")
searchLocation = main.find_element_by_id("sc.location")
'''Take searchbar actions'''
searchJob.send_keys(searchbar_job)
time.sleep(1)
searchLocation.send_keys(searchbar_loc)
time.sleep(1)
searchLocation.send_keys(Keys.ENTER)
time.sleep(5)
'''Get info from main page: html and total pagination'''
main_soup = BeautifulSoup(main.page_source, "html5lib")
for div in main_soup.find_all('div', class_='cell middle hideMob padVertSm'):
pages_str = div.text
pages = [int(s) for s in pages_str.split() if s.isdigit()]
max_pagination = pages[-1]
'''Return webdriver, and the total pagination of the job/city search terms'''
    return main, max_pagination
def loop_scraper_job_listings(selenium_webdriver, pages):
'''Function to loop through each job listing and collect information from HTML elements and JSON scripts'''
jobs_all = []
page_links_all = []
for _ in range(1, pages):
'''Get the links for all jobs on search page - approx 30 per each page'''
page_links = []
for link in selenium_webdriver.find_elements_by_xpath("//div[@class='logoWrap']/a"):
page_links.append(link.get_attribute('href'))
'''Loop through each job on the page, going to that job listing to scrape information '''
for link in page_links[0:3]:
            # The chromedriver path is local to initiate_scraper_job_listings, so reuse the same path here explicitly.
            browser = webdriver.Chrome("/Applications/chromedriver")
browser.get(link)
time.sleep(10)
'''Collect job listing information, first by initializing variables to collect'''
soup = BeautifulSoup(browser.page_source, "html5lib")
print(soup)
job = ''
job_desc = ''
city = ''
company = ''
company_details = ''
jsonvar = ''
ratings_n = ''
ceo = ''
friend = ''
benefits_rating = ''
benefits_n = ''
ratings_dict = {}
categories = []
ratings = []
'''Collect information from main job page, before iterating through each tab'''
try:
job_desc = browser.find_element_by_class_name("jobDesc").text
city = browser.find_element_by_class_name("subtle.ib").text[3:]
company = browser.find_element_by_class_name("strong.ib").text
job = browser.find_element_by_class_name("noMargTop.margBotXs.strong").text
except:
pass
print("Crawling {} job listed by {}.".format(job, company))
'''Each job listing has info stored in JSON format. Collecting it here.'''
try:
json_dict = {}
jsonvar = soup.find("script", type="application/ld+json").text
'''Format the json data in the HTML code for interpretation'''
jsonvar_p = jsonvar.replace("\n ","").replace("\n ","").replace("\n", "").replace("\t","").replace(" </script>","")
'''Push into a dictionary and then flatten it for better pandas processing'''
json_dict = flatten(json.loads(jsonvar_p))
except:
pass
'''Each job listing stores information in tabs that need to be opened for scraper to access'''
try:
a = browser.find_elements_by_class_name("link")
for button in a:
button.click()
time.sleep(2)
'''Ratings information'''
try:
ratings_all = browser.find_element_by_class_name("stars").text
categories = re.findall(r"[-+]\d*\.\d+|\D+", ratings_all)
ratings = re.findall(r"[-+]?\d*\.\d+|\d+", ratings_all)
for i, item in enumerate(categories):
if item == '.':
del categories[i]
for i, item in enumerate(categories):
ratings_dict[categories[i].replace("\n","").replace(" ","").replace("/","").replace("&","")] = ratings[i]
except:
pass
'''Number of ratings submitted'''
try:
ratings_n = browser.find_element_by_class_name("minor.css-0.e1qhxspr2").text
except:
pass
'''Company info'''
try:
# The company details are collected as a string of text to be processed later
company_details = browser.find_element_by_class_name("empBasicInfo").text
except:
pass
'''CEO approval ratings'''
try:
ceo = browser.find_element_by_class_name("cell.middle.chart.ceoApprove").text
except:
pass
'''Would recommend to friend ratings'''
try:
friend = browser.find_element_by_class_name("cell.middle.chart").text
except:
pass
'''Benefit ratings values'''
try:
benefits_n = browser.find_element_by_class_name("minor.noMargTop.padSm").text
except:
pass
'''Benefit ratings counts'''
try:
benefits_rating = browser.find_element_by_class_name("ratingNum.margRtSm").text
except:
pass
except:
pass
'''Push all scraped data into a list of dictionaries - each dictionary is a job listing'''
job_vars = {
"name": job,
"company": company,
"company_details": company_details,
"ratings_count": ratings_n,
"benefits_ratings": benefits_rating,
"benefits_count": benefits_n,
"ceo_approve": ceo,
"friend_recommend": friend,
"url": link,
"description": job_desc
}
'''Data live in 2 or 3 dictionaries (depending on what was available), so merge them accordingly'''
try:
all_vars = {**job_vars, **json_dict, **ratings_dict}
except:
all_vars = {**job_vars, **json_dict}
'''Finished collecting - append into the list of dictionaries'''
jobs_all.append(all_vars)
time.sleep(5)
'''Close job listing page'''
browser.quit()
'''Print job progress status'''
page_links_all.extend(page_links)
print("{} jobs scraped so far.".format(len(jobs_all)))
'''Find next button to take action on'''
next_button = selenium_webdriver.find_element_by_class_name("next")
'''Try and push the button. If there is a pop up in the way, close it and continue.'''
try:
next_button.click()
except:
popup = selenium_webdriver.find_element_by_id("prefix__icon-close-1")
time.sleep(5)
popup.click()
next_button.click()
selenium_webdriver.quit()
return jobs_all
| [] | [] | ["webdriver.chrome.driver"] | [] | ["webdriver.chrome.driver"] | python | 1 | 0 | |
src/doom/game.py | import os
import torch
import torch.multiprocessing as _mp
from models.a2c.A2C import A2C
from models.a2c.test import test as test_a2c
from models.a2c.train import train as train_a2c
from models.a3c.A3C import A3C
from models.a3c import optimizers
from models.a3c.test import test as test_a3c
from models.a3c.train import train as train_a3c
from models.dqn.DQN import DQN
from models.dqn.NStepProgress import NStepProgress
from models.dqn.SoftmaxBody import SoftmaxBody
from models.dqn.AI import AI
from models.dqn.ReplayMemory import ReplayMemory
from models.dqn.train import train as train_dqn
from models.dqn.train import eligibility_trace
from doom.doom_trainer import DoomTrainer
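# CUDA tensors cannot be shared with processes created via the default "fork"
# start method, so the A3C workers are created from a "spawn" context instead.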
mp = _mp.get_context('spawn')
def play(parameters):
dtype = torch.cuda.FloatTensor
torch.manual_seed(parameters.seed)
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if parameters.model == 'human':
play_human(parameters)
elif parameters.model == 'a3c':
play_a3c(parameters)
elif parameters.model == 'a2c':
play_a2c(parameters)
elif parameters.model == 'dqn':
play_dqn(parameters)
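# Hypothetical usage sketch (not part of the original code): `parameters` must
# at least expose the attributes read in this module (model, seed, lr,
# num_actions, num_processes) plus whatever DoomTrainer(params) expects, e.g.:
#
# from argparse import Namespace
# play(Namespace(model='a2c', seed=1, lr=1e-4, num_actions=3, num_processes=4))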
def play_human(params):
trainer = DoomTrainer(params)
trainer.start_game()
trainer.play_human()
def play_a2c(params):
trainer = DoomTrainer(params)
trainer.start_game()
model = A2C(1, params.num_actions).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=params.lr)
counter = 0
while True:
if counter % 10 == 0:
print("Iteration: ", counter)
train_a2c(params, trainer, model, optimizer)
test_a2c(params, trainer, model)
counter += 1
def play_a3c(params):
trainer = DoomTrainer(params)
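# Keep each A3C worker to a single OpenMP thread so the processes spawned
# below do not oversubscribe the CPU.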
os.environ['OMP_NUM_THREADS'] = '1'
shared_model = A3C(1, trainer.num_actions()).cuda()
shared_model.share_memory()
optimizer = optimizers.SharedAdam(shared_model.parameters(), lr=params.lr)
optimizer.share_memory()
processes = []
process = mp.Process(target=test_a3c, args=(params.num_processes, params, shared_model))
process.start()
for rank in range(0, params.num_processes):
process = mp.Process(target=train_a3c, args=(rank, params, shared_model, optimizer))
process.start()
processes.append(process)
for p in processes:
p.join()
def play_dqn(params):
trainer = DoomTrainer(params)
trainer.start_game()
model = DQN(trainer.num_actions())
softmax_body = SoftmaxBody(T=1)
ai = AI(brain=model, body=softmax_body)
n_steps = NStepProgress(trainer, ai, n_step=10)
memory = ReplayMemory(n_steps=n_steps, capacity=10000)
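# NStepProgress yields 10-step transition chunks from the trainer, and
# ReplayMemory buffers up to 10000 of them; train_dqn presumably samples
# batches from that buffer (see models.dqn.train).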
train_dqn(model, memory, n_steps)
| [] | [] | ["OMP_NUM_THREADS"] | [] | ["OMP_NUM_THREADS"] | python | 1 | 0 | |
legacy/dockerregistry/inventory_test.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package inventory_test
import (
"errors"
"fmt"
"os"
"path/filepath"
"sync"
"testing"
cr "github.com/google/go-containerregistry/pkg/v1/types"
"github.com/stretchr/testify/require"
reg "sigs.k8s.io/k8s-container-image-promoter/legacy/dockerregistry"
"sigs.k8s.io/k8s-container-image-promoter/legacy/json"
"sigs.k8s.io/k8s-container-image-promoter/legacy/stream"
)
type ParseJSONStreamResult struct {
jsons json.Objects
err error
}
func TestReadJSONStream(t *testing.T) {
tests := []struct {
name string
input string
expectedOutput ParseJSONStreamResult
}{
{
name: "Blank input stream",
input: `[]`,
expectedOutput: ParseJSONStreamResult{
json.Objects{},
nil,
},
},
// The order of the maps matters.
{
name: "Simple case",
input: `[
{
"name": "gcr.io/louhi-gke-k8s/addon-resizer"
},
{
"name": "gcr.io/louhi-gke-k8s/pause"
}
]`,
expectedOutput: ParseJSONStreamResult{
json.Objects{
{"name": "gcr.io/louhi-gke-k8s/addon-resizer"},
{"name": "gcr.io/louhi-gke-k8s/pause"},
},
nil,
},
},
// The order of the maps matters.
{
"Expected failure: missing closing brace",
`[
{
"name": "gcr.io/louhi-gke-k8s/addon-resizer"
,
]`,
ParseJSONStreamResult{
nil,
errors.New("yaml: line 4: did not find expected node content"),
},
},
}
// Test only the JSON unmarshalling logic.
for _, test := range tests {
var sr stream.Fake
sr.Bytes = []byte(test.input)
stdout, _, err := sr.Produce()
require.Nil(t, err)
jsons, err := json.Consume(stdout)
defer sr.Close()
if test.expectedOutput.err != nil {
require.NotNil(t, err)
require.Error(t, err, test.expectedOutput.err)
} else {
require.Nil(t, err)
}
require.Equal(t, jsons, test.expectedOutput.jsons)
}
}
func TestParseRegistryManifest(t *testing.T) {
// TODO: Create a function to convert a Manifest to a YAML
// representation, and vice-versa.
//
// TODO: Use property-based testing to test the fidelity of the conversion
// (marshaling/unmarshaling) functions.
tests := []struct {
name string
input string
expectedOutput reg.Manifest
expectedError error
}{
{
"Empty manifest (invalid)",
``,
reg.Manifest{},
fmt.Errorf(`'registries' field cannot be empty`),
},
{
"Stub manifest (`images` field is empty)",
`registries:
- name: gcr.io/bar
service-account: [email protected]
- name: gcr.io/foo
service-account: [email protected]
src: true
images: []
`,
reg.Manifest{
Registries: []reg.RegistryContext{
{
Name: "gcr.io/bar",
ServiceAccount: "[email protected]",
},
{
Name: "gcr.io/foo",
ServiceAccount: "[email protected]",
Src: true,
},
},
Images: []reg.Image{},
},
nil,
},
{
"Basic manifest",
`registries:
- name: gcr.io/bar
service-account: [email protected]
- name: gcr.io/foo
service-account: [email protected]
src: true
images:
- name: agave
dmap:
"sha256:aab34c5841987a1b133388fa9f27e7960c4b1307e2f9147dca407ba26af48a54": ["latest"]
- name: banana
dmap:
"sha256:07353f7b26327f0d933515a22b1de587b040d3d85c464ea299c1b9f242529326": [ "1.8.3" ] # Branches: ['master']
`,
reg.Manifest{
Registries: []reg.RegistryContext{
{
Name: "gcr.io/bar",
ServiceAccount: "[email protected]",
},
{
Name: "gcr.io/foo",
ServiceAccount: "[email protected]",
Src: true,
},
},
Images: []reg.Image{
{
ImageName: "agave",
Dmap: reg.DigestTags{
"sha256:aab34c5841987a1b133388fa9f27e7960c4b1307e2f9147dca407ba26af48a54": {"latest"},
},
},
{
ImageName: "banana",
Dmap: reg.DigestTags{
"sha256:07353f7b26327f0d933515a22b1de587b040d3d85c464ea299c1b9f242529326": {"1.8.3"},
},
},
},
},
nil,
},
{
"Missing src registry in registries (invalid)",
`registries:
- name: gcr.io/bar
service-account: [email protected]
- name: gcr.io/foo
service-account: [email protected]
images:
- name: agave
dmap:
"sha256:aab34c5841987a1b133388fa9f27e7960c4b1307e2f9147dca407ba26af48a54": ["latest"]
- name: banana
dmap:
"sha256:07353f7b26327f0d933515a22b1de587b040d3d85c464ea299c1b9f242529326": [ "1.8.3" ] # Branches: ['master']
`,
reg.Manifest{},
fmt.Errorf("source registry must be set"),
},
}
// Test only the JSON unmarshalling logic.
for _, test := range tests {
b := []byte(test.input)
imageManifest, err := reg.ParseManifestYAML(b)
if test.expectedError != nil {
require.NotNil(t, err)
require.Error(t, err, test.expectedError)
} else {
require.Nil(t, err)
require.Equal(t, imageManifest, test.expectedOutput)
}
}
}
func TestParseThinManifestsFromDir(t *testing.T) {
pwd := bazelTestPath("TestParseThinManifestsFromDir")
tests := []struct {
name string
// "input" is folder name, relative to the location of this source file.
input string
expectedOutput []reg.Manifest
expectedParseError error
}{
{
"No manifests found (invalid)",
"empty",
[]reg.Manifest{},
&os.PathError{
Op: "stat",
Path: filepath.Join(pwd, "empty/images"),
Err: fmt.Errorf("no such file or directory"),
},
},
{
"Singleton (single manifest)",
"singleton",
[]reg.Manifest{
{
Registries: []reg.RegistryContext{
{
Name: "gcr.io/foo-staging",
ServiceAccount: "[email protected]",
Src: true,
},
{
Name: "us.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
{
Name: "eu.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
{
Name: "asia.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
},
Images: []reg.Image{
{
ImageName: "foo-controller",
Dmap: reg.DigestTags{
"sha256:c3d310f4741b3642497da8826e0986db5e02afc9777a2b8e668c8e41034128c1": {"1.0"},
},
},
},
Filepath: "manifests/a/promoter-manifest.yaml",
},
},
nil,
},
{
"Multiple (with 'rebase')",
"multiple-rebases",
[]reg.Manifest{
{
Registries: []reg.RegistryContext{
{
Name: "gcr.io/foo-staging",
ServiceAccount: "[email protected]",
Src: true,
},
{
Name: "us.gcr.io/some-prod/foo",
ServiceAccount: "[email protected]",
},
{
Name: "eu.gcr.io/some-prod/foo",
ServiceAccount: "[email protected]",
},
{
Name: "asia.gcr.io/some-prod/foo",
ServiceAccount: "[email protected]",
},
},
Images: []reg.Image{
{
ImageName: "foo-controller",
Dmap: reg.DigestTags{
"sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": {"1.0"},
},
},
},
Filepath: "manifests/a/promoter-manifest.yaml",
},
{
Registries: []reg.RegistryContext{
{
Name: "gcr.io/bar-staging",
ServiceAccount: "[email protected]",
Src: true,
},
{
Name: "us.gcr.io/some-prod/bar",
ServiceAccount: "[email protected]",
},
{
Name: "eu.gcr.io/some-prod/bar",
ServiceAccount: "[email protected]",
},
{
Name: "asia.gcr.io/some-prod/bar",
ServiceAccount: "[email protected]",
},
},
Images: []reg.Image{
{
ImageName: "bar-controller",
Dmap: reg.DigestTags{
"sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb": {"1.0"},
},
},
},
Filepath: "manifests/b/promoter-manifest.yaml",
},
},
nil,
},
{
"Basic (multiple thin manifests)",
"basic-thin",
[]reg.Manifest{
{
Registries: []reg.RegistryContext{
{
Name: "gcr.io/foo-staging",
ServiceAccount: "[email protected]",
Src: true,
},
{
Name: "us.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
{
Name: "eu.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
{
Name: "asia.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
},
Images: []reg.Image{
{
ImageName: "foo-controller",
Dmap: reg.DigestTags{
"sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": {"1.0"},
},
},
},
Filepath: "manifests/a/promoter-manifest.yaml",
},
{
Registries: []reg.RegistryContext{
{
Name: "gcr.io/bar-staging",
ServiceAccount: "[email protected]",
Src: true,
},
{
Name: "us.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
{
Name: "eu.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
{
Name: "asia.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
},
Images: []reg.Image{
{
ImageName: "bar-controller",
Dmap: reg.DigestTags{
"sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb": {"1.0"},
},
},
},
Filepath: "manifests/b/promoter-manifest.yaml",
},
{
Registries: []reg.RegistryContext{
{
Name: "gcr.io/cat-staging",
ServiceAccount: "[email protected]",
Src: true,
},
{
Name: "us.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
{
Name: "eu.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
{
Name: "asia.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
},
Images: []reg.Image{
{
ImageName: "cat-controller",
Dmap: reg.DigestTags{
"sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc": {"1.0"},
},
},
},
Filepath: "manifests/c/promoter-manifest.yaml",
},
{
Registries: []reg.RegistryContext{
{
Name: "gcr.io/qux-staging",
ServiceAccount: "[email protected]",
Src: true,
},
{
Name: "us.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
{
Name: "eu.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
{
Name: "asia.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
},
Images: []reg.Image{
{
ImageName: "qux-controller",
Dmap: reg.DigestTags{
"sha256:0000000000000000000000000000000000000000000000000000000000000000": {"1.0"},
},
},
},
Filepath: "manifests/d/promoter-manifest.yaml",
},
},
nil,
},
}
for _, test := range tests {
fixtureDir := bazelTestPath("TestParseThinManifestsFromDir", test.input)
// Fixup expected filepaths to match bazel's testing directory.
expectedModified := test.expectedOutput[:0]
for _, mfest := range test.expectedOutput {
mfest.Filepath = filepath.Join(fixtureDir, mfest.Filepath)
// SA4010: this result of append is never used, except maybe in other appends
// nolint: staticcheck
expectedModified = append(expectedModified, mfest)
}
got, errParse := reg.ParseThinManifestsFromDir(fixtureDir)
if test.expectedParseError != nil {
require.NotNil(t, errParse)
require.Error(t, errParse, test.expectedParseError)
continue
}
// Clear private fields (redundant data) that are calculated on-the-fly
// (it's too verbose to include them here; besides, it's not what we're
// testing).
gotModified := got[:0]
for _, mfest := range got {
mfest.SrcRegistry = nil
gotModified = append(gotModified, mfest)
}
require.Nil(t, errParse)
require.Equal(t, gotModified, test.expectedOutput)
}
}
func TestValidateThinManifestsFromDir(t *testing.T) {
shouldBeValid := []string{
"singleton",
"multiple-rebases",
"overlapping-src-registries",
"overlapping-destination-vertices-same-digest",
"malformed-directory-tree-structure-bad-prefix-is-ignored",
}
pwd := bazelTestPath("TestValidateThinManifestsFromDir")
for _, testInput := range shouldBeValid {
fixtureDir := filepath.Join(pwd, "valid", testInput)
mfests, errParse := reg.ParseThinManifestsFromDir(fixtureDir)
require.Nil(t, errParse)
_, edgeErr := reg.ToPromotionEdges(mfests)
require.Nil(t, edgeErr)
}
shouldBeInvalid := []struct {
dirName string
expectedParseError error
expectedEdgeError error
}{
{
"empty",
&os.PathError{
Op: "stat",
Path: filepath.Join(pwd, "invalid/empty/images"),
Err: fmt.Errorf("no such file or directory"),
},
nil,
},
{
"overlapping-destination-vertices-different-digest",
nil,
fmt.Errorf("overlapping edges detected"),
},
{
"malformed-directory-tree-structure",
fmt.Errorf(
"corresponding file %q does not exist",
filepath.Join(pwd, "invalid/malformed-directory-tree-structure/images/b/images.yaml"),
),
nil,
},
{
"malformed-directory-tree-structure-nested",
fmt.Errorf(
"unexpected manifest path %q",
filepath.Join(pwd, "invalid/malformed-directory-tree-structure-nested/manifests/b/c/promoter-manifest.yaml"),
),
nil,
},
}
for _, test := range shouldBeInvalid {
fixtureDir := bazelTestPath("TestValidateThinManifestsFromDir", "invalid", test.dirName)
// It could be that a manifest, taken individually, failed on its own,
// before we even get to ValidateThinManifestsFromDir(). So handle these
// cases as well.
mfests, errParse := reg.ParseThinManifestsFromDir(fixtureDir)
if test.expectedParseError != nil {
require.NotNil(t, errParse)
require.Error(t, errParse, test.expectedParseError)
} else {
require.Nil(t, errParse)
}
_, edgeErr := reg.ToPromotionEdges(mfests)
if test.expectedEdgeError != nil {
require.NotNil(t, edgeErr)
require.Error(t, edgeErr, test.expectedEdgeError)
} else {
require.Nil(t, edgeErr)
}
}
}
func TestParseImageDigest(t *testing.T) {
shouldBeValid := []string{
`sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef`,
`sha256:0000000000000000000000000000000000000000000000000000000000000000`,
`sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff`,
`sha256:3243f6a8885a308d313198a2e03707344a4093822299f31d0082efa98ec4e6c8`,
}
for _, testInput := range shouldBeValid {
d := reg.Digest(testInput)
require.NotEmpty(t, d)
err := reg.ValidateDigest(d)
require.Nil(t, err)
}
shouldBeInvalid := []string{
// Empty.
``,
// Too short.
`sha256:0`,
// Too long.
`sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef1`,
// Invalid character 'x'.
`sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdex`,
// No prefix 'sha256'.
`0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef`,
}
for _, testInput := range shouldBeInvalid {
d := reg.Digest(testInput)
err := reg.ValidateDigest(d)
require.NotNil(t, err)
}
}
func TestParseImageTag(t *testing.T) {
shouldBeValid := []string{
`a`,
`_`,
`latest`,
`_latest`,
// Awkward, but valid.
`_____----hello........`,
// Longest tag is 128 chars.
`this-is-exactly-128-chars-this-is-exactly-128-chars-this-is-exactly-128-chars-this-is-exactly-128-chars-this-is-exactly-128-char`,
}
for _, testInput := range shouldBeValid {
tag := reg.Tag(testInput)
err := reg.ValidateTag(tag)
require.Nil(t, err)
}
shouldBeInvalid := []string{
// Empty.
``,
// Does not begin with an ASCII word character.
`.`,
// Does not begin with an ASCII word character.
`-`,
// Unicode not allowed.
`안녕`,
// No spaces allowed.
`a b`,
// Too long (>128 ASCII chars).
`this-is-longer-than-128-chars-this-is-longer-than-128-chars-this-is-longer-than-128-chars-this-is-longer-than-128-chars-this-is-l`,
}
for _, testInput := range shouldBeInvalid {
tag := reg.Tag(testInput)
err := reg.ValidateTag(tag)
require.NotNil(t, err)
}
}
func TestValidateRegistryImagePath(t *testing.T) {
shouldBeValid := []string{
`gcr.io/foo/bar`,
`k8s.gcr.io/foo`,
`staging-k8s.gcr.io/foo`,
`staging-k8s.gcr.io/foo/bar/nested/path/image`,
}
for _, testInput := range shouldBeValid {
rip := reg.RegistryImagePath(testInput)
require.NotEmpty(t, rip)
err := reg.ValidateRegistryImagePath(rip)
require.Nil(t, err)
}
shouldBeInvalid := []string{
// Empty.
``,
// No dot.
`gcrio`,
// Too many dots.
`gcr..io`,
// Leading dot.
`.gcr.io`,
// Trailing dot.
`gcr.io.`,
// Too many slashes.
`gcr.io//foo`,
// Leading slash.
`/gcr.io`,
// Trailing slash (1).
`gcr.io/`,
// Trailing slash (2).
`gcr.io/foo/`,
}
for _, testInput := range shouldBeInvalid {
rip := reg.RegistryImagePath(testInput)
err := reg.ValidateRegistryImagePath(rip)
require.NotNil(t, err)
}
}
func TestSplitRegistryImagePath(t *testing.T) {
knownRegistryNames := []reg.RegistryName{
`gcr.io/foo`,
`us.gcr.io/foo`,
`k8s.gcr.io`,
`eu.gcr.io/foo/d`,
}
tests := []struct {
name string
input reg.RegistryImagePath
expectedRegistryName reg.RegistryName
expectedImageName reg.ImageName
expectedErr error
}{
{
`basic gcr.io`,
`gcr.io/foo/a/b/c`,
`gcr.io/foo`,
`a/b/c`,
nil,
},
{
`regional GCR`,
`us.gcr.io/foo/a/b/c`,
`us.gcr.io/foo`,
`a/b/c`,
nil,
},
{
`regional GCR (extra level of nesting)`,
`eu.gcr.io/foo/d/e/f`,
`eu.gcr.io/foo/d`,
`e/f`,
nil,
},
{
`vanity GCR`,
`k8s.gcr.io/a/b/c`,
`k8s.gcr.io`,
`a/b/c`,
nil,
},
}
for _, test := range tests {
rName, iName, err := reg.SplitRegistryImagePath(test.input, knownRegistryNames)
if test.expectedErr != nil {
require.NotNil(t, err)
require.Error(t, err, test.expectedErr)
} else {
require.Nil(t, err)
}
require.Equal(t, rName, test.expectedRegistryName)
require.Equal(t, iName, test.expectedImageName)
}
}
func TestSplitByKnownRegistries(t *testing.T) {
knownRegistryNames := []reg.RegistryName{
// See
// https://github.com/kubernetes-sigs/k8s-container-image-promoter/issues/188.
`us.gcr.io/k8s-artifacts-prod/kube-state-metrics`,
`us.gcr.io/k8s-artifacts-prod/metrics-server`,
`us.gcr.io/k8s-artifacts-prod`,
}
knownRegistryContexts := make([]reg.RegistryContext, 0)
for _, knownRegistryName := range knownRegistryNames {
rc := reg.RegistryContext{}
rc.Name = knownRegistryName
knownRegistryContexts = append(knownRegistryContexts, rc)
}
tests := []struct {
name string
input reg.RegistryName
expectedRegistryName reg.RegistryName
expectedImageName reg.ImageName
expectedErr error
}{
{
`image at toplevel root path`,
`us.gcr.io/k8s-artifacts-prod/kube-state-metrics`,
`us.gcr.io/k8s-artifacts-prod`,
`kube-state-metrics`,
nil,
},
{
`unclean split (known repo cuts into middle of image name)`,
`us.gcr.io/k8s-artifacts-prod/metrics-server-amd64`,
`us.gcr.io/k8s-artifacts-prod`,
`metrics-server-amd64`,
nil,
},
}
for _, test := range tests {
rootReg, imageName, err := reg.SplitByKnownRegistries(test.input, knownRegistryContexts)
if test.expectedErr != nil {
require.NotNil(t, err)
require.Error(t, err, test.expectedErr)
} else {
require.Nil(t, err)
}
require.Equal(t, rootReg, test.expectedRegistryName)
require.Equal(t, imageName, test.expectedImageName)
}
}
func TestCommandGeneration(t *testing.T) {
destRC := reg.RegistryContext{
Name: "gcr.io/foo",
ServiceAccount: "robot",
}
var (
srcRegName reg.RegistryName = "gcr.io/bar"
srcImageName reg.ImageName = "baz"
destImageName reg.ImageName = "baz"
digest reg.Digest = "sha256:000"
tag reg.Tag = "1.0"
tp reg.TagOp
)
t.Run(
"GetDeleteCmd",
func(t *testing.T) {
got := reg.GetDeleteCmd(
destRC,
true,
destImageName,
digest,
false)
expected := []string{
"gcloud",
"--account=robot",
"container",
"images",
"delete",
reg.ToFQIN(destRC.Name, destImageName, digest),
"--format=json",
}
require.Equal(t, got, expected)
got = reg.GetDeleteCmd(
destRC,
false,
destImageName,
digest,
false,
)
expected = []string{
"gcloud",
"container",
"images",
"delete",
reg.ToFQIN(destRC.Name, destImageName, digest),
"--format=json",
}
require.Equal(t, got, expected)
},
)
t.Run(
"GetWriteCmd (Delete)",
func(t *testing.T) {
tp = reg.Delete
got := reg.GetWriteCmd(
destRC,
true,
srcRegName,
srcImageName,
destImageName,
digest,
tag,
tp,
)
expected := []string{
"gcloud",
"--account=robot",
"--quiet",
"container",
"images",
"untag",
reg.ToPQIN(destRC.Name, destImageName, tag),
}
require.Equal(t, got, expected)
got = reg.GetWriteCmd(
destRC,
false,
srcRegName,
srcImageName,
destImageName,
digest,
tag,
tp,
)
expected = []string{
"gcloud",
"--quiet",
"container",
"images",
"untag",
reg.ToPQIN(destRC.Name, destImageName, tag),
}
require.Equal(t, got, expected)
},
)
}
// TestReadRegistries tests reading images and tags from a registry.
func TestReadRegistries(t *testing.T) {
const fakeRegName reg.RegistryName = "gcr.io/foo"
tests := []struct {
name string
input map[string]string
expectedOutput reg.RegInvImage
}{
{
"Only toplevel repos (no child repos)",
map[string]string{
"gcr.io/foo": `{
"child": [
"addon-resizer",
"pause"
],
"manifest": {},
"name": "foo",
"tags": []
}`,
"gcr.io/foo/addon-resizer": `{
"child": [],
"manifest": {
"sha256:b5b2d91319f049143806baeacc886f82f621e9a2550df856b11b5c22db4570a7": {
"imageSizeBytes": "12875324",
"layerId": "",
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"tag": [
"latest"
],
"timeCreatedMs": "1501774217070",
"timeUploadedMs": "1552917295327"
},
"sha256:0519a83e8f217e33dd06fe7a7347444cfda5e2e29cf52aaa24755999cb104a4d": {
"imageSizeBytes": "12875324",
"layerId": "",
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"tag": [
"1.0"
],
"timeCreatedMs": "1501774217070",
"timeUploadedMs": "1552917295327"
}
},
"name": "foo/addon-resizer",
"tags": [
"latest",
"1.0"
]
}`,
"gcr.io/foo/pause": `{
"child": [],
"manifest": {
"sha256:06fdf10aae2eeeac5a82c213e4693f82ab05b3b09b820fce95a7cac0bbdad534": {
"imageSizeBytes": "12875324",
"layerId": "",
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"tag": [
"v1.2.3"
],
"timeCreatedMs": "1501774217070",
"timeUploadedMs": "1552917295327"
}
},
"name": "foo/pause",
"tags": [
"v1.2.3"
]
}`,
},
reg.RegInvImage{
"addon-resizer": {
"sha256:b5b2d91319f049143806baeacc886f82f621e9a2550df856b11b5c22db4570a7": {"latest"},
"sha256:0519a83e8f217e33dd06fe7a7347444cfda5e2e29cf52aaa24755999cb104a4d": {"1.0"},
},
"pause": {
"sha256:06fdf10aae2eeeac5a82c213e4693f82ab05b3b09b820fce95a7cac0bbdad534": {"v1.2.3"},
},
},
},
{
"Recursive repos (child repos)",
map[string]string{
"gcr.io/foo": `{
"child": [
"addon-resizer",
"pause"
],
"manifest": {},
"name": "foo",
"tags": []
}`,
"gcr.io/foo/addon-resizer": `{
"child": [],
"manifest": {
"sha256:b5b2d91319f049143806baeacc886f82f621e9a2550df856b11b5c22db4570a7": {
"imageSizeBytes": "12875324",
"layerId": "",
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"tag": [
"latest"
],
"timeCreatedMs": "1501774217070",
"timeUploadedMs": "1552917295327"
},
"sha256:0519a83e8f217e33dd06fe7a7347444cfda5e2e29cf52aaa24755999cb104a4d": {
"imageSizeBytes": "12875324",
"layerId": "",
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"tag": [
"1.0"
],
"timeCreatedMs": "1501774217070",
"timeUploadedMs": "1552917295327"
}
},
"name": "foo/addon-resizer",
"tags": [
"latest",
"1.0"
]
}`,
"gcr.io/foo/pause": `{
"child": [
"childLevel1"
],
"manifest": {
"sha256:06fdf10aae2eeeac5a82c213e4693f82ab05b3b09b820fce95a7cac0bbdad534": {
"imageSizeBytes": "12875324",
"layerId": "",
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"tag": [
"v1.2.3"
],
"timeCreatedMs": "1501774217070",
"timeUploadedMs": "1552917295327"
}
},
"name": "foo/pause",
"tags": [
"v1.2.3"
]
}`,
"gcr.io/foo/pause/childLevel1": `{
"child": [
"childLevel2"
],
"manifest": {
"sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": {
"imageSizeBytes": "12875324",
"layerId": "",
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"tag": [
"aaa"
],
"timeCreatedMs": "1501774217070",
"timeUploadedMs": "1552917295327"
}
},
"name": "foo/pause/childLevel1",
"tags": [
"aaa"
]
}`,
"gcr.io/foo/pause/childLevel1/childLevel2": `{
"child": [],
"manifest": {
"sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff": {
"imageSizeBytes": "12875324",
"layerId": "",
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"tag": [
"fff"
],
"timeCreatedMs": "1501774217070",
"timeUploadedMs": "1552917295327"
}
},
"name": "foo/pause/childLevel1/childLevel2",
"tags": [
"fff"
]
}`,
},
reg.RegInvImage{
"addon-resizer": {
"sha256:b5b2d91319f049143806baeacc886f82f621e9a2550df856b11b5c22db4570a7": {"latest"},
"sha256:0519a83e8f217e33dd06fe7a7347444cfda5e2e29cf52aaa24755999cb104a4d": {"1.0"},
},
"pause": {
"sha256:06fdf10aae2eeeac5a82c213e4693f82ab05b3b09b820fce95a7cac0bbdad534": {"v1.2.3"},
},
"pause/childLevel1": {
"sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": {"aaa"},
},
"pause/childLevel1/childLevel2": reg.DigestTags{
"sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff": {"fff"},
},
},
},
}
for _, test := range tests {
// Destination registry is a placeholder, because ReadImageNames acts on
// 2 registries (src and dest) at once.
rcs := []reg.RegistryContext{
{
Name: fakeRegName,
ServiceAccount: "robot",
},
}
sc := reg.SyncContext{
RegistryContexts: rcs,
Inv: map[reg.RegistryName]reg.RegInvImage{fakeRegName: nil},
DigestMediaType: make(reg.DigestMediaType),
DigestImageSize: make(reg.DigestImageSize),
}
// test is used to pin the "test" variable from the outer "range"
// scope (see scopelint).
test := test
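// (Without this per-iteration copy, the mkFakeStream1 closure below could
// observe a later iteration's value under pre-Go 1.22 loop-variable
// semantics.)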
mkFakeStream1 := func(sc *reg.SyncContext, rc reg.RegistryContext) stream.Producer {
var sr stream.Fake
_, domain, repoPath := reg.GetTokenKeyDomainRepoPath(rc.Name)
fakeHTTPBody, ok := test.input[domain+"/"+repoPath]
require.True(t, ok)
sr.Bytes = []byte(fakeHTTPBody)
return &sr
}
sc.ReadRegistries(rcs, true, mkFakeStream1)
got := sc.Inv[fakeRegName]
require.Equal(t, got, test.expectedOutput)
}
}
// TestReadGManifestLists tests reading ManifestList information from GCR.
func TestReadGManifestLists(t *testing.T) {
const fakeRegName reg.RegistryName = "gcr.io/foo"
tests := []struct {
name string
input map[string]string
expectedOutput reg.ParentDigest
}{
{
"Basic example",
map[string]string{
"gcr.io/foo/someImage": `{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
"manifests": [
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 739,
"digest": "sha256:0bd88bcba94f800715fca33ffc4bde430646a7c797237313cbccdcdef9f80f2d",
"platform": {
"architecture": "amd64",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 739,
"digest": "sha256:0ad4f92011b2fa5de88a6e6a2d8b97f38371246021c974760e5fc54b9b7069e5",
"platform": {
"architecture": "s390x",
"os": "linux"
}
}
]
}`,
},
reg.ParentDigest{
"sha256:0bd88bcba94f800715fca33ffc4bde430646a7c797237313cbccdcdef9f80f2d": "sha256:0000000000000000000000000000000000000000000000000000000000000000",
"sha256:0ad4f92011b2fa5de88a6e6a2d8b97f38371246021c974760e5fc54b9b7069e5": "sha256:0000000000000000000000000000000000000000000000000000000000000000",
},
},
}
for _, test := range tests {
// Destination registry is a placeholder, because ReadImageNames acts on
// 2 registries (src and dest) at once.
rcs := []reg.RegistryContext{
{
Name: fakeRegName,
ServiceAccount: "robot",
},
}
sc := reg.SyncContext{
RegistryContexts: rcs,
Inv: map[reg.RegistryName]reg.RegInvImage{
"gcr.io/foo": {
"someImage": reg.DigestTags{
"sha256:0000000000000000000000000000000000000000000000000000000000000000": {"1.0"},
},
},
},
DigestMediaType: reg.DigestMediaType{
"sha256:0000000000000000000000000000000000000000000000000000000000000000": cr.DockerManifestList,
},
DigestImageSize: make(reg.DigestImageSize),
ParentDigest: make(reg.ParentDigest),
}
// test is used to pin the "test" variable from the outer "range"
// scope (see scopelint).
test := test
mkFakeStream1 := func(sc *reg.SyncContext, gmlc *reg.GCRManifestListContext) stream.Producer {
var sr stream.Fake
_, domain, repoPath := reg.GetTokenKeyDomainRepoPath(gmlc.RegistryContext.Name)
fakeHTTPBody, ok := test.input[domain+"/"+repoPath+"/"+string(gmlc.ImageName)]
require.True(t, ok)
sr.Bytes = []byte(fakeHTTPBody)
return &sr
}
sc.ReadGCRManifestLists(mkFakeStream1)
got := sc.ParentDigest
require.Equal(t, got, test.expectedOutput)
}
}
func TestGetTokenKeyDomainRepoPath(t *testing.T) {
type TokenKeyDomainRepoPath [3]string
tests := []struct {
name string
input reg.RegistryName
expected TokenKeyDomainRepoPath
}{
{
"basic",
"gcr.io/foo/bar",
[3]string{"gcr.io/foo", "gcr.io", "foo/bar"},
},
}
for _, test := range tests {
test := test
t.Run(
test.name,
func(t *testing.T) {
tokenKey, domain, repoPath := reg.GetTokenKeyDomainRepoPath(test.input)
require.Equal(t, tokenKey, test.expected[0])
require.Equal(t, domain, test.expected[1])
require.Equal(t, repoPath, test.expected[2])
},
)
}
}
func TestSetManipulationsRegistryInventories(t *testing.T) {
tests := []struct {
name string
input1 reg.RegInvImage
input2 reg.RegInvImage
op func(a, b reg.RegInvImage) reg.RegInvImage
expectedOutput reg.RegInvImage
}{
{
"Set Minus",
reg.RegInvImage{
"foo": {
"sha256:abc": {"1.0", "latest"},
},
"bar": {
"sha256:def": {"0.9"},
},
},
reg.RegInvImage{
"foo": {
"sha256:abc": {"1.0", "latest"},
},
"bar": {
"sha256:def": {"0.9"},
},
},
reg.RegInvImage.Minus,
reg.RegInvImage{},
},
{
"Set Union",
reg.RegInvImage{
"foo": {
"sha256:abc": {"1.0", "latest"},
},
"bar": {
"sha256:def": {"0.9"},
},
},
reg.RegInvImage{
"apple": {
"sha256:abc": {"1.0", "latest"},
},
"banana": {
"sha256:def": {"0.9"},
},
},
reg.RegInvImage.Union,
reg.RegInvImage{
"foo": {
"sha256:abc": {"1.0", "latest"},
},
"bar": {
"sha256:def": {"0.9"},
},
"apple": {
"sha256:abc": {"1.0", "latest"},
},
"banana": {
"sha256:def": {"0.9"},
},
},
},
}
for _, test := range tests {
got := test.op(test.input1, test.input2)
require.Equal(t, got, test.expectedOutput)
}
}
func TestSetManipulationsTags(t *testing.T) {
tests := []struct {
name string
input1 reg.TagSlice
input2 reg.TagSlice
op func(a, b reg.TagSlice) reg.TagSet
expectedOutput reg.TagSet
}{
{
"Set Minus (both blank)",
reg.TagSlice{},
reg.TagSlice{},
reg.TagSlice.Minus,
reg.TagSet{},
},
{
"Set Minus (first blank)",
reg.TagSlice{},
reg.TagSlice{"a"},
reg.TagSlice.Minus,
reg.TagSet{},
},
{
"Set Minus (second blank)",
reg.TagSlice{"a", "b"},
reg.TagSlice{},
reg.TagSlice.Minus,
reg.TagSet{"a": nil, "b": nil},
},
{
"Set Minus",
reg.TagSlice{"a", "b"},
reg.TagSlice{"b"},
reg.TagSlice.Minus,
reg.TagSet{"a": nil},
},
{
"Set Union (both blank)",
reg.TagSlice{},
reg.TagSlice{},
reg.TagSlice.Union,
reg.TagSet{},
},
{
"Set Union (first blank)",
reg.TagSlice{},
reg.TagSlice{"a"},
reg.TagSlice.Union,
reg.TagSet{"a": nil},
},
{
"Set Union (second blank)",
reg.TagSlice{"a"},
reg.TagSlice{},
reg.TagSlice.Union,
reg.TagSet{"a": nil},
},
{
"Set Union",
reg.TagSlice{"a", "c"},
reg.TagSlice{"b", "d"},
reg.TagSlice.Union,
reg.TagSet{"a": nil, "b": nil, "c": nil, "d": nil},
},
{
"Set Intersection (no intersection)",
reg.TagSlice{"a"},
reg.TagSlice{"b"},
reg.TagSlice.Intersection,
reg.TagSet{},
},
{
"Set Intersection (some intersection)",
reg.TagSlice{"a", "b"},
reg.TagSlice{"b", "c"},
reg.TagSlice.Intersection,
reg.TagSet{"b": nil},
},
}
for _, test := range tests {
got := test.op(test.input1, test.input2)
require.Equal(t, got, test.expectedOutput)
}
}
func TestSetManipulationsRegInvImageTag(t *testing.T) {
tests := []struct {
name string
input1 reg.RegInvImageTag
input2 reg.RegInvImageTag
op func(a, b reg.RegInvImageTag) reg.RegInvImageTag
expectedOutput reg.RegInvImageTag
}{
{
"Set Minus (both blank)",
reg.RegInvImageTag{},
reg.RegInvImageTag{},
reg.RegInvImageTag.Minus,
reg.RegInvImageTag{},
},
{
"Set Minus (first blank)",
reg.RegInvImageTag{},
reg.RegInvImageTag{
reg.ImageTag{ImageName: "pear", Tag: "latest"}: "123",
},
reg.RegInvImageTag.Minus,
reg.RegInvImageTag{},
},
{
"Set Minus (second blank)",
reg.RegInvImageTag{
reg.ImageTag{
ImageName: "pear",
Tag: "latest",
}: "123",
},
reg.RegInvImageTag{},
reg.RegInvImageTag.Minus,
reg.RegInvImageTag{
reg.ImageTag{
ImageName: "pear",
Tag: "latest",
}: "123",
},
},
{
"Set Intersection (both blank)",
reg.RegInvImageTag{},
reg.RegInvImageTag{},
reg.RegInvImageTag.Intersection,
reg.RegInvImageTag{},
},
{
"Set Intersection (first blank)",
reg.RegInvImageTag{},
reg.RegInvImageTag{
reg.ImageTag{ImageName: "pear", Tag: "latest"}: "123",
},
reg.RegInvImageTag.Intersection,
reg.RegInvImageTag{},
},
{
"Set Intersection (second blank)",
reg.RegInvImageTag{
reg.ImageTag{ImageName: "pear", Tag: "latest"}: "123",
},
reg.RegInvImageTag{},
reg.RegInvImageTag.Intersection,
reg.RegInvImageTag{},
},
{
"Set Intersection (no intersection)",
reg.RegInvImageTag{
reg.ImageTag{ImageName: "pear", Tag: "latest"}: "123",
},
reg.RegInvImageTag{
reg.ImageTag{ImageName: "pear", Tag: "1.0"}: "123",
},
reg.RegInvImageTag.Intersection,
reg.RegInvImageTag{},
},
{
"Set Intersection (some intersection)",
reg.RegInvImageTag{
reg.ImageTag{ImageName: "pear", Tag: "latest"}: "this-is-kept",
reg.ImageTag{ImageName: "pear", Tag: "1.0"}: "123",
},
reg.RegInvImageTag{
reg.ImageTag{ImageName: "pear", Tag: "latest"}: "this-is-lost",
reg.ImageTag{ImageName: "foo", Tag: "2.0"}: "def",
},
// The intersection code throws out the second value, because it
// treats a Map as a Set (and doesn't care about preserving
// information for the key's value).
reg.RegInvImageTag.Intersection,
reg.RegInvImageTag{
reg.ImageTag{ImageName: "pear", Tag: "latest"}: "this-is-kept",
},
},
}
for _, test := range tests {
got := test.op(test.input1, test.input2)
require.Equal(t, got, test.expectedOutput)
}
}
func TestToPromotionEdges(t *testing.T) {
srcRegName := reg.RegistryName("gcr.io/foo")
destRegName := reg.RegistryName("gcr.io/bar")
destRegName2 := reg.RegistryName("gcr.io/cat")
destRC := reg.RegistryContext{
Name: destRegName,
ServiceAccount: "robot",
}
destRC2 := reg.RegistryContext{
Name: destRegName2,
ServiceAccount: "robot",
}
srcRC := reg.RegistryContext{
Name: srcRegName,
ServiceAccount: "robot",
Src: true,
}
registries1 := []reg.RegistryContext{destRC, srcRC}
registries2 := []reg.RegistryContext{destRC, srcRC, destRC2}
sc := reg.SyncContext{
Inv: reg.MasterInventory{
"gcr.io/foo": reg.RegInvImage{
"a": {
"sha256:000": {"0.9"},
},
"c": {
"sha256:222": {"2.0"},
"sha256:333": {"3.0"},
},
},
"gcr.io/bar": {
"a": {
"sha256:000": {"0.9"},
},
"b": {
"sha256:111": {},
},
"c": {
"sha256:222": {"2.0"},
"sha256:333": {"3.0"},
},
},
"gcr.io/cat": {
"a": {
"sha256:000": {"0.9"},
},
"c": {
"sha256:222": {"2.0"},
"sha256:333": {"3.0"},
},
},
},
}
tests := []struct {
name string
input []reg.Manifest
expectedInitial map[reg.PromotionEdge]interface{}
expectedInitialErr error
expectedFiltered map[reg.PromotionEdge]interface{}
expectedFilteredClean bool
}{
{
"Basic case (1 new edge; already promoted)",
[]reg.Manifest{
{
Registries: registries1,
Images: []reg.Image{
{
ImageName: "a",
Dmap: reg.DigestTags{
"sha256:000": {"0.9"},
},
},
},
SrcRegistry: &srcRC,
},
},
map[reg.PromotionEdge]interface{}{
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
Digest: "sha256:000",
DstRegistry: destRC,
DstImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
}: nil,
},
nil,
make(map[reg.PromotionEdge]interface{}),
true,
},
{
"Basic case (2 new edges; already promoted)",
[]reg.Manifest{
{
Registries: registries2,
Images: []reg.Image{
{
ImageName: "a",
Dmap: reg.DigestTags{
"sha256:000": {"0.9"},
},
},
},
SrcRegistry: &srcRC,
},
},
map[reg.PromotionEdge]interface{}{
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
Digest: "sha256:000",
DstRegistry: destRC,
DstImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
}: nil,
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
Digest: "sha256:000",
DstRegistry: destRC2,
DstImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
}: nil,
},
nil,
make(map[reg.PromotionEdge]interface{}),
true,
},
{
"Tag move (tag swap image c:2.0 and c:3.0)",
[]reg.Manifest{
{
Registries: registries2,
Images: []reg.Image{
{
ImageName: "c",
Dmap: reg.DigestTags{
"sha256:222": {"3.0"},
"sha256:333": {"2.0"},
},
},
},
SrcRegistry: &srcRC,
},
},
map[reg.PromotionEdge]interface{}{
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "c",
Tag: "2.0",
},
Digest: "sha256:333",
DstRegistry: destRC,
DstImageTag: reg.ImageTag{
ImageName: "c",
Tag: "2.0",
},
}: nil,
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "c",
Tag: "3.0",
},
Digest: "sha256:222",
DstRegistry: destRC,
DstImageTag: reg.ImageTag{
ImageName: "c",
Tag: "3.0",
},
}: nil,
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "c",
Tag: "2.0",
},
Digest: "sha256:333",
DstRegistry: destRC2,
DstImageTag: reg.ImageTag{
ImageName: "c",
Tag: "2.0",
},
}: nil,
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "c",
Tag: "3.0",
},
Digest: "sha256:222",
DstRegistry: destRC2,
DstImageTag: reg.ImageTag{
ImageName: "c",
Tag: "3.0",
},
}: nil,
},
nil,
make(map[reg.PromotionEdge]interface{}),
false,
},
}
for _, test := range tests {
// Finalize Manifests.
for i := range test.input {
require.Nil(t, test.input[i].Finalize())
}
got, gotErr := reg.ToPromotionEdges(test.input)
if test.expectedInitialErr != nil {
require.NotNil(t, gotErr)
require.Error(t, gotErr, test.expectedInitialErr)
}
require.Equal(t, got, test.expectedInitial)
got, gotClean := sc.GetPromotionCandidates(got)
require.Equal(t, got, test.expectedFiltered)
require.Equal(t, gotClean, test.expectedFilteredClean)
}
}
func TestCheckOverlappingEdges(t *testing.T) {
srcRegName := reg.RegistryName("gcr.io/foo")
destRegName := reg.RegistryName("gcr.io/bar")
destRC := reg.RegistryContext{
Name: destRegName,
ServiceAccount: "robot",
}
srcRC := reg.RegistryContext{
Name: srcRegName,
ServiceAccount: "robot",
Src: true,
}
tests := []struct {
name string
input map[reg.PromotionEdge]interface{}
expected map[reg.PromotionEdge]interface{}
expectedErr error
}{
{
"Basic case (0 edges)",
make(map[reg.PromotionEdge]interface{}),
make(map[reg.PromotionEdge]interface{}),
nil,
},
{
"Basic case (singleton edge, no overlapping edges)",
map[reg.PromotionEdge]interface{}{
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
Digest: "sha256:000",
DstRegistry: destRC,
DstImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
}: nil,
},
map[reg.PromotionEdge]interface{}{
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
Digest: "sha256:000",
DstRegistry: destRC,
DstImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
}: nil,
},
nil,
},
{ // nolint: dupl
"Basic case (two edges, no overlapping edges)",
map[reg.PromotionEdge]interface{}{
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
Digest: "sha256:000",
DstRegistry: destRC,
DstImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
}: nil,
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "b",
Tag: "0.9",
},
Digest: "sha256:111",
DstRegistry: destRC,
DstImageTag: reg.ImageTag{
ImageName: "b",
Tag: "0.9",
},
}: nil,
},
map[reg.PromotionEdge]interface{}{
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
Digest: "sha256:000",
DstRegistry: destRC,
DstImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
}: nil,
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "b",
Tag: "0.9",
},
Digest: "sha256:111",
DstRegistry: destRC,
DstImageTag: reg.ImageTag{
ImageName: "b",
Tag: "0.9",
},
}: nil,
},
nil,
},
{
"Basic case (two edges, overlapped)",
map[reg.PromotionEdge]interface{}{
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
Digest: "sha256:000",
DstRegistry: destRC,
DstImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
}: nil,
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "b",
Tag: "0.9",
},
Digest: "sha256:111",
DstRegistry: destRC,
DstImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
}: nil,
},
nil,
fmt.Errorf("overlapping edges detected"),
},
{ // nolint: dupl
"Basic case (two tagless edges (different digests, same PQIN), no overlap)",
map[reg.PromotionEdge]interface{}{
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
Digest: "sha256:000",
DstRegistry: destRC,
DstImageTag: reg.ImageTag{
ImageName: "a",
Tag: "",
},
}: nil,
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "b",
Tag: "0.9",
},
Digest: "sha256:111",
DstRegistry: destRC,
DstImageTag: reg.ImageTag{
ImageName: "a",
Tag: "",
},
}: nil,
},
map[reg.PromotionEdge]interface{}{
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "a",
Tag: "0.9",
},
Digest: "sha256:000",
DstRegistry: destRC,
DstImageTag: reg.ImageTag{
ImageName: "a",
Tag: "",
},
}: nil,
{
SrcRegistry: srcRC,
SrcImageTag: reg.ImageTag{
ImageName: "b",
Tag: "0.9",
},
Digest: "sha256:111",
DstRegistry: destRC,
DstImageTag: reg.ImageTag{
ImageName: "a",
Tag: "",
},
}: nil,
},
nil,
},
}
for _, test := range tests {
got, gotErr := reg.CheckOverlappingEdges(test.input)
if test.expectedErr != nil {
require.NotNil(t, gotErr)
require.Error(t, gotErr, test.expectedErr)
}
require.Equal(t, got, test.expected)
}
}
type FakeCheckAlwaysSucceed struct{}
func (c *FakeCheckAlwaysSucceed) Run() error {
return nil
}
type FakeCheckAlwaysFail struct{}
func (c *FakeCheckAlwaysFail) Run() error {
return fmt.Errorf("there was an error in the pull request check")
}
func TestRunChecks(t *testing.T) {
sc := reg.SyncContext{}
tests := []struct {
name string
checks []reg.PreCheck
expected error
}{
{
"Checking pull request with successful checks",
[]reg.PreCheck{
&FakeCheckAlwaysSucceed{},
},
nil,
},
{
"Checking pull request with unsuccessful checks",
[]reg.PreCheck{
&FakeCheckAlwaysFail{},
},
fmt.Errorf("1 error(s) encountered during the prechecks"),
},
{
"Checking pull request with successful and unsuccessful checks",
[]reg.PreCheck{
&FakeCheckAlwaysSucceed{},
&FakeCheckAlwaysFail{},
&FakeCheckAlwaysFail{},
},
fmt.Errorf("2 error(s) encountered during the prechecks"),
},
}
for _, test := range tests {
got := sc.RunChecks(test.checks)
require.Equal(t, got, test.expected)
}
}
// TestPromotion is the most important test as it simulates the main job of the
// promoter.
func TestPromotion(t *testing.T) {
// CapturedRequests is like a bitmap. We clear off bits (delete keys) for
// each request that we see that got generated. Then it's just a matter of
// ensuring that the map is empty. If it is not empty, we can just show what
// it looks like (basically a list of all requests that did not get
// generated).
//
// We could make it even more "powerful" by storing a histogram instead of a
// set. Then we can check that all requests were generated exactly 1 time.
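// As written, CapturedRequests already maps each PromotionRequest to the
// number of times it was generated, and each test compares that map against
// expectedReqs with require.Equal.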
srcRegName := reg.RegistryName("gcr.io/foo")
destRegName := reg.RegistryName("gcr.io/bar")
destRegName2 := reg.RegistryName("gcr.io/cat")
destRC := reg.RegistryContext{
Name: destRegName,
ServiceAccount: "robot",
}
destRC2 := reg.RegistryContext{
Name: destRegName2,
ServiceAccount: "robot",
}
srcRC := reg.RegistryContext{
Name: srcRegName,
ServiceAccount: "robot",
Src: true,
}
registries := []reg.RegistryContext{destRC, srcRC, destRC2}
registriesRebase := []reg.RegistryContext{
{
Name: reg.RegistryName("us.gcr.io/dog/some/subdir/path/foo"),
ServiceAccount: "robot",
},
srcRC,
}
tests := []struct {
name string
inputM reg.Manifest
inputSc reg.SyncContext
badReads []reg.RegistryName
expectedReqs reg.CapturedRequests
expectedFilteredClean bool
}{
{
// TODO: Use quickcheck to ensure certain properties.
"No promotion",
reg.Manifest{},
reg.SyncContext{},
nil,
reg.CapturedRequests{},
true,
},
{
"No promotion; tag is already promoted",
reg.Manifest{
Registries: registries,
Images: []reg.Image{
{
ImageName: "a",
Dmap: reg.DigestTags{
"sha256:000": {"0.9"},
},
},
},
SrcRegistry: &srcRC,
},
reg.SyncContext{
Inv: reg.MasterInventory{
"gcr.io/foo": {
"a": {
"sha256:000": {"0.9"},
},
},
"gcr.io/bar": {
"a": {
"sha256:000": {"0.9"},
},
"b": {
"sha256:111": {},
},
},
"gcr.io/cat": {
"a": {
"sha256:000": {"0.9"},
},
},
},
},
nil,
reg.CapturedRequests{},
true,
},
{
"No promotion; network errors reading from src registry for all images",
reg.Manifest{
Registries: registries,
Images: []reg.Image{
{
ImageName: "a",
Dmap: reg.DigestTags{
"sha256:000": {"0.9"},
},
},
{
ImageName: "b",
Dmap: reg.DigestTags{
"sha256:111": {"0.9"},
},
},
},
SrcRegistry: &srcRC,
},
reg.SyncContext{
Inv: reg.MasterInventory{
"gcr.io/foo": {
"a": {
"sha256:000": {"0.9"},
},
"b": {
"sha256:111": {"0.9"},
},
},
},
InvIgnore: []reg.ImageName{},
},
[]reg.RegistryName{"gcr.io/foo/a", "gcr.io/foo/b", "gcr.io/foo/c"},
reg.CapturedRequests{},
true,
},
{
"Promote 1 tag; image digest does not exist in dest",
reg.Manifest{
Registries: registries,
Images: []reg.Image{
{
ImageName: "a",
Dmap: reg.DigestTags{
"sha256:000": {"0.9"},
},
},
},
SrcRegistry: &srcRC,
},
reg.SyncContext{
Inv: reg.MasterInventory{
"gcr.io/foo": {
"a": {
"sha256:000": {"0.9"},
},
},
"gcr.io/bar": {
"b": {
"sha256:111": {},
},
},
"gcr.io/cat": {
"a": {
"sha256:000": {"0.9"},
},
},
},
},
nil,
reg.CapturedRequests{
reg.PromotionRequest{
TagOp: reg.Add,
RegistrySrc: srcRegName,
RegistryDest: registries[0].Name,
ServiceAccount: registries[0].ServiceAccount,
ImageNameSrc: "a",
ImageNameDest: "a",
Digest: "sha256:000",
Tag: "0.9",
}: 1,
},
true,
},
{
"Promote 1 tag; image already exists in dest, but digest does not",
reg.Manifest{
Registries: registries,
Images: []reg.Image{
{
ImageName: "a",
Dmap: reg.DigestTags{
"sha256:000": {"0.9"},
},
},
},
SrcRegistry: &srcRC,
},
reg.SyncContext{
Inv: reg.MasterInventory{
"gcr.io/foo": {
"a": {
"sha256:000": {"0.9"},
},
},
"gcr.io/bar": {
"a": {
"sha256:111": {},
},
},
"gcr.io/cat": {
"a": {
"sha256:000": {"0.9"},
},
},
},
},
nil,
reg.CapturedRequests{
reg.PromotionRequest{
TagOp: reg.Add,
RegistrySrc: srcRegName,
RegistryDest: registries[0].Name,
ServiceAccount: registries[0].ServiceAccount,
ImageNameSrc: "a",
ImageNameDest: "a",
Digest: "sha256:000",
Tag: "0.9",
}: 1,
},
true,
},
{
"Promote 1 tag; tag already exists in dest but is pointing to a different digest (move tag)",
reg.Manifest{
Registries: registries,
Images: []reg.Image{
{
ImageName: "a",
Dmap: reg.DigestTags{
// sha256:bad is a bad image uploaded by a
// compromised account. "good" is a good tag that is
// already known and used for this image "a"; in both
// gcr.io/bar and gcr.io/cat it points to a known
// good digest, sha256:600d.
"sha256:bad": {"good"},
},
},
},
SrcRegistry: &srcRC,
},
reg.SyncContext{
Inv: reg.MasterInventory{
"gcr.io/foo": {
"a": {
// Malicious image.
"sha256:bad": {"some-other-tag"},
},
},
"gcr.io/bar": {
"a": {
"sha256:bad": {"some-other-tag"},
"sha256:600d": {"good"},
},
},
"gcr.io/cat": {
"a": {
"sha256:bad": {"some-other-tag"},
"sha256:600d": {"good"},
},
},
},
},
nil,
reg.CapturedRequests{},
false,
},
{
"Promote 1 tag as a 'rebase'",
reg.Manifest{
Registries: registriesRebase,
Images: []reg.Image{
{
ImageName: "a",
Dmap: reg.DigestTags{
"sha256:000": {"0.9"},
},
},
},
SrcRegistry: &srcRC,
},
reg.SyncContext{
Inv: reg.MasterInventory{
"gcr.io/foo": {
"a": {
"sha256:000": {"0.9"},
},
},
"us.gcr.io/dog/some/subdir/path": {
"a": {
"sha256:111": {"0.8"},
},
},
},
},
nil,
reg.CapturedRequests{
reg.PromotionRequest{
TagOp: reg.Add,
RegistrySrc: srcRegName,
RegistryDest: registriesRebase[0].Name,
ServiceAccount: registriesRebase[0].ServiceAccount,
ImageNameSrc: "a",
ImageNameDest: "a",
Digest: "sha256:000",
Tag: "0.9",
}: 1,
},
true,
},
{
"Promote 1 digest (tagless promotion)",
reg.Manifest{
Registries: registries,
Images: []reg.Image{
{
ImageName: "a",
Dmap: reg.DigestTags{
"sha256:000": {},
},
},
},
SrcRegistry: &srcRC,
},
reg.SyncContext{
Inv: reg.MasterInventory{
"gcr.io/foo": {
"a": {
"sha256:000": {},
},
},
"gcr.io/bar": {
"a": {
// "bar" already has it
"sha256:000": {},
},
},
"gcr.io/cat": {
"c": {
"sha256:222": {},
},
},
},
},
nil,
reg.CapturedRequests{
reg.PromotionRequest{
TagOp: reg.Add,
RegistrySrc: srcRegName,
RegistryDest: registries[2].Name,
ServiceAccount: registries[2].ServiceAccount,
ImageNameSrc: "a",
ImageNameDest: "a",
Digest: "sha256:000",
Tag: "",
}: 1,
},
true,
},
{
"NOP; dest has extra tag, but NOP because -delete-extra-tags NOT specified",
reg.Manifest{
Registries: registries,
Images: []reg.Image{
{
ImageName: "a",
Dmap: reg.DigestTags{
"sha256:000": {"0.9"},
},
},
},
SrcRegistry: &srcRC,
},
reg.SyncContext{
Inv: reg.MasterInventory{
"gcr.io/foo": {
"a": {
"sha256:000": {"0.9"},
},
},
"gcr.io/bar": {
"a": {
"sha256:000": {"0.9", "extra-tag"},
},
},
"gcr.io/cat": {
"a": {
"sha256:000": {"0.9"},
},
},
},
},
nil,
reg.CapturedRequests{},
true,
},
{
"NOP (src registry does not have any of the images we want to promote)",
reg.Manifest{
Registries: registries,
Images: []reg.Image{
{
ImageName: "a",
Dmap: reg.DigestTags{
"sha256:000": {"missing-from-src"},
"sha256:333": {"0.8"},
},
},
{
ImageName: "b",
Dmap: reg.DigestTags{
"sha256:bbb": {"also-missing"},
},
},
},
SrcRegistry: &srcRC,
},
reg.SyncContext{
Inv: reg.MasterInventory{
"gcr.io/foo": {
"c": {
"sha256:000": {"0.9"},
},
"d": {
"sha256:bbb": {"1.0"},
},
},
"gcr.io/bar": {
"a": {
"sha256:333": {"0.8"},
},
},
},
},
nil,
reg.CapturedRequests{},
true,
},
{
"Add 1 tag for 2 registries",
reg.Manifest{
Registries: registries,
Images: []reg.Image{
{
ImageName: "a",
Dmap: reg.DigestTags{
"sha256:000": {"0.9", "1.0"},
},
},
},
SrcRegistry: &srcRC,
},
reg.SyncContext{
Inv: reg.MasterInventory{
"gcr.io/foo": {
"a": {
"sha256:000": {"0.9"},
},
},
"gcr.io/bar": {
"a": {
"sha256:000": {"0.9"},
},
},
"gcr.io/cat": {
"a": {
"sha256:000": {"0.9"},
},
},
},
},
nil,
reg.CapturedRequests{
reg.PromotionRequest{
TagOp: reg.Add,
RegistrySrc: srcRegName,
RegistryDest: registries[0].Name,
ServiceAccount: registries[0].ServiceAccount,
ImageNameSrc: "a",
ImageNameDest: "a",
Digest: "sha256:000",
Tag: "1.0",
}: 1,
reg.PromotionRequest{
TagOp: reg.Add,
RegistrySrc: srcRegName,
RegistryDest: registries[2].Name,
ServiceAccount: registries[2].ServiceAccount,
ImageNameSrc: "a",
ImageNameDest: "a",
Digest: "sha256:000",
Tag: "1.0",
}: 1,
},
true,
},
{
"Add 1 tag for 1 registry",
reg.Manifest{
Registries: registries,
Images: []reg.Image{
{
ImageName: "a",
Dmap: reg.DigestTags{
"sha256:000": {"0.9", "1.0"},
},
},
},
SrcRegistry: &srcRC,
},
reg.SyncContext{
Inv: reg.MasterInventory{
"gcr.io/foo": {
"a": {
"sha256:000": {"0.9"},
},
},
"gcr.io/bar": {
"a": {
"sha256:000": {"0.9"},
},
},
"gcr.io/cat": {
"a": {
"sha256:000": {
"0.9", "1.0", "extra-tag",
},
},
},
},
},
nil,
reg.CapturedRequests{
reg.PromotionRequest{
TagOp: reg.Add,
RegistrySrc: srcRegName,
RegistryDest: registries[0].Name,
ServiceAccount: registries[0].ServiceAccount,
ImageNameSrc: "a",
ImageNameDest: "a",
Digest: "sha256:000",
Tag: "1.0",
}: 1,
},
true,
},
}
// captured is sort of a "global variable" because processRequestFake
// closes over it.
captured := make(reg.CapturedRequests)
processRequestFake := reg.MkRequestCapturer(&captured)
nopStream := func(
srcRegistry reg.RegistryName,
srcImageName reg.ImageName,
rc reg.RegistryContext,
destImageName reg.ImageName,
digest reg.Digest,
tag reg.Tag,
tp reg.TagOp,
) stream.Producer {
// We don't even need a stream producer, because we are not creating
// subprocesses that generate JSON or any other output; the vanilla
// "mkReq" in Promote() already stores all the info we need to check.
return nil
}
for _, test := range tests {
// Reset captured for each test.
captured = make(reg.CapturedRequests)
srcReg, err := reg.GetSrcRegistry(registries)
require.Nil(t, err)
test.inputSc.SrcRegistry = srcReg
// Simulate bad network conditions.
if test.badReads != nil {
for _, badRead := range test.badReads {
test.inputSc.IgnoreFromPromotion(badRead)
}
}
edges, err := reg.ToPromotionEdges([]reg.Manifest{test.inputM})
require.Nil(t, err)
filteredEdges, gotClean := test.inputSc.FilterPromotionEdges(
edges,
false)
require.Equal(t, gotClean, test.expectedFilteredClean)
require.Nil(t, test.inputSc.Promote(
filteredEdges,
nopStream,
&processRequestFake,
),
)
require.Equal(t, captured, test.expectedReqs)
}
}
func TestExecRequests(t *testing.T) {
sc := reg.SyncContext{}
destRC := reg.RegistryContext{
Name: reg.RegistryName("gcr.io/bar"),
ServiceAccount: "robot",
}
destRC2 := reg.RegistryContext{
Name: reg.RegistryName("gcr.io/cat"),
ServiceAccount: "robot",
}
srcRC := reg.RegistryContext{
Name: reg.RegistryName("gcr.io/foo"),
ServiceAccount: "robot",
Src: true,
}
registries := []reg.RegistryContext{destRC, srcRC, destRC2}
nopStream := func(
srcRegistry reg.RegistryName,
srcImageName reg.ImageName,
rc reg.RegistryContext,
destImageName reg.ImageName,
digest reg.Digest,
tag reg.Tag,
tp reg.TagOp,
) stream.Producer {
return nil
}
edges, err := reg.ToPromotionEdges(
[]reg.Manifest{
{
Registries: registries,
Images: []reg.Image{
{
ImageName: "a",
Dmap: reg.DigestTags{
"sha256:000": {"0.9"},
},
},
},
SrcRegistry: &srcRC,
},
},
)
require.Nil(t, err)
populateRequests := reg.MKPopulateRequestsForPromotionEdges(
edges,
nopStream,
)
var processRequestSuccess reg.ProcessRequest = func(
sc *reg.SyncContext,
reqs chan stream.ExternalRequest,
requestResults chan<- reg.RequestResult,
wg *sync.WaitGroup,
mutex *sync.Mutex) {
for req := range reqs {
reqRes := reg.RequestResult{Context: req}
requestResults <- reqRes
}
}
var processRequestError reg.ProcessRequest = func(
sc *reg.SyncContext,
reqs chan stream.ExternalRequest,
requestResults chan<- reg.RequestResult,
wg *sync.WaitGroup,
mutex *sync.Mutex) {
for req := range reqs {
reqRes := reg.RequestResult{Context: req}
errors := make(reg.Errors, 0)
errors = append(errors, reg.Error{
Context: "Running TestExecRequests",
Error: fmt.Errorf("This request results in an error"),
})
reqRes.Errors = errors
requestResults <- reqRes
}
}
tests := []struct {
name string
processRequestFn reg.ProcessRequest
expected error
}{
{
"Error tracking for successful promotion",
processRequestSuccess,
nil,
},
{
"Error tracking for promotion with errors",
processRequestError,
fmt.Errorf("Encountered an error while executing requests"),
},
}
for _, test := range tests {
got := sc.ExecRequests(populateRequests, test.processRequestFn)
require.Equal(t, got, test.expected)
}
}
func TestGarbageCollection(t *testing.T) {
srcRegName := reg.RegistryName("gcr.io/foo")
destRegName := reg.RegistryName("gcr.io/bar")
destRegName2 := reg.RegistryName("gcr.io/cat")
registries := []reg.RegistryContext{
{
Name: srcRegName,
ServiceAccount: "robot",
Src: true,
},
{
Name: destRegName,
ServiceAccount: "robot",
},
{
Name: destRegName2,
ServiceAccount: "robot",
},
}
tests := []struct {
name string
inputM reg.Manifest
inputSc reg.SyncContext
expectedReqs reg.CapturedRequests
}{
{
"No garbage collection (no empty digests)",
reg.Manifest{
Registries: registries,
Images: []reg.Image{
{
ImageName: "a",
Dmap: reg.DigestTags{
"sha256:000": {"missing-from-src"},
"sha256:333": {"0.8"},
},
},
{
ImageName: "b",
Dmap: reg.DigestTags{
"sha256:bbb": {"also-missing"},
},
},
},
},
reg.SyncContext{
Inv: reg.MasterInventory{
"gcr.io/foo": {
"c": {
"sha256:000": {"0.9"},
},
"d": {
"sha256:bbb": {"1.0"},
},
},
"gcr.io/bar": {
"a": {
"sha256:333": {"0.8"},
},
},
},
},
reg.CapturedRequests{},
},
{
"Simple garbage collection (delete ALL images in dest that are untagged))",
reg.Manifest{
Registries: registries,
Images: []reg.Image{
{
ImageName: "a",
Dmap: reg.DigestTags{
"sha256:000": {"missing-from-src"},
"sha256:333": {"0.8"},
},
},
{
ImageName: "b",
Dmap: reg.DigestTags{
"sha256:bbb": {"also-missing"},
},
},
},
},
reg.SyncContext{
Inv: reg.MasterInventory{
"gcr.io/foo": {
"c": {
"sha256:000": nil,
},
"d": {
"sha256:bbb": nil,
},
},
"gcr.io/bar": {
"a": {
// NOTE: this is skipping the first step where we
// delete extra tags away (-delete-extra-tags).
"sha256:111": nil,
},
"z": {
"sha256:000": nil,
},
},
},
},
reg.CapturedRequests{
reg.PromotionRequest{
TagOp: reg.Delete,
RegistrySrc: srcRegName,
RegistryDest: registries[1].Name,
ServiceAccount: registries[1].ServiceAccount,
ImageNameSrc: "",
ImageNameDest: "a",
Digest: "sha256:111",
Tag: "",
}: 1,
reg.PromotionRequest{
TagOp: reg.Delete,
RegistrySrc: srcRegName,
RegistryDest: registries[1].Name,
ServiceAccount: registries[1].ServiceAccount,
ImageNameSrc: "",
ImageNameDest: "z",
Digest: "sha256:000",
Tag: "",
}: 1,
},
},
}
captured := make(reg.CapturedRequests)
var processRequestFake reg.ProcessRequest = func(
sc *reg.SyncContext,
reqs chan stream.ExternalRequest,
requestResults chan<- reg.RequestResult,
wg *sync.WaitGroup,
mutex *sync.Mutex,
) {
for req := range reqs {
// TODO: Why are we not checking errors here?
// nolint: errcheck
pr := req.RequestParams.(reg.PromotionRequest)
mutex.Lock()
captured[pr]++
mutex.Unlock()
requestResults <- reg.RequestResult{}
}
}
for _, test := range tests {
// Reset captured for each test.
captured = make(reg.CapturedRequests)
nopStream := func(
destRC reg.RegistryContext,
imageName reg.ImageName,
digest reg.Digest) stream.Producer {
return nil
}
srcReg, err := reg.GetSrcRegistry(registries)
require.Nil(t, err)
test.inputSc.SrcRegistry = srcReg
test.inputSc.GarbageCollect(test.inputM, nopStream, &processRequestFake)
require.Equal(t, captured, test.expectedReqs)
}
}
func TestGarbageCollectionMulti(t *testing.T) {
srcRegName := reg.RegistryName("gcr.io/src")
destRegName1 := reg.RegistryName("gcr.io/dest1")
destRegName2 := reg.RegistryName("gcr.io/dest2")
destRC := reg.RegistryContext{
Name: destRegName1,
ServiceAccount: "robotDest1",
}
destRC2 := reg.RegistryContext{
Name: destRegName2,
ServiceAccount: "robotDest2",
}
srcRC := reg.RegistryContext{
Name: srcRegName,
ServiceAccount: "robotSrc",
Src: true,
}
registries := []reg.RegistryContext{srcRC, destRC, destRC2}
tests := []struct {
name string
inputM reg.Manifest
inputSc reg.SyncContext
expectedReqs reg.CapturedRequests
}{
{
"Simple garbage collection (delete ALL images in all dests that are untagged))",
reg.Manifest{
Registries: registries,
Images: []reg.Image{
{
ImageName: "a",
Dmap: reg.DigestTags{
"sha256:000": {"missing-from-src"},
"sha256:333": {"0.8"},
},
},
{
ImageName: "b",
Dmap: reg.DigestTags{
"sha256:bbb": {"also-missing"},
},
},
},
},
reg.SyncContext{
Inv: reg.MasterInventory{
"gcr.io/src": {
"c": {
"sha256:000": nil,
},
"d": {
"sha256:bbb": nil,
},
},
"gcr.io/dest1": {
"a": {
"sha256:111": nil,
},
"z": {
"sha256:222": nil,
},
},
"gcr.io/dest2": {
"a": {
"sha256:123": nil,
},
"b": {
"sha256:444": nil,
},
},
},
},
reg.CapturedRequests{
reg.PromotionRequest{
TagOp: reg.Delete,
RegistrySrc: srcRegName,
RegistryDest: registries[1].Name,
ServiceAccount: registries[1].ServiceAccount,
ImageNameSrc: "",
ImageNameDest: "a",
Digest: "sha256:111",
Tag: "",
}: 1,
reg.PromotionRequest{
TagOp: reg.Delete,
RegistrySrc: srcRegName,
RegistryDest: registries[1].Name,
ServiceAccount: registries[1].ServiceAccount,
ImageNameSrc: "",
ImageNameDest: "z",
Digest: "sha256:222",
Tag: "",
}: 1,
reg.PromotionRequest{
TagOp: reg.Delete,
RegistrySrc: srcRegName,
RegistryDest: registries[2].Name,
ServiceAccount: registries[2].ServiceAccount,
ImageNameSrc: "",
ImageNameDest: "a",
Digest: "sha256:123",
Tag: "",
}: 1,
reg.PromotionRequest{
TagOp: reg.Delete,
RegistrySrc: srcRegName,
RegistryDest: registries[2].Name,
ServiceAccount: registries[2].ServiceAccount,
ImageNameSrc: "",
ImageNameDest: "b",
Digest: "sha256:444",
Tag: "",
}: 1,
},
},
}
captured := make(reg.CapturedRequests)
var processRequestFake reg.ProcessRequest = func(
sc *reg.SyncContext,
reqs chan stream.ExternalRequest,
requestResults chan<- reg.RequestResult,
wg *sync.WaitGroup,
mutex *sync.Mutex,
) {
for req := range reqs {
// TODO: Why are we not checking errors here?
// nolint: errcheck
pr := req.RequestParams.(reg.PromotionRequest)
mutex.Lock()
captured[pr]++
mutex.Unlock()
requestResults <- reg.RequestResult{}
}
}
for _, test := range tests {
// Reset captured for each test.
captured = make(reg.CapturedRequests)
nopStream := func(
destRC reg.RegistryContext,
imageName reg.ImageName,
digest reg.Digest,
) stream.Producer {
return nil
}
srcReg, err := reg.GetSrcRegistry(registries)
require.Nil(t, err)
test.inputSc.SrcRegistry = srcReg
test.inputSc.GarbageCollect(test.inputM, nopStream, &processRequestFake)
require.Equal(t, captured, test.expectedReqs)
}
}
func TestSnapshot(t *testing.T) {
tests := []struct {
name string
input reg.RegInvImage
expected string
}{
{
"Basic",
reg.RegInvImage{
"foo": {
"sha256:111": {"one"},
"sha256:fff": {"0.9", "0.5"},
"sha256:abc": {"0.3", "0.2"},
},
"bar": {
"sha256:000": {"0.8", "0.5", "0.9"},
},
},
`- name: bar
dmap:
"sha256:000": ["0.5", "0.8", "0.9"]
- name: foo
dmap:
"sha256:111": ["one"]
"sha256:abc": ["0.2", "0.3"]
"sha256:fff": ["0.5", "0.9"]
`,
},
}
for _, test := range tests {
gotYAML := test.input.ToYAML(reg.YamlMarshalingOpts{})
require.YAMLEq(t, gotYAML, test.expected)
}
}
func TestParseContainerParts(t *testing.T) {
type ContainerParts struct {
registry string
repository string
err error
}
shouldBeValid := []struct {
input string
expected ContainerParts
}{
{
"gcr.io/google-containers/foo",
ContainerParts{
"gcr.io/google-containers",
"foo",
nil,
},
},
{
"us.gcr.io/google-containers/foo",
ContainerParts{
"us.gcr.io/google-containers",
"foo",
nil,
},
},
{
"us.gcr.io/google-containers/foo/bar",
ContainerParts{
"us.gcr.io/google-containers",
"foo/bar",
nil,
},
},
{
"k8s.gcr.io/a/b/c",
ContainerParts{
"k8s.gcr.io",
"a/b/c",
nil,
},
},
}
for _, test := range shouldBeValid {
registry, repository, err := reg.ParseContainerParts(test.input)
got := ContainerParts{
registry,
repository,
err,
}
require.Equal(t, got, test.expected)
}
shouldBeInvalid := []struct {
input string
expected ContainerParts
}{
{
// Blank string.
"",
ContainerParts{
"",
"",
fmt.Errorf("invalid string '%s'", ""),
},
},
{
// Bare domain.
"gcr.io",
ContainerParts{
"",
"",
fmt.Errorf("invalid string '%s'", "gcr.io"),
},
},
{
// Another top-level name (missing image name).
"gcr.io/google-containers",
ContainerParts{
"",
"",
fmt.Errorf("invalid string '%s'", "gcr.io/google-containers"),
},
},
{
// Naked vanity domain (missing image name).
"k8s.gcr.io",
ContainerParts{
"",
"",
fmt.Errorf("invalid string '%s'", "k8s.gcr.io"),
},
},
{
// Double slash.
"k8s.gcr.io//a/b",
ContainerParts{
"",
"",
fmt.Errorf("invalid string '%s'", "k8s.gcr.io//a/b"),
},
},
{
// Trailing slash.
"k8s.gcr.io/a/b/",
ContainerParts{
"",
"",
fmt.Errorf("invalid string '%s'", "k8s.gcr.io/a/b/"),
},
},
}
for _, test := range shouldBeInvalid {
registry, repository, err := reg.ParseContainerParts(test.input)
got := ContainerParts{
registry,
repository,
err,
}
require.Equal(t, got, test.expected)
}
}
func TestMatch(t *testing.T) {
inputMfest := reg.Manifest{
Registries: []reg.RegistryContext{
{
Name: "gcr.io/foo-staging",
ServiceAccount: "[email protected]",
Src: true,
},
{
Name: "us.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
{
Name: "eu.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
{
Name: "asia.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
},
Images: []reg.Image{
{
ImageName: "foo-controller",
Dmap: reg.DigestTags{
"sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": {"1.0"},
},
},
},
Filepath: "a/promoter-manifest.yaml",
}
tests := []struct {
name string
mfest reg.Manifest
gcrPayload reg.GCRPubSubPayload
expectedMatch reg.GcrPayloadMatch
}{
{
"INSERT message contains both Digest and Tag",
inputMfest,
reg.GCRPubSubPayload{
Action: "INSERT",
FQIN: "us.gcr.io/some-prod/foo-controller@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
PQIN: "us.gcr.io/some-prod/foo-controller:1.0",
},
reg.GcrPayloadMatch{
PathMatch: true,
DigestMatch: true,
TagMatch: true,
},
},
{
"INSERT message only contains Digest",
inputMfest,
reg.GCRPubSubPayload{
Action: "INSERT",
FQIN: "us.gcr.io/some-prod/foo-controller@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
},
reg.GcrPayloadMatch{
PathMatch: true,
DigestMatch: true,
},
},
{
"INSERT's digest is not in Manifest (digest mismatch, but path matched)",
inputMfest,
reg.GCRPubSubPayload{
Action: "INSERT",
FQIN: "us.gcr.io/some-prod/foo-controller@sha256:000",
},
reg.GcrPayloadMatch{
PathMatch: true,
},
},
{
"INSERT's digest is not in Manifest (neither digest nor tag match, but path matched)",
inputMfest,
reg.GCRPubSubPayload{
Action: "INSERT",
FQIN: "us.gcr.io/some-prod/foo-controller@sha256:000",
PQIN: "us.gcr.io/some-prod/foo-controller:1.0",
},
reg.GcrPayloadMatch{
PathMatch: true,
},
},
{
"INSERT's digest is not in Manifest (tag specified, but tag mismatch)",
inputMfest,
reg.GCRPubSubPayload{
Action: "INSERT",
FQIN: "us.gcr.io/some-prod/foo-controller@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
PQIN: "us.gcr.io/some-prod/foo-controller:white-powder",
},
reg.GcrPayloadMatch{
PathMatch: true,
DigestMatch: true,
TagMismatch: true,
},
},
}
for _, test := range tests {
require.Nil(t, test.gcrPayload.PopulateExtraFields())
got := test.gcrPayload.Match(&test.mfest)
require.Equal(t, got, test.expectedMatch)
}
}
func TestPopulateExtraFields(t *testing.T) {
shouldBeValid := []struct {
name string
input reg.GCRPubSubPayload
expected reg.GCRPubSubPayload
}{
{
"basic",
reg.GCRPubSubPayload{
Action: "INSERT",
FQIN: "k8s.gcr.io/subproject/foo@sha256:000",
PQIN: "k8s.gcr.io/subproject/foo:1.0",
Path: "",
Digest: "",
Tag: "",
},
reg.GCRPubSubPayload{
Action: "INSERT",
FQIN: "k8s.gcr.io/subproject/foo@sha256:000",
PQIN: "k8s.gcr.io/subproject/foo:1.0",
Path: "k8s.gcr.io/subproject/foo",
Digest: "sha256:000",
Tag: "1.0",
},
},
{
"only FQIN",
reg.GCRPubSubPayload{
Action: "INSERT",
FQIN: "k8s.gcr.io/subproject/foo@sha256:000",
PQIN: "",
Path: "",
Digest: "",
Tag: "",
},
reg.GCRPubSubPayload{
Action: "INSERT",
FQIN: "k8s.gcr.io/subproject/foo@sha256:000",
PQIN: "",
Path: "k8s.gcr.io/subproject/foo",
Digest: "sha256:000",
Tag: "",
},
},
{
"only PQIN",
reg.GCRPubSubPayload{
Action: "DELETE",
FQIN: "",
PQIN: "k8s.gcr.io/subproject/foo:1.0",
Path: "",
Digest: "",
Tag: "",
},
reg.GCRPubSubPayload{
Action: "DELETE",
FQIN: "",
PQIN: "k8s.gcr.io/subproject/foo:1.0",
Path: "k8s.gcr.io/subproject/foo",
Digest: "",
Tag: "1.0",
},
},
}
for _, test := range shouldBeValid {
require.Nil(t, test.input.PopulateExtraFields())
got := test.input
require.Equal(t, got, test.expected)
}
shouldBeInvalid := []struct {
name string
input reg.GCRPubSubPayload
expected error
}{
{
"FQIN missing @-sign",
reg.GCRPubSubPayload{
Action: "INSERT",
FQIN: "k8s.gcr.io/subproject/foosha256:000",
PQIN: "k8s.gcr.io/subproject/foo:1.0",
Path: "",
Digest: "",
Tag: "",
},
fmt.Errorf("invalid FQIN: k8s.gcr.io/subproject/foosha256:000"),
},
{
"PQIN missing colon",
reg.GCRPubSubPayload{
Action: "INSERT",
FQIN: "k8s.gcr.io/subproject/foo@sha256:000",
PQIN: "k8s.gcr.io/subproject/foo1.0",
Path: "",
Digest: "",
Tag: "",
},
fmt.Errorf("invalid PQIN: k8s.gcr.io/subproject/foo1.0"),
},
}
for _, test := range shouldBeInvalid {
err := test.input.PopulateExtraFields()
require.NotNil(t, err)
require.EqualError(t, err, test.expected.Error())
}
}
// Helper functions.
func bazelTestPath(testName string, paths ...string) string {
prefix := []string{
os.Getenv("PWD"),
"inventory_test",
testName,
}
return filepath.Join(append(prefix, paths...)...)
}
| [
"\"PWD\""
]
| []
| [
"PWD"
]
| [] | ["PWD"] | go | 1 | 0 | |
main.go | package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
"sync"
"github.com/gorilla/mux"
"gopkg.in/yaml.v2"
)
/**
Analyzer of programming languages
Input:
git diff from one commit
Collected data:
- user email
- filename
Rules:
- defined in config file as regexps
Output:
- JSON sent to Collector API
*/
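// Illustrative sketch of the payload sent to the Collector API (the exact JSON
// keys are an assumption here -- they depend on the Promotion struct tags
// defined elsewhere in this package):
//
//	[
//	  {"username": "[email protected]", "points": 1.5, "avatarUrl": "https://example.com/a.png"}
//	]
//
// sendToCollector marshals the []Promotion slice with json.Marshal and POSTs it
// to config.CollectorApi, optionally appending a ?team_id= query parameter.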
var languageAnalyzer Analyzer
var rulesAnalyzer Analyzer
var config *Config
func main() {
c, err := NewConfig("etc/app.ini")
if err != nil {
panic(err.Error())
}
config = c
rules, err := readRules(config.RulesFile)
if err != nil {
panic(err.Error())
}
rulesAnalyzer = NewRulesAnalyzer(rules, config.Source)
languageAnalyzer = NewLanguageAnalyzer(config.Source, config.DefaultPoints)
r := mux.NewRouter()
r.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "pong")
}).Methods("GET")
r.HandleFunc("/commit", CommitHandler).Methods("POST")
r.HandleFunc("/import", ImportHandler).Methods("POST")
if port := os.Getenv("VCAP_APP_PORT"); len(port) != 0 {
if p, e := strconv.Atoi(port); e == nil && p > 0 {
config.Port = int(p)
}
}
http.Handle("/", r)
log.Printf("Listening on port %d\n", config.Port)
listen := fmt.Sprintf("%s:%d", config.Host, config.Port)
log.Println(http.ListenAndServe(listen, nil))
}
func readRules(f string) ([]Rule, error) {
data, err := ioutil.ReadFile(f)
if err != nil {
return nil, err
}
rules := []Rule{}
err = yaml.Unmarshal(data, &rules)
return rules, err
}
func CommitHandler(w http.ResponseWriter, r *http.Request) {
decoder := json.NewDecoder(r.Body)
p := Payload{}
err := decoder.Decode(&p)
if err != nil {
panic(err.Error())
}
promos, err := Analyze(p.Commits)
if err != nil {
panic(err.Error())
}
if len(promos) == 0 {
return
}
log.Printf("message for user %s, given points: %f\n", promos[0].Username, promos[0].Points)
// HACK
for i := range promos {
promos[i].AvatarUrl = p.Sender.AvatarUrl
}
teamID := r.URL.Query().Get("team_id")
resp, err := sendToCollector(promos, teamID)
if err != nil {
panic(err.Error())
}
defer resp.Body.Close()
respData, err := ioutil.ReadAll(resp.Body)
w.Write(respData)
}
// ImportHandler takes a GitHub repo name, e.g. hackerbadge/analyzer, imports all of its commits, and pushes the result to the Collector API
func ImportHandler(w http.ResponseWriter, r *http.Request) {
var (
clientId = "3a758ff9868a3541c9cf"
clientSecret = "dc7e30f04713519c02f8730808d10f462163e528"
queries = r.URL.Query()
name = queries["name"][0]
singleCommits []GithubSingleCommit
wg sync.WaitGroup
max = 20
i = 0
)
commitUrls, err := fetchAllCommitURLs(name, clientId, clientSecret)
if err != nil {
panic(err)
}
// loop and fetch all single commits, collect changed files
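// Fetching is batched: up to `max` fetchCommitURL goroutines run concurrently,
// each writing its result to a buffered channel that is drained after wg.Wait(),
// so no more than `max` GitHub requests are in flight at a time.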
for {
if i >= len(commitUrls) {
break
}
ch := make(chan GithubSingleCommit, max)
for j := 0; j < max; j++ {
if i >= len(commitUrls) {
break
}
wg.Add(1)
go fetchCommitURL(commitUrls[i], clientId, clientSecret, ch, &wg)
i++
}
wg.Wait()
close(ch)
for m := range ch {
singleCommits = append(singleCommits, m)
}
}
// Send singleCommits to analyzer
analyzer := NewLanguageAnalyzer(config.Source, config.DefaultPoints)
promos, err := analyzer.AnalyzeFull(singleCommits)
if err != nil {
panic(err)
}
if len(promos) == 0 {
return
}
teamID := r.URL.Query().Get("team_id")
resp, err := sendToCollector(promos, teamID)
if err != nil {
panic(err.Error())
}
defer resp.Body.Close()
respData, err := ioutil.ReadAll(resp.Body)
log.Println("Response from Collector API:" + string(respData))
w.Write(respData)
}
func fetchCommitURL(url, clientId, clientSecret string, ch chan GithubSingleCommit, wg *sync.WaitGroup) {
defer wg.Done()
url = fmt.Sprintf("%s?client_id=%s&client_secret=%s", url, clientId, clientSecret)
log.Printf("[DEBUG] Fetching single Commit URL %s\n", url)
resp, err := http.Get(url)
if err != nil {
panic(err)
}
defer resp.Body.Close()
singleCommit := &GithubSingleCommit{}
// Decoding json response
decoder := json.NewDecoder(resp.Body)
err = decoder.Decode(singleCommit)
if err != nil {
panic(err)
}
log.Printf("[DEBUG] Fetched commit %+v\n", *singleCommit)
ch <- *singleCommit
}
func fetchAllCommitURLs(name, clientId, clientSecret string) ([]string, error) {
var (
commitUrls []string
page = 1
perPage = 50
err error
)
// loop and fetch all pages of /commits API, collect all URLs of single commits
for {
apiUrl := fmt.Sprintf("https://api.github.com/repos/%s/commits?page=%d&per_page=%d&client_id=%s&client_secret=%s", name, page, perPage, clientId, clientSecret)
log.Printf("[DEBUG] Fetching Commits List from %s\n", apiUrl)
resp, err := http.Get(apiUrl)
if err != nil {
return commitUrls, err
}
defer resp.Body.Close()
// Decoding json response
decoder := json.NewDecoder(resp.Body)
githubCommits := []GithubCommit{}
err = decoder.Decode(&githubCommits)
if err != nil {
return commitUrls, err
}
for _, githubCommit := range githubCommits {
commitUrls = append(commitUrls, githubCommit.Url)
}
// Stop fetching if there are no more commits
// TODO remove break here
break
if len(githubCommits) == 0 {
break
}
page++
}
return commitUrls, err
}
func sendToCollector(promos []Promotion, teamID string) (resp *http.Response, err error) {
log.Printf("Sending %d promotions to %s\n", len(promos), config.CollectorApi)
data, err := json.Marshal(promos)
if err != nil {
return nil, err
}
log.Printf("Posting to Collector API %s, payload=%s\n", config.CollectorApi, data)
r := bytes.NewReader(data)
resp, err = http.Post(config.CollectorApi+"?team_id="+teamID, "application/json", r)
fmt.Println("sending to collector finished. Sent promos: %d", len(promos))
fmt.Println("error: %v", err)
return
}
func Analyze(data []Commit) ([]Promotion, error) {
promotions := []Promotion{}
languagePromos, err := languageAnalyzer.Analyze(data)
if err != nil {
return nil, err
}
rulesPromos, err := rulesAnalyzer.Analyze(data)
if err != nil {
return nil, err
}
promotions = append(languagePromos, rulesPromos...)
return promotions, nil
}
// AppendUnique appends items to a slice if they do not exist in that slice yet
func AppendUnique(slice []string, elems ...string) (ret []string) {
ret = slice
for _, elem := range elems {
b := true
for _, s := range slice {
if elem == s {
b = false
break
}
}
if b {
ret = append(ret, elem)
}
}
return ret
}
| [
"\"VCAP_APP_PORT\""
]
| []
| [
"VCAP_APP_PORT"
]
| [] | ["VCAP_APP_PORT"] | go | 1 | 0 | |
models/ingress_cert_params.go | // Code generated by go-swagger; DO NOT EDIT.
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
)
// IngressCertParams ingress cert params
//
// swagger:model ingress-cert-params
type IngressCertParams string
// Validate validates this ingress cert params
func (m IngressCertParams) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this ingress cert params based on context it is used
func (m IngressCertParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
| []
| []
| []
| [] | [] | go | null | null | null |
pkg/helmexec/exec.go | package helmexec
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
type decryptedSecret struct {
mutex sync.RWMutex
bytes []byte
}
type execer struct {
helmBinary string
runner Runner
logger *zap.SugaredLogger
kubeContext string
extra []string
decryptedSecretMutex sync.Mutex
decryptedSecrets map[string]*decryptedSecret
}
func NewLogger(writer io.Writer, logLevel string) *zap.SugaredLogger {
var cfg zapcore.EncoderConfig
cfg.MessageKey = "message"
out := zapcore.AddSync(writer)
var level zapcore.Level
err := level.Set(logLevel)
if err != nil {
panic(err)
}
core := zapcore.NewCore(
zapcore.NewConsoleEncoder(cfg),
out,
level,
)
return zap.New(core).Sugar()
}
// New for running helm commands
func New(helmBinary string, logger *zap.SugaredLogger, kubeContext string, runner Runner) *execer {
return &execer{
helmBinary: helmBinary,
logger: logger,
kubeContext: kubeContext,
runner: runner,
decryptedSecrets: make(map[string]*decryptedSecret),
}
}
func (helm *execer) SetExtraArgs(args ...string) {
helm.extra = args
}
func (helm *execer) SetHelmBinary(bin string) {
helm.helmBinary = bin
}
func (helm *execer) AddRepo(name, repository, cafile, certfile, keyfile, username, password string) error {
var args []string
args = append(args, "repo", "add", name, repository)
if certfile != "" && keyfile != "" {
args = append(args, "--cert-file", certfile, "--key-file", keyfile)
}
if cafile != "" {
args = append(args, "--ca-file", cafile)
}
if username != "" && password != "" {
args = append(args, "--username", username, "--password", password)
}
helm.logger.Infof("Adding repo %v %v", name, repository)
out, err := helm.exec(args, map[string]string{})
helm.info(out)
return err
}
func (helm *execer) UpdateRepo() error {
helm.logger.Info("Updating repo")
out, err := helm.exec([]string{"repo", "update"}, map[string]string{})
helm.info(out)
return err
}
func (helm *execer) BuildDeps(name, chart string) error {
helm.logger.Infof("Building dependency release=%v, chart=%v", name, chart)
out, err := helm.exec([]string{"dependency", "build", chart}, map[string]string{})
helm.info(out)
return err
}
func (helm *execer) UpdateDeps(chart string) error {
helm.logger.Infof("Updating dependency %v", chart)
out, err := helm.exec([]string{"dependency", "update", chart}, map[string]string{})
helm.info(out)
return err
}
func (helm *execer) SyncRelease(context HelmContext, name, chart string, flags ...string) error {
helm.logger.Infof("Upgrading release=%v, chart=%v", name, chart)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "upgrade", "--install", "--reset-values", name, chart), flags...), env)
helm.write(out)
return err
}
func (helm *execer) ReleaseStatus(context HelmContext, name string, flags ...string) error {
helm.logger.Infof("Getting status %v", name)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "status", name), flags...), env)
helm.write(out)
return err
}
func (helm *execer) List(context HelmContext, filter string, flags ...string) (string, error) {
helm.logger.Infof("Listing releases matching %v", filter)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
var args []string
if helm.isHelm3() {
args = []string{"list", "--filter", filter}
} else {
args = []string{"list", filter}
}
out, err := helm.exec(append(append(preArgs, args...), flags...), env)
// In v2, `helm list FILTER` prints nothing when no release matches the filter.
// In v3, helm still prints a header line like `NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION`,
// which confuses helmfile's existing logic that treats any non-empty output from `helm list`
// as an indication that the release exists.
//
// This fixes it by removing the header from the v3 output, so that the output is formatted the same as that of v2.
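// For example (illustrative output), a v3 header row such as
//   NAME    NAMESPACE   REVISION    UPDATED    STATUS    CHART    APP VERSION
// is stripped below so that only the release rows (if any) remain, as in v2.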
if helm.isHelm3() {
lines := strings.Split(string(out), "\n")
lines = lines[1:]
out = []byte(strings.Join(lines, "\n"))
}
helm.write(out)
return string(out), err
}
func (helm *execer) DecryptSecret(context HelmContext, name string, flags ...string) (string, error) {
absPath, err := filepath.Abs(name)
if err != nil {
return "", err
}
helm.logger.Debugf("Preparing to decrypt secret %v", absPath)
helm.decryptedSecretMutex.Lock()
secret, ok := helm.decryptedSecrets[absPath]
// Cache miss
if !ok {
secret = &decryptedSecret{}
helm.decryptedSecrets[absPath] = secret
secret.mutex.Lock()
defer secret.mutex.Unlock()
helm.decryptedSecretMutex.Unlock()
helm.logger.Infof("Decrypting secret %v", absPath)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "secrets", "dec", absPath), flags...), env)
helm.info(out)
if err != nil {
return "", err
}
// HELM_SECRETS_DEC_SUFFIX is used by the helm-secrets plugin to define the output file
decSuffix := os.Getenv("HELM_SECRETS_DEC_SUFFIX")
if len(decSuffix) == 0 {
decSuffix = ".yaml.dec"
}
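// With the default suffix, e.g. /path/to/secrets.yaml is expected to be written
// by the helm-secrets plugin as /path/to/secrets.yaml.dec; it is read back and
// removed below.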
decFilename := strings.Replace(absPath, ".yaml", decSuffix, 1)
secretBytes, err := ioutil.ReadFile(decFilename)
if err != nil {
return "", err
}
secret.bytes = secretBytes
if err := os.Remove(decFilename); err != nil {
return "", err
}
} else {
// Cache hit
helm.logger.Debugf("Found secret in cache %v", absPath)
secret.mutex.RLock()
helm.decryptedSecretMutex.Unlock()
defer secret.mutex.RUnlock()
}
tmpFile, err := ioutil.TempFile("", "secret")
if err != nil {
return "", err
}
_, err = tmpFile.Write(secret.bytes)
if err != nil {
return "", err
}
return tmpFile.Name(), err
}
func (helm *execer) TemplateRelease(name string, chart string, flags ...string) error {
helm.logger.Infof("Templating release=%v, chart=%v", name, chart)
var args []string
if helm.isHelm3() {
args = []string{"template", name, chart}
} else {
args = []string{"template", chart, "--name", name}
}
out, err := helm.exec(append(args, flags...), map[string]string{})
helm.write(out)
return err
}
func (helm *execer) DiffRelease(context HelmContext, name, chart string, flags ...string) error {
helm.logger.Infof("Comparing release=%v, chart=%v", name, chart)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "diff", "upgrade", "--reset-values", "--allow-unreleased", name, chart), flags...), env)
// Do our best to write STDOUT only when diff existed
// Unfortunately, this works only when you run helmfile with `--detailed-exitcode`
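// (With --detailed-exitcode the helm-diff plugin exits with status 2 when it
// detects changes, so ExitStatus() == 2 below means "diff found" rather than a
// hard failure.)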
detailedExitcodeEnabled := false
for _, f := range flags {
if strings.Contains(f, "detailed-exitcode") {
detailedExitcodeEnabled = true
break
}
}
if detailedExitcodeEnabled {
switch e := err.(type) {
case ExitError:
if e.ExitStatus() == 2 {
helm.write(out)
return err
}
}
} else {
helm.write(out)
}
return err
}
func (helm *execer) Lint(name, chart string, flags ...string) error {
helm.logger.Infof("Linting release=%v, chart=%v", name, chart)
out, err := helm.exec(append([]string{"lint", chart}, flags...), map[string]string{})
helm.write(out)
return err
}
func (helm *execer) Fetch(chart string, flags ...string) error {
helm.logger.Infof("Fetching %v", chart)
out, err := helm.exec(append([]string{"fetch", chart}, flags...), map[string]string{})
helm.info(out)
return err
}
func (helm *execer) DeleteRelease(context HelmContext, name string, flags ...string) error {
helm.logger.Infof("Deleting %v", name)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "delete", name), flags...), env)
helm.write(out)
return err
}
func (helm *execer) TestRelease(context HelmContext, name string, flags ...string) error {
helm.logger.Infof("Testing %v", name)
preArgs := context.GetTillerlessArgs(helm.helmBinary)
env := context.getTillerlessEnv()
var args []string
if helm.isHelm3() {
args = []string{"test", "run", name}
} else {
args = []string{"test", name}
}
out, err := helm.exec(append(append(preArgs, args...), flags...), env)
helm.write(out)
return err
}
func (helm *execer) exec(args []string, env map[string]string) ([]byte, error) {
cmdargs := args
if len(helm.extra) > 0 {
cmdargs = append(cmdargs, helm.extra...)
}
if helm.kubeContext != "" {
cmdargs = append(cmdargs, "--kube-context", helm.kubeContext)
}
cmd := fmt.Sprintf("exec: %s %s", helm.helmBinary, strings.Join(cmdargs, " "))
helm.logger.Debug(cmd)
bytes, err := helm.runner.Execute(helm.helmBinary, cmdargs, env)
helm.logger.Debugf("%s: %s", cmd, bytes)
return bytes, err
}
func (helm *execer) info(out []byte) {
if len(out) > 0 {
helm.logger.Infof("%s", out)
}
}
func (helm *execer) write(out []byte) {
if len(out) > 0 {
fmt.Printf("%s\n", out)
}
}
func (helm *execer) isHelm3() bool {
return os.Getenv("HELMFILE_HELM3") != ""
}
| [
"\"HELM_SECRETS_DEC_SUFFIX\"",
"\"HELMFILE_HELM3\""
]
| []
| [
"HELMFILE_HELM3",
"HELM_SECRETS_DEC_SUFFIX"
]
| [] | ["HELMFILE_HELM3", "HELM_SECRETS_DEC_SUFFIX"] | go | 2 | 0 | |
wrapper/aslprep_docker.py | #!/usr/bin/env python
"""
The aslprep on Docker wrapper
This is a lightweight Python wrapper to run aslprep.
Docker must be installed and running. This can be checked
by running ::
docker info
"""
import sys
import os
import re
import subprocess
__version__ = '99.99.99'
__copyright__ = 'Copyright 2020, Center for Reproducible Neuroscience, Stanford University'
__credits__ = ['wait']
__bugreports__ = 'https://github.com/pennlinc/issues'
MISSING = """
Image '{}' is missing
Would you like to download? [Y/n] """
PKG_PATH = '/usr/local/miniconda/lib/python3.7/site-packages'
TF_TEMPLATES = (
'MNI152Lin',
'MNI152NLin2009cAsym',
'MNI152NLin6Asym',
'MNI152NLin6Sym',
'MNIInfant',
'MNIPediatricAsym',
'NKI',
'OASIS30ANTs',
'PNC',
'UNCInfant',
'fsLR',
'fsaverage',
'fsaverage5',
'fsaverage6',
)
NONSTANDARD_REFERENCES = (
'anat',
'T1w',
'run',
'func',
'sbref',
'fsnative'
)
# Monkey-patch Py2 subprocess
if not hasattr(subprocess, 'DEVNULL'):
subprocess.DEVNULL = -3
if not hasattr(subprocess, 'run'):
# Reimplement minimal functionality for usage in this file
def _run(args, stdout=None, stderr=None):
from collections import namedtuple
result = namedtuple('CompletedProcess', 'stdout stderr returncode')
devnull = None
if subprocess.DEVNULL in (stdout, stderr):
devnull = open(os.devnull, 'r+')
if stdout == subprocess.DEVNULL:
stdout = devnull
if stderr == subprocess.DEVNULL:
stderr = devnull
proc = subprocess.Popen(args, stdout=stdout, stderr=stderr)
stdout, stderr = proc.communicate()
res = result(stdout, stderr, proc.returncode)
if devnull is not None:
devnull.close()
return res
subprocess.run = _run
# De-fang Python 2's input - we don't eval user input
try:
input = raw_input
except NameError:
pass
def check_docker():
"""Verify that docker is installed and the user has permission to
run docker images.
Returns
-------
-1 Docker can't be found
0 Docker found, but user can't connect to daemon
1 Test run OK
"""
try:
ret = subprocess.run(['docker', 'version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as e:
from errno import ENOENT
if e.errno == ENOENT:
return -1
raise e
if ret.stderr.startswith(b"Cannot connect to the Docker daemon."):
return 0
return 1
def check_image(image):
"""Check whether image is present on local system"""
ret = subprocess.run(['docker', 'images', '-q', image],
stdout=subprocess.PIPE)
return bool(ret.stdout)
def check_memory(image):
"""Check total memory from within a docker container"""
ret = subprocess.run(['docker', 'run', '--rm', '--entrypoint=free',
image, '-m'],
stdout=subprocess.PIPE)
if ret.returncode:
return -1
mem = [line.decode().split()[1]
for line in ret.stdout.splitlines()
if line.startswith(b'Mem:')][0]
return int(mem)
def merge_help(wrapper_help, target_help):
# Matches all flags with up to one nested square bracket
opt_re = re.compile(r'(\[--?[\w-]+(?:[^\[\]]+(?:\[[^\[\]]+\])?)?\])')
# Matches flag name only
flag_re = re.compile(r'\[--?([\w-]+)[ \]]')
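# For example (illustrative), the usage fragment '[-w WORK_DIR]' is captured by
# opt_re as a single option, and flag_re extracts the bare flag name 'w' from it.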
# Normalize to Unix-style line breaks
w_help = wrapper_help.rstrip().replace('\r', '')
t_help = target_help.rstrip().replace('\r', '')
w_usage, w_details = w_help.split('\n\n', 1)
w_groups = w_details.split('\n\n')
t_usage, t_details = t_help.split('\n\n', 1)
t_groups = t_details.split('\n\n')
w_posargs = w_usage.split('\n')[-1].lstrip()
t_posargs = t_usage.split('\n')[-1].lstrip()
w_options = opt_re.findall(w_usage)
w_flags = sum(map(flag_re.findall, w_options), [])
t_options = opt_re.findall(t_usage)
t_flags = sum(map(flag_re.findall, t_options), [])
# The following code makes this assumption
assert w_flags[:2] == ['h', 'version']
assert w_posargs.replace(']', '').replace('[', '') == t_posargs
# Make sure we're not clobbering options we don't mean to
overlap = set(w_flags).intersection(t_flags)
expected_overlap = {
'anat-derivatives',
'fs-license-file',
'fs-subjects-dir',
'h',
'use-plugin',
'version',
'w',
}
assert overlap == expected_overlap, "Clobbering options: {}".format(
', '.join(overlap - expected_overlap))
sections = []
# Construct usage
start = w_usage[:w_usage.index(' [')]
indent = ' ' * len(start)
new_options = sum((
w_options[:2],
[opt for opt, flag in zip(t_options, t_flags) if flag not in overlap],
w_options[2:]
), [])
opt_line_length = 79 - len(start)
length = 0
opt_lines = [start]
for opt in new_options:
opt = ' ' + opt
olen = len(opt)
if length + olen <= opt_line_length:
opt_lines[-1] += opt
length += olen
else:
opt_lines.append(indent + opt)
length = olen
opt_lines.append(indent + ' ' + t_posargs)
sections.append('\n'.join(opt_lines))
# Use target description and positional args
sections.extend(t_groups[:2])
for line in t_groups[2].split('\n')[1:]:
content = line.lstrip().split(',', 1)[0]
if content[1:] not in overlap:
w_groups[2] += '\n' + line
sections.append(w_groups[2])
# All remaining sections, show target then wrapper (skipping duplicates)
sections.extend(t_groups[3:] + w_groups[6:])
return '\n\n'.join(sections)
def is_in_directory(filepath, directory):
return os.path.realpath(filepath).startswith(
os.path.realpath(directory) + os.sep)
def get_parser():
"""Defines the command line interface of the wrapper"""
import argparse
class ToDict(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
d = {}
for kv in values:
k, v = kv.split("=")
d[k] = os.path.abspath(v)
setattr(namespace, self.dest, d)
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False)
# Standard ASLPREP arguments
parser.add_argument('bids_dir', nargs='?', type=os.path.abspath,
default='')
parser.add_argument('output_dir', nargs='?', type=os.path.abspath,
default='')
parser.add_argument('analysis_level', nargs='?', choices=['participant'],
default='participant')
parser.add_argument('-h', '--help', action='store_true',
help="show this help message and exit")
parser.add_argument('--version', action='store_true',
help="show program's version number and exit")
# Allow alternative images (semi-developer)
parser.add_argument('-i', '--image', metavar='IMG', type=str,
default='pennlinc/aslprep:{}'.format(__version__),
help='image name')
# Options for mapping files and directories into container
# Update `expected_overlap` variable in merge_help() when adding to this
g_wrap = parser.add_argument_group(
'Wrapper options',
'Standard options that require mapping files into the container')
g_wrap.add_argument('-w', '--work-dir', action='store', type=os.path.abspath,
help='path where intermediate results should be stored')
g_wrap.add_argument(
'--output-spaces', nargs="*",
help="""\
Standard and non-standard spaces to resample anatomical and functional images to. \
Standard spaces may be specified by the form \
``<TEMPLATE>[:res-<resolution>][:cohort-<label>][...]``, where ``<TEMPLATE>`` is \
a keyword (valid keywords: %s) or path pointing to a user-supplied template, and \
may be followed by optional, colon-separated parameters. \
Non-standard spaces (valid keywords: %s) imply specific orientations and sampling \
grids. \
Important to note, the ``res-*`` modifier does not define the resolution used for \
the spatial normalization.""" % (', '.join('"%s"' % s for s in TF_TEMPLATES),
', '.join(NONSTANDARD_REFERENCES)))
g_wrap.add_argument(
'--fs-license-file', metavar='PATH', type=os.path.abspath,
default=os.getenv('FS_LICENSE', None),
help='Path to FreeSurfer license key file. Get it (for free) by registering'
' at https://surfer.nmr.mgh.harvard.edu/registration.html')
g_wrap.add_argument(
'--fs-subjects-dir', metavar='PATH', type=os.path.abspath,
help='Path to existing FreeSurfer subjects directory to reuse. '
'(default: OUTPUT_DIR/freesurfer)')
g_wrap.add_argument(
'--anat-derivatives', metavar='PATH', type=os.path.abspath,
help='Path to existing sMRIPrep/aslprep-anatomical derivatives to fasttrack '
'the anatomical workflow.')
g_wrap.add_argument(
'--use-plugin', metavar='PATH', action='store', default=None,
type=os.path.abspath, help='nipype plugin configuration file')
# Developer patch/shell options
g_dev = parser.add_argument_group(
'Developer options',
'Tools for testing and debugging aslprep')
g_dev.add_argument('--patch', nargs="+", metavar="PACKAGE=PATH", action=ToDict,
help='local repository to use within container')
g_dev.add_argument('--shell', action='store_true',
help='open shell in image instead of running aslprep')
g_dev.add_argument('--config', metavar='PATH', action='store',
type=os.path.abspath, help='Use custom nipype.cfg file')
g_dev.add_argument('-e', '--env', action='append', nargs=2, metavar=('ENV_VAR', 'value'),
help='Set custom environment variable within container')
g_dev.add_argument('-u', '--user', action='store',
help='Run container as a given user/uid. Additionally, group/gid can be'
'assigned, (i.e., --user <UID>:<GID>)')
g_dev.add_argument('--network', action='store',
help='Run container with a different network driver '
'("none" to simulate no internet connection)')
return parser
def main():
"""Entry point"""
parser = get_parser()
# Capture additional arguments to pass inside container
opts, unknown_args = parser.parse_known_args()
# Set help if no directories set
if (opts.bids_dir, opts.output_dir, opts.version) == ('', '', False):
opts.help = True
# Stop if no docker / docker fails to run
check = check_docker()
if check < 1:
if opts.version:
print('aslprep wrapper {!s}'.format(__version__))
if opts.help:
parser.print_help()
if check == -1:
print("aslprep: Could not find docker command... Is it installed?")
else:
print("aslprep: Make sure you have permission to run 'docker'")
return 1
# For --help or --version, ask before downloading an image
if not check_image(opts.image):
resp = 'Y'
if opts.version:
print('aslprep wrapper {!s}'.format(__version__))
if opts.help:
parser.print_help()
if opts.version or opts.help:
try:
resp = input(MISSING.format(opts.image))
except KeyboardInterrupt:
print()
return 1
if resp not in ('y', 'Y', ''):
return 0
print('Downloading. This may take a while...')
# Warn on low memory allocation
mem_total = check_memory(opts.image)
if mem_total == -1:
print('Could not detect memory capacity of Docker container.\n'
'Do you have permission to run docker?')
return 1
if not (opts.help or opts.version or '--reports-only' in unknown_args) and mem_total < 8000:
print('Warning: <8GB of RAM is available within your Docker '
'environment.\nSome parts of aslprep may fail to complete.')
if '--mem_mb' not in unknown_args:
resp = 'N'
try:
resp = input('Continue anyway? [y/N]')
except KeyboardInterrupt:
print()
return 1
if resp not in ('y', 'Y', ''):
return 0
ret = subprocess.run(['docker', 'version', '--format', "{{.Server.Version}}"],
stdout=subprocess.PIPE)
docker_version = ret.stdout.decode('ascii').strip()
command = ['docker', 'run', '--rm', '-it', '-e',
'DOCKER_VERSION_8395080871=%s' % docker_version]
# Patch working repositories into installed package directories
if opts.patch:
for pkg, repo_path in opts.patch.items():
command.extend(
['-v', '{}:{}/{}:ro'.format(repo_path, PKG_PATH, pkg)]
)
if opts.env:
for envvar in opts.env:
command.extend(['-e', '%s=%s' % tuple(envvar)])
if opts.user:
command.extend(['-u', opts.user])
if opts.fs_license_file:
command.extend([
'-v', '{}:/opt/freesurfer/license.txt:ro'.format(
opts.fs_license_file)])
main_args = []
if opts.bids_dir:
command.extend(['-v', ':'.join((opts.bids_dir, '/data', 'ro'))])
main_args.append('/data')
if opts.output_dir:
if not os.path.exists(opts.output_dir):
# create it before docker does
os.makedirs(opts.output_dir)
command.extend(['-v', ':'.join((opts.output_dir, '/out'))])
main_args.append('/out')
main_args.append(opts.analysis_level)
if opts.fs_subjects_dir:
command.extend(['-v', '{}:/opt/subjects'.format(opts.fs_subjects_dir)])
unknown_args.extend(['--fs-subjects-dir', '/opt/subjects'])
if opts.anat_derivatives:
command.extend(['-v', '{}:/opt/smriprep/subjects'.format(opts.anat_derivatives)])
unknown_args.extend(['--anat-derivatives', '/opt/smriprep/subjects'])
if opts.work_dir:
command.extend(['-v', ':'.join((opts.work_dir, '/scratch'))])
unknown_args.extend(['-w', '/scratch'])
# Check that work_dir is not a child of bids_dir
if opts.work_dir and opts.bids_dir:
if is_in_directory(opts.work_dir, opts.bids_dir):
print(
'The selected working directory is a subdirectory of the input BIDS folder. '
'Please modify the output path.')
return 1
if opts.config:
command.extend(['-v', ':'.join((
opts.config, '/home/aslprep/.nipype/nipype.cfg', 'ro'))])
if opts.use_plugin:
command.extend(['-v', ':'.join((opts.use_plugin, '/tmp/plugin.yml',
'ro'))])
unknown_args.extend(['--use-plugin', '/tmp/plugin.yml'])
if opts.output_spaces:
spaces = []
for space in opts.output_spaces:
if space.split(':')[0] not in (TF_TEMPLATES + NONSTANDARD_REFERENCES):
tpl = os.path.basename(space)
if not tpl.startswith('tpl-'):
raise RuntimeError("Custom template %s requires a `tpl-` prefix" % tpl)
target = '/home/aslprep/.cache/templateflow/' + tpl
command.extend(['-v', ':'.join((os.path.abspath(space), target, 'ro'))])
spaces.append(tpl[4:])
else:
spaces.append(space)
unknown_args.extend(['--output-spaces'] + spaces)
if opts.shell:
command.append('--entrypoint=bash')
if opts.network:
command.append('--network=' + opts.network)
command.append(opts.image)
# Override help and version to describe underlying program
# Respects '-i' flag, so will retrieve information from any image
if opts.help:
command.append('-h')
targethelp = subprocess.check_output(command).decode()
print(merge_help(parser.format_help(), targethelp))
return 0
elif opts.version:
# Get version to be run and exit
command.append('--version')
ret = subprocess.run(command)
return ret.returncode
if not opts.shell:
command.extend(main_args)
command.extend(unknown_args)
print("RUNNING: " + ' '.join(command))
ret = subprocess.run(command)
if ret.returncode:
print("aslprep: Please report errors to {}".format(__bugreports__))
return ret.returncode
if __name__ == '__main__':
sys.exit(main())
| []
| []
| [
"FS_LICENSE"
]
| [] | ["FS_LICENSE"] | python | 1 | 0 | |
quantum/openstack/common/setup.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import os
import re
import subprocess
from setuptools.command import sdist
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = [x for x in l.split(' ')
if x.startswith('<')]
mapping[alias] = canonical_email
return mapping
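# For example, a .mailmap line of the form
#   Jane Doe <[email protected]> <[email protected]>
# yields mapping['<[email protected]>'] = '<[email protected]>', which
# canonicalize_emails() below substitutes throughout the changelog text.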
def canonicalize_emails(changelog, mapping):
"""Takes in a string and an email alias mapping and replaces all
instances of the aliases in the string with their real email.
"""
for alias, email in mapping.iteritems():
changelog = changelog.replace(alias, email)
return changelog
# Get requirements from the first file that exists
def get_reqs_from_files(requirements_files):
reqs_in = []
for requirements_file in requirements_files:
if os.path.exists(requirements_file):
return open(requirements_file, 'r').read().split('\n')
return []
def parse_requirements(requirements_files=['requirements.txt',
'tools/pip-requires']):
requirements = []
for line in get_reqs_from_files(requirements_files):
# For the requirements list, we need to inject only the portion
# after egg= so that distutils knows the package it's looking for
# such as:
# -e git://github.com/openstack/nova/master#egg=nova
if re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
line))
# such as:
# http://github.com/openstack/nova/zipball/master#egg=nova
elif re.match(r'\s*https?:', line):
requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
line))
# -f lines are for index locations, and don't get used here
elif re.match(r'\s*-f\s+', line):
pass
else:
requirements.append(line)
return requirements
def parse_dependency_links(requirements_files=['requirements.txt',
'tools/pip-requires']):
dependency_links = []
# dependency_links inject alternate locations to find packages listed
# in requirements
for line in get_reqs_from_files(requirements_files):
# skip comments and blank lines
if re.match(r'(\s*#)|(\s*$)', line):
continue
# lines with -e or -f need the whole line, minus the flag
if re.match(r'\s*-[ef]\s+', line):
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
# lines that are only urls can go in unmolested
elif re.match(r'\s*https?:', line):
dependency_links.append(line)
return dependency_links
def write_requirements():
venv = os.environ.get('VIRTUAL_ENV', None)
if venv is not None:
with open("requirements.txt", "w") as req_file:
output = subprocess.Popen(["pip", "-E", venv, "freeze", "-l"],
stdout=subprocess.PIPE)
requirements = output.communicate()[0].strip()
req_file.write(requirements)
def _run_shell_command(cmd):
output = subprocess.Popen(["/bin/sh", "-c", cmd],
stdout=subprocess.PIPE)
return output.communicate()[0].strip()
def write_vcsversion(location):
"""Produce a vcsversion dict that mimics the old one produced by bzr.
"""
if os.path.isdir('.git'):
branch_nick_cmd = 'git branch | grep -Ei "\* (.*)" | cut -f2 -d" "'
branch_nick = _run_shell_command(branch_nick_cmd)
revid_cmd = "git rev-parse HEAD"
revid = _run_shell_command(revid_cmd).split()[0]
revno_cmd = "git log --oneline | wc -l"
revno = _run_shell_command(revno_cmd)
with open(location, 'w') as version_file:
version_file.write("""
# This file is automatically generated by setup.py, So don't edit it. :)
version_info = {
'branch_nick': '%s',
'revision_id': '%s',
'revno': %s
}
""" % (branch_nick, revid, revno))
def write_git_changelog():
"""Write a changelog based on the git changelog."""
if os.path.isdir('.git'):
git_log_cmd = 'git log --stat'
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open("ChangeLog", "w") as changelog_file:
changelog_file.write(canonicalize_emails(changelog, mailmap))
def generate_authors():
"""Create AUTHORS file using git commits."""
jenkins_email = '[email protected]'
old_authors = 'AUTHORS.in'
new_authors = 'AUTHORS'
if os.path.isdir('.git'):
# don't include jenkins email address in AUTHORS file
git_log_cmd = ("git log --format='%aN <%aE>' | sort -u | "
"grep -v " + jenkins_email)
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open(new_authors, 'w') as new_authors_fh:
new_authors_fh.write(canonicalize_emails(changelog, mailmap))
if os.path.exists(old_authors):
with open(old_authors, "r") as old_authors_fh:
new_authors_fh.write('\n' + old_authors_fh.read())
def get_cmdclass():
"""Return dict of commands to run from setup.py."""
cmdclass = dict()
class LocalSDist(sdist.sdist):
"""Builds the ChangeLog and Authors files from VC first."""
def run(self):
write_git_changelog()
generate_authors()
# sdist.sdist is an old style class, can't use super()
sdist.sdist.run(self)
cmdclass['sdist'] = LocalSDist
# If Sphinx is installed on the box running setup.py,
# enable setup.py to build the documentation, otherwise,
# just ignore it
try:
from sphinx.setup_command import BuildDoc
class LocalBuildDoc(BuildDoc):
def run(self):
for builder in ['html', 'man']:
self.builder = builder
self.finalize_options()
BuildDoc.run(self)
cmdclass['build_sphinx'] = LocalBuildDoc
except ImportError:
pass
return cmdclass
| []
| []
| [
"VIRTUAL_ENV"
]
| [] | ["VIRTUAL_ENV"] | python | 1 | 0 | |
tools/valgrind/chrome_tests.py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Runs various chrome tests through valgrind_test.py.'''
import glob
import logging
import multiprocessing
import optparse
import os
import stat
import sys
import logging_utils
import path_utils
import common
import valgrind_test
class TestNotFound(Exception): pass
class MultipleGTestFiltersSpecified(Exception): pass
class BuildDirNotFound(Exception): pass
class BuildDirAmbiguous(Exception): pass
class ChromeTests:
SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 300
def __init__(self, options, args, test):
if ':' in test:
(self._test, self._gtest_filter) = test.split(':', 1)
else:
self._test = test
self._gtest_filter = options.gtest_filter
if self._test not in self._test_list:
raise TestNotFound("Unknown test: %s" % test)
if options.gtest_filter and options.gtest_filter != self._gtest_filter:
raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
"and --test %s" % test)
self._options = options
self._args = args
script_dir = path_utils.ScriptDir()
# Compute the top of the tree (the "source dir") from the script dir (where
# this script lives). We assume that the script dir is in tools/valgrind/
# relative to the top of the tree.
self._source_dir = os.path.dirname(os.path.dirname(script_dir))
# since this path is used for string matching, make sure it's always
# an absolute Unix-style path
self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
self._command_preamble = ["--source-dir=%s" % (self._source_dir)]
if not self._options.build_dir:
dirs = [
os.path.join(self._source_dir, "xcodebuild", "Debug"),
os.path.join(self._source_dir, "out", "Debug"),
os.path.join(self._source_dir, "build", "Debug"),
]
build_dir = [d for d in dirs if os.path.isdir(d)]
if len(build_dir) > 1:
raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
"%s\nPlease specify just one "
"using --build-dir" % ", ".join(build_dir))
elif build_dir:
self._options.build_dir = build_dir[0]
else:
self._options.build_dir = None
if self._options.build_dir:
build_dir = os.path.abspath(self._options.build_dir)
self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]
def _EnsureBuildDirFound(self):
if not self._options.build_dir:
raise BuildDirNotFound("Oops, couldn't find a build dir, please "
"specify it manually using --build-dir")
def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
'''Generates the default command array that most tests will use.'''
if exe and common.IsWindows():
exe += '.exe'
cmd = list(self._command_preamble)
# Find all suppressions matching the following pattern:
# tools/valgrind/TOOL/suppressions[_PLATFORM].txt
# and list them with --suppressions= prefix.
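# For instance, running the memcheck tool on Mac would pick up (when present)
# tools/valgrind/memcheck/suppressions.txt and
# tools/valgrind/memcheck/suppressions_mac.txt.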
script_dir = path_utils.ScriptDir()
tool_name = tool.ToolName()
suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
if os.path.exists(suppression_file):
cmd.append("--suppressions=%s" % suppression_file)
# Platform-specific suppression
for platform in common.PlatformNames():
platform_suppression_file = \
os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
if os.path.exists(platform_suppression_file):
cmd.append("--suppressions=%s" % platform_suppression_file)
if self._options.valgrind_tool_flags:
cmd += self._options.valgrind_tool_flags.split(" ")
if self._options.keep_logs:
cmd += ["--keep_logs"]
if valgrind_test_args != None:
for arg in valgrind_test_args:
cmd.append(arg)
if exe:
self._EnsureBuildDirFound()
cmd.append(os.path.join(self._options.build_dir, exe))
# Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
# so we can find the slowpokes.
cmd.append("--gtest_print_time")
# Built-in test launcher for gtest-based executables runs tests using
# multiple process by default. Force the single-process mode back.
cmd.append("--single-process-tests")
if self._options.gtest_repeat:
cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
if self._options.gtest_shuffle:
cmd.append("--gtest_shuffle")
if self._options.brave_new_test_launcher:
cmd.append("--brave-new-test-launcher")
if self._options.test_launcher_bot_mode:
cmd.append("--test-launcher-bot-mode")
return cmd
def Run(self):
''' Runs the test specified by command-line argument --test '''
logging.info("running test %s" % (self._test))
return self._test_list[self._test](self)
def _AppendGtestFilter(self, tool, name, cmd):
'''Append an appropriate --gtest_filter flag to the googletest binary
invocation.
If the user passed his own filter mentioning only one test, just use it.
Otherwise, filter out tests listed in the appropriate gtest_exclude files.
'''
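# Illustrative example: if the exclude files list "FooTest.Bar", the final flag
# becomes --gtest_filter=-FooTest.Bar:FooTest.FLAKY_Bar:FooTest.FAILS_Bar
# (in some order), where the leading '-' turns the list into an exclusion filter.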
if (self._gtest_filter and
":" not in self._gtest_filter and
"?" not in self._gtest_filter and
"*" not in self._gtest_filter):
cmd.append("--gtest_filter=%s" % self._gtest_filter)
return
filters = []
gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")
gtest_filter_files = [
os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
# Use ".gtest.txt" files only for slow tools, as they now contain
# Valgrind- and Dr.Memory-specific filters.
# TODO(glider): rename the files to ".gtest_slow.txt"
if tool.ToolName() in ChromeTests.SLOW_TOOLS:
gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
for platform_suffix in common.PlatformNames():
gtest_filter_files += [
os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
(tool.ToolName(), platform_suffix))]
logging.info("Reading gtest exclude filter files:")
for filename in gtest_filter_files:
# strip the leading absolute path (may be very long on the bot)
# and the following / or \.
readable_filename = filename.replace("\\", "/") # '\' on Windows
readable_filename = readable_filename.replace(self._source_dir, "")[1:]
if not os.path.exists(filename):
logging.info(" \"%s\" - not found" % readable_filename)
continue
logging.info(" \"%s\" - OK" % readable_filename)
f = open(filename, 'r')
for line in f.readlines():
if line.startswith("#") or line.startswith("//") or line.isspace():
continue
line = line.rstrip()
test_prefixes = ["FLAKY", "FAILS"]
for p in test_prefixes:
# Strip prefixes from the test names.
line = line.replace(".%s_" % p, ".")
# Exclude the original test name.
filters.append(line)
if line[-2:] != ".*":
# List all possible prefixes if line doesn't end with ".*".
for p in test_prefixes:
filters.append(line.replace(".", ".%s_" % p))
# Get rid of duplicates.
filters = set(filters)
gtest_filter = self._gtest_filter
if len(filters):
if gtest_filter:
gtest_filter += ":"
if gtest_filter.find("-") < 0:
gtest_filter += "-"
else:
gtest_filter = "-"
gtest_filter += ":".join(filters)
if gtest_filter:
cmd.append("--gtest_filter=%s" % gtest_filter)
@staticmethod
def ShowTests():
test_to_names = {}
for name, test_function in ChromeTests._test_list.iteritems():
test_to_names.setdefault(test_function, []).append(name)
name_to_aliases = {}
for names in test_to_names.itervalues():
names.sort(key=lambda name: len(name))
name_to_aliases[names[0]] = names[1:]
print
print "Available tests:"
print "----------------"
for name, aliases in sorted(name_to_aliases.iteritems()):
if aliases:
print " {} (aka {})".format(name, ', '.join(aliases))
else:
print " {}".format(name)
def SetupLdPath(self, requires_build_dir):
if requires_build_dir:
self._EnsureBuildDirFound()
elif not self._options.build_dir:
return
# Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
if (os.getenv("LD_LIBRARY_PATH")):
os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
self._options.build_dir))
else:
os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool, name, valgrind_test_args)
self._AppendGtestFilter(tool, name, cmd)
cmd.extend(['--test-tiny-timeout=1000'])
if cmd_args:
cmd.extend(cmd_args)
self.SetupLdPath(True)
return tool.Run(cmd, module)
def RunCmdLine(self):
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool, None, self._args)
self.SetupLdPath(False)
return tool.Run(cmd, None)
def TestAccessibility(self):
return self.SimpleTest("accessibility", "accessibility_unittests")
def TestAddressInput(self):
return self.SimpleTest("addressinput", "libaddressinput_unittests")
def TestAngle(self):
return self.SimpleTest("angle", "angle_unittests")
def TestAppList(self):
return self.SimpleTest("app_list", "app_list_unittests")
def TestAsh(self):
return self.SimpleTest("ash", "ash_unittests")
def TestAshShell(self):
return self.SimpleTest("ash_shell", "ash_shell_unittests")
def TestAura(self):
return self.SimpleTest("aura", "aura_unittests")
def TestBase(self):
return self.SimpleTest("base", "base_unittests")
def TestBlinkHeap(self):
return self.SimpleTest("blink_heap", "blink_heap_unittests")
def TestBlinkPlatform(self):
return self.SimpleTest("blink_platform", "blink_platform_unittests")
def TestCacheInvalidation(self):
return self.SimpleTest("cacheinvalidation", "cacheinvalidation_unittests")
def TestCast(self):
return self.SimpleTest("chrome", "cast_unittests")
def TestCC(self):
return self.SimpleTest("cc", "cc_unittests")
def TestChromeApp(self):
return self.SimpleTest("chrome_app", "chrome_app_unittests")
def TestChromeElf(self):
return self.SimpleTest("chrome_elf", "chrome_elf_unittests")
def TestChromeDriver(self):
return self.SimpleTest("chromedriver", "chromedriver_unittests")
def TestChromeOS(self):
return self.SimpleTest("chromeos", "chromeos_unittests")
def TestCloudPrint(self):
return self.SimpleTest("cloud_print", "cloud_print_unittests")
def TestComponents(self):
return self.SimpleTest("components", "components_unittests")
def TestCompositor(self):
return self.SimpleTest("compositor", "compositor_unittests")
def TestContent(self):
return self.SimpleTest("content", "content_unittests")
def TestCourgette(self):
return self.SimpleTest("courgette", "courgette_unittests")
def TestCrypto(self):
return self.SimpleTest("crypto", "crypto_unittests")
def TestDevice(self):
return self.SimpleTest("device", "device_unittests")
def TestDisplay(self):
return self.SimpleTest("display", "display_unittests")
def TestEvents(self):
return self.SimpleTest("events", "events_unittests")
def TestExtensions(self):
return self.SimpleTest("extensions", "extensions_unittests")
def TestFFmpeg(self):
return self.SimpleTest("chrome", "ffmpeg_unittests")
def TestFFmpegRegressions(self):
return self.SimpleTest("chrome", "ffmpeg_regression_tests")
def TestGCM(self):
return self.SimpleTest("gcm", "gcm_unit_tests")
def TestGfx(self):
return self.SimpleTest("gfx", "gfx_unittests")
def TestGin(self):
return self.SimpleTest("gin", "gin_unittests")
def TestGoogleApis(self):
return self.SimpleTest("google_apis", "google_apis_unittests")
def TestGPU(self):
return self.SimpleTest("gpu", "gpu_unittests")
def TestIpc(self):
return self.SimpleTest("ipc", "ipc_tests",
valgrind_test_args=["--trace_children"])
def TestInstallerUtil(self):
return self.SimpleTest("installer_util", "installer_util_unittests")
def TestJingle(self):
return self.SimpleTest("chrome", "jingle_unittests")
def TestKeyboard(self):
return self.SimpleTest("keyboard", "keyboard_unittests")
def TestMedia(self):
return self.SimpleTest("chrome", "media_unittests")
def TestMessageCenter(self):
return self.SimpleTest("message_center", "message_center_unittests")
def TestMojoAppsJS(self):
return self.SimpleTest("mojo_apps_js", "mojo_apps_js_unittests")
def TestMojoCommon(self):
return self.SimpleTest("mojo_common", "mojo_common_unittests")
def TestMojoJS(self):
return self.SimpleTest("mojo_js", "mojo_js_unittests")
def TestMojoPublicBindings(self):
return self.SimpleTest("mojo_public_bindings",
"mojo_public_bindings_unittests")
def TestMojoPublicEnv(self):
return self.SimpleTest("mojo_public_env",
"mojo_public_environment_unittests")
def TestMojoPublicSystem(self):
return self.SimpleTest("mojo_public_system",
"mojo_public_system_unittests")
def TestMojoPublicSysPerf(self):
return self.SimpleTest("mojo_public_sysperf",
"mojo_public_system_perftests")
def TestMojoPublicUtility(self):
return self.SimpleTest("mojo_public_utility",
"mojo_public_utility_unittests")
def TestMojoServiceManager(self):
return self.SimpleTest("mojo_service_manager",
"mojo_service_manager_unittests")
def TestMojoSystem(self):
return self.SimpleTest("mojo_system", "mojo_system_unittests")
def TestMojoViewManager(self):
return self.SimpleTest("mojo_view_manager", "mojo_view_manager_unittests")
def TestMojoViewManagerLib(self):
return self.SimpleTest("mojo_view_manager_lib",
"mojo_view_manager_lib_unittests")
def TestNet(self):
return self.SimpleTest("net", "net_unittests")
def TestNetPerf(self):
return self.SimpleTest("net", "net_perftests")
def TestPhoneNumber(self):
return self.SimpleTest("phonenumber", "libphonenumber_unittests")
def TestPPAPI(self):
return self.SimpleTest("chrome", "ppapi_unittests")
def TestPrinting(self):
return self.SimpleTest("chrome", "printing_unittests")
def TestRemoting(self):
return self.SimpleTest("chrome", "remoting_unittests",
cmd_args=[
"--ui-test-action-timeout=60000",
"--ui-test-action-max-timeout=150000"])
def TestSql(self):
return self.SimpleTest("chrome", "sql_unittests")
def TestSync(self):
return self.SimpleTest("chrome", "sync_unit_tests")
def TestLinuxSandbox(self):
return self.SimpleTest("sandbox", "sandbox_linux_unittests")
def TestUnit(self):
# http://crbug.com/51716
# Disabling all unit tests
# Problems reappeared after r119922
if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
logging.warning("unit_tests are disabled for memcheck on MacOS.")
return 0
return self.SimpleTest("chrome", "unit_tests")
def TestUIUnit(self):
return self.SimpleTest("chrome", "ui_unittests")
def TestURL(self):
return self.SimpleTest("chrome", "url_unittests")
def TestViews(self):
return self.SimpleTest("views", "views_unittests")
# Valgrind timeouts are in seconds.
UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
# UI test timeouts are in milliseconds.
UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
"--ui-test-action-max-timeout=150000",
"--no-sandbox"]
# TODO(thestig) fine-tune these values.
# Valgrind timeouts are in seconds.
BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
# Browser test timeouts are in milliseconds.
BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
"--ui-test-action-max-timeout=800000",
"--no-sandbox"]
def TestBrowser(self):
return self.SimpleTest("chrome", "browser_tests",
valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
cmd_args=self.BROWSER_TEST_ARGS)
def TestContentBrowser(self):
return self.SimpleTest("content", "content_browsertests",
valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
cmd_args=self.BROWSER_TEST_ARGS)
def TestInteractiveUI(self):
return self.SimpleTest("chrome", "interactive_ui_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=self.UI_TEST_ARGS)
def TestSafeBrowsing(self):
return self.SimpleTest("chrome", "safe_browsing_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=(["--ui-test-action-max-timeout=450000"]))
def TestSyncIntegration(self):
return self.SimpleTest("chrome", "sync_integration_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=(["--ui-test-action-max-timeout=450000"]))
def TestLayoutChunk(self, chunk_num, chunk_size):
# Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
# list of tests. Wrap around to beginning of list at end.
# If chunk_size is zero, run all tests in the list once.
# If a text file is given as argument, it is used as the list of tests.
#
# Build the ginormous commandline in 'cmd'.
# It's going to be roughly
# python valgrind_test.py ... python run_webkit_tests.py ...
# but we'll use the --indirect flag to valgrind_test.py
# to avoid valgrinding python.
# Start by building the valgrind_test.py commandline.
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool)
cmd.append("--trace_children")
cmd.append("--indirect_webkit_layout")
cmd.append("--ignore_exit_code")
# Now build script_cmd, the run_webkits_tests.py commandline
# Store each chunk in its own directory so that we can find the data later
chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
out_dir = os.path.join(path_utils.ScriptDir(), "latest")
out_dir = os.path.join(out_dir, chunk_dir)
if os.path.exists(out_dir):
old_files = glob.glob(os.path.join(out_dir, "*.txt"))
for f in old_files:
os.remove(f)
else:
os.makedirs(out_dir)
script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
"run_webkit_tests.py")
# http://crbug.com/260627: After the switch to content_shell from DRT, each
# test now brings up 3 processes. Under Valgrind, they become memory bound
# and can eventually OOM if we don't reduce the total count.
# It'd be nice if content_shell automatically throttled the startup of new
# tests if we're low on memory.
jobs = max(1, int(multiprocessing.cpu_count() * 0.3))
script_cmd = ["python", script, "-v",
# run a separate DumpRenderTree for each test
"--batch-size=1",
"--fully-parallel",
"--child-processes=%d" % jobs,
"--time-out-ms=800000",
"--no-retry-failures", # retrying takes too much time
# http://crbug.com/176908: Don't launch a browser when done.
"--no-show-results",
"--nocheck-sys-deps"]
# Pass build mode to run_webkit_tests.py. We aren't passed it directly,
# so parse it out of build_dir. run_webkit_tests.py can only handle
# the two values "Release" and "Debug".
# TODO(Hercules): unify how all our scripts pass around build mode
# (--mode / --target / --build-dir / --debug)
if self._options.build_dir:
build_root, mode = os.path.split(self._options.build_dir)
script_cmd.extend(["--build-directory", build_root, "--target", mode])
if (chunk_size > 0):
script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
if len(self._args):
# if the arg is a txt file, then treat it as a list of tests
if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
script_cmd.append("--test-list=%s" % self._args[0])
else:
script_cmd.extend(self._args)
self._AppendGtestFilter(tool, "layout", script_cmd)
# Now run script_cmd with the wrapper in cmd
cmd.extend(["--"])
cmd.extend(script_cmd)
# Layout tests oftentimes fail quickly, but the buildbot remains green.
# Detect this situation when running with the default chunk size.
if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
min_runtime_in_seconds=120
else:
min_runtime_in_seconds=0
ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
return ret
def TestLayout(self):
# A "chunk file" is maintained in the local directory so that each test
# runs a slice of the layout tests of size chunk_size that increments with
# each run. Since tests can be added and removed from the layout tests at
# any time, this is not going to give exact coverage, but it will allow us
# to continuously run small slices of the layout tests under valgrind rather
# than having to run all of them in one shot.
chunk_size = self._options.num_tests
if (chunk_size == 0):
return self.TestLayoutChunk(0, 0)
chunk_num = 0
chunk_file = os.path.join("valgrind_layout_chunk.txt")
logging.info("Reading state from " + chunk_file)
try:
f = open(chunk_file)
if f:
chunk_str = f.read()
if len(chunk_str):
chunk_num = int(chunk_str)
# This should be enough so that we have a couple of complete runs
# of test data stored in the archive (although note that when we loop,
# we are almost guaranteed not to be at the end of the test list)
if chunk_num > 10000:
chunk_num = 0
f.close()
except IOError, (errno, strerror):
logging.error("error reading from file %s (%d, %s)" % (chunk_file,
errno, strerror))
# Save the new chunk size before running the tests. Otherwise if a
# particular chunk hangs the bot, the chunk number will never get
# incremented and the bot will be wedged.
logging.info("Saving state to " + chunk_file)
try:
f = open(chunk_file, "w")
chunk_num += 1
f.write("%d" % chunk_num)
f.close()
except IOError, (errno, strerror):
logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
strerror))
# Since we're running small chunks of the layout tests, it's important to
# mark the ones that have errors in them. These won't be visible in the
# summary list for long, but will be useful for someone reviewing this bot.
return self.TestLayoutChunk(chunk_num, chunk_size)
# The known list of tests.
# Recognise the original abbreviations as well as full executable names.
_test_list = {
"cmdline" : RunCmdLine,
"addressinput": TestAddressInput,
"libaddressinput_unittests": TestAddressInput,
"accessibility": TestAccessibility,
"angle": TestAngle, "angle_unittests": TestAngle,
"app_list": TestAppList, "app_list_unittests": TestAppList,
"ash": TestAsh, "ash_unittests": TestAsh,
"ash_shell": TestAshShell, "ash_shell_unittests": TestAshShell,
"aura": TestAura, "aura_unittests": TestAura,
"base": TestBase, "base_unittests": TestBase,
"blink_heap": TestBlinkHeap,
"blink_platform": TestBlinkPlatform,
"browser": TestBrowser, "browser_tests": TestBrowser,
"cacheinvalidation": TestCacheInvalidation,
"cacheinvalidation_unittests": TestCacheInvalidation,
"cast": TestCast, "cast_unittests": TestCast,
"cc": TestCC, "cc_unittests": TestCC,
"chrome_app": TestChromeApp,
"chrome_elf": TestChromeElf,
"chromedriver": TestChromeDriver,
"chromeos": TestChromeOS, "chromeos_unittests": TestChromeOS,
"cloud_print": TestCloudPrint,
"cloud_print_unittests": TestCloudPrint,
"components": TestComponents,"components_unittests": TestComponents,
"compositor": TestCompositor,"compositor_unittests": TestCompositor,
"content": TestContent, "content_unittests": TestContent,
"content_browsertests": TestContentBrowser,
"courgette": TestCourgette, "courgette_unittests": TestCourgette,
"crypto": TestCrypto, "crypto_unittests": TestCrypto,
"device": TestDevice, "device_unittests": TestDevice,
"display": TestDisplay, "display_unittests": TestDisplay,
"events": TestEvents, "events_unittests": TestEvents,
"extensions": TestExtensions,
"ffmpeg": TestFFmpeg, "ffmpeg_unittests": TestFFmpeg,
"ffmpeg_regression_tests": TestFFmpegRegressions,
"gcm": TestGCM, "gcm_unit_tests": TestGCM,
"gin": TestGin, "gin_unittests": TestGin,
"gfx": TestGfx, "gfx_unittests": TestGfx,
"google_apis": TestGoogleApis,
"gpu": TestGPU, "gpu_unittests": TestGPU,
"ipc": TestIpc, "ipc_tests": TestIpc,
"installer_util": TestInstallerUtil,
"interactive_ui": TestInteractiveUI,
"jingle": TestJingle, "jingle_unittests": TestJingle,
"keyboard": TestKeyboard, "keyboard_unittests": TestKeyboard,
"layout": TestLayout, "layout_tests": TestLayout,
"media": TestMedia, "media_unittests": TestMedia,
"message_center": TestMessageCenter,
"message_center_unittests" : TestMessageCenter,
"mojo_apps_js": TestMojoAppsJS,
"mojo_common": TestMojoCommon,
"mojo_js": TestMojoJS,
"mojo_system": TestMojoSystem,
"mojo_public_system": TestMojoPublicSystem,
"mojo_public_utility": TestMojoPublicUtility,
"mojo_public_bindings": TestMojoPublicBindings,
"mojo_public_env": TestMojoPublicEnv,
"mojo_public_sysperf": TestMojoPublicSysPerf,
"mojo_service_manager": TestMojoServiceManager,
"mojo_view_manager": TestMojoViewManager,
"mojo_view_manager_lib": TestMojoViewManagerLib,
"net": TestNet, "net_unittests": TestNet,
"net_perf": TestNetPerf, "net_perftests": TestNetPerf,
"phonenumber": TestPhoneNumber,
"libphonenumber_unittests": TestPhoneNumber,
"ppapi": TestPPAPI, "ppapi_unittests": TestPPAPI,
"printing": TestPrinting, "printing_unittests": TestPrinting,
"remoting": TestRemoting, "remoting_unittests": TestRemoting,
"safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
"sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
"sql": TestSql, "sql_unittests": TestSql,
"sync": TestSync, "sync_unit_tests": TestSync,
"sync_integration_tests": TestSyncIntegration,
"sync_integration": TestSyncIntegration,
"ui_unit": TestUIUnit, "ui_unittests": TestUIUnit,
"unit": TestUnit, "unit_tests": TestUnit,
"url": TestURL, "url_unittests": TestURL,
"views": TestViews, "views_unittests": TestViews,
"webkit": TestLayout,
}
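# Parse command-line options, fold --target into --build-dir, and run each
# requested test under the configured memory tool.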
def _main():
parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
"[-t <test> ...]")
parser.add_option("--help-tests", dest="help_tests", action="store_true",
default=False, help="List all available tests")
parser.add_option("-b", "--build-dir",
help="the location of the compiler output")
parser.add_option("--target", help="Debug or Release")
parser.add_option("-t", "--test", action="append", default=[],
help="which test to run, supports test:gtest_filter format "
"as well.")
parser.add_option("--baseline", action="store_true", default=False,
help="generate baseline data instead of validating")
parser.add_option("--gtest_filter",
help="additional arguments to --gtest_filter")
parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
parser.add_option("--gtest_shuffle", action="store_true", default=False,
help="Randomize tests' orders on every iteration.")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="verbose output - enable debug log messages")
parser.add_option("--tool", dest="valgrind_tool", default="memcheck",
help="specify a valgrind tool to run the tests under")
parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
help="specify custom flags for the selected valgrind tool")
parser.add_option("--keep_logs", action="store_true", default=False,
help="store memory tool logs in the <tool>.logs directory "
"instead of /tmp.\nThis can be useful for tool "
"developers/maintainers.\nPlease note that the <tool>"
".logs directory will be clobbered on tool startup.")
parser.add_option("-n", "--num_tests", type="int",
default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
help="for layout tests: # of subtests per run. 0 for all.")
# TODO(thestig) Remove this if we can.
parser.add_option("--gtest_color", dest="gtest_color", default="no",
help="dummy compatibility flag for sharding_supervisor.")
parser.add_option("--brave-new-test-launcher", action="store_true",
help="run the tests with --brave-new-test-launcher")
parser.add_option("--test-launcher-bot-mode", action="store_true",
help="run the tests with --test-launcher-bot-mode")
options, args = parser.parse_args()
# Bake target into build_dir.
if options.target and options.build_dir:
assert (options.target !=
os.path.basename(os.path.dirname(options.build_dir)))
options.build_dir = os.path.join(os.path.abspath(options.build_dir),
options.target)
if options.verbose:
logging_utils.config_root(logging.DEBUG)
else:
logging_utils.config_root()
if options.help_tests:
ChromeTests.ShowTests()
return 0
if not options.test:
parser.error("--test not specified")
if len(options.test) != 1 and options.gtest_filter:
parser.error("--gtest_filter and multiple tests don't make sense together")
for t in options.test:
tests = ChromeTests(options, args, t)
ret = tests.Run()
if ret: return ret
return 0
if __name__ == "__main__":
sys.exit(_main())
| []
| []
| [
"LD_LIBRARY_PATH"
]
| [] | ["LD_LIBRARY_PATH"] | python | 1 | 0 | |
d4s2/settings_base.py | """
Base settings for d4s2 project.
Deployments should write a minimal settings.py that imports * from this file
See settings.template for an example
"""
import os
import datetime
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'd4s2_api',
'd4s2_api_v1',
'd4s2_api_v2',
'd4s2_api_v3',
'gcb_web_auth',
'django_filters',
'switchboard',
'crispy_forms',
'ownership',
'simple_history',
'corsheaders',
'background_task',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'simple_history.middleware.HistoryRequestMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
ROOT_URLCONF = 'd4s2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'd4s2.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'gcb_web_auth.backends.dukeds.DukeDSAuthBackend',
'gcb_web_auth.backends.oauth.OAuth2Backend',
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# Email
# For development we'll just use the console backend.
# It writes emails to the console and does not send them
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_FROM_ADDRESS = 'noreply@localhost'
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
'DEFAULT_AUTHENTICATION_CLASSES': (
'gcb_web_auth.dukeds_auth.DukeDSTokenAuthentication', # Allows users to authenticate with a DukeDS token
'rest_framework_jwt.authentication.JSONWebTokenAuthentication', # Allows users to authenticate with a JWT
'rest_framework.authentication.TokenAuthentication', # Allows users to authenticate with D4S2 rest_framework authtoken
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
'drf_ember_backend.renderers.JSONRootObjectRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'drf_ember_backend.parsers.JSONRootObjectParser',
),
'EXCEPTION_HANDLER': 'drf_ember_backend.exception_handlers.switching_exception_handler',
}
CORS_ORIGIN_WHITELIST = (
'localhost:4200',
'127.0.0.1:4200',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),
},
},
}
# Configure djangorestframework-jwt
JWT_AUTH = {
# Allow token refresh
'JWT_ALLOW_REFRESH': True,
'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=7200),
}
USERNAME_EMAIL_HOST = os.getenv('D4S2_USERNAME_EMAIL_HOST')
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
DIRECTORY_SERVICE_TOKEN = os.getenv('D4S2_DIRECTORY_SERVICE_TOKEN')
DIRECTORY_SERVICE_URL = os.getenv('D4S2_DIRECTORY_SERVICE_URL')
| []
| []
| [
"D4S2_DIRECTORY_SERVICE_TOKEN",
"D4S2_DIRECTORY_SERVICE_URL",
"DJANGO_LOG_LEVEL",
"D4S2_USERNAME_EMAIL_HOST"
]
| [] | ["D4S2_DIRECTORY_SERVICE_TOKEN", "D4S2_DIRECTORY_SERVICE_URL", "DJANGO_LOG_LEVEL", "D4S2_USERNAME_EMAIL_HOST"] | python | 4 | 0 | |
cmd/bulk_data_gen/main.go | // bulk_data_gen generates time series data from pre-specified use cases.
//
// Supported formats:
// InfluxDB bulk load format
// ElasticSearch bulk load format
// Cassandra query format
// Mongo custom format
// OpenTSDB bulk HTTP format
//
// Supported use cases:
// Devops: scale_var is the number of hosts to simulate, with log messages
// every 10 seconds.
package main
import (
"bufio"
"flag"
"fmt"
"io"
"log"
"math/rand"
"os"
"strings"
"time"
)
// Output data format choices:
var formatChoices = []string{"influx-bulk", "es-bulk", "cassandra", "mongo", "opentsdb"}
// Use case choices:
var useCaseChoices = []string{"devops", "iot"}
// Program option vars:
var (
daemonUrl string
dbName string
format string
useCase string
scaleVar int64
timestampStartStr string
timestampEndStr string
timestampStart time.Time
timestampEnd time.Time
interleavedGenerationGroupID uint
interleavedGenerationGroups uint
seed int64
debug int
)
// Parse args:
func init() {
flag.StringVar(&format, "format", formatChoices[0], fmt.Sprintf("Format to emit. (choices: %s)", strings.Join(formatChoices, ", ")))
flag.StringVar(&useCase, "use-case", useCaseChoices[0], "Use case to model. (choices: devops, iot)")
flag.Int64Var(&scaleVar, "scale-var", 1, "Scaling variable specific to the use case.")
flag.StringVar(×tampStartStr, "timestamp-start", "2016-01-01T00:00:00Z", "Beginning timestamp (RFC3339).")
flag.StringVar(×tampEndStr, "timestamp-end", "2016-01-01T06:00:00Z", "Ending timestamp (RFC3339).")
flag.Int64Var(&seed, "seed", 0, "PRNG seed (default, or 0, uses the current timestamp).")
flag.IntVar(&debug, "debug", 0, "Debug printing (choices: 0, 1, 2) (default 0).")
flag.UintVar(&interleavedGenerationGroupID, "interleaved-generation-group-id", 0, "Group (0-indexed) to perform round-robin serialization within. Use this to scale up data generation to multiple processes.")
flag.UintVar(&interleavedGenerationGroups, "interleaved-generation-groups", 1, "The number of round-robin serialization groups. Use this to scale up data generation to multiple processes.")
flag.Parse()
if !(interleavedGenerationGroupID < interleavedGenerationGroups) {
log.Fatal("incorrect interleaved groups configuration")
}
validFormat := false
for _, s := range formatChoices {
if s == format {
validFormat = true
break
}
}
if !validFormat {
log.Fatal("invalid format specifier")
}
// the default seed is the current timestamp:
if seed == 0 {
seed = int64(time.Now().Nanosecond())
}
fmt.Fprintf(os.Stderr, "using random seed %d\n", seed)
// Parse timestamps:
var err error
timestampStart, err = time.Parse(time.RFC3339, timestampStartStr)
if err != nil {
log.Fatal(err)
}
timestampStart = timestampStart.UTC()
timestampEnd, err = time.Parse(time.RFC3339, timestampEndStr)
if err != nil {
log.Fatal(err)
}
timestampEnd = timestampEnd.UTC()
}
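// main seeds the PRNG, builds the chosen simulator, selects a serializer for the
// requested output format, and streams the generated points to stdout, skipping
// points that belong to other interleaved generation groups.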
func main() {
rand.Seed(seed)
out := bufio.NewWriterSize(os.Stdout, 4<<20)
defer out.Flush()
var sim Simulator
switch useCase {
case "devops":
cfg := &DevopsSimulatorConfig{
Start: timestampStart,
End: timestampEnd,
HostCount: scaleVar,
}
sim = cfg.ToSimulator()
default:
panic("unreachable")
}
var serializer func(*Point, io.Writer) error
switch format {
case "influx-bulk":
serializer = (*Point).SerializeInfluxBulk
case "es-bulk":
serializer = (*Point).SerializeESBulk
case "cassandra":
serializer = (*Point).SerializeCassandra
case "mongo":
serializer = (*Point).SerializeMongo
case "opentsdb":
serializer = (*Point).SerializeOpenTSDBBulk
default:
panic("unreachable")
}
var currentInterleavedGroup uint = 0
point := MakeUsablePoint()
for !sim.Finished() {
sim.Next(point)
// in the default case this is always true
if currentInterleavedGroup == interleavedGenerationGroupID {
//println("printing")
err := serializer(point, out)
if err != nil {
log.Fatal(err)
}
}
point.Reset()
currentInterleavedGroup++
if currentInterleavedGroup == interleavedGenerationGroups {
currentInterleavedGroup = 0
}
}
err := out.Flush()
if err != nil {
log.Fatal(err.Error())
}
}
| []
| []
| []
| [] | [] | go | null | null | null |
pkg/chaosdaemon/jvm_server.go | // Copyright 2021 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package chaosdaemon
import (
"context"
"fmt"
"os"
"strings"
"github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
"github.com/chaos-mesh/chaos-mesh/pkg/bpm"
pb "github.com/chaos-mesh/chaos-mesh/pkg/chaosdaemon/pb"
"github.com/chaos-mesh/chaos-mesh/pkg/chaosdaemon/util"
)
const (
bmInstallCommand = "bminstall.sh -b -Dorg.jboss.byteman.transform.all -Dorg.jboss.byteman.verbose -Dorg.jboss.byteman.compileToBytecode -p %d %d"
bmSubmitCommand = "bmsubmit.sh -p %d -%s %s"
)
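// InstallJVMRules attaches the Byteman agent to the java process inside the target
// container and submits the requested Byteman rule to it.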
func (s *DaemonServer) InstallJVMRules(ctx context.Context,
req *pb.InstallJVMRulesRequest) (*empty.Empty, error) {
log := s.getLoggerFromContext(ctx)
log.Info("InstallJVMRules", "request", req)
pid, err := s.crClient.GetPidFromContainerID(ctx, req.ContainerId)
if err != nil {
log.Error(err, "GetPidFromContainerID")
return nil, err
}
containerPids := []uint32{pid}
childPids, err := util.GetChildProcesses(pid, log)
if err != nil {
log.Error(err, "GetChildProcesses")
}
containerPids = append(containerPids, childPids...)
for _, containerPid := range containerPids {
name, err := util.ReadCommName(int(containerPid))
if err != nil {
log.Error(err, "ReadCommName")
continue
}
if name == "java\n" {
pid = containerPid
break
}
}
bytemanHome := os.Getenv("BYTEMAN_HOME")
if len(bytemanHome) == 0 {
return nil, errors.New("environment variable BYTEMAN_HOME not set")
}
// copy agent.jar to container's namespace
if req.EnterNS {
processBuilder := bpm.DefaultProcessBuilder("sh", "-c", fmt.Sprintf("mkdir -p %s/lib/", bytemanHome)).SetContext(ctx).SetNS(pid, bpm.MountNS)
output, err := processBuilder.Build(ctx).CombinedOutput()
if err != nil {
return nil, err
}
if len(output) > 0 {
log.Info("mkdir", "output", string(output))
}
agentFile, err := os.Open(fmt.Sprintf("%s/lib/byteman.jar", bytemanHome))
if err != nil {
return nil, err
}
processBuilder = bpm.DefaultProcessBuilder("sh", "-c", "cat > /usr/local/byteman/lib/byteman.jar").SetContext(ctx)
processBuilder = processBuilder.SetNS(pid, bpm.MountNS).SetStdin(agentFile)
output, err = processBuilder.Build(ctx).CombinedOutput()
if err != nil {
return nil, err
}
if len(output) > 0 {
log.Info("copy agent.jar", "output", string(output))
}
}
bmInstallCmd := fmt.Sprintf(bmInstallCommand, req.Port, pid)
processBuilder := bpm.DefaultProcessBuilder("sh", "-c", bmInstallCmd).SetContext(ctx)
if req.EnterNS {
processBuilder = processBuilder.EnableLocalMnt()
}
cmd := processBuilder.Build(ctx)
output, err := cmd.CombinedOutput()
if err != nil {
// this error occurs when the agent is installed more than once; ignore it and continue to submit the rule
errMsg1 := "Agent JAR loaded but agent failed to initialize"
// these two errors occur when the java version is less than or equal to 1.8, for reasons that are not yet understood,
// but the agent still installs successfully despite them, so just ignore them for now.
// TODO: Investigate the cause of these two errors
errMsg2 := "Provider sun.tools.attach.LinuxAttachProvider not found"
errMsg3 := "install java.io.IOException: Non-numeric value found"
// this error is caused by the different attach result codes in different java versions. In fact, the agent has attached successfully, so just ignore it here.
// refer to https://stackoverflow.com/questions/54340438/virtualmachine-attach-throws-com-sun-tools-attach-agentloadexception-0-when-usi/54454418#54454418
errMsg4 := "install com.sun.tools.attach.AgentLoadException"
if !strings.Contains(string(output), errMsg1) && !strings.Contains(string(output), errMsg2) &&
!strings.Contains(string(output), errMsg3) && !strings.Contains(string(output), errMsg4) {
log.Error(err, string(output))
return nil, errors.Wrap(err, string(output))
}
log.Info("exec command", "cmd", cmd.String(), "output", string(output), "error", err.Error())
}
// submit rules
filename, err := writeDataIntoFile(req.Rule, "rule.btm")
if err != nil {
return nil, err
}
bmSubmitCmd := fmt.Sprintf(bmSubmitCommand, req.Port, "l", filename)
processBuilder = bpm.DefaultProcessBuilder("sh", "-c", bmSubmitCmd).SetContext(ctx)
if req.EnterNS {
processBuilder = processBuilder.SetNS(pid, bpm.NetNS)
}
output, err = processBuilder.Build(ctx).CombinedOutput()
if err != nil {
log.Error(err, string(output))
return nil, errors.Wrap(err, string(output))
}
if len(output) > 0 {
log.Info("submit rules", "output", string(output))
}
return &empty.Empty{}, nil
}
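// UninstallJVMRules unsubmits a previously installed Byteman rule from the java
// process inside the target container.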
func (s *DaemonServer) UninstallJVMRules(ctx context.Context,
req *pb.UninstallJVMRulesRequest) (*empty.Empty, error) {
log := s.getLoggerFromContext(ctx)
log.Info("UninstallJVMRules", "request", req)
pid, err := s.crClient.GetPidFromContainerID(ctx, req.ContainerId)
if err != nil {
log.Error(err, "GetPidFromContainerID")
return nil, err
}
filename, err := writeDataIntoFile(req.Rule, "rule.btm")
if err != nil {
return nil, err
}
log.Info("create btm file", "file", filename)
bmSubmitCmd := fmt.Sprintf(bmSubmitCommand, req.Port, "u", filename)
processBuilder := bpm.DefaultProcessBuilder("sh", "-c", bmSubmitCmd).SetContext(ctx)
if req.EnterNS {
processBuilder = processBuilder.SetNS(pid, bpm.NetNS)
}
output, err := processBuilder.Build(ctx).CombinedOutput()
if err != nil {
log.Error(err, string(output))
if strings.Contains(string(output), "No rule scripts to remove") {
return &empty.Empty{}, nil
}
return nil, errors.Wrap(err, string(output))
}
if len(output) > 0 {
log.Info(string(output))
}
return &empty.Empty{}, nil
}
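// writeDataIntoFile writes data into a fresh temporary file (named after filename)
// and returns the path of that file.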
func writeDataIntoFile(data string, filename string) (string, error) {
tmpfile, err := os.CreateTemp("", filename)
if err != nil {
return "", err
}
if _, err := tmpfile.WriteString(data); err != nil {
return "", err
}
if err := tmpfile.Close(); err != nil {
return "", err
}
return tmpfile.Name(), err
}
| [
"\"BYTEMAN_HOME\""
]
| []
| [
"BYTEMAN_HOME"
]
| [] | ["BYTEMAN_HOME"] | go | 1 | 0 | |
examples/iothub_recv.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
An example to show receiving events from an IoT Hub partition.
"""
from azure import eventhub
from azure.eventhub import EventData, EventHubClient, Offset
import logging
import os
logger = logging.getLogger('azure.eventhub')
iot_connection_str = os.environ['IOTHUB_CONNECTION_STR']
client = EventHubClient.from_iothub_connection_string(iot_connection_str, debug=True)
receiver = client.add_receiver("$default", "0", operation='/messages/events')
try:
client.run()
eh_info = client.get_eventhub_info()
print(eh_info)
received = receiver.receive(timeout=5)
print(received)
finally:
client.stop()
| []
| []
| [
"IOTHUB_CONNECTION_STR"
]
| [] | ["IOTHUB_CONNECTION_STR"] | python | 1 | 0 | |
booking/wsgi.py | import os
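# WSGI config for the booking project: exposes the WSGI callable as a module-level variable named ``application``.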
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'booking.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
internal/runtime/interactive.go | package runtime
import (
"fmt"
"os"
"path"
"strings"
"github.com/chzyer/readline"
"github.com/npenkov/gcqlsh/internal/action"
"github.com/npenkov/gcqlsh/internal/db"
)
const ProgramPromptPrefix = "gcqlsh"
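// RunInteractiveSession starts a readline-driven CQL prompt with tab completion and
// command history, executing each ';'-terminated statement against the given keyspace
// session until the user exits or a command requests termination.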
func RunInteractiveSession(cks *db.CQLKeyspaceSession) error {
var completer = readline.NewPrefixCompleter(
readline.PcItem("use",
readline.PcItemDynamic(action.ListKeyspaces(cks)),
),
readline.PcItem("select",
readline.PcItem("*",
readline.PcItem("from",
readline.PcItemDynamic(action.ListTables(cks)),
),
),
),
readline.PcItem("insert",
readline.PcItem("into",
readline.PcItemDynamic(action.ListTables(cks)),
),
),
readline.PcItem("delete",
readline.PcItem("from",
readline.PcItemDynamic(action.ListTables(cks),
readline.PcItem(";"),
readline.PcItemDynamic(action.ListColumns(cks, "delete from"),
readline.PcItem("="),
),
),
),
),
readline.PcItem("update",
readline.PcItemDynamic(action.ListTables(cks),
readline.PcItem("set",
readline.PcItemDynamic(action.ListColumns(cks, "update"),
readline.PcItem("="),
),
),
),
),
readline.PcItem("desc",
readline.PcItem("keyspaces",
readline.PcItem(";"),
),
readline.PcItem("keyspace",
readline.PcItemDynamic(action.ListKeyspaces(cks),
readline.PcItem(";"),
),
),
readline.PcItem("tables",
readline.PcItem(";"),
),
readline.PcItem("table",
readline.PcItemDynamic(action.ListTables(cks),
readline.PcItem(";"),
),
),
),
readline.PcItem("tracing",
readline.PcItem("on",
readline.PcItem(";"),
),
readline.PcItem("off",
readline.PcItem(";"),
),
),
)
home := os.Getenv("HOME")
config := &readline.Config{
Prompt: fmt.Sprintf("%s:%s> ", ProgramPromptPrefix, cks.ActiveKeyspace),
HistoryFile: path.Join(home, ".gcqlsh-history"),
DisableAutoSaveHistory: true,
AutoComplete: completer,
InterruptPrompt: "^C",
}
rl, err := readline.NewEx(config)
if err != nil {
return err
}
rl.SetPrompt(fmt.Sprintf("%s:%s> ", ProgramPromptPrefix, cks.ActiveKeyspace))
defer rl.Close()
var cmds []string
for {
line, err := rl.Readline()
if err != nil {
break
}
line = strings.TrimSpace(line)
if len(line) == 0 {
continue
}
cmds = append(cmds, line)
if !strings.HasSuffix(line, ";") {
rl.SetPrompt(">>> ")
continue
}
cmd := strings.Join(cmds, " ")
cmds = cmds[:0]
breakLoop, _, err := action.ProcessCommand(cmd, cks)
if err != nil {
fmt.Println(err)
}
if breakLoop {
break
}
rl.SetPrompt(fmt.Sprintf("%s:%s> ", ProgramPromptPrefix, cks.ActiveKeyspace))
rl.SaveHistory(cmd)
}
return nil
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
tests/test-recipes/metadata/python_build_run/run_test.py | import os
import json
import glob
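# Check that the installed package's conda-meta entry lists exactly one dependency,
# a 'python' spec.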
def main():
prefix = os.environ['PREFIX']
info_files = glob.glob(os.path.join(prefix, 'conda-meta',
'conda-build-test-python-build-run-1.0-py*0.json'))
assert len(info_files) == 1
info_file = info_files[0]
with open(info_file, 'r') as fh:
info = json.load(fh)
# one without the version, and another with the version
assert len(info['depends']) == 1
assert info['depends'][0].startswith('python ')
if __name__ == '__main__':
main()
| []
| []
| [
"PREFIX"
]
| [] | ["PREFIX"] | python | 1 | 0 | |
internal/uidriver/mobile/graphics_opengl.go | // Copyright 2018 The Ebiten Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build android ios,386 ios,amd64 ios,ebitengl
package mobile
import (
"github.com/MattSwanson/ebiten/v2/internal/driver"
"github.com/MattSwanson/ebiten/v2/internal/graphicsdriver/opengl"
)
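// Graphics returns the OpenGL graphics driver, which is the driver used for these
// mobile build configurations.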
func (*UserInterface) Graphics() driver.Graphics {
return opengl.Get()
}
| []
| []
| []
| [] | [] | go | null | null | null |
pkg/agent/run.go | package agent
import (
"context"
"fmt"
"net/url"
"os"
"path/filepath"
"strings"
"time"
systemd "github.com/coreos/go-systemd/daemon"
"github.com/pkg/errors"
"github.com/rancher/k3s/pkg/agent/config"
"github.com/rancher/k3s/pkg/agent/containerd"
"github.com/rancher/k3s/pkg/agent/flannel"
"github.com/rancher/k3s/pkg/agent/netpol"
"github.com/rancher/k3s/pkg/agent/proxy"
"github.com/rancher/k3s/pkg/agent/syssetup"
"github.com/rancher/k3s/pkg/agent/tunnel"
"github.com/rancher/k3s/pkg/cgroups"
"github.com/rancher/k3s/pkg/cli/cmds"
"github.com/rancher/k3s/pkg/clientaccess"
cp "github.com/rancher/k3s/pkg/cloudprovider"
"github.com/rancher/k3s/pkg/daemons/agent"
daemonconfig "github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/k3s/pkg/daemons/executor"
"github.com/rancher/k3s/pkg/nodeconfig"
"github.com/rancher/k3s/pkg/rootless"
"github.com/rancher/k3s/pkg/util"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/controller-manager/app"
app2 "k8s.io/kubernetes/cmd/kube-proxy/app"
kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config"
utilsnet "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer"
)
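// run performs the agent startup sequence: it resolves the node configuration,
// configures sysctls and the CRI tooling, starts flannel and containerd as needed,
// brings up the tunnel plus kubelet/kube-proxy, waits for the apiserver, and then
// syncs the Node object and starts the network policy controller before notifying
// systemd of readiness.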
func run(ctx context.Context, cfg cmds.Agent, proxy proxy.Proxy) error {
nodeConfig := config.Get(ctx, cfg, proxy)
dualCluster, err := utilsnet.IsDualStackCIDRs(nodeConfig.AgentConfig.ClusterCIDRs)
if err != nil {
return errors.Wrap(err, "failed to validate cluster-cidr")
}
dualService, err := utilsnet.IsDualStackCIDRs(nodeConfig.AgentConfig.ServiceCIDRs)
if err != nil {
return errors.Wrap(err, "failed to validate service-cidr")
}
dualNode, err := utilsnet.IsDualStackIPs(nodeConfig.AgentConfig.NodeIPs)
if err != nil {
return errors.Wrap(err, "failed to validate node-ip")
}
enableIPv6 := dualCluster || dualService || dualNode
conntrackConfig, err := getConntrackConfig(nodeConfig)
if err != nil {
return errors.Wrap(err, "failed to validate kube-proxy conntrack configuration")
}
syssetup.Configure(enableIPv6, conntrackConfig)
if err := setupCriCtlConfig(cfg, nodeConfig); err != nil {
return err
}
if err := executor.Bootstrap(ctx, nodeConfig, cfg); err != nil {
return err
}
if !nodeConfig.NoFlannel {
if err := flannel.Prepare(ctx, nodeConfig); err != nil {
return err
}
}
if !nodeConfig.Docker && nodeConfig.ContainerRuntimeEndpoint == "" {
if err := containerd.Run(ctx, nodeConfig); err != nil {
return err
}
}
notifySocket := os.Getenv("NOTIFY_SOCKET")
os.Unsetenv("NOTIFY_SOCKET")
if err := setupTunnelAndRunAgent(ctx, nodeConfig, cfg, proxy); err != nil {
return err
}
coreClient, err := coreClient(nodeConfig.AgentConfig.KubeConfigKubelet)
if err != nil {
return err
}
app.WaitForAPIServer(coreClient, 30*time.Second)
if !nodeConfig.NoFlannel {
if err := flannel.Run(ctx, nodeConfig, coreClient.CoreV1().Nodes()); err != nil {
return err
}
}
if err := configureNode(ctx, &nodeConfig.AgentConfig, coreClient.CoreV1().Nodes()); err != nil {
return err
}
if !nodeConfig.AgentConfig.DisableNPC {
if err := netpol.Run(ctx, nodeConfig); err != nil {
return err
}
}
os.Setenv("NOTIFY_SOCKET", notifySocket)
systemd.SdNotify(true, "READY=1\n")
<-ctx.Done()
return ctx.Err()
}
// getConntrackConfig uses the kube-proxy code to parse the user-provided kube-proxy-arg values, and
// extract the conntrack settings so that K3s can set them itself. This allows us to soft-fail when
// running K3s in Docker, where kube-proxy is no longer allowed to set conntrack sysctls on newer kernels.
// When running rootless, we do not attempt to set conntrack sysctls - this behavior is copied from kubeadm.
func getConntrackConfig(nodeConfig *daemonconfig.Node) (*kubeproxyconfig.KubeProxyConntrackConfiguration, error) {
ctConfig := &kubeproxyconfig.KubeProxyConntrackConfiguration{
MaxPerCore: utilpointer.Int32Ptr(0),
Min: utilpointer.Int32Ptr(0),
TCPEstablishedTimeout: &metav1.Duration{},
TCPCloseWaitTimeout: &metav1.Duration{},
}
if nodeConfig.AgentConfig.Rootless {
return ctConfig, nil
}
cmd := app2.NewProxyCommand()
if err := cmd.ParseFlags(daemonconfig.GetArgsList(map[string]string{}, nodeConfig.AgentConfig.ExtraKubeProxyArgs)); err != nil {
return nil, err
}
maxPerCore, err := cmd.Flags().GetInt32("conntrack-max-per-core")
if err != nil {
return nil, err
}
ctConfig.MaxPerCore = &maxPerCore
min, err := cmd.Flags().GetInt32("conntrack-min")
if err != nil {
return nil, err
}
ctConfig.Min = &min
establishedTimeout, err := cmd.Flags().GetDuration("conntrack-tcp-timeout-established")
if err != nil {
return nil, err
}
ctConfig.TCPEstablishedTimeout.Duration = establishedTimeout
closeWaitTimeout, err := cmd.Flags().GetDuration("conntrack-tcp-timeout-close-wait")
if err != nil {
return nil, err
}
ctConfig.TCPCloseWaitTimeout.Duration = closeWaitTimeout
return ctConfig, nil
}
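// coreClient builds a Kubernetes clientset from the given kubeconfig path.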
func coreClient(cfg string) (kubernetes.Interface, error) {
restConfig, err := clientcmd.BuildConfigFromFlags("", cfg)
if err != nil {
return nil, err
}
return kubernetes.NewForConfig(restConfig)
}
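// Run validates the cgroup setup, optionally re-execs into a rootless user namespace,
// starts the supervisor load-balancer proxy, waits for a valid join token, and then
// hands control to run.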
func Run(ctx context.Context, cfg cmds.Agent) error {
if err := cgroups.Validate(); err != nil {
return err
}
if cfg.Rootless && !cfg.RootlessAlreadyUnshared {
if err := rootless.Rootless(cfg.DataDir); err != nil {
return err
}
}
agentDir := filepath.Join(cfg.DataDir, "agent")
if err := os.MkdirAll(agentDir, 0700); err != nil {
return err
}
proxy, err := proxy.NewSupervisorProxy(ctx, !cfg.DisableLoadBalancer, agentDir, cfg.ServerURL, cfg.LBServerPort)
if err != nil {
return err
}
for {
newToken, err := clientaccess.ParseAndValidateTokenForUser(proxy.SupervisorURL(), cfg.Token, "node")
if err != nil {
logrus.Error(err)
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(2 * time.Second):
}
continue
}
cfg.Token = newToken.String()
break
}
return run(ctx, cfg, proxy)
}
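// configureNode waits for the kubelet to register the Node object and then syncs
// labels, address annotations, and node-config annotations onto it, retrying until
// the update succeeds.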
func configureNode(ctx context.Context, agentConfig *daemonconfig.Agent, nodes v1.NodeInterface) error {
count := 0
for {
node, err := nodes.Get(ctx, agentConfig.NodeName, metav1.GetOptions{})
if err != nil {
if count%30 == 0 {
logrus.Infof("Waiting for kubelet to be ready on node %s: %v", agentConfig.NodeName, err)
}
count++
time.Sleep(1 * time.Second)
continue
}
updateNode := false
if labels, changed := updateMutableLabels(agentConfig, node.Labels); changed {
node.Labels = labels
updateNode = true
}
if !agentConfig.DisableCCM {
if annotations, changed := updateAddressAnnotations(agentConfig, node.Annotations); changed {
node.Annotations = annotations
updateNode = true
}
if labels, changed := updateLegacyAddressLabels(agentConfig, node.Labels); changed {
node.Labels = labels
updateNode = true
}
}
// inject node config
if changed, err := nodeconfig.SetNodeConfigAnnotations(node); err != nil {
return err
} else if changed {
updateNode = true
}
if updateNode {
if _, err := nodes.Update(ctx, node, metav1.UpdateOptions{}); err != nil {
logrus.Infof("Failed to update node %s: %v", agentConfig.NodeName, err)
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(time.Second):
continue
}
}
logrus.Infof("labels have been set successfully on node: %s", agentConfig.NodeName)
} else {
logrus.Infof("labels have already been set on node: %s", agentConfig.NodeName)
}
break
}
return nil
}
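// updateMutableLabels merges the user-supplied --node-label values into the existing
// node labels and reports whether anything changed.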
func updateMutableLabels(agentConfig *daemonconfig.Agent, nodeLabels map[string]string) (map[string]string, bool) {
result := map[string]string{}
for _, m := range agentConfig.NodeLabels {
var (
v string
p = strings.SplitN(m, `=`, 2)
k = p[0]
)
if len(p) > 1 {
v = p[1]
}
result[k] = v
}
result = labels.Merge(nodeLabels, result)
return result, !equality.Semantic.DeepEqual(nodeLabels, result)
}
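// updateLegacyAddressLabels keeps the deprecated address labels in sync on nodes that
// already carry them and reports whether anything changed.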
func updateLegacyAddressLabels(agentConfig *daemonconfig.Agent, nodeLabels map[string]string) (map[string]string, bool) {
ls := labels.Set(nodeLabels)
if ls.Has(cp.InternalIPKey) || ls.Has(cp.HostnameKey) {
result := map[string]string{
cp.InternalIPKey: agentConfig.NodeIP,
cp.HostnameKey: agentConfig.NodeName,
}
if agentConfig.NodeExternalIP != "" {
result[cp.ExternalIPKey] = agentConfig.NodeExternalIP
}
result = labels.Merge(nodeLabels, result)
return result, !equality.Semantic.DeepEqual(nodeLabels, result)
}
return nil, false
}
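// updateAddressAnnotations records the node's internal/external IPs and hostname as
// annotations for the cloud provider and reports whether anything changed.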
func updateAddressAnnotations(agentConfig *daemonconfig.Agent, nodeAnnotations map[string]string) (map[string]string, bool) {
result := map[string]string{
cp.InternalIPKey: util.JoinIPs(agentConfig.NodeIPs),
cp.HostnameKey: agentConfig.NodeName,
}
if agentConfig.NodeExternalIP != "" {
result[cp.ExternalIPKey] = util.JoinIPs(agentConfig.NodeExternalIPs)
}
result = labels.Merge(nodeAnnotations, result)
return result, !equality.Semantic.DeepEqual(nodeAnnotations, result)
}
// setupTunnelAndRunAgent starts the tunnel before starting the kubelet and kube-proxy.
// There is a special case for etcd agents: they wait until the API address can be read
// from the address channel and then update the proxy with the server addresses. In rke2
// the agent must be started before the tunnel is set up so that the kubelet can start
// first and bring up the pods.
func setupTunnelAndRunAgent(ctx context.Context, nodeConfig *daemonconfig.Node, cfg cmds.Agent, proxy proxy.Proxy) error {
var agentRan bool
if cfg.ETCDAgent {
// only rke2 runs the agent before the tunnel is set up; this is checked again later in the function
if proxy.IsAPIServerLBEnabled() {
if err := agent.Agent(&nodeConfig.AgentConfig); err != nil {
return err
}
agentRan = true
}
select {
case address := <-cfg.APIAddressCh:
cfg.ServerURL = address
u, err := url.Parse(cfg.ServerURL)
if err != nil {
logrus.Warn(err)
}
proxy.Update([]string{fmt.Sprintf("%s:%d", u.Hostname(), nodeConfig.ServerHTTPSPort)})
case <-ctx.Done():
return ctx.Err()
}
} else if cfg.ClusterReset && proxy.IsAPIServerLBEnabled() {
if err := agent.Agent(&nodeConfig.AgentConfig); err != nil {
return err
}
agentRan = true
}
if err := tunnel.Setup(ctx, nodeConfig, proxy); err != nil {
return err
}
if !agentRan {
return agent.Agent(&nodeConfig.AgentConfig)
}
return nil
}
| [
"\"NOTIFY_SOCKET\""
]
| []
| [
"NOTIFY_SOCKET"
]
| [] | ["NOTIFY_SOCKET"] | go | 1 | 0 | |
src/main/java/herbott/webserver/ControlFromAppConnector.java | package herbott.webserver;
import herbott.webserver.servlets.Hi;
import herbott.webserver.servlets.OauthServlet;
import herbott.webserver.servlets.TakeActiveServlet;
import herbott.webserver.servlets.StreamNoticeCallback;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
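/**
 * Background thread that starts an embedded Jetty server on the port taken from the
 * PORT environment variable and registers the bot's HTTP control servlets
 * (e.g. started with {@code new ControlFromAppConnector().start()}).
 */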
public class ControlFromAppConnector extends Thread {
@Override
public void run() {
int port = Integer.valueOf(System.getenv("PORT"));
Server server = new Server(port);
ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS);
context.setContextPath("/");
server.setHandler(context);
context.addServlet(new ServletHolder(new Hi()), "/*");
context.addServlet(new ServletHolder(new StreamNoticeCallback()), "/callback");
context.addServlet(new ServletHolder(new OauthServlet()), "/oauth");
context.addServlet(new ServletHolder(new TakeActiveServlet()), "/takeactive");
try {
server.start();
server.join();
} catch (Exception io) {
io.printStackTrace();
}
}
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | java | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# gonsns directory.
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, 'gonsns'))
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
mingw32/lib/python2.7/test/regrtest.py | #!/usr/bin/env python2
"""
Usage:
python -m test.regrtest [options] [test_name1 [test_name2 ...]]
python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]]
If no arguments or options are provided, finds all files matching
the pattern "test_*" in the Lib/test subdirectory and runs
them in alphabetical order (but see -M and -u, below, for exceptions).
For more rigorous testing, it is useful to use the following
command line:
python -E -tt -Wd -3 -m test.regrtest [options] [test_name1 ...]
Options:
-h/--help -- print this text and exit
Verbosity
-v/--verbose -- run tests in verbose mode with output to stdout
-w/--verbose2 -- re-run failed tests in verbose mode
-W/--verbose3 -- re-run failed tests in verbose mode immediately
-q/--quiet -- no output unless one or more tests fail
-S/--slowest -- print the slowest 10 tests
--header -- print header with interpreter info
Selecting tests
-r/--randomize -- randomize test execution order (see below)
--randseed -- pass a random seed to reproduce a previous random run
-f/--fromfile -- read names of tests to run from a file (see below)
-x/--exclude -- arguments are tests to *exclude*
-s/--single -- single step through a set of tests (see below)
-m/--match PAT -- match test cases and methods with glob pattern PAT
--matchfile FILENAME -- filters tests using a text file, one pattern per line
-G/--failfast -- fail as soon as a test fails (only with -v or -W)
-u/--use RES1,RES2,...
-- specify which special resource intensive tests to run
-M/--memlimit LIMIT
-- run very large memory-consuming tests
Special runs
-l/--findleaks -- if GC is available detect tests that leak memory
-L/--runleaks -- run the leaks(1) command just before exit
-R/--huntrleaks RUNCOUNTS
-- search for reference leaks (needs debug build, v. slow)
-j/--multiprocess PROCESSES
-- run PROCESSES processes at once
-T/--coverage -- turn on code coverage tracing using the trace module
-D/--coverdir DIRECTORY
-- Directory where coverage files are put
-N/--nocoverdir -- Put coverage files alongside modules
-t/--threshold THRESHOLD
-- call gc.set_threshold(THRESHOLD)
-F/--forever -- run the specified tests in a loop, until an error happens
-P/--pgo -- enable Profile Guided Optimization training
--testdir -- execute test files in the specified directory
(instead of the Python stdlib test suite)
--list-tests -- only write the name of tests that will be run,
don't execute them
--list-cases -- only write the name of test cases that will be run,
don't execute them
--fail-env-changed -- if a test file alters the environment, mark the test
as failed
Additional Option Details:
-r randomizes test execution order. You can use --randseed=int to provide an
int seed value for the randomizer; this is useful for reproducing troublesome
test orders.
-s On the first invocation of regrtest using -s, the first test file found
or the first test file given on the command line is run, and the name of
the next test is recorded in a file named pynexttest. If run from the
Python build directory, pynexttest is located in the 'build' subdirectory,
otherwise it is located in tempfile.gettempdir(). On subsequent runs,
the test in pynexttest is run, and the next test is written to pynexttest.
When the last test has been run, pynexttest is deleted. In this way it
is possible to single step through the test files. This is useful when
doing memory analysis on the Python interpreter, which process tends to
consume too many resources to run the full regression test non-stop.
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), and the minimal invocation is '-R :'.
-M runs tests that require an exorbitant amount of memory. These tests
typically try to ascertain that containers keep working when holding more than
2 billion objects, which only works on 64-bit systems. There are also some
tests that try to exhaust the address space of the process, which only makes
sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit,
which is a string in the form of '2.5Gb', determines how much memory the
tests will limit themselves to (but they may go slightly over). The number
shouldn't be more memory than the machine has (including swap memory). You
should also keep in mind that swap memory is generally much, much slower
than RAM, and setting memlimit to all available RAM or higher will heavily
tax the machine. On the other hand, it is no use running these tests with a
limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect
to use more than memlimit memory will be skipped. The big-memory tests
generally run very, very long.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some tests that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resources, e.g. testing SSL support for sockets.
bsddb - It is okay to run the bsddb testsuite, which takes
a long time to complete.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
cpu - Used for certain CPU-heavy tests.
subprocess - Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
gui - Run tests that require a running GUI.
xpickle - Test pickle and cPickle against Python 2.4, 2.5 and 2.6 to
test backwards compatibility. These tests take a long time
to run.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the bsddb tests, give the
option '-uall,-bsddb'.
--matchfile filters tests using a text file, one pattern per line.
Pattern examples:
- test method: test_stat_attributes
- test class: FileTests
- test identifier: test_os.FileTests.test_stat_attributes
"""
import StringIO
import datetime
import getopt
import json
import os
import random
import re
import shutil
import sys
import time
import traceback
import warnings
import unittest
import tempfile
import imp
import platform
import sysconfig
# Sometimes __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
# imports might fail. This affects only the modules imported before os.chdir().
# These modules are searched first in sys.path[0] (so '' -- the CWD) and if
# they are found in the CWD their __file__ and __path__ will be relative (this
# happens before the chdir). All the modules imported after the chdir are
# not found in the CWD, and since the other paths in sys.path[1:] are absolute
# (site.py absolutizes them), the __file__ and __path__ will be absolute too.
# Therefore it is necessary to manually absolutize the __file__ and __path__ of
# the packages to prevent later imports from failing when the CWD is different.
for module in sys.modules.itervalues():
if hasattr(module, '__path__'):
module.__path__ = [os.path.abspath(path) for path in module.__path__]
if hasattr(module, '__file__'):
module.__file__ = os.path.abspath(module.__file__)
# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to raise the soft stack limit to 2048 KiB (1024*2048 bytes, below).
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
try:
import resource
except ImportError:
pass
else:
soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
newsoft = min(hard, max(soft, 1024*2048))
resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
# Windows, Tkinter, and resetting the environment after each test don't
# mix well. To alleviate test failures due to Tcl/Tk not being able to
# find its library, get the necessary environment massage done once early.
if sys.platform == 'win32':
try:
import FixTk
except Exception:
pass
# Test result constants.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
CHILD_ERROR = -5 # error in a child process
# Minimum duration of a test to display its duration or to mention that
# the test is running in the background
PROGRESS_MIN_TIME = 30.0 # seconds
# Display the running tests if nothing happened last N seconds
PROGRESS_UPDATE = 30.0 # seconds
from test import test_support
ALL_RESOURCES = ('audio', 'curses', 'largefile', 'network', 'bsddb',
'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui',
'xpickle')
# Other resources excluded from --use=all:
#
# - extralargefile (ex: test_zipfile64): really too slow to be enabled
# "by default"
RESOURCE_NAMES = ALL_RESOURCES + ('extralargefile',)
TEMPDIR = os.path.abspath(tempfile.gettempdir())
def usage(code, msg=''):
print __doc__
if msg: print msg
sys.exit(code)
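# Render a duration in seconds as a short human-readable string,
# e.g. format_duration(0.25) -> '250 ms', format_duration(90.0) -> '1 min 30 sec'.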
def format_duration(seconds):
if seconds < 1.0:
return '%.0f ms' % (seconds * 1e3)
if seconds < 60.0:
return '%.0f sec' % seconds
minutes, seconds = divmod(seconds, 60.0)
return '%.0f min %.0f sec' % (minutes, seconds)
_FORMAT_TEST_RESULT = {
PASSED: '%s passed',
FAILED: '%s failed',
ENV_CHANGED: '%s failed (env changed)',
SKIPPED: '%s skipped',
RESOURCE_DENIED: '%s skipped (resource denied)',
INTERRUPTED: '%s interrupted',
CHILD_ERROR: '%s crashed',
}
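# Render a one-line result for a test name,
# e.g. format_test_result('test_os', FAILED) -> 'test_os failed'.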
def format_test_result(test_name, result):
fmt = _FORMAT_TEST_RESULT.get(result, "%s")
return fmt % test_name
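# Best-effort CPU count: try os.sysconf() first, fall back to
# multiprocessing.cpu_count(), and return None if neither is available.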
def cpu_count():
# first try os.sysconf() to prevent loading the big multiprocessing module
try:
return os.sysconf('SC_NPROCESSORS_ONLN')
except (AttributeError, ValueError):
pass
# try multiprocessing.cpu_count()
try:
import multiprocessing
except ImportError:
pass
else:
return multiprocessing.cpu_count()
return None
def unload_test_modules(save_modules):
# Unload the newly imported modules (best effort finalization)
for module in sys.modules.keys():
if module not in save_modules and module.startswith("test."):
test_support.unload(module)
def main(tests=None, testdir=None, verbose=0, quiet=False,
exclude=False, single=False, randomize=False, fromfile=None,
findleaks=False, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
random_seed=None, use_mp=None, verbose3=False, forever=False,
header=False, pgo=False, failfast=False, match_tests=None):
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, exclude,
single, randomize, findleaks, use_resources, trace, coverdir,
print_slow, and random_seed) allow programmers calling main()
directly to set the values that would normally be set by flags
on the command line.
"""
regrtest_start_time = time.time()
test_support.record_original_stdout(sys.stdout)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvqxsSrf:lu:t:TD:NLR:FwWM:j:PGm:',
['help', 'verbose', 'verbose2', 'verbose3', 'quiet',
'exclude', 'single', 'slow', 'slowest', 'randomize', 'fromfile=',
'findleaks',
'use=', 'threshold=', 'trace', 'coverdir=', 'nocoverdir',
'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=',
'multiprocess=', 'slaveargs=', 'forever', 'header', 'pgo',
'failfast', 'match=', 'testdir=', 'list-tests', 'list-cases',
'coverage', 'matchfile=', 'fail-env-changed'])
except getopt.error, msg:
usage(2, msg)
# Defaults
if random_seed is None:
random_seed = random.randrange(10000000)
if use_resources is None:
use_resources = []
slaveargs = None
list_tests = False
list_cases_opt = False
fail_env_changed = False
for o, a in opts:
if o in ('-h', '--help'):
usage(0)
elif o in ('-v', '--verbose'):
verbose += 1
elif o in ('-w', '--verbose2'):
verbose2 = True
elif o in ('-W', '--verbose3'):
verbose3 = True
elif o in ('-G', '--failfast'):
failfast = True
elif o in ('-q', '--quiet'):
quiet = True
verbose = 0
elif o in ('-x', '--exclude'):
exclude = True
elif o in ('-s', '--single'):
single = True
elif o in ('-S', '--slow', '--slowest'):
print_slow = True
elif o in ('-r', '--randomize'):
randomize = True
elif o == '--randseed':
random_seed = int(a)
elif o in ('-f', '--fromfile'):
fromfile = a
elif o in ('-m', '--match'):
if match_tests is None:
match_tests = []
match_tests.append(a)
elif o == '--matchfile':
if match_tests is None:
match_tests = []
filename = os.path.join(test_support.SAVEDCWD, a)
with open(filename) as fp:
for line in fp:
match_tests.append(line.strip())
elif o in ('-l', '--findleaks'):
findleaks = True
elif o in ('-L', '--runleaks'):
runleaks = True
elif o in ('-t', '--threshold'):
import gc
gc.set_threshold(int(a))
elif o in ('-T', '--coverage'):
trace = True
elif o in ('-D', '--coverdir'):
coverdir = os.path.join(os.getcwd(), a)
elif o in ('-N', '--nocoverdir'):
coverdir = None
elif o in ('-R', '--huntrleaks'):
huntrleaks = a.split(':')
if len(huntrleaks) not in (2, 3):
print a, huntrleaks
usage(2, '-R takes 2 or 3 colon-separated arguments')
if not huntrleaks[0]:
huntrleaks[0] = 5
else:
huntrleaks[0] = int(huntrleaks[0])
if not huntrleaks[1]:
huntrleaks[1] = 4
else:
huntrleaks[1] = int(huntrleaks[1])
if len(huntrleaks) == 2 or not huntrleaks[2]:
huntrleaks[2:] = ["reflog.txt"]
elif o in ('-M', '--memlimit'):
test_support.set_memlimit(a)
elif o in ('-u', '--use'):
u = [x.lower() for x in a.split(',')]
for r in u:
if r == 'all':
use_resources[:] = ALL_RESOURCES
continue
remove = False
if r[0] == '-':
remove = True
r = r[1:]
if r not in RESOURCE_NAMES:
usage(1, 'Invalid -u/--use option: ' + a)
if remove:
if r in use_resources:
use_resources.remove(r)
elif r not in use_resources:
use_resources.append(r)
elif o in ('-F', '--forever'):
forever = True
elif o in ('-j', '--multiprocess'):
use_mp = int(a)
elif o == '--header':
header = True
elif o == '--slaveargs':
slaveargs = a
elif o in ('-P', '--pgo'):
pgo = True
elif o == '--testdir':
testdir = a
elif o == '--list-tests':
list_tests = True
elif o == '--list-cases':
list_cases_opt = True
elif o == '--fail-env-changed':
fail_env_changed = True
else:
print >>sys.stderr, ("No handler for option {}. Please "
"report this as a bug at http://bugs.python.org.").format(o)
sys.exit(1)
if single and fromfile:
usage(2, "-s and -f don't go together!")
if use_mp and trace:
usage(2, "-T and -j don't go together!")
if use_mp and findleaks:
usage(2, "-l and -j don't go together!")
if failfast and not (verbose or verbose3):
usage("-G/--failfast needs either -v or -W")
if testdir:
testdir = os.path.abspath(testdir)
# Prepend test directory to sys.path, so runtest() will be able
# to locate tests
sys.path.insert(0, testdir)
# Make sure that '' and Lib/test/ are not in sys.path
regrtest_dir = os.path.abspath(os.path.dirname(__file__))
for path in ('', regrtest_dir):
try:
sys.path.remove(path)
except ValueError:
pass
if slaveargs is not None:
args, kwargs = json.loads(slaveargs)
if kwargs['huntrleaks']:
warm_caches()
if testdir:
kwargs['testdir'] = testdir
try:
result = runtest(*args, **kwargs)
except BaseException, e:
result = INTERRUPTED, e.__class__.__name__
print # Force a newline (just in case)
print json.dumps(result)
sys.exit(0)
if huntrleaks:
warm_caches()
good = []
bad = []
skipped = []
resource_denieds = []
environment_changed = []
interrupted = False
if findleaks:
try:
import gc
except ImportError:
print 'No GC available, disabling findleaks.'
findleaks = False
else:
# Uncomment the line below to report garbage that is not
# freeable by reference counting alone. By default only
# garbage that is not collectable by the GC is reported.
#gc.set_debug(gc.DEBUG_SAVEALL)
found_garbage = []
if single:
filename = os.path.join(TEMPDIR, 'pynexttest')
try:
fp = open(filename, 'r')
next_test = fp.read().strip()
tests = [next_test]
fp.close()
except IOError:
pass
if fromfile:
tests = []
fp = open(os.path.join(test_support.SAVEDCWD, fromfile))
for line in fp:
guts = line.split() # assuming no test has whitespace in its name
if guts and not guts[0].startswith('#'):
tests.extend(guts)
fp.close()
# Strip .py extensions.
removepy(args)
removepy(tests)
stdtests = STDTESTS[:]
nottests = NOTTESTS.copy()
if exclude:
for arg in args:
if arg in stdtests:
stdtests.remove(arg)
nottests.add(arg)
args = []
display_header = (verbose or header or not (quiet or single or tests or args)) and (not pgo)
alltests = findtests(testdir, stdtests, nottests)
selected = tests or args or alltests
if single:
selected = selected[:1]
try:
next_single_test = alltests[alltests.index(selected[0])+1]
except IndexError:
next_single_test = None
if list_tests:
for name in selected:
print(name)
sys.exit(0)
if list_cases_opt:
list_cases(testdir, selected, match_tests)
sys.exit(0)
if trace:
import trace
tracer = trace.Trace(trace=False, count=True)
test_times = []
test_support.use_resources = use_resources
save_modules = set(sys.modules)
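# Record a finished test in the bookkeeping lists (good/bad/skipped/...)
# and remember its duration for the --slowest report.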
def accumulate_result(test, result):
ok, test_time = result
if ok not in (CHILD_ERROR, INTERRUPTED):
test_times.append((test_time, test))
if ok == PASSED:
good.append(test)
elif ok in (FAILED, CHILD_ERROR):
bad.append(test)
elif ok == ENV_CHANGED:
environment_changed.append(test)
elif ok == SKIPPED:
skipped.append(test)
elif ok == RESOURCE_DENIED:
skipped.append(test)
resource_denieds.append(test)
elif ok != INTERRUPTED:
raise ValueError("invalid test result: %r" % ok)
if forever:
def test_forever(tests=list(selected)):
while True:
for test in tests:
yield test
if bad:
return
if fail_env_changed and environment_changed:
return
tests = test_forever()
test_count = ''
test_count_width = 3
else:
tests = iter(selected)
test_count = '/{}'.format(len(selected))
test_count_width = len(test_count) - 1
def display_progress(test_index, test):
# "[ 51/405/1] test_tcl"
line = "{1:{0}}{2}".format(test_count_width, test_index, test_count)
if bad and not pgo:
line = '{}/{}'.format(line, len(bad))
line = '[{}]'.format(line)
# add the system load prefix: "load avg: 1.80 "
if hasattr(os, 'getloadavg'):
load_avg_1min = os.getloadavg()[0]
line = "load avg: {:.2f} {}".format(load_avg_1min, line)
# add the timestamp prefix: "0:01:05 "
test_time = time.time() - regrtest_start_time
test_time = datetime.timedelta(seconds=int(test_time))
line = "%s %s" % (test_time, line)
# add the test name
line = "{} {}".format(line, test)
print(line)
sys.stdout.flush()
# For a partial run, we do not need to clutter the output.
if display_header:
# Print basic platform information
print "==", platform.python_implementation(), \
" ".join(sys.version.split())
print "== ", platform.platform(aliased=True), \
"%s-endian" % sys.byteorder
print "== ", os.getcwd()
ncpu = cpu_count()
if ncpu:
print "== CPU count:", ncpu
if randomize:
random.seed(random_seed)
print "Using random seed", random_seed
random.shuffle(selected)
if use_mp:
try:
from threading import Thread
except ImportError:
print "Multiprocess option requires thread support"
sys.exit(2)
from Queue import Queue, Empty
from subprocess import Popen, PIPE
debug_output_pat = re.compile(r"\[\d+ refs\]$")
output = Queue()
def tests_and_args():
for test in tests:
args_tuple = (
(test, verbose, quiet),
dict(huntrleaks=huntrleaks, use_resources=use_resources,
failfast=failfast,
match_tests=match_tests,
pgo=pgo)
)
yield (test, args_tuple)
pending = tests_and_args()
opt_args = test_support.args_from_interpreter_flags()
base_cmd = [sys.executable] + opt_args + ['-m', 'test.regrtest']
# required to spawn a new process with PGO flag on/off
if pgo:
base_cmd = base_cmd + ['--pgo']
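# Worker thread for -j/--multiprocess: it repeatedly takes the next test from
# the shared 'pending' iterator, runs it in a child interpreter via
# --slaveargs, and puts (test, stdout, stderr, result) on the output queue.
# A (None, None, None, None) item signals that this worker has finished.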
class MultiprocessThread(Thread):
current_test = None
start_time = None
def runtest(self):
try:
test, args_tuple = next(pending)
except StopIteration:
output.put((None, None, None, None))
return True
# -E is needed by some tests, e.g. test_import
args = base_cmd + ['--slaveargs', json.dumps(args_tuple)]
if testdir:
args.extend(('--testdir', testdir))
try:
self.start_time = time.time()
self.current_test = test
popen = Popen(args,
stdout=PIPE, stderr=PIPE,
universal_newlines=True,
close_fds=(os.name != 'nt'))
stdout, stderr = popen.communicate()
retcode = popen.wait()
finally:
self.current_test = None
# Strip last refcount output line if it exists, since it
# comes from the shutdown of the interpreter in the subcommand.
stderr = debug_output_pat.sub("", stderr)
if retcode == 0:
stdout, _, result = stdout.strip().rpartition("\n")
if not result:
output.put((None, None, None, None))
return True
result = json.loads(result)
else:
result = (CHILD_ERROR, "Exit code %s" % retcode)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
return False
def run(self):
try:
stop = False
while not stop:
stop = self.runtest()
except BaseException:
output.put((None, None, None, None))
raise
workers = [MultiprocessThread() for i in range(use_mp)]
print("Run tests in parallel using %s child processes"
% len(workers))
for worker in workers:
worker.start()
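# Describe the tests that have been running for at least PROGRESS_MIN_TIME,
# e.g. ['test_io (35 sec)'], so long-running tests show up in the progress output.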
def get_running(workers):
running = []
for worker in workers:
current_test = worker.current_test
if not current_test:
continue
dt = time.time() - worker.start_time
if dt >= PROGRESS_MIN_TIME:
running.append('%s (%.0f sec)' % (current_test, dt))
return running
finished = 0
test_index = 1
get_timeout = max(PROGRESS_UPDATE, PROGRESS_MIN_TIME)
try:
while finished < use_mp:
try:
item = output.get(timeout=get_timeout)
except Empty:
running = get_running(workers)
if running and not pgo:
print('running: %s' % ', '.join(running))
continue
test, stdout, stderr, result = item
if test is None:
finished += 1
continue
accumulate_result(test, result)
if not quiet:
ok, test_time = result
text = format_test_result(test, ok)
if (ok not in (CHILD_ERROR, INTERRUPTED)
and test_time >= PROGRESS_MIN_TIME
and not pgo):
text += ' (%.0f sec)' % test_time
running = get_running(workers)
if running and not pgo:
text += ' -- running: %s' % ', '.join(running)
display_progress(test_index, text)
if stdout:
print stdout
sys.stdout.flush()
if stderr and not pgo:
print >>sys.stderr, stderr
sys.stderr.flush()
if result[0] == INTERRUPTED:
assert result[1] == 'KeyboardInterrupt'
raise KeyboardInterrupt # What else?
test_index += 1
except KeyboardInterrupt:
interrupted = True
pending.close()
for worker in workers:
worker.join()
else:
print("Run tests sequentially")
previous_test = None
for test_index, test in enumerate(tests, 1):
if not quiet:
text = test
if previous_test:
text = '%s -- %s' % (text, previous_test)
display_progress(test_index, text)
def local_runtest():
result = runtest(test, verbose, quiet, huntrleaks, None, pgo,
failfast=failfast,
match_tests=match_tests,
testdir=testdir)
accumulate_result(test, result)
return result
start_time = time.time()
if trace:
# If we're tracing code coverage, run the test under the tracer and
# recover the result from the traced namespace instead of calling it directly.
ns = dict(locals())
tracer.runctx('result = local_runtest()',
globals=globals(), locals=ns)
result = ns['result']
else:
try:
result = local_runtest()
if verbose3 and result[0] == FAILED:
if not pgo:
print "Re-running test %r in verbose mode" % test
runtest(test, True, quiet, huntrleaks, None, pgo,
testdir=testdir)
except KeyboardInterrupt:
interrupted = True
break
except:
raise
test_time = time.time() - start_time
previous_test = format_test_result(test, result[0])
if test_time >= PROGRESS_MIN_TIME:
previous_test = "%s in %s" % (previous_test,
format_duration(test_time))
elif result[0] == PASSED:
# be quiet: say nothing if the test passed quickly
previous_test = None
if findleaks:
gc.collect()
if gc.garbage:
print "Warning: test created", len(gc.garbage),
print "uncollectable object(s)."
# move the uncollectable objects somewhere so we don't see
# them again
found_garbage.extend(gc.garbage)
del gc.garbage[:]
unload_test_modules(save_modules)
if interrupted and not pgo:
# print a newline after ^C
print
print "Test suite interrupted by signal SIGINT."
omitted = set(selected) - set(good) - set(bad) - set(skipped)
print count(len(omitted), "test"), "omitted:"
printlist(omitted)
if good and not quiet and not pgo:
if not bad and not skipped and not interrupted and len(good) > 1:
print "All",
print count(len(good), "test"), "OK."
if print_slow:
test_times.sort(reverse=True)
print "10 slowest tests:"
for test_time, test in test_times[:10]:
print("- %s: %.1fs" % (test, test_time))
if bad and not pgo:
print count(len(bad), "test"), "failed:"
printlist(bad)
if environment_changed and not pgo:
print "{} altered the execution environment:".format(
count(len(environment_changed), "test"))
printlist(environment_changed)
if skipped and not quiet and not pgo:
print count(len(skipped), "test"), "skipped:"
printlist(skipped)
e = _ExpectedSkips()
plat = sys.platform
if e.isvalid():
surprise = set(skipped) - e.getexpected() - set(resource_denieds)
if surprise:
print count(len(surprise), "skip"), \
"unexpected on", plat + ":"
printlist(surprise)
else:
print "Those skips are all expected on", plat + "."
else:
print "Ask someone to teach regrtest.py about which tests are"
print "expected to get skipped on", plat + "."
if verbose2 and bad:
print "Re-running failed tests in verbose mode"
for test in bad[:]:
print "Re-running test %r in verbose mode" % test
sys.stdout.flush()
try:
test_support.verbose = True
ok = runtest(test, True, quiet, huntrleaks, None, pgo,
testdir=testdir)
except KeyboardInterrupt:
# print a newline separate from the ^C
print
break
else:
if ok[0] in {PASSED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED}:
bad.remove(test)
else:
if bad:
print count(len(bad), "test"), "failed again:"
printlist(bad)
if single:
if next_single_test:
with open(filename, 'w') as fp:
fp.write(next_single_test + '\n')
else:
os.unlink(filename)
if trace:
r = tracer.results()
r.write_results(show_missing=True, summary=True, coverdir=coverdir)
if runleaks:
os.system("leaks %d" % os.getpid())
print
duration = time.time() - regrtest_start_time
print("Total duration: %s" % format_duration(duration))
if bad:
result = "FAILURE"
elif interrupted:
result = "INTERRUPTED"
elif fail_env_changed and environment_changed:
result = "ENV CHANGED"
else:
result = "SUCCESS"
print("Tests result: %s" % result)
if bad:
sys.exit(2)
if interrupted:
sys.exit(130)
if fail_env_changed and environment_changed:
sys.exit(3)
sys.exit(0)
STDTESTS = [
'test_grammar',
'test_opcodes',
'test_dict',
'test_builtin',
'test_exceptions',
'test_types',
'test_unittest',
'test_doctest',
'test_doctest2',
]
NOTTESTS = {
'test_support',
'test_future1',
'test_future2',
}
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
"""Return a list of all applicable test modules."""
testdir = findtestdir(testdir)
names = os.listdir(testdir)
tests = []
others = set(stdtests) | nottests
for name in names:
modname, ext = os.path.splitext(name)
if modname[:5] == "test_" and ext == ".py" and modname not in others:
tests.append(modname)
return stdtests + sorted(tests)
def runtest(test, verbose, quiet,
huntrleaks=False, use_resources=None, pgo=False,
failfast=False, match_tests=None, testdir=None):
"""Run a single test.
test -- the name of the test
verbose -- if true, print more messages
quiet -- if true, don't print 'skipped' messages (probably redundant)
use_resources -- list of extra resources to enable (see the -u option)
huntrleaks -- run multiple times to test for leaks; requires a debug
build; a triple corresponding to -R's three arguments
pgo -- if true, do not print unnecessary info when running the test
for Profile Guided Optimization build
Returns one of the test result constants:
CHILD_ERROR Child process crashed
INTERRUPTED KeyboardInterrupt when run under -j
RESOURCE_DENIED test skipped because resource denied
SKIPPED test skipped for some other reason
ENV_CHANGED test failed because it changed the execution environment
FAILED test failed
PASSED test passed
"""
test_support.verbose = verbose # Tell tests to be moderately quiet
if use_resources is not None:
test_support.use_resources = use_resources
try:
test_support.set_match_tests(match_tests)
if failfast:
test_support.failfast = True
return runtest_inner(test, verbose, quiet, huntrleaks, pgo, testdir)
finally:
cleanup_test_droppings(test, verbose)
# Unit tests are supposed to leave the execution environment unchanged
# once they complete. But sometimes tests have bugs, especially when
# tests fail, and the changes to environment go on to mess up other
# tests. This can cause issues with buildbot stability, since tests
# are run in random order and so problems may appear to come and go.
# There are a few things we can save and restore to mitigate this, and
# the following context manager handles this task.
class saved_test_environment:
"""Save bits of the test environment and restore them at block exit.
with saved_test_environment(testname, verbose, quiet):
#stuff
Unless quiet is True, a warning is printed to stderr if any of
the saved items was changed by the test. The attribute 'changed'
is initially False, but is set to True if a change is detected.
If verbose is more than 1, the before and after state of changed
items is also printed.
"""
changed = False
def __init__(self, testname, verbose=0, quiet=False, pgo=False):
self.testname = testname
self.verbose = verbose
self.quiet = quiet
self.pgo = pgo
# To add things to save and restore, add a name XXX to the resources list
# and add corresponding get_XXX/restore_XXX functions. get_XXX should
# return the value to be saved and compared against a second call to the
# get function when test execution completes. restore_XXX should accept
# the saved value and restore the resource using it. It will be called if
# and only if a change in the value is detected.
#
# Note: XXX will have any '.' replaced with '_' characters when determining
# the corresponding method names.
resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
'os.environ', 'sys.path', 'asyncore.socket_map',
'files',
)
def get_sys_argv(self):
return id(sys.argv), sys.argv, sys.argv[:]
def restore_sys_argv(self, saved_argv):
sys.argv = saved_argv[1]
sys.argv[:] = saved_argv[2]
def get_cwd(self):
return os.getcwd()
def restore_cwd(self, saved_cwd):
os.chdir(saved_cwd)
def get_sys_stdout(self):
return sys.stdout
def restore_sys_stdout(self, saved_stdout):
sys.stdout = saved_stdout
def get_sys_stderr(self):
return sys.stderr
def restore_sys_stderr(self, saved_stderr):
sys.stderr = saved_stderr
def get_sys_stdin(self):
return sys.stdin
def restore_sys_stdin(self, saved_stdin):
sys.stdin = saved_stdin
def get_os_environ(self):
return id(os.environ), os.environ, dict(os.environ)
def restore_os_environ(self, saved_environ):
os.environ = saved_environ[1]
os.environ.clear()
os.environ.update(saved_environ[2])
def get_sys_path(self):
return id(sys.path), sys.path, sys.path[:]
def restore_sys_path(self, saved_path):
sys.path = saved_path[1]
sys.path[:] = saved_path[2]
def get_asyncore_socket_map(self):
asyncore = sys.modules.get('asyncore')
# XXX Making a copy keeps objects alive until __exit__ gets called.
return asyncore and asyncore.socket_map.copy() or {}
def restore_asyncore_socket_map(self, saved_map):
asyncore = sys.modules.get('asyncore')
if asyncore is not None:
asyncore.close_all(ignore_all=True)
asyncore.socket_map.update(saved_map)
def get_test_support_TESTFN(self):
if os.path.isfile(test_support.TESTFN):
result = 'f'
elif os.path.isdir(test_support.TESTFN):
result = 'd'
else:
result = None
return result
def restore_test_support_TESTFN(self, saved_value):
if saved_value is None:
if os.path.isfile(test_support.TESTFN):
os.unlink(test_support.TESTFN)
elif os.path.isdir(test_support.TESTFN):
shutil.rmtree(test_support.TESTFN)
def get_files(self):
return sorted(fn + ('/' if os.path.isdir(fn) else '')
for fn in os.listdir(os.curdir))
def restore_files(self, saved_value):
fn = test_support.TESTFN
if fn not in saved_value and (fn + '/') not in saved_value:
if os.path.isfile(fn):
test_support.unlink(fn)
elif os.path.isdir(fn):
test_support.rmtree(fn)
def resource_info(self):
for name in self.resources:
method_suffix = name.replace('.', '_')
get_name = 'get_' + method_suffix
restore_name = 'restore_' + method_suffix
yield name, getattr(self, get_name), getattr(self, restore_name)
def __enter__(self):
self.saved_values = dict((name, get()) for name, get, restore
in self.resource_info())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
saved_values = self.saved_values
del self.saved_values
for name, get, restore in self.resource_info():
current = get()
original = saved_values.pop(name)
# Check for changes to the resource's value
if current != original:
self.changed = True
restore(original)
if not self.quiet and not self.pgo:
print >>sys.stderr, (
"Warning -- {} was modified by {}".format(
name, self.testname))
print >>sys.stderr, (
" Before: {}\n After: {} ".format(
original, current))
# XXX (ncoghlan): for most resources (e.g. sys.path) identity
# matters at least as much as value. For others (e.g. cwd),
# identity is irrelevant. Should we add a mechanism to check
# for substitution in the cases where it matters?
return False
def post_test_cleanup():
test_support.reap_children()
def runtest_inner(test, verbose, quiet, huntrleaks=False, pgo=False, testdir=None):
test_support.unload(test)
if verbose:
capture_stdout = None
else:
capture_stdout = StringIO.StringIO()
test_time = 0.0
refleak = False # True if the test leaked references.
try:
save_stdout = sys.stdout
try:
if capture_stdout:
sys.stdout = capture_stdout
abstest = get_abs_module(testdir, test)
clear_caches()
with saved_test_environment(test, verbose, quiet, pgo) as environment:
start_time = time.time()
the_package = __import__(abstest, globals(), locals(), [])
if abstest.startswith('test.'):
the_module = getattr(the_package, test)
else:
the_module = the_package
# Old tests run to completion simply as a side-effect of
# being imported. For tests based on unittest or doctest,
# explicitly invoke their test_main() function (if it exists).
indirect_test = getattr(the_module, "test_main", None)
if indirect_test is not None:
indirect_test()
if huntrleaks:
refleak = dash_R(the_module, test, indirect_test,
huntrleaks)
test_time = time.time() - start_time
post_test_cleanup()
finally:
sys.stdout = save_stdout
except test_support.ResourceDenied, msg:
if not quiet and not pgo:
print test, "skipped --", msg
sys.stdout.flush()
return RESOURCE_DENIED, test_time
except unittest.SkipTest, msg:
if not quiet and not pgo:
print test, "skipped --", msg
sys.stdout.flush()
return SKIPPED, test_time
except KeyboardInterrupt:
raise
except test_support.TestFailed, msg:
if not pgo:
print >>sys.stderr, "test", test, "failed --", msg
sys.stderr.flush()
return FAILED, test_time
except:
type, value = sys.exc_info()[:2]
if not pgo:
print >>sys.stderr, "test", test, "crashed --", str(type) + ":", value
sys.stderr.flush()
if verbose and not pgo:
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
return FAILED, test_time
else:
if refleak:
return FAILED, test_time
if environment.changed:
return ENV_CHANGED, test_time
# Except in verbose mode, tests should not print anything
if verbose or huntrleaks:
return PASSED, test_time
output = capture_stdout.getvalue()
if not output:
return PASSED, test_time
print "test", test, "produced unexpected output:"
print "*" * 70
print output
print "*" * 70
sys.stdout.flush()
return FAILED, test_time
def cleanup_test_droppings(testname, verbose):
import stat
import gc
# First kill any dangling references to open files etc.
gc.collect()
# Try to clean up junk commonly left behind. While tests shouldn't leave
# any files or directories behind, when a test fails that can be tedious
# for it to arrange. The consequences can be especially nasty on Windows,
# since if a test leaves a file open, it cannot be deleted by name (while
# there's nothing we can do about that here either, we can display the
# name of the offending test, which is a real help).
for name in (test_support.TESTFN,
"db_home",
):
if not os.path.exists(name):
continue
if os.path.isdir(name):
kind, nuker = "directory", shutil.rmtree
elif os.path.isfile(name):
kind, nuker = "file", os.unlink
else:
raise SystemError("os.path says %r exists but is neither "
"directory nor file" % name)
if verbose:
print "%r left behind %s %r" % (testname, kind, name)
try:
# if we have chmod, fix possible permissions problems
# that might prevent cleanup
if (hasattr(os, 'chmod')):
os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
nuker(name)
except Exception, msg:
print >> sys.stderr, ("%r left behind %s %r and it couldn't be "
"removed: %s" % (testname, kind, name, msg))
def dash_R(the_module, test, indirect_test, huntrleaks):
"""Run a test multiple times, looking for reference leaks.
Returns:
False if the test didn't leak references; True if we detected refleaks.
"""
# This code is hackish and inelegant, but it seems to do the job.
import copy_reg, _abcoll, _pyio
if not hasattr(sys, 'gettotalrefcount'):
raise Exception("Tracking reference leaks requires a debug build "
"of Python")
# Save current values for dash_R_cleanup() to restore.
fs = warnings.filters[:]
ps = copy_reg.dispatch_table.copy()
pic = sys.path_importer_cache.copy()
try:
import zipimport
except ImportError:
zdc = None # Run unmodified on platforms without zipimport support
else:
zdc = zipimport._zip_directory_cache.copy()
abcs = {}
modules = _abcoll, _pyio
for abc in [getattr(mod, a) for mod in modules for a in mod.__all__]:
# XXX isinstance(abc, ABCMeta) leads to infinite recursion
if not hasattr(abc, '_abc_registry'):
continue
for obj in abc.__subclasses__() + [abc]:
abcs[obj] = obj._abc_registry.copy()
if indirect_test:
def run_the_test():
indirect_test()
else:
def run_the_test():
imp.reload(the_module)
deltas = []
nwarmup, ntracked, fname = huntrleaks
fname = os.path.join(test_support.SAVEDCWD, fname)
repcount = nwarmup + ntracked
print >> sys.stderr, "beginning", repcount, "repetitions"
print >> sys.stderr, ("1234567890"*(repcount//10 + 1))[:repcount]
dash_R_cleanup(fs, ps, pic, zdc, abcs)
for i in range(repcount):
rc_before = sys.gettotalrefcount()
run_the_test()
sys.stderr.write('.')
dash_R_cleanup(fs, ps, pic, zdc, abcs)
rc_after = sys.gettotalrefcount()
if i >= nwarmup:
deltas.append(rc_after - rc_before)
print >> sys.stderr
# bpo-30776: Try to ignore false positives:
#
# [3, 0, 0]
# [0, 1, 0]
# [8, -8, 1]
#
# Expected leaks:
#
# [5, 5, 6]
# [10, 1, 1]
if all(delta >= 1 for delta in deltas):
msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas))
print >> sys.stderr, msg
with open(fname, "a") as refrep:
print >> refrep, msg
refrep.flush()
return True
return False
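# Undo the interpreter-wide side effects of one test repetition: restore the
# caches and registries captured by dash_R() so that bookkeeping churn is not
# reported as a reference leak.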
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
import gc, copy_reg
# Restore some original values.
warnings.filters[:] = fs
copy_reg.dispatch_table.clear()
copy_reg.dispatch_table.update(ps)
sys.path_importer_cache.clear()
sys.path_importer_cache.update(pic)
try:
import zipimport
except ImportError:
pass # Run unmodified on platforms without zipimport support
else:
zipimport._zip_directory_cache.clear()
zipimport._zip_directory_cache.update(zdc)
# clear type cache
sys._clear_type_cache()
# Clear ABC registries, restoring previously saved ABC registries.
for abc, registry in abcs.items():
abc._abc_registry = registry.copy()
abc._abc_cache.clear()
abc._abc_negative_cache.clear()
clear_caches()
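# Clear well-known module-level caches (re, urllib, linecache, ctypes, ...)
# and collect cyclic garbage, again so cached objects are not counted as leaks.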
def clear_caches():
import gc
# Clear the warnings registry, so they can be displayed again
for mod in sys.modules.values():
if hasattr(mod, '__warningregistry__'):
del mod.__warningregistry__
# Clear assorted module caches.
# Don't worry about resetting the cache if the module is not loaded
try:
distutils_dir_util = sys.modules['distutils.dir_util']
except KeyError:
pass
else:
distutils_dir_util._path_created.clear()
re.purge()
try:
_strptime = sys.modules['_strptime']
except KeyError:
pass
else:
_strptime._regex_cache.clear()
try:
urlparse = sys.modules['urlparse']
except KeyError:
pass
else:
urlparse.clear_cache()
try:
urllib = sys.modules['urllib']
except KeyError:
pass
else:
urllib.urlcleanup()
try:
urllib2 = sys.modules['urllib2']
except KeyError:
pass
else:
urllib2.install_opener(None)
try:
dircache = sys.modules['dircache']
except KeyError:
pass
else:
dircache.reset()
try:
linecache = sys.modules['linecache']
except KeyError:
pass
else:
linecache.clearcache()
try:
mimetypes = sys.modules['mimetypes']
except KeyError:
pass
else:
mimetypes._default_mime_types()
try:
filecmp = sys.modules['filecmp']
except KeyError:
pass
else:
filecmp._cache.clear()
try:
struct = sys.modules['struct']
except KeyError:
pass
else:
struct._clearcache()
try:
doctest = sys.modules['doctest']
except KeyError:
pass
else:
doctest.master = None
try:
ctypes = sys.modules['ctypes']
except KeyError:
pass
else:
ctypes._reset_cache()
# Collect cyclic trash.
gc.collect()
def warm_caches():
"""Create explicitly internal singletons which are created on demand
to prevent false positive when hunting reference leaks."""
# char cache
for i in range(256):
chr(i)
# unicode cache
for i in range(256):
unichr(i)
# int cache
list(range(-5, 257))
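# Directory containing the test files; defaults to the directory of this module.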
def findtestdir(path=None):
return path or os.path.dirname(__file__) or os.curdir
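# Strip a trailing '.py' from each name in place, so 'test_os.py' and
# 'test_os' refer to the same test.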
def removepy(names):
if not names:
return
for idx, name in enumerate(names):
basename, ext = os.path.splitext(name)
if ext == '.py':
names[idx] = basename
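# Simple pluralization helper: count(1, 'test') -> '1 test', count(3, 'test') -> '3 tests'.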
def count(n, word):
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
def printlist(x, width=70, indent=4, file=None):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
Optional arg indent (default 4) is the number of blanks with which to
begin each line.
"""
from textwrap import fill
blanks = ' ' * indent
# Print the sorted list: 'x' may be a '--random' list or a set()
print >>file, fill(' '.join(str(elt) for elt in sorted(x)), width,
initial_indent=blanks, subsequent_indent=blanks)
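# Map a bare test name to an importable module name ('test_os' -> 'test.test_os');
# names are returned unchanged when already package-qualified or when --testdir is used.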
def get_abs_module(testdir, test):
if test.startswith('test.') or testdir:
return test
else:
# Always import it from the test package
return 'test.' + test
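# Recursively walk a unittest suite and print the id() of every test case
# that matches the -m/--match patterns.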
def _list_cases(suite):
for test in suite:
if isinstance(test, unittest.TestSuite):
_list_cases(test)
elif isinstance(test, unittest.TestCase):
if test_support.match_test(test):
print(test.id())
def list_cases(testdir, selected, match_tests):
test_support.verbose = False
test_support.set_match_tests(match_tests)
save_modules = set(sys.modules)
skipped = []
for test in selected:
abstest = get_abs_module(testdir, test)
try:
suite = unittest.defaultTestLoader.loadTestsFromName(abstest)
_list_cases(suite)
except unittest.SkipTest:
skipped.append(test)
unload_test_modules(save_modules)
if skipped:
print >>sys.stderr
print >>sys.stderr, count(len(skipped), "test"), "skipped:"
printlist(skipped, file=sys.stderr)
# Map sys.platform to a string containing the basenames of tests
# expected to be skipped on that platform.
#
# Special cases:
# test_pep277
# The _ExpectedSkips constructor adds this to the set of expected
# skips if not os.path.supports_unicode_filenames.
# test_timeout
# Controlled by test_timeout.skip_expected. Requires the network
# resource and a socket module.
#
# Tests that are expected to be skipped everywhere except on one platform
# are also handled separately.
_expectations = {
'win32':
"""
test__locale
test_bsddb185
test_bsddb3
test_commands
test_crypt
test_curses
test_dbm
test_dl
test_fcntl
test_fork1
test_epoll
test_gdbm
test_grp
test_ioctl
test_largefile
test_kqueue
test_mhlib
test_openpty
test_ossaudiodev
test_pipes
test_poll
test_posix
test_pty
test_pwd
test_resource
test_signal
test_spwd
test_threadsignals
test_timing
test_wait3
test_wait4
""",
'linux2':
"""
test_bsddb185
test_curses
test_dl
test_largefile
test_kqueue
test_ossaudiodev
""",
'unixware7':
"""
test_bsddb
test_bsddb185
test_dl
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
""",
'openunix8':
"""
test_bsddb
test_bsddb185
test_dl
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
""",
'sco_sv3':
"""
test_asynchat
test_bsddb
test_bsddb185
test_dl
test_fork1
test_epoll
test_gettext
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_queue
test_sax
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
""",
'riscos':
"""
test_asynchat
test_atexit
test_bsddb
test_bsddb185
test_bsddb3
test_commands
test_crypt
test_dbm
test_dl
test_fcntl
test_fork1
test_epoll
test_gdbm
test_grp
test_largefile
test_locale
test_kqueue
test_mmap
test_openpty
test_poll
test_popen2
test_pty
test_pwd
test_strop
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
test_timing
""",
'darwin':
"""
test__locale
test_bsddb
test_bsddb3
test_curses
test_epoll
test_gdb
test_gdbm
test_largefile
test_locale
test_kqueue
test_minidom
test_ossaudiodev
test_poll
""",
'sunos5':
"""
test_bsddb
test_bsddb185
test_curses
test_dbm
test_epoll
test_kqueue
test_gdbm
test_gzip
test_openpty
test_zipfile
test_zlib
""",
'hp-ux11':
"""
test_bsddb
test_bsddb185
test_curses
test_dl
test_epoll
test_gdbm
test_gzip
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_zipfile
test_zlib
""",
'atheos':
"""
test_bsddb185
test_curses
test_dl
test_gdbm
test_epoll
test_largefile
test_locale
test_kqueue
test_mhlib
test_mmap
test_poll
test_popen2
test_resource
""",
'cygwin':
"""
test_bsddb185
test_bsddb3
test_curses
test_dbm
test_epoll
test_ioctl
test_kqueue
test_largefile
test_locale
test_ossaudiodev
test_socketserver
""",
'os2emx':
"""
test_audioop
test_bsddb185
test_bsddb3
test_commands
test_curses
test_dl
test_epoll
test_kqueue
test_largefile
test_mhlib
test_mmap
test_openpty
test_ossaudiodev
test_pty
test_resource
test_signal
""",
'freebsd4':
"""
test_bsddb
test_bsddb3
test_epoll
test_gdbm
test_locale
test_ossaudiodev
test_pep277
test_pty
test_socketserver
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_timeout
test_urllibnet
test_multiprocessing
""",
'aix5':
"""
test_bsddb
test_bsddb185
test_bsddb3
test_bz2
test_dl
test_epoll
test_gdbm
test_gzip
test_kqueue
test_ossaudiodev
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_zipimport
test_zlib
""",
'openbsd3':
"""
test_ascii_formatd
test_bsddb
test_bsddb3
test_ctypes
test_dl
test_epoll
test_gdbm
test_locale
test_normalization
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
""",
'netbsd3':
"""
test_ascii_formatd
test_bsddb
test_bsddb185
test_bsddb3
test_ctypes
test_curses
test_dl
test_epoll
test_gdbm
test_locale
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
""",
}
_expectations['freebsd5'] = _expectations['freebsd4']
_expectations['freebsd6'] = _expectations['freebsd4']
_expectations['freebsd7'] = _expectations['freebsd4']
_expectations['freebsd8'] = _expectations['freebsd4']
class _ExpectedSkips:
def __init__(self):
import os.path
from test import test_timeout
self.valid = False
if sys.platform in _expectations:
s = _expectations[sys.platform]
self.expected = set(s.split())
# expected to be skipped on every platform, even Linux
self.expected.add('test_linuxaudiodev')
if not os.path.supports_unicode_filenames:
self.expected.add('test_pep277')
if test_timeout.skip_expected:
self.expected.add('test_timeout')
if sys.maxint == 9223372036854775807L:
self.expected.add('test_imageop')
if sys.platform != "darwin":
MAC_ONLY = ["test_macos", "test_macostools", "test_aepack",
"test_plistlib", "test_scriptpackages",
"test_applesingle"]
for skip in MAC_ONLY:
self.expected.add(skip)
elif len(u'\0'.encode('unicode-internal')) == 4:
self.expected.add("test_macostools")
if sys.platform != "win32":
# test_sqlite is only reliable on Windows where the library
# is distributed with Python
WIN_ONLY = ["test_unicode_file", "test_winreg",
"test_winsound", "test_startfile",
"test_sqlite", "test_msilib"]
for skip in WIN_ONLY:
self.expected.add(skip)
if sys.platform != 'irix':
IRIX_ONLY = ["test_imageop", "test_al", "test_cd", "test_cl",
"test_gl", "test_imgfile"]
for skip in IRIX_ONLY:
self.expected.add(skip)
if sys.platform != 'sunos5':
self.expected.add('test_sunaudiodev')
self.expected.add('test_nis')
if not sys.py3kwarning:
self.expected.add('test_py3kwarn')
self.valid = True
def isvalid(self):
"Return true iff _ExpectedSkips knows about the current platform."
return self.valid
def getexpected(self):
"""Return set of test names we expect to skip on current platform.
self.isvalid() must be true.
"""
assert self.isvalid()
return self.expected
def main_in_temp_cwd():
"""Run main() in a temporary working directory."""
global TEMPDIR
# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder. It eases the cleanup of leftover
# files using the command "make distclean".
if sysconfig.is_python_build():
TEMPDIR = os.path.join(sysconfig.get_config_var('srcdir'), 'build')
TEMPDIR = os.path.abspath(TEMPDIR)
if not os.path.exists(TEMPDIR):
os.mkdir(TEMPDIR)
# Define a writable temp dir that will be used as cwd while running
# the tests. The name of the dir includes the pid to allow parallel
# testing (see the -j option).
TESTCWD = 'test_python_{}'.format(os.getpid())
TESTCWD = os.path.join(TEMPDIR, TESTCWD)
# Run the tests in a context manager that temporarily changes the CWD to a
# temporary and writable directory. If it's not possible to create or
# change the CWD, the original CWD will be used. The original CWD is
# available from test_support.SAVEDCWD.
with test_support.temp_cwd(TESTCWD, quiet=True):
main()
if __name__ == '__main__':
# findtestdir() gets the dirname out of __file__, so we have to make it
# absolute before changing the working directory.
# For example __file__ may be relative when running trace or profile.
# See issue #9323.
global __file__
__file__ = os.path.abspath(__file__)
# sanity check
assert __file__ == os.path.abspath(sys.argv[0])
main_in_temp_cwd()
src/cmd/services/m3coordinator/downsample/downsampler_test.go
// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//nolint: dupl
package downsample
import (
"bytes"
"fmt"
"os"
"strings"
"testing"
"time"
"github.com/m3db/m3/src/aggregator/client"
clusterclient "github.com/m3db/m3/src/cluster/client"
"github.com/m3db/m3/src/cluster/kv/mem"
dbclient "github.com/m3db/m3/src/dbnode/client"
"github.com/m3db/m3/src/metrics/aggregation"
"github.com/m3db/m3/src/metrics/generated/proto/metricpb"
"github.com/m3db/m3/src/metrics/generated/proto/rulepb"
"github.com/m3db/m3/src/metrics/matcher"
"github.com/m3db/m3/src/metrics/metadata"
"github.com/m3db/m3/src/metrics/metric/id"
"github.com/m3db/m3/src/metrics/metric/unaggregated"
"github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/metrics/rules"
ruleskv "github.com/m3db/m3/src/metrics/rules/store/kv"
"github.com/m3db/m3/src/metrics/rules/view"
"github.com/m3db/m3/src/metrics/transformation"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/storage/m3"
"github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/storage/mock"
"github.com/m3db/m3/src/query/ts"
"github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/serialize"
xtest "github.com/m3db/m3/src/x/test"
xtime "github.com/m3db/m3/src/x/time"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
var (
testAggregationType = aggregation.Sum
testAggregationStoragePolicies = []policy.StoragePolicy{
policy.MustParseStoragePolicy("2s:1d"),
}
)
const (
nameTag = "__name__"
)
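// TestDownsamplerAggregationWithAutoMappingRulesFromNamespacesWatcher verifies
// that the downsampler becomes enabled once an aggregated cluster namespace is
// registered with the namespaces watcher, and that the auto-mapping rule writes
// the last sample of the gauge at that namespace's resolution.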
func TestDownsamplerAggregationWithAutoMappingRulesFromNamespacesWatcher(t *testing.T) {
t.Parallel()
ctrl := xtest.NewController(t)
defer ctrl.Finish()
gaugeMetrics, _ := testGaugeMetrics(testGaugeMetricsOptions{})
require.Equal(t, 1, len(gaugeMetrics))
gaugeMetric := gaugeMetrics[0]
numSamples := len(gaugeMetric.samples)
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: gaugeMetrics,
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: gaugeMetric.tags,
// NB(nate): Automapping rules generated from cluster namespaces currently
// hardcode 'Last' as the aggregation type. As such, expect value to be the last value
// in the sample.
values: []expectedValue{{value: gaugeMetric.samples[numSamples-1]}},
},
},
},
})
require.False(t, testDownsampler.downsampler.Enabled())
origStagedMetadata := originalStagedMetadata(t, testDownsampler)
session := dbclient.NewMockSession(ctrl)
setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{
NamespaceID: ident.StringID("2s:1d"),
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
Session: session,
})
waitForStagedMetadataUpdate(t, testDownsampler, origStagedMetadata)
require.True(t, testDownsampler.downsampler.Enabled())
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationDownsamplesRawMetricWithRollupRule(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 42},
{value: 64, offset: 1 * time.Second},
},
}
res := 1 * time.Second
ret := 30 * 24 * time.Hour
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
RollupRules: []RollupRuleConfiguration{
{
Filter: fmt.Sprintf(
"%s:http_requests app:* status_code:* endpoint:*",
nameTag),
Transforms: []TransformConfiguration{
{
Transform: &TransformOperationConfiguration{
Type: transformation.PerSecond,
},
},
{
Rollup: &RollupOperationConfiguration{
MetricName: "http_requests_by_status_code",
GroupBy: []string{"app", "status_code", "endpoint"},
Aggregations: []aggregation.Type{aggregation.Sum},
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
// aggregated rollup metric
{
tags: map[string]string{
nameTag: "http_requests_by_status_code",
string(rollupTagName): string(rollupTagValue),
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
},
values: []expectedValue{{value: 22}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
},
// raw aggregated metric
{
tags: gaugeMetric.tags,
values: []expectedValue{{value: 42}, {value: 64}},
},
},
},
})
// Setup auto-mapping rules.
require.False(t, testDownsampler.downsampler.Enabled())
origStagedMetadata := originalStagedMetadata(t, testDownsampler)
ctrl := xtest.NewController(t)
defer ctrl.Finish()
session := dbclient.NewMockSession(ctrl)
setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{
NamespaceID: ident.StringID("1s:30d"),
Resolution: res,
Retention: ret,
Session: session,
})
waitForStagedMetadataUpdate(t, testDownsampler, origStagedMetadata)
require.True(t, testDownsampler.downsampler.Enabled())
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
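// TestDownsamplerEmptyGroupBy verifies that a rollup rule with an empty GroupBy
// rolls matching series with different metric names up into a single series,
// summing their per-second rates.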
func TestDownsamplerEmptyGroupBy(t *testing.T) {
t.Parallel()
requestMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "http_requests",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 42},
{value: 64, offset: 1 * time.Second},
},
}
errorMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "http_errors",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 43},
{value: 65, offset: 1 * time.Second},
},
}
res := 1 * time.Second
ret := 30 * 24 * time.Hour
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
RollupRules: []RollupRuleConfiguration{
{
Filter: fmt.Sprintf("%s:http_*", nameTag),
Transforms: []TransformConfiguration{
{
Transform: &TransformOperationConfiguration{
Type: transformation.PerSecond,
},
},
{
Rollup: &RollupOperationConfiguration{
MetricName: "http_all",
GroupBy: []string{},
Aggregations: []aggregation.Type{aggregation.Sum},
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{requestMetric, errorMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
// aggregated rollup metric
{
tags: map[string]string{
nameTag: "http_all",
string(rollupTagName): string(rollupTagValue),
},
values: []expectedValue{{value: 22 * 2}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
},
// raw aggregated metric
{
tags: requestMetric.tags,
values: []expectedValue{{value: 42}, {value: 64}},
},
{
tags: errorMetric.tags,
values: []expectedValue{{value: 43}, {value: 65}},
},
},
},
})
// Setup auto-mapping rules.
require.False(t, testDownsampler.downsampler.Enabled())
origStagedMetadata := originalStagedMetadata(t, testDownsampler)
ctrl := xtest.NewController(t)
defer ctrl.Finish()
session := dbclient.NewMockSession(ctrl)
setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{
NamespaceID: ident.StringID("1s:30d"),
Resolution: res,
Retention: ret,
Session: session,
})
waitForStagedMetadataUpdate(t, testDownsampler, origStagedMetadata)
require.True(t, testDownsampler.downsampler.Enabled())
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationDoesNotDownsampleRawMetricWithRollupRulesWithoutRollup(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 42},
{value: 64, offset: 1 * time.Second},
},
}
res := 1 * time.Second
ret := 30 * 24 * time.Hour
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
RollupRules: []RollupRuleConfiguration{
{
Filter: fmt.Sprintf(
"%s:http_requests app:* status_code:* endpoint:*",
nameTag),
Transforms: []TransformConfiguration{
{
Aggregate: &AggregateOperationConfiguration{
Type: aggregation.Sum,
},
},
{
Transform: &TransformOperationConfiguration{
Type: transformation.Add,
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
// mapped metric
{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
},
values: []expectedValue{{value: 42}, {value: 106, offset: 1 * time.Second}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
},
},
},
})
	// Set up auto-mapping rules.
require.False(t, testDownsampler.downsampler.Enabled())
origStagedMetadata := originalStagedMetadata(t, testDownsampler)
ctrl := xtest.NewController(t)
defer ctrl.Finish()
session := dbclient.NewMockSession(ctrl)
setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{
NamespaceID: ident.StringID("1s:30d"),
Resolution: res,
Retention: ret,
Session: session,
})
waitForStagedMetadataUpdate(t, testDownsampler, origStagedMetadata)
require.True(t, testDownsampler.downsampler.Enabled())
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
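// TestDownsamplerAggregationToggleEnabled verifies that the downsampler becomes
// enabled once an aggregated cluster namespace is registered and is disabled again
// when the namespaces watcher is updated with only an unaggregated namespace.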
func TestDownsamplerAggregationToggleEnabled(t *testing.T) {
t.Parallel()
ctrl := xtest.NewController(t)
defer ctrl.Finish()
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{})
require.False(t, testDownsampler.downsampler.Enabled())
// Add an aggregated namespace and expect downsampler to be enabled.
session := dbclient.NewMockSession(ctrl)
setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{
NamespaceID: ident.StringID("2s:1d"),
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
Session: session,
})
waitForEnabledUpdate(t, &testDownsampler, false)
require.True(t, testDownsampler.downsampler.Enabled())
// Set just an unaggregated namespace and expect downsampler to be disabled.
clusters, err := m3.NewClusters(m3.UnaggregatedClusterNamespaceDefinition{
NamespaceID: ident.StringID("default"),
Retention: 48 * time.Hour,
Session: session,
})
require.NoError(t, err)
require.NoError(t,
testDownsampler.opts.ClusterNamespacesWatcher.Update(clusters.ClusterNamespaces()))
waitForEnabledUpdate(t, &testDownsampler, true)
require.False(t, testDownsampler.downsampler.Enabled())
}
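// TestDownsamplerAggregationWithRulesStore writes a mapping rule directly to the
// rules KV store and polls the matcher until the rule has propagated before
// exercising the aggregation path.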
func TestDownsamplerAggregationWithRulesStore(t *testing.T) {
t.Parallel()
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{})
rulesStore := testDownsampler.rulesStore
// Create rules
nss, err := rulesStore.ReadNamespaces()
require.NoError(t, err)
_, err = nss.AddNamespace("default", testUpdateMetadata())
require.NoError(t, err)
rule := view.MappingRule{
ID: "mappingrule",
Name: "mappingrule",
Filter: "app:test*",
AggregationID: aggregation.MustCompressTypes(testAggregationType),
StoragePolicies: testAggregationStoragePolicies,
}
rs := rules.NewEmptyRuleSet("default", testUpdateMetadata())
_, err = rs.AddMappingRule(rule, testUpdateMetadata())
require.NoError(t, err)
err = rulesStore.WriteAll(nss, rs)
require.NoError(t, err)
logger := testDownsampler.instrumentOpts.Logger().
With(zap.String("test", t.Name()))
// Wait for mapping rule to appear
logger.Info("waiting for mapping rules to propagate")
matcher := testDownsampler.matcher
testMatchID := newTestID(t, map[string]string{
"__name__": "foo",
"app": "test123",
})
for {
now := time.Now().UnixNano()
res := matcher.ForwardMatch(testMatchID, now, now+1)
results := res.ForExistingIDAt(now)
if !results.IsDefault() {
break
}
time.Sleep(100 * time.Millisecond)
}
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
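// TestDownsamplerAggregationWithRulesConfigMappingRules verifies a statically
// configured mapping rule: the Max aggregation over the samples 15, 10, 30, 5, 0
// is expected to write a single value of 30 at the 1s:30d storage policy.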
func TestDownsamplerAggregationWithRulesConfigMappingRules(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "foo_metric",
"app": "nginx_edge",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "app:nginx*",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: gaugeMetric.tags,
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
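// TestDownsamplerAggregationWithAutoMappingRulesAndRulesConfigMappingRulesAndDropRule
// verifies that the "env:staging" drop rule suppresses the auto-mapping (cluster
// namespace) policies for matching series, while the "app:nginx*" Max rule still
// writes the aggregated value 30 at the 10s:30d storage policy.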
func TestDownsamplerAggregationWithAutoMappingRulesAndRulesConfigMappingRulesAndDropRule(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "foo_metric",
"app": "nginx_edge",
"env": "staging",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
expectDropPolicyApplied: true,
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
autoMappingRules: []m3.ClusterNamespaceOptions{
m3.NewClusterNamespaceOptions(
storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Retention: 2 * time.Hour,
Resolution: 1 * time.Second,
},
nil,
),
m3.NewClusterNamespaceOptions(
storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Retention: 12 * time.Hour,
Resolution: 5 * time.Second,
},
nil,
),
},
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "env:staging",
Drop: true,
},
{
Filter: "app:nginx*",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 10 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
allowFilter: &testDownsamplerOptionsExpectAllowFilter{
attributes: []storagemetadata.Attributes{
{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 10 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
writes: []testExpectedWrite{
{
tags: gaugeMetric.tags,
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 10 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
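// TestDownsamplerAggregationWithRulesConfigMappingRulesPartialReplaceAutoMappingRuleFromNamespacesWatcher
// verifies that the configured Max rule replaces the auto-mapping rule only for the
// storage policy they share (2s:24h, expecting 30), while the 4s:48h auto-mapping
// policy still aggregates with the default Last and is expected to write 0.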
func TestDownsamplerAggregationWithRulesConfigMappingRulesPartialReplaceAutoMappingRuleFromNamespacesWatcher(t *testing.T) {
t.Parallel()
ctrl := xtest.NewController(t)
defer ctrl.Finish()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "foo_metric",
"app": "nginx_edge",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0, offset: 1 * time.Millisecond},
},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "app:nginx*",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
// Expect the max to be used and override the default auto
// mapping rule for the storage policy 2s:24h.
{
tags: gaugeMetric.tags,
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
},
},
// Expect last to still be used for the storage
// policy 4s:48h.
{
tags: gaugeMetric.tags,
// NB(nate): Automapping rules generated from cluster namespaces currently
					// hardcode 'Last' as the aggregation type. As such, expect the value to be
					// the last value in the samples.
values: []expectedValue{{value: 0}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 4 * time.Second,
Retention: 48 * time.Hour,
},
},
},
},
})
origStagedMetadata := originalStagedMetadata(t, testDownsampler)
session := dbclient.NewMockSession(ctrl)
setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{
NamespaceID: ident.StringID("2s:24h"),
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
Session: session,
}, m3.AggregatedClusterNamespaceDefinition{
NamespaceID: ident.StringID("4s:48h"),
Resolution: 4 * time.Second,
Retention: 48 * time.Hour,
Session: session,
})
waitForStagedMetadataUpdate(t, testDownsampler, origStagedMetadata)
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigMappingRulesReplaceAutoMappingRuleFromNamespacesWatcher(t *testing.T) {
t.Parallel()
ctrl := xtest.NewController(t)
defer ctrl.Finish()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "foo_metric",
"app": "nginx_edge",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "app:nginx*",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
// Expect the max to be used and override the default auto
// mapping rule for the storage policy 2s:24h.
{
tags: gaugeMetric.tags,
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
},
},
},
},
})
origStagedMetadata := originalStagedMetadata(t, testDownsampler)
session := dbclient.NewMockSession(ctrl)
setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{
NamespaceID: ident.StringID("2s:24h"),
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
Session: session,
})
waitForStagedMetadataUpdate(t, testDownsampler, origStagedMetadata)
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
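// TestDownsamplerAggregationWithRulesConfigMappingRulesNoNameTag verifies that
// metrics without a __name__ tag are still matched and aggregated when an
// alternate identTag ("endpoint") identifies the series.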
func TestDownsamplerAggregationWithRulesConfigMappingRulesNoNameTag(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"app": "nginx_edge",
"endpoint": "health",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "endpoint",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "app:nginx*",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: gaugeMetric.tags,
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
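// TestDownsamplerAggregationWithRulesConfigMappingRulesTypeFilter verifies that a
// "__m3_type__:counter" filter matches when the sample appender marks the series
// with the M3 counter type, even though the ingested payload is a gauge metric.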
func TestDownsamplerAggregationWithRulesConfigMappingRulesTypeFilter(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"app": "nginx_edge",
"endpoint": "health",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "endpoint",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__m3_type__:counter",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
},
sampleAppenderOpts: &SampleAppenderOptions{
SeriesAttributes: ts.SeriesAttributes{M3Type: ts.M3MetricTypeCounter},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
"app": "nginx_edge",
"endpoint": "health",
},
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
//nolint:dupl
func TestDownsamplerAggregationWithRulesConfigMappingRulesTypePromFilter(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"app": "nginx_edge",
"endpoint": "health",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "endpoint",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__m3_prom_type__:counter",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
},
sampleAppenderOpts: &SampleAppenderOptions{
SeriesAttributes: ts.SeriesAttributes{PromType: ts.PromMetricTypeCounter},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
"app": "nginx_edge",
"endpoint": "health",
},
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigMappingRulesTypeFilterNoMatch(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"app": "nginx_edge",
"endpoint": "health",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "endpoint",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__m3_type__:counter",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
},
sampleAppenderOpts: &SampleAppenderOptions{
SeriesAttributes: ts.SeriesAttributes{M3Type: ts.M3MetricTypeGauge},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
//nolint:dupl
func TestDownsamplerAggregationWithRulesConfigMappingRulesPromTypeFilterNoMatch(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"app": "nginx_edge",
"endpoint": "health",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "endpoint",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__m3_prom_type__:counter",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
},
sampleAppenderOpts: &SampleAppenderOptions{
SeriesAttributes: ts.SeriesAttributes{PromType: ts.PromMetricTypeGauge},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
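// TestDownsamplerAggregationWithRulesConfigMappingRulesAggregationType verifies that
// the __m3_graphite_aggregation__ tag appends the aggregation name as an extra
// graphite node, so the Max rule is expected to write the series with __g2__ set
// to "upper".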
func TestDownsamplerAggregationWithRulesConfigMappingRulesAggregationType(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"__g0__": "nginx_edge",
"__g1__": "health",
"__option_id_scheme__": "graphite",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
tags := []Tag{{Name: "__m3_graphite_aggregation__"}}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "__g2__",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__m3_type__:gauge",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
Tags: tags,
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
"__g0__": "nginx_edge",
"__g1__": "health",
"__g2__": "upper",
},
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigMappingRulesMultipleAggregationType(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"__g0__": "nginx_edge",
"__g1__": "health",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
tags := []Tag{{Name: "__m3_graphite_aggregation__"}}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "__g2__",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__m3_type__:gauge",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
Tags: tags,
},
{
Filter: "__m3_type__:gauge",
Aggregations: []aggregation.Type{aggregation.Sum},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
Tags: tags,
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
"__g0__": "nginx_edge",
"__g1__": "health",
"__g2__": "upper",
},
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
{
tags: map[string]string{
"__g0__": "nginx_edge",
"__g1__": "health",
"__g2__": "sum",
},
values: []expectedValue{{value: 60}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
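// TestDownsamplerAggregationWithRulesConfigMappingRulesGraphitePrefixAndAggregationTags
// verifies that __m3_graphite_prefix__ prepends "stats.counter" as leading graphite
// nodes and __m3_graphite_aggregation__ appends the aggregation name, so the original
// nodes shift to __g2__/__g3__ and "upper" is expected at __g4__.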
func TestDownsamplerAggregationWithRulesConfigMappingRulesGraphitePrefixAndAggregationTags(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"__g0__": "nginx_edge",
"__g1__": "health",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
tags := []Tag{
{Name: "__m3_graphite_aggregation__"},
{Name: "__m3_graphite_prefix__", Value: "stats.counter"},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "__g4__",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__m3_type__:gauge",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
Tags: tags,
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
"__g0__": "stats",
"__g1__": "counter",
"__g2__": "nginx_edge",
"__g3__": "health",
"__g4__": "upper",
},
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigMappingRulesGraphitePrefixTag(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"__g0__": "nginx_edge",
"__g1__": "health",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
tags := []Tag{
{Name: "__m3_graphite_prefix__", Value: "stats.counter"},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "__g3__",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__m3_type__:gauge",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
Tags: tags,
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
"__g0__": "stats",
"__g1__": "counter",
"__g2__": "nginx_edge",
"__g3__": "health",
},
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
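// TestDownsamplerAggregationWithRulesConfigMappingRulesPromQuantileTag verifies that
// the __m3_prom_summary__ tag adds a Prometheus-style "quantile" tag for percentile
// aggregations: the P50 of 15, 10, 30, 5, 0 is 10 and is expected to be tagged with
// quantile="0.5".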
func TestDownsamplerAggregationWithRulesConfigMappingRulesPromQuantileTag(t *testing.T) {
t.Parallel()
timerMetric := testTimerMetric{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"endpoint": "health",
},
timedSamples: []testTimerMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
tags := []Tag{
{Name: "__m3_prom_summary__"},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__m3_type__:timer",
Aggregations: []aggregation.Type{aggregation.P50},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
Tags: tags,
},
},
},
sampleAppenderOpts: &SampleAppenderOptions{
SeriesAttributes: ts.SeriesAttributes{M3Type: ts.M3MetricTypeTimer},
},
ingest: &testDownsamplerOptionsIngest{
timerMetrics: []testTimerMetric{timerMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"endpoint": "health",
"agg": ".p50",
"quantile": "0.5",
},
values: []expectedValue{{value: 10}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
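// TestDownsamplerAggregationWithRulesConfigMappingRulesPromQuantileTagIgnored verifies
// that __m3_prom_summary__ adds no "quantile" tag for non-percentile aggregations such
// as Max, which is expected to only receive the ".upper" agg suffix.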
func TestDownsamplerAggregationWithRulesConfigMappingRulesPromQuantileTagIgnored(t *testing.T) {
t.Parallel()
timerMetric := testTimerMetric{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"endpoint": "health",
},
timedSamples: []testTimerMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
tags := []Tag{
{Name: "__m3_prom_summary__"},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__m3_type__:timer",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
Tags: tags,
},
},
},
sampleAppenderOpts: &SampleAppenderOptions{
SeriesAttributes: ts.SeriesAttributes{M3Type: ts.M3MetricTypeTimer},
},
ingest: &testDownsamplerOptionsIngest{
timerMetrics: []testTimerMetric{timerMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"endpoint": "health",
"agg": ".upper",
},
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigMappingRulesAugmentTag(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"app": "nginx_edge",
"endpoint": "health",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
tags := []Tag{
{Name: "datacenter", Value: "abc"},
}
//nolint:dupl
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "app",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "app:nginx*",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
Tags: tags,
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
"app": "nginx_edge",
"endpoint": "health",
"datacenter": "abc",
},
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
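// TestDownsamplerAggregationWithRulesConfigMappingRulesWithDropTSTag verifies that the
// __m3_drop_timestamp__ tag causes the matching counter series to be written untimed
// (Sum of 1 + 2 + 3 = 6), while the gauge series aggregated by the other rule keeps
// its timestamps (Max of 30).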
func TestDownsamplerAggregationWithRulesConfigMappingRulesWithDropTSTag(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "foo_metric",
"app": "nginx_edge",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
counterMetric := testCounterMetric{
tags: map[string]string{
nameTag: "counter0",
"app": "testapp",
"foo": "bar",
},
timedSamples: []testCounterMetricTimedSample{
{value: 1}, {value: 2}, {value: 3},
},
expectDropTimestamp: true,
}
tags := []Tag{
{Name: "__m3_drop_timestamp__", Value: ""},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "app:nginx*",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
{
Filter: "app:testapp",
Aggregations: []aggregation.Type{aggregation.Sum},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
Tags: tags,
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
counterMetrics: []testCounterMetric{counterMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: gaugeMetric.tags,
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
{
tags: counterMetric.tags,
values: []expectedValue{{value: 6}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigRollupRulesNoNameTag(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 42},
{value: 64, offset: 1 * time.Second},
},
}
res := 1 * time.Second
ret := 30 * 24 * time.Hour
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "endpoint",
rulesConfig: &RulesConfiguration{
RollupRules: []RollupRuleConfiguration{
{
Filter: fmt.Sprintf(
"%s:http_requests app:* status_code:* endpoint:*",
nameTag),
Transforms: []TransformConfiguration{
{
Transform: &TransformOperationConfiguration{
Type: transformation.PerSecond,
},
},
{
Rollup: &RollupOperationConfiguration{
MetricName: "http_requests_by_status_code",
GroupBy: []string{"app", "status_code", "endpoint"},
Aggregations: []aggregation.Type{aggregation.Sum},
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
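// TestDownsamplerAggregationWithRulesConfigRollupRulesPerSecondSum verifies a
// PerSecond transform followed by a Sum rollup: the gauge rises from 42 to 64 over
// one second, so the rolled-up "http_requests_by_status_code" series is expected
// to be 22.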
func TestDownsamplerAggregationWithRulesConfigRollupRulesPerSecondSum(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 42},
{value: 64, offset: 1 * time.Second},
},
}
res := 1 * time.Second
ret := 30 * 24 * time.Hour
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
RollupRules: []RollupRuleConfiguration{
{
Filter: fmt.Sprintf(
"%s:http_requests app:* status_code:* endpoint:*",
nameTag),
Transforms: []TransformConfiguration{
{
Transform: &TransformOperationConfiguration{
Type: transformation.PerSecond,
},
},
{
Rollup: &RollupOperationConfiguration{
MetricName: "http_requests_by_status_code",
GroupBy: []string{"app", "status_code", "endpoint"},
Aggregations: []aggregation.Type{aggregation.Sum},
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
nameTag: "http_requests_by_status_code",
string(rollupTagName): string(rollupTagValue),
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
},
values: []expectedValue{{value: 22}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
// TestDownsamplerAggregationWithRulesConfigRollupRulesAggregateTransformNoRollup
// tests that rollup rules can be used simply to transform values without the
// need for an explicit rollup step.
func TestDownsamplerAggregationWithRulesConfigRollupRulesAggregateTransformNoRollup(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 42},
{value: 64},
},
}
res := 5 * time.Second
ret := 30 * 24 * time.Hour
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
RollupRules: []RollupRuleConfiguration{
{
Filter: fmt.Sprintf(
"%s:http_requests app:* status_code:* endpoint:*",
nameTag),
Transforms: []TransformConfiguration{
{
Aggregate: &AggregateOperationConfiguration{
Type: aggregation.Sum,
},
},
{
Transform: &TransformOperationConfiguration{
Type: transformation.Add,
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value",
},
values: []expectedValue{{value: 106}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
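// TestDownsamplerAggregationWithRulesConfigRollupRulesIncreaseAdd verifies an
// Increase transform, Sum rollup and Add transform chain: per the sample notes
// below, the first counted increase across both series is 14, and the running
// Add total is expected to reach 14 + 21 + 15 = 50 in the following window.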
func TestDownsamplerAggregationWithRulesConfigRollupRulesIncreaseAdd(t *testing.T) {
t.Parallel()
// nolint:dupl
gaugeMetrics := []testGaugeMetric{
{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value_1",
},
timedSamples: []testGaugeMetricTimedSample{
				{value: 42, offset: 1 * time.Second}, // +42 (should not be counted since it is a reset)
				// Explicitly no value.
				{value: 12, offset: 2 * time.Second}, // +12 - simulates a reset (should not be counted)
{value: 33, offset: 3 * time.Second}, // +21
},
},
{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value_2",
},
timedSamples: []testGaugeMetricTimedSample{
				{value: 13, offset: 1 * time.Second}, // +13 (should not be counted since it is a reset)
				{value: 27, offset: 2 * time.Second}, // +14
				// Explicitly no value.
{value: 42, offset: 3 * time.Second}, // +15
},
},
}
res := 1 * time.Second
ret := 30 * 24 * time.Hour
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
RollupRules: []RollupRuleConfiguration{
{
Filter: fmt.Sprintf(
"%s:http_requests app:* status_code:* endpoint:*",
nameTag),
Transforms: []TransformConfiguration{
{
Transform: &TransformOperationConfiguration{
Type: transformation.Increase,
},
},
{
Rollup: &RollupOperationConfiguration{
MetricName: "http_requests_by_status_code",
GroupBy: []string{"app", "status_code", "endpoint"},
Aggregations: []aggregation.Type{aggregation.Sum},
},
},
{
Transform: &TransformOperationConfiguration{
Type: transformation.Add,
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: gaugeMetrics,
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
nameTag: "http_requests_by_status_code",
string(rollupTagName): string(rollupTagValue),
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
},
values: []expectedValue{
{value: 14},
{value: 50, offset: 1 * time.Second},
},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigRollupRuleAndDropPolicy(t *testing.T) {
t.Parallel()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 42},
{value: 64, offset: 1 * time.Second},
},
expectDropPolicyApplied: true,
}
res := 1 * time.Second
ret := 30 * 24 * time.Hour
filter := fmt.Sprintf("%s:http_requests app:* status_code:* endpoint:*", nameTag)
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: filter,
Drop: true,
},
},
RollupRules: []RollupRuleConfiguration{
{
Filter: filter,
Transforms: []TransformConfiguration{
{
Transform: &TransformOperationConfiguration{
Type: transformation.PerSecond,
},
},
{
Rollup: &RollupOperationConfiguration{
MetricName: "http_requests_by_status_code",
GroupBy: []string{"app", "status_code", "endpoint"},
Aggregations: []aggregation.Type{aggregation.Sum},
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
nameTag: "http_requests_by_status_code",
string(rollupTagName): string(rollupTagValue),
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
},
values: []expectedValue{{value: 22}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
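// TestDownsamplerAggregationWithRulesConfigRollupRuleAndDropPolicyAndDropTimestamp
// verifies that a drop mapping rule tagged with __m3_drop_timestamp__ leaves only an
// untimed rollup write: the Sum rollup over all samples is 42 + 12 + 13 + 27 = 94.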
func TestDownsamplerAggregationWithRulesConfigRollupRuleAndDropPolicyAndDropTimestamp(t *testing.T) {
t.Parallel()
gaugeMetrics := []testGaugeMetric{
{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value_1",
},
timedSamples: []testGaugeMetricTimedSample{
				{value: 42}, // +42 (should not be counted since it is a reset)
				// Explicitly no value.
				{value: 12, offset: 2 * time.Second}, // +12 - simulates a reset (should not be counted)
},
expectDropTimestamp: true,
expectDropPolicyApplied: true,
},
{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value_2",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 13},
{value: 27, offset: 2 * time.Second}, // +14
},
expectDropTimestamp: true,
expectDropPolicyApplied: true,
},
}
tags := []Tag{
{Name: "__m3_drop_timestamp__", Value: ""},
}
res := 1 * time.Second
ret := 30 * 24 * time.Hour
filter := fmt.Sprintf("%s:http_requests app:* status_code:* endpoint:*", nameTag)
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: filter,
Drop: true,
Tags: tags,
},
},
RollupRules: []RollupRuleConfiguration{
{
Filter: filter,
Transforms: []TransformConfiguration{
{
Rollup: &RollupOperationConfiguration{
MetricName: "http_requests_by_status_code",
GroupBy: []string{"app", "status_code", "endpoint"},
Aggregations: []aggregation.Type{aggregation.Sum},
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: gaugeMetrics,
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
nameTag: "http_requests_by_status_code",
string(rollupTagName): string(rollupTagValue),
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
},
values: []expectedValue{{value: 94}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
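// TestDownsamplerAggregationWithRulesConfigRollupRuleUntimedRollups verifies that
// with untimedRollups enabled the rolled-up writes drop their timestamps and the
// Sum rollup accumulates every sample: 42 + 12 + 13 + 27 = 94.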
func TestDownsamplerAggregationWithRulesConfigRollupRuleUntimedRollups(t *testing.T) {
t.Parallel()
gaugeMetrics := []testGaugeMetric{
{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value_1",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 42},
{value: 12, offset: 2 * time.Second},
},
expectDropTimestamp: true,
},
{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value_2",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 13},
{value: 27, offset: 2 * time.Second},
},
expectDropTimestamp: true,
},
}
res := 1 * time.Second
ret := 30 * 24 * time.Hour
filter := fmt.Sprintf("%s:http_requests app:* status_code:* endpoint:*", nameTag)
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
untimedRollups: true,
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "app:nginx*",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
RollupRules: []RollupRuleConfiguration{
{
Filter: filter,
Transforms: []TransformConfiguration{
{
Rollup: &RollupOperationConfiguration{
MetricName: "http_requests_by_status_code",
GroupBy: []string{"app", "status_code", "endpoint"},
Aggregations: []aggregation.Type{aggregation.Sum},
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: gaugeMetrics,
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
nameTag: "http_requests_by_status_code",
string(rollupTagName): string(rollupTagValue),
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
},
values: []expectedValue{{value: 94}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigRollupRuleUntimedRollupsWaitForOffset(t *testing.T) {
t.Parallel()
gaugeMetrics := []testGaugeMetric{
{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value_1",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 42},
},
expectDropPolicyApplied: true,
expectDropTimestamp: true,
},
{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value_2",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 12, offset: 2 * time.Second},
},
expectDropPolicyApplied: true,
expectDropTimestamp: true,
},
{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value_3",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 13},
},
expectDropPolicyApplied: true,
expectDropTimestamp: true,
},
}
res := 1 * time.Second
ret := 30 * 24 * time.Hour
filter := fmt.Sprintf("%s:http_requests app:* status_code:* endpoint:*", nameTag)
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
waitForOffset: true,
untimedRollups: true,
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: filter,
Drop: true,
},
},
RollupRules: []RollupRuleConfiguration{
{
Filter: filter,
Transforms: []TransformConfiguration{
{
Rollup: &RollupOperationConfiguration{
MetricName: "http_requests_by_status_code",
GroupBy: []string{"app", "status_code", "endpoint"},
Aggregations: []aggregation.Type{aggregation.Sum},
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: gaugeMetrics,
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
nameTag: "http_requests_by_status_code",
string(rollupTagName): string(rollupTagValue),
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
},
values: []expectedValue{{value: 42}, {value: 25}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigRollupRuleRollupLaterUntimedRollups(t *testing.T) {
t.Parallel()
gaugeMetrics := []testGaugeMetric{
{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value_1",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 42},
{value: 12, offset: 2 * time.Second},
},
expectDropTimestamp: true,
},
{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value_2",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 13},
{value: 27, offset: 2 * time.Second},
},
expectDropTimestamp: true,
},
}
res := 1 * time.Second
ret := 30 * 24 * time.Hour
filter := fmt.Sprintf("%s:http_requests app:* status_code:* endpoint:*", nameTag)
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
untimedRollups: true,
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "app:nginx*",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 1 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
RollupRules: []RollupRuleConfiguration{
{
Filter: filter,
Transforms: []TransformConfiguration{
{
Transform: &TransformOperationConfiguration{
Type: transformation.Add,
},
},
{
Rollup: &RollupOperationConfiguration{
MetricName: "http_requests_by_status_code",
GroupBy: []string{"app", "status_code", "endpoint"},
Aggregations: []aggregation.Type{aggregation.Sum},
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: gaugeMetrics,
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
nameTag: "http_requests_by_status_code",
string(rollupTagName): string(rollupTagValue),
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
},
values: []expectedValue{{value: 39}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
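// TestDownsamplerAggregationWithRulesConfigRollupRulesExcludeByLastMean verifies an
// ExcludeBy rollup that drops the "instance" tag: the Last aggregate of each series
// (42 and 13) is averaged by the Mean rollup, so (42 + 13) / 2 = 27.5 is expected.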
func TestDownsamplerAggregationWithRulesConfigRollupRulesExcludeByLastMean(t *testing.T) {
t.Parallel()
gaugeMetrics := []testGaugeMetric{
{
tags: map[string]string{
nameTag: "http_request_latency_max_gauge",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"instance": "not_rolled_up_instance_1",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 42},
},
},
{
tags: map[string]string{
nameTag: "http_request_latency_max_gauge",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"instance": "not_rolled_up_instance_2",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 13},
},
},
}
res := 1 * time.Second
ret := 30 * 24 * time.Hour
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
RollupRules: []RollupRuleConfiguration{
{
Filter: fmt.Sprintf(
"%s:http_request_latency_max_gauge app:* status_code:* endpoint:*",
nameTag),
Transforms: []TransformConfiguration{
{
Aggregate: &AggregateOperationConfiguration{
Type: aggregation.Last,
},
},
{
Rollup: &RollupOperationConfiguration{
MetricName: "{{ .MetricName }}:mean_without_instance",
ExcludeBy: []string{"instance"},
Aggregations: []aggregation.Type{aggregation.Mean},
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: gaugeMetrics,
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
nameTag: "http_request_latency_max_gauge:mean_without_instance",
string(rollupTagName): string(rollupTagValue),
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
},
values: []expectedValue{
{value: 27.5},
},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigRollupRulesExcludeByIncreaseSumAdd(t *testing.T) {
t.Parallel()
// nolint:dupl
gaugeMetrics := []testGaugeMetric{
{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"instance": "not_rolled_up_instance_1",
},
timedSamples: []testGaugeMetricTimedSample{
				{value: 42, offset: 1 * time.Second}, // +42 (should not be counted since it is a reset)
				// Explicitly no value.
				{value: 12, offset: 2 * time.Second}, // +12 - simulates a reset (should not be counted)
{value: 33, offset: 3 * time.Second}, // +21
},
},
{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"instance": "not_rolled_up_instance_2",
},
timedSamples: []testGaugeMetricTimedSample{
				{value: 13, offset: 1 * time.Second}, // +13 (should not be counted since it is a reset)
				{value: 27, offset: 2 * time.Second}, // +14
				// Explicitly no value.
{value: 42, offset: 3 * time.Second}, // +15
},
},
}
res := 1 * time.Second
ret := 30 * 24 * time.Hour
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
RollupRules: []RollupRuleConfiguration{
{
Filter: fmt.Sprintf(
"%s:http_requests app:* status_code:* endpoint:*",
nameTag),
Transforms: []TransformConfiguration{
{
Transform: &TransformOperationConfiguration{
Type: transformation.Increase,
},
},
{
Rollup: &RollupOperationConfiguration{
MetricName: "{{ .MetricName }}:sum_without_instance",
ExcludeBy: []string{"instance"},
Aggregations: []aggregation.Type{aggregation.Sum},
},
},
{
Transform: &TransformOperationConfiguration{
Type: transformation.Add,
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: gaugeMetrics,
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
nameTag: "http_requests:sum_without_instance",
string(rollupTagName): string(rollupTagValue),
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
},
values: []expectedValue{
{value: 14},
{value: 50, offset: 1 * time.Second},
},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithTimedSamples(t *testing.T) {
counterMetrics, counterMetricsExpect := testCounterMetrics(testCounterMetricsOptions{
timedSamples: true,
})
gaugeMetrics, gaugeMetricsExpect := testGaugeMetrics(testGaugeMetricsOptions{
timedSamples: true,
})
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
ingest: &testDownsamplerOptionsIngest{
counterMetrics: counterMetrics,
gaugeMetrics: gaugeMetrics,
},
expect: &testDownsamplerOptionsExpect{
writes: append(counterMetricsExpect, gaugeMetricsExpect...),
},
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__name__:*",
Aggregations: []aggregation.Type{testAggregationType},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
},
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithOverrideRules(t *testing.T) {
counterMetrics, counterMetricsExpect := testCounterMetrics(testCounterMetricsOptions{})
counterMetricsExpect[0].values = []expectedValue{{value: 2}}
gaugeMetrics, gaugeMetricsExpect := testGaugeMetrics(testGaugeMetricsOptions{})
gaugeMetricsExpect[0].values = []expectedValue{{value: 5}}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
sampleAppenderOpts: &SampleAppenderOptions{
Override: true,
OverrideRules: SamplesAppenderOverrideRules{
MappingRules: []AutoMappingRule{
{
Aggregations: []aggregation.Type{aggregation.Mean},
Policies: []policy.StoragePolicy{
policy.MustParseStoragePolicy("4s:1d"),
},
},
},
},
},
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__name__:*",
Aggregations: []aggregation.Type{testAggregationType},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
counterMetrics: counterMetrics,
gaugeMetrics: gaugeMetrics,
},
expect: &testDownsamplerOptionsExpect{
writes: append(counterMetricsExpect, gaugeMetricsExpect...),
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRemoteAggregatorClient(t *testing.T) {
ctrl := xtest.NewController(t)
defer ctrl.Finish()
// Create mock client
remoteClientMock := client.NewMockClient(ctrl)
remoteClientMock.EXPECT().Init().Return(nil)
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__name__:*",
Aggregations: []aggregation.Type{testAggregationType},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
},
},
},
},
},
remoteClientMock: remoteClientMock,
})
// Test expected output
testDownsamplerRemoteAggregation(t, testDownsampler)
}
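// TestDownsamplerWithOverrideNamespace verifies that the matcher reads the namespace
// from the configured override NamespaceTag, so a value set on the default namespace
// tag is ignored; the PerSecond rollup of 42 -> 64 over 5 seconds is expected to
// be 4.4.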
func TestDownsamplerWithOverrideNamespace(t *testing.T) {
overrideNamespaceTag := "override_namespace_tag"
gaugeMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value",
// Set namespace tags on ingested metrics.
// The test demonstrates that overrideNamespaceTag is respected, meaning setting
// values on defaultNamespaceTag won't affect aggregation.
defaultNamespaceTag: "namespace_ignored",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 42},
{value: 64, offset: 5 * time.Second},
},
}
res := 5 * time.Second
ret := 30 * 24 * time.Hour
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
RollupRules: []RollupRuleConfiguration{
{
Filter: fmt.Sprintf(
"%s:http_requests app:* status_code:* endpoint:*",
nameTag),
Transforms: []TransformConfiguration{
{
Transform: &TransformOperationConfiguration{
Type: transformation.PerSecond,
},
},
{
Rollup: &RollupOperationConfiguration{
MetricName: "http_requests_by_status_code",
GroupBy: []string{"app", "status_code", "endpoint"},
Aggregations: []aggregation.Type{aggregation.Sum},
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
matcherConfig: MatcherConfiguration{NamespaceTag: overrideNamespaceTag},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
nameTag: "http_requests_by_status_code",
string(rollupTagName): string(rollupTagValue),
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
},
values: []expectedValue{{value: 4.4}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func originalStagedMetadata(t *testing.T, testDownsampler testDownsampler) []metricpb.StagedMetadatas {
ds, ok := testDownsampler.downsampler.(*downsampler)
require.True(t, ok)
origStagedMetadata := ds.metricsAppenderOpts.defaultStagedMetadatasProtos
return origStagedMetadata
}
func waitForStagedMetadataUpdate(t *testing.T, testDownsampler testDownsampler, origStagedMetadata []metricpb.StagedMetadatas) {
ds, ok := testDownsampler.downsampler.(*downsampler)
require.True(t, ok)
require.True(t, clock.WaitUntil(func() bool {
ds.RLock()
defer ds.RUnlock()
return !assert.ObjectsAreEqual(origStagedMetadata, ds.metricsAppenderOpts.defaultStagedMetadatasProtos)
}, time.Second))
}
func waitForEnabledUpdate(t *testing.T, testDownsampler *testDownsampler, current bool) {
ds, ok := testDownsampler.downsampler.(*downsampler)
require.True(t, ok)
require.True(t, clock.WaitUntil(func() bool {
ds.RLock()
defer ds.RUnlock()
return current != ds.enabled
}, time.Second))
}
type testExpectedWrite struct {
tags map[string]string
values []expectedValue // multiple values indicate multiple expected writes
valueAllowedError float64 // tolerance for slightly inexact values due to timing, etc.
attributes *storagemetadata.Attributes
}
type expectedValue struct {
offset time.Duration
value float64
}
type testCounterMetric struct {
tags map[string]string
samples []int64
timedSamples []testCounterMetricTimedSample
expectDropPolicyApplied bool
expectDropTimestamp bool
}
type testCounterMetricTimedSample struct {
time xtime.UnixNano
offset time.Duration
value int64
}
type testGaugeMetric struct {
tags map[string]string
samples []float64
timedSamples []testGaugeMetricTimedSample
expectDropPolicyApplied bool
expectDropTimestamp bool
}
type testGaugeMetricTimedSample struct {
time xtime.UnixNano
offset time.Duration
value float64
}
type testTimerMetric struct {
tags map[string]string
samples []float64
timedSamples []testTimerMetricTimedSample
expectDropPolicyApplied bool
expectDropTimestamp bool
}
type testTimerMetricTimedSample struct {
time xtime.UnixNano
offset time.Duration
value float64
}
type testCounterMetricsOptions struct {
timedSamples bool
}
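// testCounterMetrics returns a fixture counter metric (optionally using timed
// samples) together with the single write expected after aggregation: the sum
// of its samples.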
func testCounterMetrics(opts testCounterMetricsOptions) (
[]testCounterMetric,
[]testExpectedWrite,
) {
metric := testCounterMetric{
tags: map[string]string{nameTag: "counter0", "app": "testapp", "foo": "bar"},
samples: []int64{1, 2, 3},
}
if opts.timedSamples {
metric.samples = nil
metric.timedSamples = []testCounterMetricTimedSample{
{value: 1}, {value: 2}, {value: 3},
}
}
write := testExpectedWrite{
tags: metric.tags,
values: []expectedValue{{value: 6}},
}
return []testCounterMetric{metric}, []testExpectedWrite{write}
}
type testGaugeMetricsOptions struct {
timedSamples bool
}
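// testGaugeMetrics returns a fixture gauge metric (optionally using timed
// samples) together with the single write expected after aggregation: the sum
// of its samples.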
func testGaugeMetrics(opts testGaugeMetricsOptions) ([]testGaugeMetric, []testExpectedWrite) {
metric := testGaugeMetric{
tags: map[string]string{nameTag: "gauge0", "app": "testapp", "qux": "qaz"},
samples: []float64{4, 5, 6},
}
if opts.timedSamples {
metric.samples = nil
metric.timedSamples = []testGaugeMetricTimedSample{
{value: 4},
{value: 5},
{value: 6, offset: 1 * time.Nanosecond},
}
}
write := testExpectedWrite{
tags: metric.tags,
values: []expectedValue{{value: 15}},
}
return []testGaugeMetric{metric}, []testExpectedWrite{write}
}
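// testDownsamplerAggregation ingests the configured test metrics, waits until
// the expected writes reach the mock storage, and then verifies their tags,
// values, timestamp offsets and storage attributes.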
func testDownsamplerAggregation(
t *testing.T,
testDownsampler testDownsampler,
) {
testOpts := testDownsampler.testOpts
logger := testDownsampler.instrumentOpts.Logger().
With(zap.String("test", t.Name()))
counterMetrics, counterMetricsExpect := testCounterMetrics(testCounterMetricsOptions{})
gaugeMetrics, gaugeMetricsExpect := testGaugeMetrics(testGaugeMetricsOptions{})
expectedWrites := append(counterMetricsExpect, gaugeMetricsExpect...)
// Allow overrides
var (
allowFilter *testDownsamplerOptionsExpectAllowFilter
timerMetrics []testTimerMetric
)
if ingest := testOpts.ingest; ingest != nil {
counterMetrics = ingest.counterMetrics
gaugeMetrics = ingest.gaugeMetrics
timerMetrics = ingest.timerMetrics
}
if expect := testOpts.expect; expect != nil {
expectedWrites = expect.writes
allowFilter = expect.allowFilter
}
// Ingest points
testDownsamplerAggregationIngest(t, testDownsampler,
counterMetrics, gaugeMetrics, timerMetrics)
// Wait for writes
logger.Info("wait for test metrics to appear")
logWritesAccumulated := os.Getenv("TEST_LOG_WRITES_ACCUMULATED") == "true"
logWritesAccumulatedTicker := time.NewTicker(time.Second)
logWritesMatch := os.Getenv("TEST_LOG_WRITES_MATCH") == "true"
logWritesMatchTicker := time.NewTicker(time.Second)
identTag := nameTag
if len(testDownsampler.testOpts.identTag) > 0 {
identTag = testDownsampler.testOpts.identTag
}
CheckAllWritesArrivedLoop:
for {
allWrites := testDownsampler.storage.Writes()
if logWritesAccumulated {
select {
case <-logWritesAccumulatedTicker.C:
logger.Info("logging accmulated writes",
zap.Int("numAllWrites", len(allWrites)))
for _, write := range allWrites {
logger.Info("accumulated write",
zap.ByteString("tags", write.Tags().ID()),
zap.Any("datapoints", write.Datapoints()),
zap.Any("attributes", write.Attributes()))
}
default:
}
}
for _, expectedWrite := range expectedWrites {
name := expectedWrite.tags[identTag]
attrs := expectedWrite.attributes
writesForNameAndAttrs, _ := findWrites(allWrites, name, identTag, attrs)
if len(writesForNameAndAttrs) != len(expectedWrite.values) {
if logWritesMatch {
select {
case <-logWritesMatchTicker.C:
logger.Info("continuing wait for accumulated writes",
zap.String("name", name),
zap.Any("attributes", attrs),
zap.Int("numWritesForNameAndAttrs", len(writesForNameAndAttrs)),
zap.Int("numExpectedWriteValues", len(expectedWrite.values)),
)
default:
}
}
time.Sleep(100 * time.Millisecond)
continue CheckAllWritesArrivedLoop
}
}
break
}
// Verify writes
logger.Info("verify test metrics")
allWrites := testDownsampler.storage.Writes()
if logWritesAccumulated {
logger.Info("logging accmulated writes to verify",
zap.Int("numAllWrites", len(allWrites)))
for _, write := range allWrites {
logger.Info("accumulated write",
zap.ByteString("tags", write.Tags().ID()),
zap.Any("datapoints", write.Datapoints()))
}
}
for _, expectedWrite := range expectedWrites {
name := expectedWrite.tags[identTag]
expectedValues := expectedWrite.values
allowedError := expectedWrite.valueAllowedError
writesForNameAndAttrs, found := findWrites(allWrites, name, identTag, expectedWrite.attributes)
require.True(t, found)
require.Equal(t, len(expectedValues), len(writesForNameAndAttrs))
for i, expectedValue := range expectedValues {
write := writesForNameAndAttrs[i]
assert.Equal(t, expectedWrite.tags, tagsToStringMap(write.Tags()))
require.Equal(t, 1, len(write.Datapoints()))
actualValue := write.Datapoints()[0].Value
if allowedError == 0 {
// Exact match value.
assert.Equal(t, expectedValue.value, actualValue)
} else {
// Fuzzy match value.
lower := expectedValue.value - allowedError
upper := expectedValue.value + allowedError
withinBounds := (lower <= actualValue) && (actualValue <= upper)
msg := fmt.Sprintf("expected within: lower=%f, upper=%f, actual=%f",
lower, upper, actualValue)
assert.True(t, withinBounds, msg)
}
if expectedOffset := expectedValue.offset; expectedOffset > 0 {
// Check that the distance between datapoints is as expected (using the
// absolute offset from the first write).
firstTimestamp := writesForNameAndAttrs[0].Datapoints()[0].Timestamp
actualOffset := write.Datapoints()[0].Timestamp.Sub(firstTimestamp)
assert.Equal(t, expectedOffset, actualOffset)
}
if attrs := expectedWrite.attributes; attrs != nil {
assert.Equal(t, *attrs, write.Attributes())
}
}
}
if allowFilter == nil {
return // No allow filter checking required.
}
for _, write := range testDownsampler.storage.Writes() {
attrs := write.Attributes()
foundMatchingAttribute := false
for _, allowed := range allowFilter.attributes {
if allowed == attrs {
foundMatchingAttribute = true
break
}
}
assert.True(t, foundMatchingAttribute,
fmt.Sprintf("attribute not allowed: allowed=%v, actual=%v",
allowFilter.attributes, attrs))
}
}
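// testDownsamplerRemoteAggregation ingests the test metrics and verifies that
// every counter and gauge sample is forwarded to the mocked remote aggregator
// client instead of being aggregated locally.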
func testDownsamplerRemoteAggregation(
t *testing.T,
testDownsampler testDownsampler,
) {
testOpts := testDownsampler.testOpts
expectTestCounterMetrics, _ := testCounterMetrics(testCounterMetricsOptions{})
testCounterMetrics, _ := testCounterMetrics(testCounterMetricsOptions{})
expectTestGaugeMetrics, _ := testGaugeMetrics(testGaugeMetricsOptions{})
testGaugeMetrics, _ := testGaugeMetrics(testGaugeMetricsOptions{})
remoteClientMock := testOpts.remoteClientMock
require.NotNil(t, remoteClientMock)
// Expect ingestion
checkedCounterSamples := 0
remoteClientMock.EXPECT().
WriteUntimedCounter(gomock.Any(), gomock.Any()).
AnyTimes().
Do(func(counter unaggregated.Counter,
metadatas metadata.StagedMetadatas,
) error {
for _, c := range expectTestCounterMetrics {
if !strings.Contains(counter.ID.String(), c.tags[nameTag]) {
continue
}
var remainingSamples []int64
found := false
for _, s := range c.samples {
if !found && s == counter.Value {
found = true
} else {
remainingSamples = append(remainingSamples, s)
}
}
c.samples = remainingSamples
if found {
checkedCounterSamples++
}
break
}
return nil
})
checkedGaugeSamples := 0
remoteClientMock.EXPECT().
WriteUntimedGauge(gomock.Any(), gomock.Any()).
AnyTimes().
Do(func(gauge unaggregated.Gauge,
metadatas metadata.StagedMetadatas,
) error {
for _, g := range expectTestGaugeMetrics {
if !strings.Contains(gauge.ID.String(), g.tags[nameTag]) {
continue
}
var remainingSamples []float64
found := false
for _, s := range g.samples {
if !found && s == gauge.Value {
found = true
} else {
remainingSamples = append(remainingSamples, s)
}
}
g.samples = remainingSamples
if found {
checkedGaugeSamples++
}
break
}
return nil
})
// Ingest points
testDownsamplerAggregationIngest(t, testDownsampler,
testCounterMetrics, testGaugeMetrics, []testTimerMetric{})
// Ensure we checked counters and gauges
samplesCounters := 0
for _, c := range testCounterMetrics {
samplesCounters += len(c.samples)
}
samplesGauges := 0
for _, c := range testGaugeMetrics {
samplesGauges += len(c.samples)
}
require.Equal(t, samplesCounters, checkedCounterSamples)
require.Equal(t, samplesGauges, checkedGaugeSamples)
}
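// testDownsamplerAggregationIngest writes the supplied counter, gauge and timer
// metrics through a metrics appender obtained from the downsampler, honouring
// per-sample timestamps and offsets.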
func testDownsamplerAggregationIngest(
t *testing.T,
testDownsampler testDownsampler,
testCounterMetrics []testCounterMetric,
testGaugeMetrics []testGaugeMetric,
testTimerMetrics []testTimerMetric,
) {
downsampler := testDownsampler.downsampler
testOpts := testDownsampler.testOpts
logger := testDownsampler.instrumentOpts.Logger().
With(zap.String("test", t.Name()))
logger.Info("write test metrics")
appender, err := downsampler.NewMetricsAppender()
require.NoError(t, err)
defer appender.Finalize()
var opts SampleAppenderOptions
if testOpts.sampleAppenderOpts != nil {
opts = *testOpts.sampleAppenderOpts
}
// make the current timestamp predictable:
now := time.Now().Truncate(time.Microsecond)
xNow := xtime.ToUnixNano(now)
for _, metric := range testCounterMetrics {
appender.NextMetric()
for name, value := range metric.tags {
appender.AddTag([]byte(name), []byte(value))
}
samplesAppenderResult, err := appender.SamplesAppender(opts)
require.NoError(t, err)
require.Equal(t, metric.expectDropPolicyApplied,
samplesAppenderResult.IsDropPolicyApplied)
require.Equal(t, metric.expectDropTimestamp,
samplesAppenderResult.ShouldDropTimestamp)
samplesAppender := samplesAppenderResult.SamplesAppender
for _, sample := range metric.samples {
err = samplesAppender.AppendUntimedCounterSample(sample, nil)
require.NoError(t, err)
}
for _, sample := range metric.timedSamples {
if sample.time.IsZero() {
sample.time = xNow // Allow empty time to mean "now"
}
if sample.offset > 0 {
sample.time = sample.time.Add(sample.offset)
}
if testOpts.waitForOffset {
time.Sleep(sample.offset)
}
if samplesAppenderResult.ShouldDropTimestamp {
err = samplesAppender.AppendUntimedCounterSample(sample.value, nil)
} else {
err = samplesAppender.AppendCounterSample(sample.time, sample.value, nil)
}
require.NoError(t, err)
}
}
for _, metric := range testGaugeMetrics {
appender.NextMetric()
for name, value := range metric.tags {
appender.AddTag([]byte(name), []byte(value))
}
samplesAppenderResult, err := appender.SamplesAppender(opts)
require.NoError(t, err)
require.Equal(t, metric.expectDropPolicyApplied,
samplesAppenderResult.IsDropPolicyApplied)
require.Equal(t, metric.expectDropTimestamp,
samplesAppenderResult.ShouldDropTimestamp)
samplesAppender := samplesAppenderResult.SamplesAppender
for _, sample := range metric.samples {
err = samplesAppender.AppendUntimedGaugeSample(sample, nil)
require.NoError(t, err)
}
for _, sample := range metric.timedSamples {
if sample.time.IsZero() {
sample.time = xNow // Allow empty time to mean "now"
}
if sample.offset > 0 {
sample.time = sample.time.Add(sample.offset)
}
if testOpts.waitForOffset {
time.Sleep(sample.offset)
}
if samplesAppenderResult.ShouldDropTimestamp {
err = samplesAppender.AppendUntimedGaugeSample(sample.value, nil)
} else {
err = samplesAppender.AppendGaugeSample(sample.time, sample.value, nil)
}
require.NoError(t, err)
}
}
//nolint:dupl
for _, metric := range testTimerMetrics {
appender.NextMetric()
for name, value := range metric.tags {
appender.AddTag([]byte(name), []byte(value))
}
samplesAppenderResult, err := appender.SamplesAppender(opts)
require.NoError(t, err)
require.Equal(t, metric.expectDropPolicyApplied,
samplesAppenderResult.IsDropPolicyApplied)
require.Equal(t, metric.expectDropTimestamp,
samplesAppenderResult.ShouldDropTimestamp)
samplesAppender := samplesAppenderResult.SamplesAppender
for _, sample := range metric.samples {
err = samplesAppender.AppendUntimedTimerSample(sample, nil)
require.NoError(t, err)
}
for _, sample := range metric.timedSamples {
if sample.time.IsZero() {
sample.time = xNow // Allow empty time to mean "now"
}
if sample.offset > 0 {
sample.time = sample.time.Add(sample.offset)
}
if testOpts.waitForOffset {
time.Sleep(sample.offset)
}
if samplesAppenderResult.ShouldDropTimestamp {
err = samplesAppender.AppendUntimedTimerSample(sample.value, nil)
} else {
err = samplesAppender.AppendTimerSample(sample.time, sample.value, nil)
}
require.NoError(t, err)
}
}
}
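// setAggregatedNamespaces updates the downsampler's cluster namespaces watcher
// with an unaggregated "default" namespace plus the supplied aggregated
// namespace definitions.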
func setAggregatedNamespaces(
t *testing.T,
testDownsampler testDownsampler,
session dbclient.Session,
namespaces ...m3.AggregatedClusterNamespaceDefinition,
) {
clusters, err := m3.NewClusters(m3.UnaggregatedClusterNamespaceDefinition{
NamespaceID: ident.StringID("default"),
Retention: 48 * time.Hour,
Session: session,
}, namespaces...)
require.NoError(t, err)
require.NoError(t, testDownsampler.opts.ClusterNamespacesWatcher.Update(clusters.ClusterNamespaces()))
}
func tagsToStringMap(tags models.Tags) map[string]string {
stringMap := make(map[string]string, tags.Len())
for _, t := range tags.Tags {
stringMap[string(t.Name)] = string(t.Value)
}
return stringMap
}
type testDownsampler struct {
opts DownsamplerOptions
testOpts testDownsamplerOptions
downsampler Downsampler
matcher matcher.Matcher
storage mock.Storage
rulesStore rules.Store
instrumentOpts instrument.Options
}
type testDownsamplerOptions struct {
clockOpts clock.Options
instrumentOpts instrument.Options
identTag string
untimedRollups bool
waitForOffset bool
// Options for the test
autoMappingRules []m3.ClusterNamespaceOptions
sampleAppenderOpts *SampleAppenderOptions
remoteClientMock *client.MockClient
rulesConfig *RulesConfiguration
matcherConfig MatcherConfiguration
// Test ingest and expectations overrides
ingest *testDownsamplerOptionsIngest
expect *testDownsamplerOptionsExpect
}
type testDownsamplerOptionsIngest struct {
counterMetrics []testCounterMetric
gaugeMetrics []testGaugeMetric
timerMetrics []testTimerMetric
}
type testDownsamplerOptionsExpect struct {
writes []testExpectedWrite
allowFilter *testDownsamplerOptionsExpectAllowFilter
}
type testDownsamplerOptionsExpectAllowFilter struct {
attributes []storagemetadata.Attributes
}
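// newTestDownsampler builds a downsampler backed by mock storage, an in-memory
// rules KV store and small object pools, applying any overrides supplied in opts.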
func newTestDownsampler(t *testing.T, opts testDownsamplerOptions) testDownsampler {
if opts.expect == nil {
opts.expect = &testDownsamplerOptionsExpect{}
}
storage := mock.NewMockStorage()
rulesKVStore := mem.NewStore()
clockOpts := clock.NewOptions()
if opts.clockOpts != nil {
clockOpts = opts.clockOpts
}
// Use test instrument options by default so that debug logs are enabled.
instrumentOpts := instrument.NewTestOptions(t)
if opts.instrumentOpts != nil {
instrumentOpts = opts.instrumentOpts
}
matcherOpts := matcher.NewOptions()
// Initialize the namespaces
_, err := rulesKVStore.Set(matcherOpts.NamespacesKey(), &rulepb.Namespaces{})
require.NoError(t, err)
rulesetKeyFmt := matcherOpts.RuleSetKeyFn()([]byte("%s"))
rulesStoreOpts := ruleskv.NewStoreOptions(matcherOpts.NamespacesKey(),
rulesetKeyFmt, nil)
rulesStore := ruleskv.NewStore(rulesKVStore, rulesStoreOpts)
tagEncoderOptions := serialize.NewTagEncoderOptions()
tagDecoderOptions := serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{})
tagEncoderPoolOptions := pool.NewObjectPoolOptions().
SetSize(2).
SetInstrumentOptions(instrumentOpts.
SetMetricsScope(instrumentOpts.MetricsScope().
SubScope("tag-encoder-pool")))
tagDecoderPoolOptions := pool.NewObjectPoolOptions().
SetSize(2).
SetInstrumentOptions(instrumentOpts.
SetMetricsScope(instrumentOpts.MetricsScope().
SubScope("tag-decoder-pool")))
metricsAppenderPoolOptions := pool.NewObjectPoolOptions().
SetSize(2).
SetInstrumentOptions(instrumentOpts.
SetMetricsScope(instrumentOpts.MetricsScope().
SubScope("metrics-appender-pool")))
cfg := Configuration{
BufferPastLimits: []BufferPastLimitConfiguration{
{Resolution: 0, BufferPast: 500 * time.Millisecond},
},
}
if opts.remoteClientMock != nil {
// Optionally set an override to use remote aggregation
// with a mock client
cfg.RemoteAggregator = &RemoteAggregatorConfiguration{
clientOverride: opts.remoteClientMock,
}
}
if opts.rulesConfig != nil {
cfg.Rules = opts.rulesConfig
}
cfg.Matcher = opts.matcherConfig
cfg.UntimedRollups = opts.untimedRollups
instance, err := cfg.NewDownsampler(DownsamplerOptions{
Storage: storage,
ClusterClient: clusterclient.NewMockClient(gomock.NewController(t)),
RulesKVStore: rulesKVStore,
ClusterNamespacesWatcher: m3.NewClusterNamespacesWatcher(),
ClockOptions: clockOpts,
InstrumentOptions: instrumentOpts,
TagEncoderOptions: tagEncoderOptions,
TagDecoderOptions: tagDecoderOptions,
TagEncoderPoolOptions: tagEncoderPoolOptions,
TagDecoderPoolOptions: tagDecoderPoolOptions,
MetricsAppenderPoolOptions: metricsAppenderPoolOptions,
RWOptions: xio.NewOptions(),
TagOptions: models.NewTagOptions(),
})
require.NoError(t, err)
if len(opts.autoMappingRules) > 0 {
// Simulate the automapping rules being injected into the downsampler.
ctrl := gomock.NewController(t)
var mockNamespaces m3.ClusterNamespaces
for _, r := range opts.autoMappingRules {
n := m3.NewMockClusterNamespace(ctrl)
n.EXPECT().
Options().
Return(r).
AnyTimes()
mockNamespaces = append(mockNamespaces, n)
}
instance.(*downsampler).OnUpdate(mockNamespaces)
}
downcast, ok := instance.(*downsampler)
require.True(t, ok)
return testDownsampler{
opts: downcast.opts,
testOpts: opts,
downsampler: instance,
matcher: downcast.agg.matcher,
storage: storage,
rulesStore: rulesStore,
instrumentOpts: instrumentOpts,
}
}
func newTestID(t *testing.T, tags map[string]string) id.ID {
tagEncoderPool := serialize.NewTagEncoderPool(serialize.NewTagEncoderOptions(),
pool.NewObjectPoolOptions().SetSize(1))
tagEncoderPool.Init()
tagsIter := newTags()
for name, value := range tags {
tagsIter.append([]byte(name), []byte(value))
}
tagEncoder := tagEncoderPool.Get()
err := tagEncoder.Encode(tagsIter)
require.NoError(t, err)
data, ok := tagEncoder.Data()
require.True(t, ok)
size := 1
tagDecoderPool := serialize.NewTagDecoderPool(
serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{
CheckBytesWrapperPoolSize: &size,
}),
pool.NewObjectPoolOptions().SetSize(size))
tagDecoderPool.Init()
tagDecoder := tagDecoderPool.Get()
iter := serialize.NewMetricTagsIterator(tagDecoder, nil)
iter.Reset(data.Bytes())
return iter
}
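// findWrites filters writes by the identifying tag value and, optionally, by
// storage attributes, and reports whether any write matched.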
func findWrites(
writes []*storage.WriteQuery,
name, identTag string,
optionalMatchAttrs *storagemetadata.Attributes,
) ([]*storage.WriteQuery, bool) {
var results []*storage.WriteQuery
for _, w := range writes {
if t, ok := w.Tags().Get([]byte(identTag)); ok {
if !bytes.Equal(t, []byte(name)) {
// Does not match name.
continue
}
if optionalMatchAttrs != nil && w.Attributes() != *optionalMatchAttrs {
// Tried to match attributes and not matched.
continue
}
// Matches name and all optional lookups.
results = append(results, w)
}
}
return results, len(results) > 0
}
func testUpdateMetadata() rules.UpdateMetadata {
return rules.NewRuleSetUpdateHelper(0).NewUpdateMetadata(time.Now().UnixNano(), "test")
}
| [
"\"TEST_LOG_WRITES_ACCUMULATED\"",
"\"TEST_LOG_WRITES_MATCH\""
]
| []
| [
"TEST_LOG_WRITES_ACCUMULATED",
"TEST_LOG_WRITES_MATCH"
]
| [] | ["TEST_LOG_WRITES_ACCUMULATED", "TEST_LOG_WRITES_MATCH"] | go | 2 | 0 | |
publib.py | #!/usr/bin/env python2
from __future__ import print_function
import httplib2
import base64
from apiclient import discovery
from oauth2client import client as oauth2client
import datetime
import time
import os
import myauth
(PUB_CREDENTIALS,PUB_SCOPE,SUBSCRIPT,TOPIC)=myauth.setPubSubConfirm()
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = PUB_CREDENTIALS
PUBSUB_SCOPES = PUB_SCOPE
subscription=SUBSCRIPT
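# Build an authenticated Pub/Sub API client from the application default
# credentials configured above.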
def create_pubsub_client(http=None):
credentials = oauth2client.GoogleCredentials.get_application_default()
if credentials.create_scoped_required():
credentials = credentials.create_scoped(PUBSUB_SCOPES)
if not http:
http = httplib2.Http()
credentials.authorize(http)
return discovery.build('pubsub', 'v1', http=http)
client=create_pubsub_client(http=None)
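# Pull up to a batch of messages from the subscription, acknowledge them and
# return the decoded payload of the last one (or None if nothing was pending).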
def checkForMessage():
data=None
batch_size = 100
body = {
# returnImmediately is set to True, so the pull returns at once with
# whatever messages are currently available (up to maxMessages) rather
# than waiting for the timeout.
'returnImmediately': True,
'maxMessages': batch_size,
}
resp = client.projects().subscriptions().pull(
subscription=subscription, body=body).execute()
received_messages = resp.get('receivedMessages')
if received_messages is not None:
ack_ids = []
for received_message in received_messages:
pubsub_message = received_message.get('message')
if pubsub_message:
# Process messages
data = base64.b64decode(str(pubsub_message.get('data')))
# print(data)
# process(data)
# Get the message's ack ID
ack_ids.append(received_message.get('ackId'))
# Create a POST body for the acknowledge request
ack_body = {'ackIds': ack_ids}
# print ack_body
# Acknowledge the message.
client.projects().subscriptions().acknowledge(
subscription=subscription, body=ack_body).execute()
return data
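# Publish a heartbeat message containing the given id, an accounting record and
# the current timestamp to the configured topic.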
def sendHeartBeat(id1):
message1 = {}
message1['HearBeat']=str(id1)
message1['accounting']=[id1,1,1,datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')]
message1['timeStamp']=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
message1 = base64.b64encode(str(message1))
# Create a POST body for the Pub/Sub request
body = {
'messages': [
{'data': message1},
]
}
resp = client.projects().topics().publish(
topic=TOPIC, body=body).execute()
message_ids = resp.get('messageIds')
if message_ids:
for message_id in message_ids:
# Process each message ID
pass
#print(message_id)
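# Publish an arbitrary message to the configured topic and print the message
# ids returned by the API.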
def sendMsg(msg):
message1 = base64.b64encode(str(msg))
# Create a POST body for the Pub/Sub request
body = {
'messages': [
{'data': message1},
]
}
resp = client.projects().topics().publish(
topic=TOPIC, body=body).execute()
message_ids = resp.get('messageIds')
if message_ids:
for message_id in message_ids:
# Process each message ID
print(message_id)
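# Send heartbeats and poll the subscription up to `num` times, `sec` seconds
# apart, returning the confirmation message if one arrives.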
def timeCheck(msg,num=3,sec=3):
print("waiting for confirmation")
for i in range(0,num):
sendHeartBeat('timeCheck'+str(i)+':'+str(msg))
return_msd = checkForMessage()
if return_msd != None:
if msg.find(return_msd) >= 0:
print("CONFIRMED!!....")
return return_msd
time.sleep(sec)
sendHeartBeat('timeCheck_2'+str(i)+':'+str(msg))
| []
| []
| [
"GOOGLE_APPLICATION_CREDENTIALS"
]
| [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | python | 1 | 0 | |
service/es_service_integration_test.go | //go:build integration
// +build integration
package service
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/Financial-Times/go-logger"
testLog "github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/olivere/elastic.v5"
"github.com/google/uuid"
)
const (
peopleType = "people"
membershipType = "memberships"
esStatusCreated = "created"
)
func TestMain(m *testing.M) {
logger.InitLogger("test-concept-rw-elasticsearch", "error")
conceptCountBefore := getESConceptsCount()
code := m.Run()
conceptCountAfter := getESConceptsCount()
if conceptCountBefore != conceptCountAfter {
logger.Errorf("expected concept count %d, got %d", conceptCountBefore, conceptCountAfter)
code = 1
}
os.Exit(code)
}
func TestWrite(t *testing.T) {
bulkProcessorConfig := NewBulkProcessorConfig(1, 1, 1, time.Second)
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
bulkProcessor, err := newBulkProcessor(ec, &bulkProcessorConfig)
require.NoError(t, err, "require a bulk processor")
service := &esService{sync.RWMutex{}, ec, bulkProcessor, indexName, &bulkProcessorConfig, time.Now}
testUUID := uuid.New().String()
_, up, resp, err := writeTestDocument(service, organisationsType, testUUID)
defer deleteTestDocument(t, service, organisationsType, testUUID)
assert.NoError(t, err, "expected successful write")
assert.Equal(t, esStatusCreated, resp.Result, "document should have been created")
assert.Equal(t, indexName, resp.Index, "index name")
assert.Equal(t, organisationsType, resp.Type, "concept type")
assert.Equal(t, testUUID, resp.Id, "document id")
assert.True(t, up, "updated was true")
}
func TestWriteMakesPersonAnFTColumnist(t *testing.T) {
bulkProcessorConfig := NewBulkProcessorConfig(1, 1, 1, 100*time.Millisecond)
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
bulkProcessor, err := newBulkProcessor(ec, &bulkProcessorConfig)
require.NoError(t, err, "require a bulk processor")
service := &esService{sync.RWMutex{}, ec, bulkProcessor, indexName, &bulkProcessorConfig, time.Now}
testUUID := uuid.New().String()
op, _, _, err := writeTestPersonDocument(service, peopleType, testUUID, "false")
defer deleteTestDocument(t, service, peopleType, testUUID)
require.NoError(t, err, "expected successful write")
ctx := context.Background()
_, err = ec.Refresh(indexName).Do(ctx)
require.NoError(t, err, "expected successful flush")
ftColumnist := &EsMembershipModel{
Id: uuid.New().String(),
PersonId: testUUID,
OrganisationId: "7bcfe07b-0fb1-49ce-a5fa-e51d5c01c3e0",
Memberships: []string{"7ef75a6a-b6bf-4eb7-a1da-03e0acabef1a", "7ef75a6a-b6bf-4eb7-a1da-03e0acabef1b", "7ef75a6a-b6bf-4eb7-a1da-03e0acabef1c"},
}
up, _, err := service.LoadData(newTestContext(), membershipType, ftColumnist.Id, ftColumnist)
require.NoError(t, err, "expected successful write")
_, err = ec.Refresh(indexName).Do(ctx)
require.NoError(t, err, "expected successful flush")
err = service.bulkProcessor.Flush() // wait for the bulk processor to write the data
require.NoError(t, err, "require successful write")
assert.True(t, up, "author was updated")
p, err := service.ReadData(peopleType, testUUID)
assert.NoError(t, err, "expected successful read")
var actual EsPersonConceptModel
assert.NoError(t, json.Unmarshal(*p.Source, &actual))
assert.Equal(t, "true", actual.IsFTAuthor)
assert.Equal(t, op.Id, actual.Id)
assert.Equal(t, op.ApiUrl, actual.ApiUrl)
assert.Equal(t, op.PrefLabel, actual.PrefLabel)
}
func TestWriteMakesPersonAnFTJournalist(t *testing.T) {
bulkProcessorConfig := NewBulkProcessorConfig(1, 1, 1, 100*time.Millisecond)
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
bulkProcessor, err := newBulkProcessor(ec, &bulkProcessorConfig)
require.NoError(t, err, "require a bulk processor")
service := &esService{sync.RWMutex{}, ec, bulkProcessor, indexName, &bulkProcessorConfig, time.Now}
testUUID := uuid.New().String()
_, _, _, err = writeTestPersonDocument(service, peopleType, testUUID, "false")
defer deleteTestDocument(t, service, peopleType, testUUID)
require.NoError(t, err, "expected successful write")
ctx := context.Background()
_, err = ec.Refresh(indexName).Do(ctx)
require.NoError(t, err, "expected successful flush")
ftColumnist := &EsMembershipModel{
Id: uuid.New().String(),
PersonId: testUUID,
OrganisationId: "7bcfe07b-0fb1-49ce-a5fa-e51d5c01c3e0",
Memberships: []string{"7ef75a6a-b6bf-4eb7-a1da-03e0acabef1a", "33ee38a4-c677-4952-a141-2ae14da3aedd", "7ef75a6a-b6bf-4eb7-a1da-03e0acabef1c"},
}
up, _, err := service.LoadData(newTestContext(), membershipType, ftColumnist.Id, ftColumnist)
require.NoError(t, err, "expected successful write")
_, err = ec.Refresh(indexName).Do(ctx)
require.NoError(t, err, "expected successful flush")
err = service.bulkProcessor.Flush() // wait for the bulk processor to write the data
require.NoError(t, err, "require successful write")
assert.True(t, up, "Journalist updated")
p, err := service.ReadData(peopleType, testUUID)
assert.NoError(t, err, "expected successful read")
var actual EsPersonConceptModel
assert.NoError(t, json.Unmarshal(*p.Source, &actual))
assert.Equal(t, "true", actual.IsFTAuthor)
}
func TestWriteDummyPersonWhenMembershipArrives(t *testing.T) {
getTimeFunc := func() time.Time {
res, err := time.Parse(time.RFC3339, testLastModified)
require.NoError(t, err)
return res
}
bulkProcessorConfig := NewBulkProcessorConfig(1, 1, 1, 100*time.Millisecond)
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
bulkProcessor, err := newBulkProcessor(ec, &bulkProcessorConfig)
require.NoError(t, err, "require a bulk processor")
service := &esService{sync.RWMutex{}, ec, bulkProcessor, indexName, &bulkProcessorConfig, getTimeFunc}
testUUID := uuid.New().String()
ctx := context.Background()
membership := &EsMembershipModel{
Id: uuid.New().String(),
PersonId: testUUID,
OrganisationId: "7bcfe07b-0fb1-49ce-a5fa-e51d5c01c3e0",
Memberships: []string{"7ef75a6a-b6bf-4eb7-a1da-03e0acabef1a", "33ee38a4-c677-4952-a141-2ae14da3aedd", "7ef75a6a-b6bf-4eb7-a1da-03e0acabef1c"},
}
up, _, err := service.LoadData(newTestContext(), membershipType, membership.Id, membership)
defer deleteTestDocument(t, service, peopleType, testUUID)
require.NoError(t, err, "expected successful write")
_, err = ec.Refresh(indexName).Do(ctx)
require.NoError(t, err, "expected successful flush")
err = service.bulkProcessor.Flush() // wait for the bulk processor to write the data
require.NoError(t, err, "require successful write")
assert.True(t, up, "Journalist updated")
p, err := service.ReadData(peopleType, testUUID)
assert.NoError(t, err, "expected successful read")
var actual EsPersonConceptModel
assert.NoError(t, json.Unmarshal(*p.Source, &actual))
assert.Equal(t, testUUID, actual.Id)
assert.Equal(t, "true", actual.IsFTAuthor)
assert.Equal(t, testLastModified, actual.LastModified)
}
func TestWritePersonAfterMembership(t *testing.T) {
bulkProcessorConfig := NewBulkProcessorConfig(1, 1, 1, 100*time.Millisecond)
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
bulkProcessor, err := newBulkProcessor(ec, &bulkProcessorConfig)
require.NoError(t, err, "require a bulk processor")
service := &esService{sync.RWMutex{}, ec, bulkProcessor, indexName, &bulkProcessorConfig, time.Now}
testUUID := uuid.New().String()
ctx := context.Background()
membership := &EsMembershipModel{
Id: uuid.New().String(),
PersonId: testUUID,
OrganisationId: "7bcfe07b-0fb1-49ce-a5fa-e51d5c01c3e0",
Memberships: []string{"7ef75a6a-b6bf-4eb7-a1da-03e0acabef1a", "33ee38a4-c677-4952-a141-2ae14da3aedd", "7ef75a6a-b6bf-4eb7-a1da-03e0acabef1c"},
}
up, _, err := service.LoadData(newTestContext(), membershipType, membership.Id, membership)
require.NoError(t, err, "expected successful write")
_, err = ec.Refresh(indexName).Do(ctx)
require.NoError(t, err, "expected successful flush")
op, _, _, err := writeTestDocument(service, peopleType, testUUID)
defer deleteTestDocument(t, service, peopleType, testUUID)
require.NoError(t, err, "expected successful write")
_, err = ec.Refresh(indexName).Do(ctx)
require.NoError(t, err, "expected successful flush")
err = service.bulkProcessor.Flush() // wait for the bulk processor to write the data
require.NoError(t, err, "require successful write")
assert.True(t, up, "Journalist updated")
p, err := service.ReadData(peopleType, testUUID)
assert.NoError(t, err, "expected successful read")
var actual EsPersonConceptModel
assert.NoError(t, json.Unmarshal(*p.Source, &actual))
assert.Equal(t, op.Id, actual.Id)
assert.Equal(t, op.ApiUrl, actual.ApiUrl)
assert.Equal(t, op.PrefLabel, actual.PrefLabel)
assert.Equal(t, "true", actual.IsFTAuthor)
}
func TestFTAuthorWriteOrder(t *testing.T) {
service := getTestESService(t)
testUUID := uuid.New().String()
membership := &EsMembershipModel{
Id: uuid.New().String(),
PersonId: testUUID,
OrganisationId: "7bcfe07b-0fb1-49ce-a5fa-e51d5c01c3e0",
Memberships: []string{"7ef75a6a-b6bf-4eb7-a1da-03e0acabef1a", "33ee38a4-c677-4952-a141-2ae14da3aedd", "7ef75a6a-b6bf-4eb7-a1da-03e0acabef1c"},
}
_, _, _, err := writeTestDocument(service, peopleType, testUUID)
require.NoError(t, err)
_, _, err = service.LoadData(newTestContext(), membershipType, membership.Id, membership)
require.NoError(t, err)
flushChangesToIndex(t, service)
var p1 EsPersonConceptModel
esResult, _ := service.ReadData(peopleType, testUUID)
require.NoError(t, json.Unmarshal(*esResult.Source, &p1))
deleteTestDocument(t, service, peopleType, testUUID)
_, _, err = service.LoadData(newTestContext(), membershipType, membership.Id, membership)
require.NoError(t, err)
_, _, _, err = writeTestDocument(service, peopleType, testUUID)
require.NoError(t, err)
flushChangesToIndex(t, service)
var p2 EsPersonConceptModel
esResult, _ = service.ReadData(peopleType, testUUID)
require.NoError(t, json.Unmarshal(*esResult.Source, &p2))
deleteTestDocument(t, service, peopleType, testUUID)
assert.Equal(t, "true", p1.IsFTAuthor)
assert.Equal(t, "true", p2.IsFTAuthor)
assert.Equal(t, p1, p2)
}
func TestWriteDoesNotMakePersonAnFTAuthor(t *testing.T) {
bulkProcessorConfig := NewBulkProcessorConfig(1, 1, 1, 100*time.Millisecond)
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
bulkProcessor, err := newBulkProcessor(ec, &bulkProcessorConfig)
require.NoError(t, err, "require a bulk processor")
service := &esService{sync.RWMutex{}, ec, bulkProcessor, indexName, &bulkProcessorConfig, time.Now}
testUUID := uuid.New().String()
_, _, _, err = writeTestPersonDocument(service, peopleType, testUUID, "false")
defer deleteTestDocument(t, service, peopleType, testUUID)
require.NoError(t, err, "expected successful write")
ctx := context.Background()
_, err = ec.Refresh(indexName).Do(ctx)
require.NoError(t, err, "expected successful flush")
testCases := []struct {
name string
model *EsMembershipModel
}{
{
name: "Not FT org",
model: &EsMembershipModel{
Id: uuid.New().String(),
PersonId: testUUID,
OrganisationId: "7aafe07b-0fb1-49ce-a5fa-e51d5c01c3e0",
Memberships: []string{"7ef75a6a-b6bf-4eb7-a1da-03e0acabef1a", "33ee38a4-c677-4952-a141-2ae14da3aedd", "7ef75a6a-b6bf-4eb7-a1da-03e0acabef1c"},
},
},
{
name: "FT but not a columnist or journalist",
model: &EsMembershipModel{
Id: uuid.New().String(),
PersonId: testUUID,
OrganisationId: "7bcfe07b-0fb1-49ce-a5fa-e51d5c01c3e0",
Memberships: []string{"7af75a6a-b6bf-4eb7-a1da-03e0acabef1a", "33aa38a4-c677-4952-a141-2ae14da3aedd", "7af75a6a-b6bf-4eb7-a1da-03e0acabef1c"},
},
},
{
name: "FT but has no memberships",
model: &EsMembershipModel{
Id: uuid.New().String(),
PersonId: testUUID,
OrganisationId: "7bcfe07b-0fb1-49ce-a5fa-e51d5c01c3e0",
},
},
}
for _, c := range testCases {
t.Run(c.name, func(t *testing.T) {
up, _, err := service.LoadData(newTestContext(), membershipType, c.model.Id, c.model)
require.NoError(t, err, "expected successful write")
_, err = ec.Refresh(indexName).Do(ctx)
require.NoError(t, err, "expected successful flush")
err = service.bulkProcessor.Flush() // wait for the bulk processor to write the data
require.NoError(t, err, "require successful write")
assert.False(t, up, "should not have updated person")
p, err := service.ReadData(peopleType, testUUID)
assert.NoError(t, err, "expected successful read")
var actual EsPersonConceptModel
assert.NoError(t, json.Unmarshal(*p.Source, &actual))
assert.Equal(t, "false", actual.IsFTAuthor)
})
}
}
func TestWritePreservesPatchableDataForPerson(t *testing.T) {
bulkProcessorConfig := NewBulkProcessorConfig(1, 1, 1, 100*time.Millisecond)
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
bulkProcessor, err := newBulkProcessor(ec, &bulkProcessorConfig)
require.NoError(t, err, "require a bulk processor")
service := &esService{sync.RWMutex{}, ec, bulkProcessor, indexName, &bulkProcessorConfig, time.Now}
testUUID := uuid.New().String()
payload, _, _, err := writeTestPersonDocument(service, peopleType, testUUID, "true")
defer deleteTestDocument(t, service, peopleType, testUUID)
assert.NoError(t, err, "expected successful write")
ctx := context.Background()
_, err = ec.Refresh(indexName).Do(ctx)
require.NoError(t, err, "expected successful flush")
service.PatchUpdateConcept(ctx, peopleType, testUUID, &EsConceptModelPatch{Metrics: &ConceptMetrics{AnnotationsCount: 1234, PrevWeekAnnotationsCount: 123}})
err = service.bulkProcessor.Flush() // wait for the bulk processor to write the data
require.NoError(t, err, "require successful metrics write")
p, err := service.ReadData(peopleType, testUUID)
assert.NoError(t, err, "expected successful read")
var previous EsPersonConceptModel
assert.NoError(t, json.Unmarshal(*p.Source, &previous))
assert.Equal(t, "true", previous.IsFTAuthor)
payload.PrefLabel = "Updated PrefLabel"
payload.Metrics = nil // blank metrics
up, _, err := service.LoadData(ctx, peopleType, testUUID, payload)
require.NoError(t, err, "require successful metrics write")
err = service.bulkProcessor.Flush() // wait for the bulk processor to write the data
require.NoError(t, err, "require successful metrics write")
_, err = ec.Refresh(indexName).Do(ctx)
require.NoError(t, err, "expected successful flush")
assert.True(t, up, "person should have been updated")
p, err = service.ReadData(peopleType, testUUID)
assert.NoError(t, err, "expected successful read")
var actual EsPersonConceptModel
assert.NoError(t, json.Unmarshal(*p.Source, &actual))
assert.Equal(t, actual.EsConceptModel.Metrics.AnnotationsCount, 1234)
assert.Equal(t, actual.EsConceptModel.Metrics.PrevWeekAnnotationsCount, 123)
previous.PrefLabel = payload.PrefLabel
assert.Equal(t, previous, actual)
}
func TestWritePreservesMetrics(t *testing.T) {
bulkProcessorConfig := NewBulkProcessorConfig(1, 1, 1, 100*time.Millisecond)
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
bulkProcessor, err := newBulkProcessor(ec, &bulkProcessorConfig)
require.NoError(t, err, "require a bulk processor")
service := &esService{sync.RWMutex{}, ec, bulkProcessor, indexName, &bulkProcessorConfig, time.Now}
testUUID := uuid.New().String()
_, _, _, err = writeTestDocument(service, organisationsType, testUUID)
defer deleteTestDocument(t, service, organisationsType, testUUID)
require.NoError(t, err, "require successful concept write")
testMetrics := &EsConceptModelPatch{Metrics: &ConceptMetrics{AnnotationsCount: 150000, PrevWeekAnnotationsCount: 15}}
service.PatchUpdateConcept(newTestContext(), organisationsType, testUUID, testMetrics)
err = service.bulkProcessor.Flush() // wait for the bulk processor to write the data
require.NoError(t, err, "require successful metrics write")
_, _, _, _ = writeTestDocument(service, organisationsType, testUUID)
err = service.bulkProcessor.Flush() // wait for the bulk processor to write the data
require.NoError(t, err, "require successful concept update")
actual, err := service.ReadData(organisationsType, testUUID)
assert.NoError(t, err, "expected successful concept read")
m := make(map[string]interface{})
assert.NoError(t, json.Unmarshal(*actual.Source, &m))
actualMetrics := m["metrics"].(map[string]interface{})
actualCount := int(actualMetrics["annotationsCount"].(float64))
assert.NoError(t, err, "expected concept to contain annotations count")
assert.Equal(t, 150000, actualCount)
prevWeekAnnotationsCount := int(actualMetrics["prevWeekAnnotationsCount"].(float64))
assert.Equal(t, 15, prevWeekAnnotationsCount)
}
func TestIsReadOnly(t *testing.T) {
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
service := &esService{sync.RWMutex{}, ec, nil, indexName, nil, time.Now}
defer ec.Stop()
readOnly, name, err := service.IsIndexReadOnly()
assert.False(t, readOnly, "index should not be read-only")
assert.Equal(t, name, indexName, "index name should be returned")
assert.NoError(t, err, "read-only check should not return an error")
setReadOnly(t, ec, indexName, true)
defer setReadOnly(t, ec, indexName, false)
readOnly, name, err = service.IsIndexReadOnly()
assert.True(t, readOnly, "index should be read-only")
assert.Equal(t, name, indexName, "index name should be returned")
assert.NoError(t, err, "read-only check should not return an error")
}
func TestIsReadOnlyIndexNotFound(t *testing.T) {
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
service := &esService{sync.RWMutex{}, ec, nil, "foo", nil, time.Now}
defer ec.Stop()
readOnly, name, err := service.IsIndexReadOnly()
assert.False(t, readOnly, "index should not be read-only")
assert.Empty(t, name, "no index name should be returned")
assert.Error(t, err, "index should not be found")
}
func TestRead(t *testing.T) {
bulkProcessorConfig := NewBulkProcessorConfig(1, 1, 1, time.Second)
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
bulkProcessor, err := newBulkProcessor(ec, &bulkProcessorConfig)
require.NoError(t, err, "require a bulk processor")
service := &esService{sync.RWMutex{}, ec, bulkProcessor, indexName, &bulkProcessorConfig, time.Now}
defer ec.Stop()
testUUID := uuid.New().String()
payload, _, _, err := writeTestDocument(service, organisationsType, testUUID)
defer deleteTestDocument(t, service, organisationsType, testUUID)
assert.NoError(t, err, "expected successful write")
_, err = ec.Refresh(indexName).Do(context.Background())
require.NoError(t, err, "expected successful flush")
resp, err := service.ReadData(organisationsType, testUUID)
assert.NoError(t, err, "expected no error for ES read")
assert.True(t, resp.Found, "should find a result")
obj := make(map[string]interface{})
assert.NoError(t, json.Unmarshal(*resp.Source, &obj))
assert.Equal(t, payload.ApiUrl, obj["apiUrl"], "apiUrl")
assert.Equal(t, payload.PrefLabel, obj["prefLabel"], "prefLabel")
}
func TestPassClientThroughChannel(t *testing.T) {
bulkProcessorConfig := NewBulkProcessorConfig(1, 1, 1, time.Second)
esURL := getElasticSearchTestURL()
ecc := make(chan *elastic.Client)
defer close(ecc)
service := NewEsService(ecc, indexName, &bulkProcessorConfig)
ec := getElasticClient(t, esURL)
ecc <- ec
err := waitForClientInjection(service)
require.NoError(t, err, "ES client injection failed or timed out")
testUUID := uuid.New().String()
payload, _, _, err := writeTestDocument(service, organisationsType, testUUID)
defer deleteTestDocument(t, service.(*esService), organisationsType, testUUID)
assert.NoError(t, err, "expected successful write")
resp, err := service.ReadData(organisationsType, testUUID)
assert.NoError(t, err, "expected no error for ES read")
assert.True(t, resp.Found, "should find a result")
obj := make(map[string]interface{})
assert.NoError(t, json.Unmarshal(*resp.Source, &obj))
assert.Equal(t, fmt.Sprintf("%s/%s/%s", apiBaseURL, organisationsType, testUUID), obj["apiUrl"], "apiUrl")
assert.Equal(t, payload.ApiUrl, obj["apiUrl"], "apiUrl")
assert.Equal(t, payload.PrefLabel, obj["prefLabel"], "prefLabel")
}
func TestDelete(t *testing.T) {
bulkProcessorConfig := NewBulkProcessorConfig(1, 1, 1, time.Second)
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
bulkProcessor, err := newBulkProcessor(ec, &bulkProcessorConfig)
require.NoError(t, err, "require a bulk processor")
service := &esService{sync.RWMutex{}, ec, bulkProcessor, indexName, &bulkProcessorConfig, time.Now}
testUUID := uuid.New().String()
_, _, resp, err := writeTestDocument(service, organisationsType, testUUID)
require.NoError(t, err, "expected successful write")
assert.Equal(t, esStatusCreated, resp.Result, "document should have been created")
assert.Equal(t, indexName, resp.Index, "index name")
assert.Equal(t, organisationsType, resp.Type, "concept type")
assert.Equal(t, testUUID, resp.Id, "document id")
deleteResp, err := service.DeleteData(newTestContext(), organisationsType, testUUID)
require.NoError(t, err)
assert.True(t, deleteResp.Found)
getResp, err := service.ReadData(organisationsType, testUUID)
assert.NoError(t, err)
assert.False(t, getResp.Found)
}
func TestDeleteNotFoundConcept(t *testing.T) {
hook := testLog.NewGlobal()
esURL := getElasticSearchTestURL()
ec, err := elastic.NewClient(
elastic.SetURL(esURL),
elastic.SetSniff(false),
)
assert.NoError(t, err, "expected no error for ES client")
service := &esService{sync.RWMutex{}, ec, nil, indexName, nil, time.Now}
testUUID := uuid.New().String()
resp, _ := service.DeleteData(newTestContext(), organisationsType+"s", testUUID)
assert.False(t, resp.Found, "document is not found")
assert.Empty(t, hook.AllEntries(), "It logged nothing")
}
func TestCleanup(t *testing.T) {
bulkProcessorConfig := NewBulkProcessorConfig(1, 1, 1, time.Second)
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
bulkProcessor, err := newBulkProcessor(ec, &bulkProcessorConfig)
require.NoError(t, err, "require a bulk processor")
service := &esService{sync.RWMutex{}, ec, bulkProcessor, indexName, &bulkProcessorConfig, time.Now}
testUUID1 := uuid.New().String()
_, _, resp, err := writeTestDocument(service, organisationsType, testUUID1)
defer deleteTestDocument(t, service, organisationsType, testUUID1)
require.NoError(t, err, "expected successful write")
require.Equal(t, esStatusCreated, resp.Result, "document should have been created")
testUUID2 := uuid.New().String()
_, _, resp, err = writeTestDocument(service, peopleType, testUUID2)
require.NoError(t, err, "expected successful write")
require.Equal(t, esStatusCreated, resp.Result, "document should have been created")
testUUID3 := uuid.New().String()
_, _, resp, err = writeTestDocument(service, organisationsType, testUUID3)
require.NoError(t, err, "expected successful write")
require.Equal(t, esStatusCreated, resp.Result, "document should have been created")
concept := AggregateConceptModel{PrefUUID: testUUID1, SourceRepresentations: []SourceConcept{
{
UUID: testUUID1,
},
{
UUID: testUUID2,
},
{
UUID: testUUID3,
},
}}
// ensure test data is immediately available from the index
_, err = ec.Refresh(indexName).Do(context.Background())
require.NoError(t, err)
service.CleanupData(newTestContext(), concept)
getResp, err := service.ReadData(peopleType, testUUID2)
assert.NoError(t, err)
assert.False(t, getResp.Found)
getResp, err = service.ReadData(organisationsType, testUUID3)
assert.NoError(t, err)
assert.False(t, getResp.Found)
getResp, err = service.ReadData(organisationsType, testUUID1)
assert.NoError(t, err)
assert.True(t, getResp.Found)
}
func TestDeprecationFlagTrue(t *testing.T) {
bulkProcessorConfig := NewBulkProcessorConfig(1, 1, 1, time.Second)
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
bulkProcessor, err := newBulkProcessor(ec, &bulkProcessorConfig)
require.NoError(t, err, "require a bulk processor")
service := &esService{sync.RWMutex{}, ec, bulkProcessor, indexName, &bulkProcessorConfig, time.Now}
testUUID := uuid.New().String()
payload := EsConceptModel{
Id: testUUID,
ApiUrl: fmt.Sprintf("%s/%s/%s", apiBaseURL, organisationsType, testUUID),
PrefLabel: fmt.Sprintf("Test concept %s %s", organisationsType, testUUID),
Types: []string{},
DirectType: "",
Aliases: []string{},
IsDeprecated: true,
LastModified: testLastModified,
}
_, resp, err := service.LoadData(newTestContext(), organisationsType, testUUID, payload)
defer deleteTestDocument(t, service, organisationsType, testUUID)
assert.NoError(t, err, "expected successful write")
assert.Equal(t, esStatusCreated, resp.Result, "document should have been created")
assert.Equal(t, indexName, resp.Index, "index name")
assert.Equal(t, organisationsType, resp.Type, "concept type")
assert.Equal(t, testUUID, resp.Id, "document id")
readResp, err := service.ReadData(organisationsType, testUUID)
assert.NoError(t, err, "expected no error for ES read")
assert.True(t, readResp.Found, "should find a result")
obj := make(map[string]interface{})
assert.NoError(t, json.Unmarshal(*readResp.Source, &obj))
assert.Equal(t, payload.ApiUrl, obj["apiUrl"], "apiUrl")
assert.Equal(t, payload.PrefLabel, obj["prefLabel"], "prefLabel")
assert.Equal(t, true, obj["isDeprecated"], "deprecation flag")
}
func TestDeprecationFlagFalse(t *testing.T) {
bulkProcessorConfig := NewBulkProcessorConfig(1, 1, 1, time.Second)
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
bulkProcessor, err := newBulkProcessor(ec, &bulkProcessorConfig)
require.NoError(t, err, "require a bulk processor")
service := &esService{sync.RWMutex{}, ec, bulkProcessor, indexName, &bulkProcessorConfig, time.Now}
testUUID := uuid.New().String()
payload := EsConceptModel{
Id: testUUID,
ApiUrl: fmt.Sprintf("%s/%s/%s", apiBaseURL, organisationsType, testUUID),
PrefLabel: fmt.Sprintf("Test concept %s %s", organisationsType, testUUID),
Types: []string{},
DirectType: "",
Aliases: []string{},
LastModified: testLastModified,
}
_, resp, err := service.LoadData(newTestContext(), organisationsType, testUUID, payload)
defer deleteTestDocument(t, service, organisationsType, testUUID)
assert.NoError(t, err, "expected successful write")
assert.Equal(t, esStatusCreated, resp.Result, "document should have been created")
assert.Equal(t, indexName, resp.Index, "index name")
assert.Equal(t, organisationsType, resp.Type, "concept type")
assert.Equal(t, testUUID, resp.Id, "document id")
readResp, err := service.ReadData(organisationsType, testUUID)
assert.NoError(t, err, "expected no error for ES read")
assert.True(t, readResp.Found, "should find a result")
obj := make(map[string]interface{})
assert.NoError(t, json.Unmarshal(*readResp.Source, &obj))
assert.Equal(t, payload.ApiUrl, obj["apiUrl"], "apiUrl")
assert.Equal(t, payload.PrefLabel, obj["prefLabel"], "prefLabel")
_, deprecatedFlagExists := obj["isDeprecated"]
assert.False(t, deprecatedFlagExists, "deprecation flag")
}
func TestMetricsUpdated(t *testing.T) {
bulkProcessorConfig := NewBulkProcessorConfig(1, 1, 1, time.Second)
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
bulkProcessor, err := newBulkProcessor(ec, &bulkProcessorConfig)
require.NoError(t, err, "require a bulk processor")
service := &esService{sync.RWMutex{}, ec, bulkProcessor, indexName, &bulkProcessorConfig, time.Now}
testUUID := uuid.New().String()
payload := EsConceptModel{
Id: testUUID,
ApiUrl: fmt.Sprintf("%s/%ss/%s", apiBaseURL, organisationsType, testUUID),
PrefLabel: fmt.Sprintf("Test concept %s %s", organisationsType, testUUID),
Types: []string{},
DirectType: "",
Aliases: []string{},
LastModified: testLastModified,
}
_, resp, err := service.LoadData(newTestContext(), organisationsType, testUUID, payload)
defer deleteTestDocument(t, service, organisationsType, testUUID)
assert.NoError(t, err, "expected successful write")
assert.Equal(t, esStatusCreated, resp.Result, "document should have been created")
assert.Equal(t, indexName, resp.Index, "index name")
assert.Equal(t, organisationsType, resp.Type, "concept type")
assert.Equal(t, testUUID, resp.Id, "document id")
testMetrics := &EsConceptModelPatch{Metrics: &ConceptMetrics{AnnotationsCount: 15000, PrevWeekAnnotationsCount: 150}}
service.PatchUpdateConcept(newTestContext(), organisationsType, testUUID, testMetrics)
service.bulkProcessor.Flush() // wait for the bulk processor to write the data
readResp, err := service.ReadData(organisationsType, testUUID)
assert.NoError(t, err, "expected no error for ES read")
assert.True(t, readResp.Found, "should find a result")
actualModel := EsConceptModel{}
err = json.Unmarshal(*readResp.Source, &actualModel)
assert.NoError(t, err)
assert.Equal(t, payload.ApiUrl, actualModel.ApiUrl, "Expect the original fields to still be intact")
assert.Equal(t, payload.PrefLabel, actualModel.PrefLabel, "Expect the original fields to still be intact")
assert.Equal(t, testMetrics.Metrics.AnnotationsCount, actualModel.Metrics.AnnotationsCount, "Count should be set")
assert.Equal(t, testMetrics.Metrics.PrevWeekAnnotationsCount, actualModel.Metrics.PrevWeekAnnotationsCount, "PrevWeekAnnotationsCount should be set")
}
func TestGetAllIds(t *testing.T) {
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
bulkProcessorConfig := NewBulkProcessorConfig(1, 1, 1, time.Second)
bulkProcessor, _ := newBulkProcessor(ec, &bulkProcessorConfig)
service := &esService{sync.RWMutex{}, ec, bulkProcessor, indexName, &bulkProcessorConfig, time.Now}
max := 1001
expected := make([]string, max)
workers := 8
ids := make(chan string, workers)
var wg sync.WaitGroup
wg.Add(max)
for i := 0; i < workers; i++ {
go func() {
for id := range ids {
_, _, _, err := writeTestDocument(service, organisationsType, id)
require.NoError(t, err, "expected successful write")
wg.Done()
}
}()
}
for i := 0; i < max; i++ {
testUUID := uuid.New().String()
expected[i] = testUUID
ids <- testUUID
}
close(ids)
wg.Wait()
_, err := ec.Refresh(indexName).Do(context.Background())
require.NoError(t, err, "expected successful flush")
ch := service.GetAllIds(context.Background())
actual := make(map[string]struct{})
for id := range ch {
actual[id.ID] = struct{}{}
}
notFound := 0
for _, id := range expected {
_, found := actual[id]
if !found {
notFound++
continue
}
deleteTestDocument(t, service, organisationsType, id)
}
assert.Equal(t, 0, notFound, "UUIDs not found")
}
func getTestESService(t *testing.T) *esService {
bulkProcessorConfig := NewBulkProcessorConfig(1, 1, 1, 100*time.Millisecond)
esURL := getElasticSearchTestURL()
ec := getElasticClient(t, esURL)
bulkProcessor, err := newBulkProcessor(ec, &bulkProcessorConfig)
require.NoError(t, err, "require a bulk processor")
return &esService{
elasticClient: ec,
bulkProcessor: bulkProcessor,
indexName: indexName,
bulkProcessorConfig: &bulkProcessorConfig,
getCurrentTime: time.Now,
}
}
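// getElasticSearchTestURL returns ELASTICSEARCH_TEST_URL if set, falling back
// to the default local Elasticsearch endpoint.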
func getElasticSearchTestURL() string {
esURL := os.Getenv("ELASTICSEARCH_TEST_URL")
if strings.TrimSpace(esURL) == "" {
esURL = "http://localhost:9200"
}
return esURL
}
func setReadOnly(t *testing.T, client *elastic.Client, indexName string, readOnly bool) {
indexService := elastic.NewIndicesPutSettingsService(client)
_, err := indexService.Index(indexName).BodyJson(map[string]interface{}{"index.blocks.write": strconv.FormatBool(readOnly)}).Do(context.Background())
assert.NoError(t, err, "expected no error for putting index settings")
}
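// writeTestPersonDocument writes a person concept with the given UUID and
// isFTAuthor flag, returning the payload alongside the load result.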
func writeTestPersonDocument(es EsService, conceptType string, uuid string, isFTAuthor string) (EsPersonConceptModel, bool, *elastic.IndexResponse, error) {
payload := EsPersonConceptModel{
EsConceptModel: &EsConceptModel{
Id: uuid,
ApiUrl: fmt.Sprintf("%s/%s/%s", apiBaseURL, conceptType, uuid),
PrefLabel: fmt.Sprintf("Test concept %s %s", conceptType, uuid),
Types: []string{},
DirectType: "",
Aliases: []string{},
LastModified: testLastModified,
},
IsFTAuthor: isFTAuthor,
}
updated, resp, err := es.LoadData(newTestContext(), conceptType, uuid, payload)
return payload, updated, resp, err
}
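// waitForClientInjection polls the service until the injected Elasticsearch
// client answers a cluster health check, giving up after ten attempts.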
func waitForClientInjection(service EsService) error {
var err error
for i := 0; i < 10; i++ {
_, err = service.GetClusterHealth()
if err == nil {
return nil
}
time.Sleep(100 * time.Millisecond)
}
return err
}
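// deleteTestDocument removes the given concept and flushes the index so the
// deletion is visible to subsequent reads.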
func deleteTestDocument(t *testing.T, es *esService, conceptType string, uuid string) {
deleteResp, err := es.DeleteData(newTestContext(), conceptType, uuid)
require.NoError(t, err)
assert.True(t, deleteResp.Found)
flushChangesToIndex(t, es)
}
func flushChangesToIndex(t *testing.T, es *esService) {
err := es.bulkProcessor.Flush()
require.NoError(t, err)
_, err = es.elasticClient.Refresh(indexName).Do(context.Background())
require.NoError(t, err)
}
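// getESConceptsCount returns the number of documents currently held in the
// concept index, exiting the test binary on any error.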
func getESConceptsCount() int {
esURL := getElasticSearchTestURL()
resp, err := http.Get(esURL + "/concept/_count")
if err != nil {
fmt.Printf("%s", err)
os.Exit(1)
}
defer resp.Body.Close()
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
logger.Error(err)
os.Exit(1)
}
var esCountResp struct {
Count int
}
if err := json.Unmarshal(respBody, &esCountResp); err != nil {
logger.Error(err)
os.Exit(1)
}
return esCountResp.Count
}
| [
"\"ELASTICSEARCH_TEST_URL\""
]
| []
| [
"ELASTICSEARCH_TEST_URL"
]
| [] | ["ELASTICSEARCH_TEST_URL"] | go | 1 | 0 | |
client/testutil/testutil.go | package testutil
import (
"os"
"github.com/banyansecurity/terraform-banyan-provider/client"
"github.com/joho/godotenv"
)
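// GetClientHolderForTest loads environment variables from ../../.env and builds
// a client holder from BANYAN_HOST and BANYAN_REFRESH_TOKEN.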
func GetClientHolderForTest() (newClient *client.ClientHolder, err error) {
err = godotenv.Load("../../.env")
if err != nil {
return
}
testhost := os.Getenv("BANYAN_HOST")
testRefreshToken := os.Getenv("BANYAN_REFRESH_TOKEN")
newClient, err = client.NewClientHolder(testhost, testRefreshToken)
return
}
| [
"\"BANYAN_HOST\"",
"\"BANYAN_REFRESH_TOKEN\""
]
| []
| [
"BANYAN_HOST",
"BANYAN_REFRESH_TOKEN"
]
| [] | ["BANYAN_HOST", "BANYAN_REFRESH_TOKEN"] | go | 2 | 0 | |
pkg/util/browser/browser.go | //
// Last.Backend LLC CONFIDENTIAL
// __________________
//
// [2014] - [2019] Last.Backend LLC
// All Rights Reserved.
//
// NOTICE: All information contained herein is, and remains
// the property of Last.Backend LLC and its suppliers,
// if any. The intellectual and technical concepts contained
// herein are proprietary to Last.Backend LLC
// and its suppliers and may be covered by Russian Federation and Foreign Patents,
// patents in process, and are protected by trade secret or copyright law.
// Dissemination of this information or reproduction of this material
// is strictly forbidden unless prior written permission is obtained
// from Last.Backend LLC.
//
package browser
import (
"errors"
"os"
"os/exec"
"path/filepath"
"runtime"
)
var Os = runtime.GOOS
var CommandWrapper = func(name string, parameters ...string) error {
return exec.Command(name, parameters...).Start()
}
func Open(url string) error {
var err error
switch Os {
case "linux":
err = CommandWrapper("xdg-open", url)
case "windows":
cmd := "url.dll,FileProtocolHandler"
runDll32 := filepath.Join(os.Getenv("SYSTEMROOT"), "System32", "rundll32.exe")
err = CommandWrapper(runDll32, cmd, url)
case "darwin":
err = CommandWrapper("open", url)
default:
err = errors.New("unsupported platform")
}
return err
}
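// dryRunOpen is an illustrative sketch, not part of the original package. It
// shows how CommandWrapper acts as a seam: a caller can swap it out so Open
// never launches a real browser and the command that would have run can be
// inspected instead. The function name and behaviour are assumptions.
func dryRunOpen(url string) (recorded []string, err error) {
	prev := CommandWrapper
	defer func() { CommandWrapper = prev }()
	CommandWrapper = func(name string, parameters ...string) error {
		// Record the command instead of executing it.
		recorded = append([]string{name}, parameters...)
		return nil
	}
	err = Open(url)
	return recorded, err
}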
| [
"\"SYSTEMROOT\""
]
| []
| [
"SYSTEMROOT"
]
| [] | ["SYSTEMROOT"] | go | 1 | 0 | |
s3store/s3_store_test.go | // +build !skipexternal
package s3store
import (
"os"
"testing"
"github.com/albsen/chainstore"
"github.com/stretchr/testify/assert"
"golang.org/x/net/context"
)
var (
bucketID string
accessKey string
secretKey string
)
func init() {
bucketID = os.Getenv("S3_BUCKET")
accessKey = os.Getenv("S3_ACCESS_KEY")
secretKey = os.Getenv("S3_SECRET_KEY")
}
func TestS3Store(t *testing.T) {
var store chainstore.Store
var err error
ctx := context.Background()
assert := assert.New(t)
store = chainstore.New(New(bucketID, accessKey, secretKey))
err = store.Open()
assert.Nil(err)
defer store.Close()
// Put a bunch of objects
e1 := store.Put(ctx, "hi", []byte{1, 2, 3})
e2 := store.Put(ctx, "bye", []byte{4, 5, 6})
assert.Nil(e1)
assert.Nil(e2)
// Get those objects
v1, _ := store.Get(ctx, "hi")
v2, _ := store.Get(ctx, "bye")
assert.Equal(v1, []byte{1, 2, 3})
assert.Equal(v2, []byte{4, 5, 6})
// Delete those objects
e1 = store.Del(ctx, "hi")
e2 = store.Del(ctx, "bye")
assert.Equal(e1, nil)
assert.Equal(e2, nil)
}
| [
"\"S3_BUCKET\"",
"\"S3_ACCESS_KEY\"",
"\"S3_SECRET_KEY\""
]
| []
| [
"S3_ACCESS_KEY",
"S3_BUCKET",
"S3_SECRET_KEY"
]
| [] | ["S3_ACCESS_KEY", "S3_BUCKET", "S3_SECRET_KEY"] | go | 3 | 0 | |
plugins/example/helm-example.go | package main
import (
"fmt"
"os"
)
func main() {
fmt.Printf("Args are: %v\n", os.Args)
fmt.Printf("Helm home is: %s\n", os.Getenv("HELM_HOME"))
fmt.Printf("Helm command is: %s\n", os.Getenv("HELM_COMMAND"))
fmt.Printf("Helm default repo is: %s\n", os.Getenv("HELM_DEFAULT_REPO"))
}
| [
"\"HELM_HOME\"",
"\"HELM_COMMAND\"",
"\"HELM_DEFAULT_REPO\""
]
| []
| [
"HELM_HOME",
"HELM_DEFAULT_REPO",
"HELM_COMMAND"
]
| [] | ["HELM_HOME", "HELM_DEFAULT_REPO", "HELM_COMMAND"] | go | 3 | 0 | |
test/e2e/common_test.go | package integration
import (
"bytes"
"fmt"
"io/ioutil"
"math/rand"
"net"
"os"
"os/exec"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/containers/common/pkg/cgroups"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/inspect"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/util"
. "github.com/containers/podman/v4/test/utils"
"github.com/containers/storage"
"github.com/containers/storage/pkg/reexec"
"github.com/containers/storage/pkg/stringid"
jsoniter "github.com/json-iterator/go"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var (
//lint:ignore ST1003
PODMAN_BINARY string //nolint:golint,stylecheck
INTEGRATION_ROOT string //nolint:golint,stylecheck
CGROUP_MANAGER = "systemd" //nolint:golint,stylecheck
RESTORE_IMAGES = []string{ALPINE, BB, nginx} //nolint:golint,stylecheck
defaultWaitTimeout = 90
CGROUPSV2, _ = cgroups.IsCgroup2UnifiedMode() //nolint:golint,stylecheck
)
// PodmanTestIntegration struct for command line options
type PodmanTestIntegration struct {
PodmanTest
ConmonBinary string
Root string
NetworkConfigDir string
OCIRuntime string
RunRoot string
StorageOptions string
SignaturePolicyPath string
CgroupManager string
Host HostOS
Timings []string
TmpDir string
RemoteStartErr error
}
var LockTmpDir string
// PodmanSessionIntegration struct for command line session
type PodmanSessionIntegration struct {
*PodmanSession
}
type testResult struct {
name string
length float64
}
type testResultsSorted []testResult
func (a testResultsSorted) Len() int { return len(a) }
func (a testResultsSorted) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type testResultsSortedLength struct{ testResultsSorted }
func (a testResultsSorted) Less(i, j int) bool { return a[i].length < a[j].length }
var testResults []testResult
var testResultsMutex sync.Mutex
func TestMain(m *testing.M) {
if reexec.Init() {
return
}
os.Exit(m.Run())
}
// TestLibpod ginkgo master function
func TestLibpod(t *testing.T) {
if os.Getenv("NOCACHE") == "1" {
CACHE_IMAGES = []string{}
RESTORE_IMAGES = []string{}
}
RegisterFailHandler(Fail)
RunSpecs(t, "Libpod Suite")
}
var _ = SynchronizedBeforeSuite(func() []byte {
// make cache dir
if err := os.MkdirAll(ImageCacheDir, 0777); err != nil {
fmt.Printf("%q\n", err)
os.Exit(1)
}
// Cache images
cwd, _ := os.Getwd()
INTEGRATION_ROOT = filepath.Join(cwd, "../../")
podman := PodmanTestSetup("/tmp")
// Pull cirros but don't put it into the cache
pullImages := []string{cirros, fedoraToolbox, volumeTest}
pullImages = append(pullImages, CACHE_IMAGES...)
for _, image := range pullImages {
podman.createArtifact(image)
}
if err := os.MkdirAll(filepath.Join(ImageCacheDir, podman.ImageCacheFS+"-images"), 0777); err != nil {
fmt.Printf("%q\n", err)
os.Exit(1)
}
podman.Root = ImageCacheDir
// If running localized tests, the cache dir is created and populated. If the
// tests are remote, this is a no-op
populateCache(podman)
host := GetHostDistributionInfo()
if host.Distribution == "rhel" && strings.HasPrefix(host.Version, "7") {
f, err := os.OpenFile("/proc/sys/user/max_user_namespaces", os.O_WRONLY, 0644)
if err != nil {
fmt.Println("Unable to enable userspace on RHEL 7")
os.Exit(1)
}
_, err = f.WriteString("15000")
if err != nil {
fmt.Println("Unable to enable userspace on RHEL 7")
os.Exit(1)
}
f.Close()
}
path, err := ioutil.TempDir("", "libpodlock")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
// If running remote, we need to stop the associated podman system service
if podman.RemoteTest {
podman.StopRemoteService()
}
return []byte(path)
}, func(data []byte) {
cwd, _ := os.Getwd()
INTEGRATION_ROOT = filepath.Join(cwd, "../../")
LockTmpDir = string(data)
})
func (p *PodmanTestIntegration) Setup() {
cwd, _ := os.Getwd()
INTEGRATION_ROOT = filepath.Join(cwd, "../../")
}
var _ = SynchronizedAfterSuite(func() {},
func() {
sort.Sort(testResultsSortedLength{testResults})
fmt.Println("integration timing results")
for _, result := range testResults {
fmt.Printf("%s\t\t%f\n", result.name, result.length)
}
// previous runroot
tempdir, err := CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
}
podmanTest := PodmanTestCreate(tempdir)
if err := os.RemoveAll(podmanTest.Root); err != nil {
fmt.Printf("%q\n", err)
}
// If running remote, we need to stop the associated podman system service
if podmanTest.RemoteTest {
podmanTest.StopRemoteService()
}
// for localized tests, this removes the image cache dir and for remote tests
// this is a no-op
removeCache()
})
// PodmanTestCreateUtil creates a PodmanTestIntegration instance for the tests
func PodmanTestCreateUtil(tempDir string, remote bool) *PodmanTestIntegration {
var podmanRemoteBinary string
host := GetHostDistributionInfo()
cwd, _ := os.Getwd()
root := filepath.Join(tempDir, "root")
podmanBinary := filepath.Join(cwd, "../../bin/podman")
if os.Getenv("PODMAN_BINARY") != "" {
podmanBinary = os.Getenv("PODMAN_BINARY")
}
podmanRemoteBinary = filepath.Join(cwd, "../../bin/podman-remote")
if os.Getenv("PODMAN_REMOTE_BINARY") != "" {
podmanRemoteBinary = os.Getenv("PODMAN_REMOTE_BINARY")
}
conmonBinary := filepath.Join("/usr/libexec/podman/conmon")
altConmonBinary := "/usr/bin/conmon"
if _, err := os.Stat(conmonBinary); os.IsNotExist(err) {
conmonBinary = altConmonBinary
}
if os.Getenv("CONMON_BINARY") != "" {
conmonBinary = os.Getenv("CONMON_BINARY")
}
storageOptions := STORAGE_OPTIONS
if os.Getenv("STORAGE_OPTIONS") != "" {
storageOptions = os.Getenv("STORAGE_OPTIONS")
}
cgroupManager := CGROUP_MANAGER
if rootless.IsRootless() {
cgroupManager = "cgroupfs"
}
if os.Getenv("CGROUP_MANAGER") != "" {
cgroupManager = os.Getenv("CGROUP_MANAGER")
}
ociRuntime := os.Getenv("OCI_RUNTIME")
if ociRuntime == "" {
ociRuntime = "crun"
}
os.Setenv("DISABLE_HC_SYSTEMD", "true")
networkBackend := CNI
networkConfigDir := "/etc/cni/net.d"
if rootless.IsRootless() {
networkConfigDir = filepath.Join(os.Getenv("HOME"), ".config/cni/net.d")
}
if strings.ToLower(os.Getenv("NETWORK_BACKEND")) == "netavark" {
networkBackend = Netavark
networkConfigDir = "/etc/containers/networks"
if rootless.IsRootless() {
networkConfigDir = filepath.Join(root, "etc", "networks")
}
}
if err := os.MkdirAll(root, 0755); err != nil {
panic(err)
}
if err := os.MkdirAll(networkConfigDir, 0755); err != nil {
panic(err)
}
storageFs := STORAGE_FS
if rootless.IsRootless() {
storageFs = ROOTLESS_STORAGE_FS
}
if os.Getenv("STORAGE_FS") != "" {
storageFs = os.Getenv("STORAGE_FS")
storageOptions = "--storage-driver " + storageFs
}
p := &PodmanTestIntegration{
PodmanTest: PodmanTest{
PodmanBinary: podmanBinary,
RemotePodmanBinary: podmanRemoteBinary,
TempDir: tempDir,
RemoteTest: remote,
ImageCacheFS: storageFs,
ImageCacheDir: ImageCacheDir,
NetworkBackend: networkBackend,
},
ConmonBinary: conmonBinary,
Root: root,
TmpDir: tempDir,
NetworkConfigDir: networkConfigDir,
OCIRuntime: ociRuntime,
RunRoot: filepath.Join(tempDir, "runroot"),
StorageOptions: storageOptions,
SignaturePolicyPath: filepath.Join(INTEGRATION_ROOT, "test/policy.json"),
CgroupManager: cgroupManager,
Host: host,
}
if remote {
var pathPrefix string
if !rootless.IsRootless() {
pathPrefix = "/run/podman/podman"
} else {
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
pathPrefix = filepath.Join(runtimeDir, "podman")
}
// We want to avoid collisions in socket paths, but using the
// socket directly for a collision check doesn’t work; bind(2) on AF_UNIX
// creates the file, and we need to pass a unique path now before the bind(2)
// happens. So, use a podman-%s.sock-lock empty file as a marker.
tries := 0
for {
uuid := stringid.GenerateNonCryptoID()
lockPath := fmt.Sprintf("%s-%s.sock-lock", pathPrefix, uuid)
lockFile, err := os.OpenFile(lockPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0700)
if err == nil {
lockFile.Close()
p.RemoteSocketLock = lockPath
p.RemoteSocket = fmt.Sprintf("unix:%s-%s.sock", pathPrefix, uuid)
break
}
tries++
if tries >= 1000 {
panic("Too many RemoteSocket collisions")
}
}
}
// Setup registries.conf ENV variable
p.setDefaultRegistriesConfigEnv()
// Rewrite the PodmanAsUser function
p.PodmanMakeOptions = p.makeOptions
return p
}
func (p PodmanTestIntegration) AddImageToRWStore(image string) {
if err := p.RestoreArtifact(image); err != nil {
logrus.Errorf("Unable to restore %s to RW store", image)
}
}
func imageTarPath(image string) string {
cacheDir := os.Getenv("PODMAN_TEST_IMAGE_CACHE_DIR")
if cacheDir == "" {
cacheDir = os.Getenv("TMPDIR")
if cacheDir == "" {
cacheDir = "/tmp"
}
}
// e.g., registry.com/fubar:latest -> registry.com-fubar-latest.tar
imageCacheName := strings.Replace(strings.Replace(image, ":", "-", -1), "/", "-", -1) + ".tar"
return filepath.Join(cacheDir, imageCacheName)
}
// createArtifact creates a cached image tarball in a local directory
func (p *PodmanTestIntegration) createArtifact(image string) {
if os.Getenv("NO_TEST_CACHE") != "" {
return
}
destName := imageTarPath(image)
if _, err := os.Stat(destName); os.IsNotExist(err) {
fmt.Printf("Caching %s at %s...\n", image, destName)
pull := p.PodmanNoCache([]string{"pull", image})
pull.Wait(440)
Expect(pull).Should(Exit(0))
save := p.PodmanNoCache([]string{"save", "-o", destName, image})
save.Wait(90)
Expect(save).Should(Exit(0))
fmt.Printf("\n")
} else {
fmt.Printf("[image already cached: %s]\n", destName)
}
}
// InspectImageJSON takes the session output of an inspect
// image and returns json
func (s *PodmanSessionIntegration) InspectImageJSON() []inspect.ImageData {
var i []inspect.ImageData
err := jsoniter.Unmarshal(s.Out.Contents(), &i)
Expect(err).To(BeNil())
return i
}
// InspectContainer returns a container's inspect data in JSON format
func (p *PodmanTestIntegration) InspectContainer(name string) []define.InspectContainerData {
cmd := []string{"inspect", name}
session := p.Podman(cmd)
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
return session.InspectContainerToJSON()
}
func processTestResult(f GinkgoTestDescription) {
tr := testResult{length: f.Duration.Seconds(), name: f.TestText}
testResultsMutex.Lock()
testResults = append(testResults, tr)
testResultsMutex.Unlock()
}
func GetPortLock(port string) storage.Locker {
lockFile := filepath.Join(LockTmpDir, port)
lock, err := storage.GetLockfile(lockFile)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
lock.Lock()
return lock
}
// GetRandomIPAddress returns a random IP address to avoid IP
// collisions during parallel tests
func GetRandomIPAddress() string {
// To avoid IP collisions, initialize the random seed used for random IP addresses
rand.Seed(time.Now().UnixNano())
// Add GinkgoParallelNode() on top of the IP address
// in case of the same random seed
ip3 := strconv.Itoa(rand.Intn(230) + GinkgoParallelNode())
ip4 := strconv.Itoa(rand.Intn(230) + GinkgoParallelNode())
return "10.88." + ip3 + "." + ip4
}
// RunTopContainer runs a simple container in the background that
// runs top. If the name passed != "", it will have a name
func (p *PodmanTestIntegration) RunTopContainer(name string) *PodmanSessionIntegration {
return p.RunTopContainerWithArgs(name, nil)
}
// RunTopContainerWithArgs runs a simple container in the background that
// runs top. If the name passed != "", it will have a name; command args can also be passed in
func (p *PodmanTestIntegration) RunTopContainerWithArgs(name string, args []string) *PodmanSessionIntegration {
var podmanArgs = []string{"run"}
if name != "" {
podmanArgs = append(podmanArgs, "--name", name)
}
podmanArgs = append(podmanArgs, args...)
podmanArgs = append(podmanArgs, "-d", ALPINE, "top")
return p.Podman(podmanArgs)
}
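// Illustrative only (not in the original file): a typical caller starts the
// background container, waits, and asserts on the exit code before addressing
// the container by name. The container name and the podmanTest fixture name
// are assumptions for this sketch.
//
//	session := podmanTest.RunTopContainer("top-example")
//	session.WaitWithDefaultTimeout()
//	Expect(session).Should(Exit(0))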
// RunLsContainer runs a simple container in the background that
// simply runs ls. If the name passed != "", it will have a name
func (p *PodmanTestIntegration) RunLsContainer(name string) (*PodmanSessionIntegration, int, string) {
var podmanArgs = []string{"run"}
if name != "" {
podmanArgs = append(podmanArgs, "--name", name)
}
podmanArgs = append(podmanArgs, "-d", ALPINE, "ls")
session := p.Podman(podmanArgs)
session.WaitWithDefaultTimeout()
if session.ExitCode() != 0 {
return session, session.ExitCode(), session.OutputToString()
}
cid := session.OutputToString()
wsession := p.Podman([]string{"wait", cid})
wsession.WaitWithDefaultTimeout()
return session, wsession.ExitCode(), cid
}
// RunNginxWithHealthCheck runs the alpine nginx container with an optional name and adds a healthcheck into it
func (p *PodmanTestIntegration) RunNginxWithHealthCheck(name string) (*PodmanSessionIntegration, string) {
var podmanArgs = []string{"run"}
if name != "" {
podmanArgs = append(podmanArgs, "--name", name)
}
podmanArgs = append(podmanArgs, "-dt", "-P", "--health-cmd", "curl http://localhost/", nginx)
session := p.Podman(podmanArgs)
session.WaitWithDefaultTimeout()
return session, session.OutputToString()
}
func (p *PodmanTestIntegration) RunLsContainerInPod(name, pod string) (*PodmanSessionIntegration, int, string) {
var podmanArgs = []string{"run", "--pod", pod}
if name != "" {
podmanArgs = append(podmanArgs, "--name", name)
}
podmanArgs = append(podmanArgs, "-d", ALPINE, "ls")
session := p.Podman(podmanArgs)
session.WaitWithDefaultTimeout()
if session.ExitCode() != 0 {
return session, session.ExitCode(), session.OutputToString()
}
cid := session.OutputToString()
wsession := p.Podman([]string{"wait", cid})
wsession.WaitWithDefaultTimeout()
return session, wsession.ExitCode(), cid
}
// BuildImage uses podman build and buildah to build an image
// called imageName based on a string dockerfile
func (p *PodmanTestIntegration) BuildImage(dockerfile, imageName string, layers string) string {
return p.buildImage(dockerfile, imageName, layers, "")
}
// BuildImageWithLabel uses podman build and buildah to build an image
// called imageName based on a string dockerfile, adding the desired label to the build parameters
func (p *PodmanTestIntegration) BuildImageWithLabel(dockerfile, imageName string, layers string, label string) string {
return p.buildImage(dockerfile, imageName, layers, label)
}
// PodmanPID execs podman and returns its PID
func (p *PodmanTestIntegration) PodmanPID(args []string) (*PodmanSessionIntegration, int) {
podmanOptions := p.MakeOptions(args, false, false)
fmt.Printf("Running: %s %s\n", p.PodmanBinary, strings.Join(podmanOptions, " "))
command := exec.Command(p.PodmanBinary, podmanOptions...)
session, err := Start(command, GinkgoWriter, GinkgoWriter)
if err != nil {
Fail("unable to run podman command: " + strings.Join(podmanOptions, " "))
}
podmanSession := &PodmanSession{Session: session}
return &PodmanSessionIntegration{podmanSession}, command.Process.Pid
}
// Cleanup cleans up the temporary store
func (p *PodmanTestIntegration) Cleanup() {
// Remove all containers
stopall := p.Podman([]string{"stop", "-a", "--time", "0"})
stopall.WaitWithDefaultTimeout()
podstop := p.Podman([]string{"pod", "stop", "-a", "-t", "0"})
podstop.WaitWithDefaultTimeout()
podrm := p.Podman([]string{"pod", "rm", "-fa"})
podrm.WaitWithDefaultTimeout()
session := p.Podman([]string{"rm", "-fa"})
session.WaitWithDefaultTimeout()
p.StopRemoteService()
// Nuke tempdir
if err := os.RemoveAll(p.TempDir); err != nil {
fmt.Printf("%q\n", err)
}
// Clean up the registries configuration file ENV variable set in Create
resetRegistriesConfigEnv()
}
// CleanupVolume cleans up the temporary store
func (p *PodmanTestIntegration) CleanupVolume() {
// Remove all volumes
session := p.Podman([]string{"volume", "rm", "-fa"})
session.Wait(90)
p.Cleanup()
}
// CleanupSecrets cleans up the temporary store
func (p *PodmanTestIntegration) CleanupSecrets() {
// Remove all secrets
session := p.Podman([]string{"secret", "rm", "-a"})
session.Wait(90)
// Stop remote service on secret cleanup
p.StopRemoteService()
// Nuke tempdir
if err := os.RemoveAll(p.TempDir); err != nil {
fmt.Printf("%q\n", err)
}
}
// InspectContainerToJSON takes the session output of an inspect
// container and returns json
func (s *PodmanSessionIntegration) InspectContainerToJSON() []define.InspectContainerData {
var i []define.InspectContainerData
err := jsoniter.Unmarshal(s.Out.Contents(), &i)
Expect(err).To(BeNil())
return i
}
// InspectPodToJSON takes the session's output from a pod inspect and returns json
func (s *PodmanSessionIntegration) InspectPodToJSON() define.InspectPodData {
var i define.InspectPodData
err := jsoniter.Unmarshal(s.Out.Contents(), &i)
Expect(err).To(BeNil())
return i
}
// InspectPodArrToJSON takes the session's output from an inspect and returns a json array
func (s *PodmanSessionIntegration) InspectPodArrToJSON() []define.InspectPodData {
var i []define.InspectPodData
err := jsoniter.Unmarshal(s.Out.Contents(), &i)
Expect(err).To(BeNil())
return i
}
// CreatePod creates a pod with no infra container
// it optionally takes a pod name
func (p *PodmanTestIntegration) CreatePod(options map[string][]string) (*PodmanSessionIntegration, int, string) {
var args = []string{"pod", "create", "--infra=false", "--share", ""}
for k, values := range options {
for _, v := range values {
args = append(args, k+"="+v)
}
}
session := p.Podman(args)
session.WaitWithDefaultTimeout()
return session, session.ExitCode(), session.OutputToString()
}
func (p *PodmanTestIntegration) RunTopContainerInPod(name, pod string) *PodmanSessionIntegration {
return p.RunTopContainerWithArgs(name, []string{"--pod", pod})
}
func (p *PodmanTestIntegration) RunHealthCheck(cid string) error {
for i := 0; i < 10; i++ {
hc := p.Podman([]string{"healthcheck", "run", cid})
hc.WaitWithDefaultTimeout()
if hc.ExitCode() == 0 {
return nil
}
// Restart container if it's not running
ps := p.Podman([]string{"ps", "--no-trunc", "--quiet", "--filter", fmt.Sprintf("id=%s", cid)})
ps.WaitWithDefaultTimeout()
if ps.ExitCode() == 0 {
if !strings.Contains(ps.OutputToString(), cid) {
fmt.Printf("Container %s is not running, restarting", cid)
restart := p.Podman([]string{"restart", cid})
restart.WaitWithDefaultTimeout()
if restart.ExitCode() != 0 {
return errors.Errorf("unable to restart %s", cid)
}
}
}
fmt.Printf("Waiting for %s to pass healthcheck\n", cid)
time.Sleep(1 * time.Second)
}
return errors.Errorf("unable to detect %s as running", cid)
}
func (p *PodmanTestIntegration) CreateSeccompJSON(in []byte) (string, error) {
jsonFile := filepath.Join(p.TempDir, "seccomp.json")
err := WriteJSONFile(in, jsonFile)
if err != nil {
return "", err
}
return jsonFile, nil
}
func checkReason(reason string) {
if len(reason) < 5 {
panic("Test must specify a reason to skip")
}
}
func SkipIfRootlessCgroupsV1(reason string) {
checkReason(reason)
if os.Geteuid() != 0 && !CGROUPSV2 {
Skip("[rootless]: " + reason)
}
}
func SkipIfRootless(reason string) {
checkReason(reason)
if os.Geteuid() != 0 {
Skip("[rootless]: " + reason)
}
}
func SkipIfNotRootless(reason string) {
checkReason(reason)
if os.Geteuid() == 0 {
Skip("[notRootless]: " + reason)
}
}
func SkipIfSystemdNotRunning(reason string) {
checkReason(reason)
cmd := exec.Command("systemctl", "list-units")
err := cmd.Run()
if err != nil {
if _, ok := err.(*exec.Error); ok {
Skip("[notSystemd]: not running " + reason)
}
Expect(err).ToNot(HaveOccurred())
}
}
func SkipIfNotSystemd(manager, reason string) {
checkReason(reason)
if manager != "systemd" {
Skip("[notSystemd]: " + reason)
}
}
func SkipIfNotFedora() {
info := GetHostDistributionInfo()
if info.Distribution != "fedora" {
Skip("Test can only run on Fedora")
}
}
func isRootless() bool {
return os.Geteuid() != 0
}
func isCgroupsV1() bool {
return !CGROUPSV2
}
func SkipIfCgroupV1(reason string) {
checkReason(reason)
if isCgroupsV1() {
Skip(reason)
}
}
func SkipIfCgroupV2(reason string) {
checkReason(reason)
if CGROUPSV2 {
Skip(reason)
}
}
func isContainerized() bool {
// This is set to "podman" by podman automatically
return os.Getenv("container") != ""
}
func SkipIfContainerized(reason string) {
checkReason(reason)
if isContainerized() {
Skip(reason)
}
}
func SkipIfRemote(reason string) {
checkReason(reason)
if !IsRemote() {
return
}
Skip("[remote]: " + reason)
}
func SkipIfNotRemote(reason string) {
checkReason(reason)
if IsRemote() {
return
}
Skip("[local]: " + reason)
}
// SkipIfInContainer skips a test if the test is run inside a container
func SkipIfInContainer(reason string) {
checkReason(reason)
if os.Getenv("TEST_ENVIRON") == "container" {
Skip("[container]: " + reason)
}
}
// SkipIfNotActive skips a test if the given systemd unit is not active
func SkipIfNotActive(unit string, reason string) {
checkReason(reason)
var buffer bytes.Buffer
cmd := exec.Command("systemctl", "is-active", unit)
cmd.Stdout = &buffer
err := cmd.Start()
Expect(err).ToNot(HaveOccurred())
err = cmd.Wait()
Expect(err).ToNot(HaveOccurred())
if strings.TrimSpace(buffer.String()) != "active" {
Skip(fmt.Sprintf("[systemd]: unit %s is not active: %s", unit, reason))
}
}
func SkipIfCNI(p *PodmanTestIntegration) {
if p.NetworkBackend == CNI {
Skip("this test is not compatible with the CNI network backend")
}
}
func SkipIfNetavark(p *PodmanTestIntegration) {
if p.NetworkBackend == Netavark {
Skip("This test is not compatible with the netavark network backend")
}
}
// PodmanAsUser is the exec call to podman on the filesystem with the specified uid/gid and environment
func (p *PodmanTestIntegration) PodmanAsUser(args []string, uid, gid uint32, cwd string, env []string) *PodmanSessionIntegration {
podmanSession := p.PodmanAsUserBase(args, uid, gid, cwd, env, false, false, nil, nil)
return &PodmanSessionIntegration{podmanSession}
}
// RestartRemoteService stops and starts the API server, usually to change config
func (p *PodmanTestIntegration) RestartRemoteService() {
p.StopRemoteService()
p.StartRemoteService()
}
// RestoreArtifactToCache populates the imagecache from tarballs that were cached earlier
func (p *PodmanTestIntegration) RestoreArtifactToCache(image string) error {
tarball := imageTarPath(image)
if _, err := os.Stat(tarball); err == nil {
fmt.Printf("Restoring %s...\n", image)
p.Root = p.ImageCacheDir
restore := p.PodmanNoEvents([]string{"load", "-q", "-i", tarball})
restore.WaitWithDefaultTimeout()
}
return nil
}
func populateCache(podman *PodmanTestIntegration) {
for _, image := range CACHE_IMAGES {
err := podman.RestoreArtifactToCache(image)
Expect(err).To(BeNil())
}
// logformatter uses this to recognize the first test
fmt.Printf("-----------------------------\n")
}
func removeCache() {
// Remove cache dirs
if err := os.RemoveAll(ImageCacheDir); err != nil {
fmt.Printf("%q\n", err)
}
}
// PodmanNoCache calls the podman command with no configured imagecache
func (p *PodmanTestIntegration) PodmanNoCache(args []string) *PodmanSessionIntegration {
podmanSession := p.PodmanBase(args, false, true)
return &PodmanSessionIntegration{podmanSession}
}
func PodmanTestSetup(tempDir string) *PodmanTestIntegration {
return PodmanTestCreateUtil(tempDir, false)
}
// PodmanNoEvents calls the Podman command without an imagecache and without an
// events backend. It is used mostly for caching and uncaching images.
func (p *PodmanTestIntegration) PodmanNoEvents(args []string) *PodmanSessionIntegration {
podmanSession := p.PodmanBase(args, true, true)
return &PodmanSessionIntegration{podmanSession}
}
// makeOptions assembles all the podman main options
func (p *PodmanTestIntegration) makeOptions(args []string, noEvents, noCache bool) []string {
if p.RemoteTest {
if !util.StringInSlice("--remote", args) {
return append([]string{"--remote", "--url", p.RemoteSocket}, args...)
}
return args
}
var debug string
if _, ok := os.LookupEnv("DEBUG"); ok {
debug = "--log-level=debug --syslog=true "
}
eventsType := "file"
if noEvents {
eventsType = "none"
}
networkBackend := p.NetworkBackend.ToString()
networkDir := p.NetworkConfigDir
podmanOptions := strings.Split(fmt.Sprintf("%s--root %s --runroot %s --runtime %s --conmon %s --network-config-dir %s --cgroup-manager %s --tmpdir %s --events-backend %s",
debug, p.Root, p.RunRoot, p.OCIRuntime, p.ConmonBinary, networkDir, p.CgroupManager, p.TmpDir, eventsType), " ")
if os.Getenv("HOOK_OPTION") != "" {
podmanOptions = append(podmanOptions, os.Getenv("HOOK_OPTION"))
}
podmanOptions = append(podmanOptions, "--network-backend", networkBackend)
podmanOptions = append(podmanOptions, strings.Split(p.StorageOptions, " ")...)
if !noCache {
cacheOptions := []string{"--storage-opt",
fmt.Sprintf("%s.imagestore=%s", p.PodmanTest.ImageCacheFS, p.PodmanTest.ImageCacheDir)}
podmanOptions = append(cacheOptions, podmanOptions...)
}
podmanOptions = append(podmanOptions, args...)
return podmanOptions
}
func writeConf(conf []byte, confPath string) {
if _, err := os.Stat(filepath.Dir(confPath)); os.IsNotExist(err) {
if err := os.MkdirAll(filepath.Dir(confPath), 0777); err != nil {
fmt.Println(err)
}
}
if err := ioutil.WriteFile(confPath, conf, 0777); err != nil {
fmt.Println(err)
}
}
func removeConf(confPath string) {
if err := os.Remove(confPath); err != nil {
fmt.Println(err)
}
}
// generateNetworkConfig generates a CNI or Netavark config with a random name
// it returns the network name and the filepath
func generateNetworkConfig(p *PodmanTestIntegration) (string, string) {
var (
path string
conf string
)
// generate a random name to prevent conflicts with other tests
name := "net" + stringid.GenerateNonCryptoID()
if p.NetworkBackend != Netavark {
path = filepath.Join(p.NetworkConfigDir, fmt.Sprintf("%s.conflist", name))
conf = fmt.Sprintf(`{
"cniVersion": "0.3.0",
"name": "%s",
"plugins": [
{
"type": "bridge",
"bridge": "cni1",
"isGateway": true,
"ipMasq": true,
"ipam": {
"type": "host-local",
"subnet": "10.99.0.0/16",
"routes": [
{ "dst": "0.0.0.0/0" }
]
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}`, name)
} else {
path = filepath.Join(p.NetworkConfigDir, fmt.Sprintf("%s.json", name))
conf = fmt.Sprintf(`
{
"name": "%s",
"id": "e1ef2749024b88f5663ca693a9118e036d6bfc48bcfe460faf45e9614a513e5c",
"driver": "bridge",
"network_interface": "netavark1",
"created": "2022-01-05T14:15:10.975493521-06:00",
"subnets": [
{
"subnet": "10.100.0.0/16",
"gateway": "10.100.0.1"
}
],
"ipv6_enabled": false,
"internal": false,
"dns_enabled": true,
"ipam_options": {
"driver": "host-local"
}
}
`, name)
}
writeConf([]byte(conf), path)
return name, path
}
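// Illustrative only (not in the original file): the usual pattern is to
// generate a throwaway network config, then remove both the config file and
// the network itself when the test finishes (podmanTest stands for the
// suite's PodmanTestIntegration fixture).
//
//	netName, confPath := generateNetworkConfig(podmanTest)
//	defer removeConf(confPath)
//	defer podmanTest.removeNetwork(netName)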
func (p *PodmanTestIntegration) removeNetwork(name string) {
session := p.Podman([]string{"network", "rm", "-f", name})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(BeNumerically("<=", 1), "Exit code must be 0 or 1")
}
func (s *PodmanSessionIntegration) jq(jqCommand string) (string, error) {
var out bytes.Buffer
cmd := exec.Command("jq", jqCommand)
cmd.Stdin = strings.NewReader(s.OutputToString())
cmd.Stdout = &out
err := cmd.Run()
return strings.TrimRight(out.String(), "\n"), err
}
func (p *PodmanTestIntegration) buildImage(dockerfile, imageName string, layers string, label string) string {
dockerfilePath := filepath.Join(p.TempDir, "Dockerfile")
err := ioutil.WriteFile(dockerfilePath, []byte(dockerfile), 0755)
Expect(err).To(BeNil())
cmd := []string{"build", "--pull-never", "--layers=" + layers, "--file", dockerfilePath}
if label != "" {
cmd = append(cmd, "--label="+label)
}
if len(imageName) > 0 {
cmd = append(cmd, []string{"-t", imageName}...)
}
cmd = append(cmd, p.TempDir)
session := p.Podman(cmd)
session.Wait(240)
Expect(session).Should(Exit(0), fmt.Sprintf("BuildImage session output: %q", session.OutputToString()))
output := session.OutputToStringArray()
return output[len(output)-1]
}
func writeYaml(content string, fileName string) error {
f, err := os.Create(fileName)
if err != nil {
return err
}
defer f.Close()
_, err = f.WriteString(content)
if err != nil {
return err
}
return nil
}
// GetPort finds an unused port on the system
func GetPort() int {
a, err := net.ResolveTCPAddr("tcp", "localhost:0")
if err != nil {
Fail(fmt.Sprintf("unable to get free port: %v", err))
}
l, err := net.ListenTCP("tcp", a)
if err != nil {
Fail(fmt.Sprintf("unable to get free port: %v", err))
}
defer l.Close()
return l.Addr().(*net.TCPAddr).Port
}
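// Illustrative only (not in the original file): tests typically pair GetPort
// with GetPortLock so parallel Ginkgo nodes do not race for the same port
// (podmanTest stands for the suite's PodmanTestIntegration fixture).
//
//	port := GetPort()
//	lock := GetPortLock(strconv.Itoa(port))
//	defer lock.Unlock()
//	session := podmanTest.Podman([]string{"run", "-dt", "-p", fmt.Sprintf("%d:80", port), nginx})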
func ncz(port int) bool {
timeout := 500 * time.Millisecond
for i := 0; i < 5; i++ {
ncCmd := []string{"-z", "localhost", fmt.Sprintf("%d", port)}
fmt.Printf("Running: nc %s\n", strings.Join(ncCmd, " "))
check := SystemExec("nc", ncCmd)
if check.ExitCode() == 0 {
return true
}
time.Sleep(timeout)
timeout += 500 * time.Millisecond // back off a little more before the next attempt
}
return false
}
func createNetworkName(name string) string {
return name + stringid.GenerateNonCryptoID()[:10]
}
var IPRegex = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`
// digShort execs into the given container and does a dig lookup with a timeout
// backoff. If it gets a response, it ensures that the output looks like an IP
// address and then checks it against each of the expected names
func digShort(container, lookupName string, matchNames []string, p *PodmanTestIntegration) string {
digInterval := time.Millisecond * 250
for i := 0; i < 6; i++ {
time.Sleep(digInterval * time.Duration(i))
dig := p.Podman([]string{"exec", container, "dig", "+short", lookupName})
dig.WaitWithDefaultTimeout()
if dig.ExitCode() == 0 {
output := dig.OutputToString()
Expect(output).To(MatchRegexp(IPRegex))
for _, name := range matchNames {
Expect(output).To(Equal(name))
}
return output
}
}
Fail("dns is not responding")
return ""
}
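// Illustrative only (not in the original file): a DNS test might assert that
// one container resolves another by name on a shared network. The container
// names and the expected IP are made up for this sketch.
//
//	expectedIP := "10.88.0.5"
//	ip := digShort("client-ctr", "server-ctr", []string{expectedIP}, podmanTest)
//	Expect(ip).To(Equal(expectedIP))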
| [
"\"NOCACHE\"",
"\"PODMAN_BINARY\"",
"\"PODMAN_BINARY\"",
"\"PODMAN_REMOTE_BINARY\"",
"\"PODMAN_REMOTE_BINARY\"",
"\"CONMON_BINARY\"",
"\"CONMON_BINARY\"",
"\"STORAGE_OPTIONS\"",
"\"STORAGE_OPTIONS\"",
"\"CGROUP_MANAGER\"",
"\"CGROUP_MANAGER\"",
"\"OCI_RUNTIME\"",
"\"HOME\"",
"\"NETWORK_BACKEND\"",
"\"STORAGE_FS\"",
"\"STORAGE_FS\"",
"\"XDG_RUNTIME_DIR\"",
"\"PODMAN_TEST_IMAGE_CACHE_DIR\"",
"\"TMPDIR\"",
"\"NO_TEST_CACHE\"",
"\"container\"",
"\"TEST_ENVIRON\"",
"\"HOOK_OPTION\"",
"\"HOOK_OPTION\""
]
| []
| [
"CGROUP_MANAGER",
"STORAGE_FS",
"NOCACHE",
"container",
"TEST_ENVIRON",
"STORAGE_OPTIONS",
"HOOK_OPTION",
"NO_TEST_CACHE",
"PODMAN_BINARY",
"XDG_RUNTIME_DIR",
"CONMON_BINARY",
"OCI_RUNTIME",
"NETWORK_BACKEND",
"PODMAN_TEST_IMAGE_CACHE_DIR",
"PODMAN_REMOTE_BINARY",
"HOME",
"TMPDIR"
]
| [] | ["CGROUP_MANAGER", "STORAGE_FS", "NOCACHE", "container", "TEST_ENVIRON", "STORAGE_OPTIONS", "HOOK_OPTION", "NO_TEST_CACHE", "PODMAN_BINARY", "XDG_RUNTIME_DIR", "CONMON_BINARY", "OCI_RUNTIME", "NETWORK_BACKEND", "PODMAN_TEST_IMAGE_CACHE_DIR", "PODMAN_REMOTE_BINARY", "HOME", "TMPDIR"] | go | 17 | 0 | |
docker/docker.go | package main
import (
"crypto/tls"
"fmt"
"os"
"runtime"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/client"
"github.com/docker/docker/autogen/dockerversion"
"github.com/docker/docker/opts"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/pkg/reexec"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/pkg/tlsconfig"
"github.com/docker/docker/utils"
)
const (
defaultTrustKeyFile = "key.json"
defaultCaFile = "ca.pem"
defaultKeyFile = "key.pem"
defaultCertFile = "cert.pem"
)
func main() {
if reexec.Init() {
return
}
// Set terminal emulation based on platform as required.
stdin, stdout, stderr := term.StdStreams()
initLogging(stderr)
flag.Parse()
// FIXME: validate daemon flags here
if *flVersion {
showVersion()
return
}
if *flLogLevel != "" {
lvl, err := logrus.ParseLevel(*flLogLevel)
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", *flLogLevel)
os.Exit(1)
}
setLogLevel(lvl)
} else {
setLogLevel(logrus.InfoLevel)
}
if *flDebug {
os.Setenv("DEBUG", "1")
setLogLevel(logrus.DebugLevel)
}
if len(flHosts) == 0 {
defaultHost := os.Getenv("DOCKER_HOST")
if defaultHost == "" || *flDaemon {
if runtime.GOOS != "windows" {
// If we do not have a host, default to unix socket
defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
} else {
// If we do not have a host, default to TCP socket on Windows
defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort)
}
}
defaultHost, err := opts.ValidateHost(defaultHost)
if err != nil {
if *flDaemon {
logrus.Fatal(err)
} else {
fmt.Fprint(os.Stderr, err)
}
os.Exit(1)
}
flHosts = append(flHosts, defaultHost)
}
setDefaultConfFlag(flTrustKey, defaultTrustKeyFile)
// Regardless of whether the user sets it to true or false, if they
// specify --tlsverify at all then we need to turn on tls
if flag.IsSet("-tlsverify") {
*flTls = true
}
if *flDaemon {
if *flHelp {
flag.Usage()
return
}
mainDaemon()
return
}
// From here on, we assume we're a client, not a server.
if len(flHosts) > 1 {
fmt.Fprintf(os.Stderr, "Please specify only one -H")
os.Exit(0)
}
protoAddrParts := strings.SplitN(flHosts[0], "://", 2)
var tlsConfig *tls.Config
if *flTls {
tlsOptions.InsecureSkipVerify = !*flTlsVerify
if !flag.IsSet("-tlscert") {
if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) {
tlsOptions.CertFile = ""
}
}
if !flag.IsSet("-tlskey") {
if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) {
tlsOptions.KeyFile = ""
}
}
var err error
tlsConfig, err = tlsconfig.Client(tlsOptions)
if err != nil {
fmt.Fprintln(stderr, err)
os.Exit(1)
}
}
cli := client.NewDockerCli(stdin, stdout, stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], tlsConfig)
if err := cli.Cmd(flag.Args()...); err != nil {
if sterr, ok := err.(client.StatusError); ok {
if sterr.Status != "" {
fmt.Fprintln(cli.Err(), sterr.Status)
os.Exit(1)
}
os.Exit(sterr.StatusCode)
}
fmt.Fprintln(cli.Err(), err)
os.Exit(1)
}
}
func showVersion() {
if utils.ExperimentalBuild() {
fmt.Printf("Docker version %s, build %s, experimental\n", dockerversion.VERSION, dockerversion.GITCOMMIT)
} else {
fmt.Printf("Docker version %s, build %s\n", dockerversion.VERSION, dockerversion.GITCOMMIT)
}
}
| [
"\"DOCKER_HOST\""
]
| []
| [
"DOCKER_HOST"
]
| [] | ["DOCKER_HOST"] | go | 1 | 0 | |
imports.go | package main
import (
"encoding/json"
"flag"
"fmt"
"go/ast"
"go/build"
"go/parser"
"go/token"
"log"
"os"
"path/filepath"
"strings"
"github.com/fatih/color"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
)
var (
red = color.New(color.FgHiRed).SprintFunc()
blue = color.New(color.FgHiBlue).SprintFunc()
)
func main() {
log.SetFlags(log.Lshortfile | log.LstdFlags)
dir := flag.String("d", ".", "directory to search from")
out := flag.String("o", "text", "output format [text|json|yaml]")
flag.Parse()
dirs := []string{}
err := filepath.Walk(*dir, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
if f.IsDir() && !strings.Contains(path, "vendor") {
dirs = append(dirs, path)
}
return nil
})
if err != nil {
log.Fatalf("error walking directories: %v", red(err))
}
imports, err := getImports(dirs, srcDir())
if err != nil {
log.Fatalf("error collecting imports: %v", red(err))
}
if len(imports) > 0 {
print(imports, *out)
}
}
func getImports(dirs []string, gopath string) (imports map[string][]string, err error) {
imports = map[string][]string{}
var pkgImports []string
for _, dir := range dirs {
fs := token.NewFileSet()
nodes, err := parser.ParseDir(fs, dir, nil, parser.ImportsOnly)
if err != nil {
continue
}
for _, node := range nodes {
ast.Inspect(node, func(n ast.Node) bool {
imp, ok := n.(*ast.ImportSpec)
if ok {
pkgImports = append(pkgImports, strings.Trim(imp.Path.Value, `"`))
return true
}
return true
})
}
if len(pkgImports) > 0 {
fullPath, err := filepath.Abs(dir)
if err != nil {
return nil, errors.Wrap(err, "getting full path")
}
fullPath = strings.TrimPrefix(fullPath, gopath)
imports[fullPath] = pkgImports
}
pkgImports = []string{}
}
return
}
func print(imports map[string][]string, out string) {
switch strings.ToLower(out) {
case "text":
for k, v := range imports {
fmt.Println(k, blue(v))
}
case "json":
b, err := json.MarshalIndent(imports, "", " ")
if err != nil {
log.Fatalf("error marshalling: %v", red(err))
}
fmt.Println(string(b))
case "yaml":
b, err := yaml.Marshal(imports)
if err != nil {
log.Fatalf("error marshalling: %v", red(err))
}
fmt.Println(string(b))
}
}
func srcDir() string {
p := os.Getenv("GOPATH")
if p == "" {
p = build.Default.GOPATH
}
return filepath.Join(p, "src") + "/"
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
main.go | package main
import (
"fmt"
"log"
"math/rand"
"net/http"
"os"
"sync/atomic"
"time"
"github.com/DataDog/datadog-go/statsd"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var (
dd *statsd.Client
clients int32 = 0
request = prometheus.NewCounter(prometheus.CounterOpts{
Name: "http_request",
Help: "Number of request.",
})
latency = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "http_latency_seconds",
Help: "Request latency.",
})
activeclients = prometheus.NewGaugeFunc(
prometheus.GaugeOpts{
Name: "http_clients",
Help: "Number of active clients",
},
gauge,
)
)
func main() {
client, err := statsd.New(os.Getenv("DATADOG_HOST")+":8125",
statsd.WithNamespace("simpleserver.dd."),
)
if err != nil {
panic(err)
}
dd = client
prometheus.MustRegister(request)
prometheus.MustRegister(latency)
prometheus.MustRegister(activeclients)
http.Handle("/metrics", promhttp.Handler())
http.HandleFunc("/", handler)
s := &http.Server{
Addr: ":8080",
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
MaxHeaderBytes: 1 << 20,
}
fmt.Println("start server at :8080")
log.Fatal(s.ListenAndServe())
}
func handler(w http.ResponseWriter, r *http.Request) {
start := time.Now()
defer func() {
elapse := time.Since(start).Seconds()
request.Inc()
dd.Incr("http_request", []string{}, 1)
latency.Observe(elapse)
dd.Histogram("http_latency_seconds", elapse, []string{}, 1)
}()
sleep := (90 * time.Millisecond) + (time.Duration(rand.Int63n(20)) * time.Millisecond)
atomic.AddInt32(&clients, 1)
dd.Gauge("http_clients", gauge(), []string{}, 1)
time.Sleep(sleep)
atomic.AddInt32(&clients, -1)
dd.Gauge("http_clients", gauge(), []string{}, 1)
fmt.Fprintf(w, "Hello...\n")
}
func gauge() float64 {
return float64(atomic.LoadInt32(&clients))
}
| [
"\"DATADOG_HOST\""
]
| []
| [
"DATADOG_HOST"
]
| [] | ["DATADOG_HOST"] | go | 1 | 0 | |
main.go | package main
import (
"context"
"flag"
"os"
"github.com/hobbyfarm/gargantua/pkg/scheduledeventserver"
"github.com/hobbyfarm/gargantua/pkg/vmtemplateserver"
"net/http"
"sync"
"time"
"github.com/golang/glog"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/hobbyfarm/gargantua/pkg/accesscode"
"github.com/hobbyfarm/gargantua/pkg/authclient"
"github.com/hobbyfarm/gargantua/pkg/authserver"
hfClientset "github.com/hobbyfarm/gargantua/pkg/client/clientset/versioned"
hfInformers "github.com/hobbyfarm/gargantua/pkg/client/informers/externalversions"
"github.com/hobbyfarm/gargantua/pkg/controllers/dynamicbindcontroller"
"github.com/hobbyfarm/gargantua/pkg/controllers/scheduledevent"
"github.com/hobbyfarm/gargantua/pkg/controllers/session"
"github.com/hobbyfarm/gargantua/pkg/controllers/tfpcontroller"
"github.com/hobbyfarm/gargantua/pkg/controllers/vmclaimcontroller"
"github.com/hobbyfarm/gargantua/pkg/controllers/vmsetcontroller"
"github.com/hobbyfarm/gargantua/pkg/courseclient"
"github.com/hobbyfarm/gargantua/pkg/courseserver"
"github.com/hobbyfarm/gargantua/pkg/environmentserver"
"github.com/hobbyfarm/gargantua/pkg/scenarioclient"
"github.com/hobbyfarm/gargantua/pkg/scenarioserver"
"github.com/hobbyfarm/gargantua/pkg/sessionserver"
"github.com/hobbyfarm/gargantua/pkg/shell"
"github.com/hobbyfarm/gargantua/pkg/signals"
"github.com/hobbyfarm/gargantua/pkg/userserver"
"github.com/hobbyfarm/gargantua/pkg/vmclaimserver"
"github.com/hobbyfarm/gargantua/pkg/vmclient"
"github.com/hobbyfarm/gargantua/pkg/vmserver"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
const (
ClientGoQPS = 100
ClientGoBurst = 100
)
var (
localMasterUrl string
localKubeconfig string
disableControllers bool
shellServer bool
)
func init() {
flag.StringVar(&localKubeconfig, "kubeconfig", "", "Path to kubeconfig of local cluster. Only required if out-of-cluster.")
flag.StringVar(&localMasterUrl, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
flag.BoolVar(&disableControllers, "disablecontrollers", false, "Disable the controllers")
flag.BoolVar(&shellServer, "shellserver", false, "Be a shell server")
}
func main() {
//var signal chan struct{}
//signal = make(chan struct{})
stopCh := signals.SetupSignalHandler()
ctx := context.Background()
flag.Parse()
glog.V(2).Infof("Starting Gargantua")
r := mux.NewRouter()
cfg, err := rest.InClusterConfig()
if err != nil {
cfg, err = clientcmd.BuildConfigFromFlags(localMasterUrl, localKubeconfig)
if err != nil {
glog.Fatalf("Error building kubeconfig: %s", err.Error())
}
}
cfg.QPS = ClientGoQPS
cfg.Burst = ClientGoBurst
hfClient, err := hfClientset.NewForConfig(cfg)
if err != nil {
glog.Fatal(err)
}
kubeClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
glog.Fatalf("Error building kubernetes clientset: %s", err.Error())
}
hfInformerFactory := hfInformers.NewSharedInformerFactory(hfClient, time.Second*30)
authClient, err := authclient.NewAuthClient(hfClient, hfInformerFactory)
if err != nil {
glog.Fatal(err)
}
authServer, err := authserver.NewAuthServer(authClient, hfClient, ctx)
if err != nil {
glog.Fatal(err)
}
acClient, err := accesscode.NewAccessCodeClient(hfClient, ctx)
if err != nil {
glog.Fatal(err)
}
courseServer, err := courseserver.NewCourseServer(authClient, acClient, hfClient, hfInformerFactory, ctx)
if err != nil {
glog.Fatal(err)
}
courseClient, err := courseclient.NewCourseClient(courseServer)
if err != nil {
glog.Fatal(err)
}
scenarioServer, err := scenarioserver.NewScenarioServer(authClient, acClient, hfClient, hfInformerFactory, ctx)
if err != nil {
glog.Fatal(err)
}
scenarioClient, err := scenarioclient.NewScenarioClient(scenarioServer)
if err != nil {
glog.Fatal(err)
}
sessionServer, err := sessionserver.NewSessionServer(authClient, acClient, scenarioClient, courseClient, hfClient, hfInformerFactory, ctx)
if err != nil {
glog.Fatal(err)
}
vmServer, err := vmserver.NewVMServer(authClient, hfClient, hfInformerFactory)
if err != nil {
glog.Fatal(err)
}
vmClient, err := vmclient.NewVirtualMachineClient(vmServer)
if err != nil {
glog.Fatal(err)
}
vmClaimServer, err := vmclaimserver.NewVMClaimServer(authClient, hfClient, hfInformerFactory)
if err != nil {
glog.Fatal(err)
}
shellProxy, err := shell.NewShellProxy(authClient, vmClient, hfClient, kubeClient, ctx)
if err != nil {
glog.Fatal(err)
}
environmentServer, err := environmentserver.NewEnvironmentServer(authClient, hfClient, ctx)
if err != nil {
glog.Fatal(err)
}
scheduledEventServer, err := scheduledeventserver.NewScheduledEventServer(authClient, hfClient, ctx)
if err != nil {
glog.Fatal(err)
}
userServer, err := userserver.NewUserServer(authClient, hfClient, ctx)
if err != nil {
glog.Fatal(err)
}
vmTemplateServer, err := vmtemplateserver.NewVirtualMachineTemplateServer(authClient, hfClient, ctx)
if err != nil {
glog.Fatal(err)
}
if shellServer {
glog.V(2).Infof("Starting as a shell server")
shellProxy.SetupRoutes(r)
} else {
sessionServer.SetupRoutes(r)
authServer.SetupRoutes(r)
courseServer.SetupRoutes(r)
scenarioServer.SetupRoutes(r)
vmServer.SetupRoutes(r)
//shellProxy.SetupRoutes(r)
vmClaimServer.SetupRoutes(r)
environmentServer.SetupRoutes(r)
scheduledEventServer.SetupRoutes(r)
userServer.SetupRoutes(r)
vmTemplateServer.SetupRoutes(r)
}
corsHeaders := handlers.AllowedHeaders([]string{"Authorization", "Content-Type"})
corsOrigins := handlers.AllowedOrigins([]string{"*"})
corsMethods := handlers.AllowedMethods([]string{"GET", "POST", "PUT", "HEAD", "OPTIONS", "DELETE"})
/*
glog.V(6).Infof("Waiting for informers to synchronize")
if ok := cache.WaitForCacheSync(stopCh,
hfInformerFactory.Hobbyfarm().V1().Users().Informer().HasSynced,
hfInformerFactory.Hobbyfarm().V1().VirtualMachines().Informer().HasSynced,
hfInformerFactory.Hobbyfarm().V1().Sessions().Informer().HasSynced,
hfInformerFactory.Hobbyfarm().V1().Scenarios().Informer().HasSynced,
hfInformerFactory.Hobbyfarm().V1().VirtualMachineClaims().Informer().HasSynced,
hfInformerFactory.Hobbyfarm().V1().AccessCodes().Informer().HasSynced,
hfInformerFactory.Hobbyfarm().V1().VirtualMachineTemplates().Informer().HasSynced,
//hfInformerFactory.Hobbyfarm().V1().Environments().Informer().HasSynced,
hfInformerFactory.Hobbyfarm().V1().VirtualMachineSets().Informer().HasSynced,
); !ok {
glog.Fatalf("failed to wait for caches to sync")
}
glog.V(6).Infof("Informers have synchronized")
*/
http.Handle("/", r)
var wg sync.WaitGroup
if !disableControllers {
/*
environmentController, err := environment.NewEnvironmentController(hfClient, hfInformerFactory)
if err != nil {
glog.Fatal(err)
}
*/
glog.V(2).Infof("Starting controllers")
sessionController, err := session.NewSessionController(hfClient, hfInformerFactory, ctx)
if err != nil {
glog.Fatal(err)
}
scheduledEventController, err := scheduledevent.NewScheduledEventController(hfClient, hfInformerFactory, ctx)
if err != nil {
glog.Fatal(err)
}
vmClaimController, err := vmclaimcontroller.NewVMClaimController(hfClient, hfInformerFactory, ctx)
if err != nil {
glog.Fatal(err)
}
tfpController, err := tfpcontroller.NewTerraformProvisionerController(kubeClient, hfClient, hfInformerFactory, ctx)
if err != nil {
glog.Fatal(err)
}
vmSetController, err := vmsetcontroller.NewVirtualMachineSetController(hfClient, hfInformerFactory, ctx)
if err != nil {
glog.Fatal(err)
}
dynamicBindController, err := dynamicbindcontroller.NewDynamicBindController(hfClient, hfInformerFactory, ctx)
if err != nil {
glog.Fatal(err)
}
wg.Add(6)
/*
go func() {
defer wg.Done()
environmentController.Run(stopCh)
}()
*/
go func() {
defer wg.Done()
sessionController.Run(stopCh)
}()
go func() {
defer wg.Done()
scheduledEventController.Run(stopCh)
}()
go func() {
defer wg.Done()
vmClaimController.Run(stopCh)
}()
go func() {
defer wg.Done()
tfpController.Run(stopCh)
}()
go func() {
defer wg.Done()
vmSetController.Run(stopCh)
}()
go func() {
defer wg.Done()
dynamicBindController.Run(stopCh)
}()
}
hfInformerFactory.Start(stopCh)
wg.Add(1)
port := os.Getenv("PORT")
if port == "" {
port = "80"
}
glog.Info("listening on " + port)
go func() {
defer wg.Done()
glog.Fatal(http.ListenAndServe(":"+port, handlers.CORS(corsHeaders, corsOrigins, corsMethods)(r)))
}()
wg.Wait()
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
cmd/gateway/sia/gateway-sia.go | /*
* Minio Cloud Storage, (C) 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sia
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"time"
humanize "github.com/dustin/go-humanize"
"github.com/fatih/color"
"github.com/minio/cli"
"github.com/minio/minio-go/pkg/set"
minio "github.com/piensa/nodo/cmd"
"github.com/piensa/nodo/cmd/logger"
"github.com/piensa/nodo/pkg/auth"
"github.com/piensa/nodo/pkg/hash"
)
const (
siaBackend = "sia"
)
type siaObjects struct {
minio.GatewayUnsupported
Address string // Address and port of Sia Daemon.
TempDir string // Temporary storage location for file transfers.
RootDir string // Root directory to store files on Sia.
password string // Sia password for uploading content in authenticated manner.
}
func init() {
const siaGatewayTemplate = `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [SIA_DAEMON_ADDR]
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
ENVIRONMENT VARIABLES: (Default values in parentheses)
ACCESS:
MINIO_ACCESS_KEY: Custom access key (Do not reuse same access keys on all instances)
MINIO_SECRET_KEY: Custom secret key (Do not reuse same secret keys on all instances)
BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".
DOMAIN:
MINIO_DOMAIN: To enable virtual-host-style requests, set this value to Minio host domain name.
CACHE:
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ";".
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";".
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
MINIO_CACHE_MAXUSE: Maximum permitted usage of the cache in percentage (0-100).
SIA_TEMP_DIR: The name of the local Sia temporary storage directory. (.sia_temp)
SIA_API_PASSWORD: API password for Sia daemon. (default is empty)
EXAMPLES:
1. Start minio gateway server for Sia backend.
$ {{.HelpName}}
2. Start minio gateway server for Sia backend with edge caching enabled.
$ export MINIO_CACHE_DRIVES="/mnt/drive1;/mnt/drive2;/mnt/drive3;/mnt/drive4"
$ export MINIO_CACHE_EXCLUDE="bucket1/*;*.png"
$ export MINIO_CACHE_EXPIRY=40
$ export MINIO_CACHE_MAXUSE=80
$ {{.HelpName}}
`
minio.RegisterGatewayCommand(cli.Command{
Name: siaBackend,
Usage: "Sia Decentralized Cloud.",
Action: siaGatewayMain,
CustomHelpTemplate: siaGatewayTemplate,
HideHelpCommand: true,
})
}
// Handler for 'minio gateway sia' command line.
func siaGatewayMain(ctx *cli.Context) {
// Validate gateway arguments.
host := ctx.Args().First()
// Validate gateway arguments.
logger.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
minio.StartGateway(ctx, &Sia{host})
}
// Sia implements Gateway.
type Sia struct {
host string // Sia daemon host address
}
// Name implements Gateway interface.
func (g *Sia) Name() string {
return siaBackend
}
// NewGatewayLayer returns Sia gateway layer, implements ObjectLayer interface to
// talk to Sia backend.
func (g *Sia) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
sia := &siaObjects{
Address: g.host,
// RootDir uses access key directly, provides partitioning for
// concurrent users talking to same sia daemon.
RootDir: creds.AccessKey,
TempDir: os.Getenv("SIA_TEMP_DIR"),
password: os.Getenv("SIA_API_PASSWORD"),
}
// If Address not provided on command line or ENV, default to:
if sia.Address == "" {
sia.Address = "127.0.0.1:9980"
}
// If local Sia temp directory not specified, default to:
if sia.TempDir == "" {
sia.TempDir = ".sia_temp"
}
var err error
sia.TempDir, err = filepath.Abs(sia.TempDir)
if err != nil {
return nil, err
}
// Create the temp directory with proper permissions.
// Ignore error when dir already exists.
if err = os.MkdirAll(sia.TempDir, 0700); err != nil {
return nil, err
}
colorBlue := color.New(color.FgBlue).SprintfFunc()
colorBold := color.New(color.Bold).SprintFunc()
formatStr := "%" + fmt.Sprintf("%ds", len(sia.Address)+7)
logger.StartupMessage(colorBlue("\nSia Configuration:"))
logger.StartupMessage(colorBlue(" API Address:") + colorBold(fmt.Sprintf(formatStr, sia.Address)))
logger.StartupMessage(colorBlue(" Staging Directory:") + colorBold(fmt.Sprintf(" %s", sia.TempDir)))
return sia, nil
}
// Production - sia gateway is not ready for production use.
func (g *Sia) Production() bool {
return false
}
// non2xx returns true for non-success HTTP status codes.
func non2xx(code int) bool {
return code < 200 || code > 299
}
// decodeError returns the api.Error from an API response. This method should
// only be called if the response's status code is non-2xx. The error returned
// may not be of type api.Error in the event of an error unmarshalling the
// JSON.
type siaError struct {
// Message describes the error in English. Typically it is set to
// `err.Error()`. This field is required.
Message string `json:"message"`
}
func (s siaError) Error() string {
return s.Message
}
func decodeError(resp *http.Response) error {
// Error is a type that is encoded as JSON and returned in an API response in
// the event of an error. Only the Message field is required. More fields may
// be added to this struct in the future for better error reporting.
var apiErr siaError
if err := json.NewDecoder(resp.Body).Decode(&apiErr); err != nil {
return err
}
return apiErr
}
// MethodNotSupported is returned when the Sia daemon does not recognize the API call.
type MethodNotSupported struct {
method string
}
func (s MethodNotSupported) Error() string {
return fmt.Sprintf("API call not recognized: %s", s.method)
}
// apiGet wraps a GET request with a status code check, such that if the GET does
// not return 2xx, the error will be read and returned. The response body is
// not closed.
func apiGet(ctx context.Context, addr, call, apiPassword string) (*http.Response, error) {
req, err := http.NewRequest("GET", "http://"+addr+call, nil)
if err != nil {
logger.LogIf(ctx, err)
return nil, err
}
req.Header.Set("User-Agent", "Sia-Agent")
if apiPassword != "" {
req.SetBasicAuth("", apiPassword)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
logger.LogIf(ctx, err)
return nil, err
}
if resp.StatusCode == http.StatusNotFound {
resp.Body.Close()
logger.LogIf(ctx, MethodNotSupported{call})
return nil, MethodNotSupported{call}
}
if non2xx(resp.StatusCode) {
err := decodeError(resp)
resp.Body.Close()
logger.LogIf(ctx, err)
return nil, err
}
return resp, nil
}
// apiPost wraps a POST request with a status code check, such that if the POST
// does not return 2xx, the error will be read and returned. The response body
// is not closed.
func apiPost(ctx context.Context, addr, call, vals, apiPassword string) (*http.Response, error) {
req, err := http.NewRequest("POST", "http://"+addr+call, strings.NewReader(vals))
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", "Sia-Agent")
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
if apiPassword != "" {
req.SetBasicAuth("", apiPassword)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
logger.LogIf(ctx, err)
return nil, err
}
if resp.StatusCode == http.StatusNotFound {
resp.Body.Close()
return nil, MethodNotSupported{call}
}
if non2xx(resp.StatusCode) {
err := decodeError(resp)
resp.Body.Close()
return nil, err
}
return resp, nil
}
// post makes an API call and discards the response. An error is returned if
// the response status is not 2xx.
func post(ctx context.Context, addr, call, vals, apiPassword string) error {
resp, err := apiPost(ctx, addr, call, vals, apiPassword)
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// list lists all the uploaded files and decodes the JSON response.
func list(ctx context.Context, addr string, apiPassword string, obj *renterFiles) error {
resp, err := apiGet(ctx, addr, "/renter/files", apiPassword)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNoContent {
logger.LogIf(ctx, fmt.Errorf("Expecting a response, but API returned %s", resp.Status))
return fmt.Errorf("Expecting a response, but API returned %s", resp.Status)
}
err = json.NewDecoder(resp.Body).Decode(obj)
logger.LogIf(ctx, err)
return err
}
// get makes an API call and discards the response. An error is returned if the
// response status is not 2xx.
func get(ctx context.Context, addr, call, apiPassword string) error {
resp, err := apiGet(ctx, addr, call, apiPassword)
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// Shutdown saves any gateway metadata to disk
// if necessary and reloads it upon the next restart.
func (s *siaObjects) Shutdown(ctx context.Context) error {
return nil
}
// StorageInfo is not relevant to Sia backend.
func (s *siaObjects) StorageInfo(ctx context.Context) (si minio.StorageInfo) {
return si
}
// MakeBucket creates a new container on Sia backend.
func (s *siaObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
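// A bucket is represented on Sia by an empty placeholder object uploaded at
// <RootDir>/<bucket>/<sha256(bucket)>; creating that object below is what
// effectively "creates" the bucket.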
srcFile := path.Join(s.TempDir, minio.MustGetUUID())
defer os.Remove(srcFile)
writer, err := os.Create(srcFile)
if err != nil {
return err
}
if _, err = io.Copy(writer, bytes.NewReader([]byte(""))); err != nil {
return err
}
sha256sum := sha256.Sum256([]byte(bucket))
var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:]))
return post(ctx, s.Address, "/renter/upload/"+siaObj, "source="+srcFile, s.password)
}
// GetBucketInfo gets bucket metadata.
func (s *siaObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, err error) {
sha256sum := sha256.Sum256([]byte(bucket))
var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:]))
dstFile := path.Join(s.TempDir, minio.MustGetUUID())
defer os.Remove(dstFile)
if err := get(ctx, s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil {
return bi, err
}
return minio.BucketInfo{Name: bucket}, nil
}
// ListBuckets will detect and return existing buckets on Sia.
func (s *siaObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) {
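// Buckets are inferred from the renter file listing: each object path under
// RootDir is inspected and its first path segment is collected as a bucket name.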
sObjs, serr := s.listRenterFiles(ctx, "")
if serr != nil {
return buckets, serr
}
m := make(set.StringSet)
prefix := s.RootDir + "/"
for _, sObj := range sObjs {
if strings.HasPrefix(sObj.SiaPath, prefix) {
siaObj := strings.TrimPrefix(sObj.SiaPath, prefix)
idx := strings.Index(siaObj, "/")
if idx > 0 {
m.Add(siaObj[0:idx])
}
}
}
for _, bktName := range m.ToSlice() {
buckets = append(buckets, minio.BucketInfo{
Name: bktName,
Created: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
})
}
return buckets, nil
}
// DeleteBucket deletes a bucket on Sia.
func (s *siaObjects) DeleteBucket(ctx context.Context, bucket string) error {
sha256sum := sha256.Sum256([]byte(bucket))
var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:]))
return post(ctx, s.Address, "/renter/delete/"+siaObj, "", s.password)
}
func (s *siaObjects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
siaObjs, siaErr := s.listRenterFiles(ctx, bucket)
if siaErr != nil {
return loi, siaErr
}
loi.IsTruncated = false
loi.NextMarker = ""
root := s.RootDir + "/"
sha256sum := sha256.Sum256([]byte(bucket))
// FIXME(harsha) - No paginated output supported for Sia backend right now, only prefix
// based filtering. Once list renter files API supports paginated output we can support
// paginated results here as well - until then Listing is an expensive operation.
for _, sObj := range siaObjs {
name := strings.TrimPrefix(sObj.SiaPath, path.Join(root, bucket)+"/")
// Skip the file created specially when bucket was created.
if name == hex.EncodeToString(sha256sum[:]) {
continue
}
if strings.HasPrefix(name, prefix) {
loi.Objects = append(loi.Objects, minio.ObjectInfo{
Bucket: bucket,
Name: name,
Size: int64(sObj.Filesize),
IsDir: false,
})
}
}
return loi, nil
}
func (s *siaObjects) GetObject(ctx context.Context, bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
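// The object is first downloaded in full to a local temp file via the renter
// API; the requested byte range is then seeked to and streamed from that file.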
dstFile := path.Join(s.TempDir, minio.MustGetUUID())
defer os.Remove(dstFile)
var siaObj = path.Join(s.RootDir, bucket, object)
if err := get(ctx, s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil {
return err
}
reader, err := os.Open(dstFile)
if err != nil {
return err
}
defer reader.Close()
st, err := reader.Stat()
if err != nil {
return err
}
size := st.Size()
if _, err = reader.Seek(startOffset, os.SEEK_SET); err != nil {
return err
}
// For negative length we read everything.
if length < 0 {
length = size - startOffset
}
bufSize := int64(1 * humanize.MiByte)
if bufSize > length {
bufSize = length
}
// Reply back invalid range if the input offset and length fall out of range.
if startOffset > size || startOffset+length > size {
logger.LogIf(ctx, minio.InvalidRange{
OffsetBegin: startOffset,
OffsetEnd: length,
ResourceSize: size,
})
return minio.InvalidRange{
OffsetBegin: startOffset,
OffsetEnd: length,
ResourceSize: size,
}
}
// Allocate a staging buffer.
buf := make([]byte, int(bufSize))
_, err = io.CopyBuffer(writer, io.LimitReader(reader, length), buf)
return err
}
// findSiaObject retrieves the siaObjectInfo for the Sia object with the given
// Sia path name.
func (s *siaObjects) findSiaObject(ctx context.Context, bucket, object string) (siaObjectInfo, error) {
siaPath := path.Join(s.RootDir, bucket, object)
sObjs, err := s.listRenterFiles(ctx, "")
if err != nil {
return siaObjectInfo{}, err
}
for _, sObj := range sObjs {
if sObj.SiaPath == siaPath {
return sObj, nil
}
}
logger.LogIf(ctx, minio.ObjectNotFound{
Bucket: bucket,
Object: object,
})
return siaObjectInfo{}, minio.ObjectNotFound{
Bucket: bucket,
Object: object,
}
}
// GetObjectInfo reads object info and replies back ObjectInfo
func (s *siaObjects) GetObjectInfo(ctx context.Context, bucket string, object string) (minio.ObjectInfo, error) {
so, err := s.findSiaObject(ctx, bucket, object)
if err != nil {
return minio.ObjectInfo{}, err
}
// Metadata about Sia objects is quite minimal: Sia only provides the file size.
return minio.ObjectInfo{
Bucket: bucket,
Name: object,
ModTime: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
Size: int64(so.Filesize),
IsDir: false,
}, nil
}
// PutObject creates a new object with the incoming data.
func (s *siaObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
srcFile := path.Join(s.TempDir, minio.MustGetUUID())
writer, err := os.Create(srcFile)
if err != nil {
return objInfo, err
}
wsize, err := io.CopyN(writer, data, data.Size())
if err != nil {
os.Remove(srcFile)
return objInfo, err
}
if err = post(ctx, s.Address, "/renter/upload/"+path.Join(s.RootDir, bucket, object), "source="+srcFile, s.password); err != nil {
os.Remove(srcFile)
return objInfo, err
}
defer s.deleteTempFileWhenUploadCompletes(ctx, srcFile, bucket, object)
return minio.ObjectInfo{
Name: object,
Bucket: bucket,
ModTime: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC),
Size: wsize,
ETag: minio.GenETag(),
}, nil
}
// DeleteObject deletes a blob in bucket
func (s *siaObjects) DeleteObject(ctx context.Context, bucket string, object string) error {
// Tell Sia daemon to delete the object
var siaObj = path.Join(s.RootDir, bucket, object)
return post(ctx, s.Address, "/renter/delete/"+siaObj, "", s.password)
}
// siaObjectInfo represents object info stored on Sia
type siaObjectInfo struct {
SiaPath string `json:"siapath"`
LocalPath string `json:"localpath"`
Filesize uint64 `json:"filesize"`
Available bool `json:"available"`
Renewing bool `json:"renewing"`
Redundancy float64 `json:"redundancy"`
UploadProgress float64 `json:"uploadprogress"`
}
type renterFiles struct {
Files []siaObjectInfo `json:"files"`
}
// listRenterFiles will return a list of existing objects in the bucket provided
func (s *siaObjects) listRenterFiles(ctx context.Context, bucket string) (siaObjs []siaObjectInfo, err error) {
// Get list of all renter files
var rf renterFiles
if err = list(ctx, s.Address, s.password, &rf); err != nil {
return siaObjs, err
}
var prefix string
root := s.RootDir + "/"
if bucket == "" {
prefix = root
} else {
prefix = root + bucket + "/"
}
for _, f := range rf.Files {
if strings.HasPrefix(f.SiaPath, prefix) {
siaObjs = append(siaObjs, f)
}
}
return siaObjs, nil
}
// deleteTempFileWhenUploadCompletes checks the status of a Sia file upload
// until it reaches 100% upload progress, then deletes the local temp copy from
// the filesystem.
func (s *siaObjects) deleteTempFileWhenUploadCompletes(ctx context.Context, tempFile string, bucket, object string) {
var soi siaObjectInfo
// Wait until 100% upload instead of 1x redundancy because if we delete
// after 1x redundancy, the user has to pay the cost of other hosts
// redistributing the file.
for soi.UploadProgress < 100.0 {
var err error
soi, err = s.findSiaObject(ctx, bucket, object)
if err != nil {
break
}
// Sleep between each check so that we're not hammering
// the Sia daemon with requests.
time.Sleep(15 * time.Second)
}
os.Remove(tempFile)
}
| [
"\"SIA_TEMP_DIR\"",
"\"SIA_API_PASSWORD\""
]
| []
| [
"SIA_API_PASSWORD",
"SIA_TEMP_DIR"
]
| [] | ["SIA_API_PASSWORD", "SIA_TEMP_DIR"] | go | 2 | 0 | |
tests/python/cycles_render_tests.py | #!/usr/bin/env python3
# Apache License, Version 2.0
import argparse
import os
import shlex
import shutil
import subprocess
import sys
def get_arguments(filepath, output_filepath):
dirname = os.path.dirname(filepath)
basedir = os.path.dirname(dirname)
subject = os.path.basename(dirname)
args = [
"--background",
"-noaudio",
"--factory-startup",
"--enable-autoexec",
filepath,
"-E", "CYCLES",
"-o", output_filepath,
"-F", "PNG"]
# OSL and GPU examples
# custom_args += ["--python-expr", "import bpy; bpy.context.scene.cycles.shading_system = True"]
# custom_args += ["--python-expr", "import bpy; bpy.context.scene.cycles.device = 'GPU'"]
custom_args = os.getenv('CYCLESTEST_ARGS')
if custom_args:
args.extend(shlex.split(custom_args))
if subject == 'bake':
args.extend(['--python', os.path.join(basedir, "util", "render_bake.py")])
elif subject == 'denoise_animation':
args.extend(['--python', os.path.join(basedir, "util", "render_denoise.py")])
else:
args.extend(["-f", "1"])
return args
def create_argparse():
parser = argparse.ArgumentParser()
parser.add_argument("-blender", nargs="+")
parser.add_argument("-testdir", nargs=1)
parser.add_argument("-outdir", nargs=1)
parser.add_argument("-idiff", nargs=1)
return parser
def main():
parser = create_argparse()
args = parser.parse_args()
blender = args.blender[0]
test_dir = args.testdir[0]
idiff = args.idiff[0]
output_dir = args.outdir[0]
from modules import render_report
report = render_report.Report("Cycles", output_dir, idiff)
report.set_pixelated(True)
report.set_reference_dir("cycles_renders")
report.set_compare_engines('cycles', 'eevee')
ok = report.run(test_dir, blender, get_arguments, batch=True)
sys.exit(not ok)
if __name__ == "__main__":
main()
| []
| []
| [
"CYCLESTEST_ARGS"
]
| [] | ["CYCLESTEST_ARGS"] | python | 1 | 0 | |
actions/app_test.go | package actions
import (
"os"
"testing"
)
// TestInit
func TestInit(t *testing.T) {
os.Setenv("GO_ENV", "development")
ENV = os.Getenv("GO_ENV")
}
// TestApp
func TestApp(t *testing.T) {
}
// TestSetTemplate
func TestSetTemplate(t *testing.T) {
}
| [
"\"GO_ENV\""
]
| []
| [
"GO_ENV"
]
| [] | ["GO_ENV"] | go | 1 | 0 | |
test/e2e/e2e.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"os"
"testing"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
"k8s.io/component-base/logs"
// required
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/ingress-nginx/test/e2e/framework"
// tests to run
_ "k8s.io/ingress-nginx/test/e2e/admission"
_ "k8s.io/ingress-nginx/test/e2e/annotations"
_ "k8s.io/ingress-nginx/test/e2e/dbg"
_ "k8s.io/ingress-nginx/test/e2e/defaultbackend"
_ "k8s.io/ingress-nginx/test/e2e/gracefulshutdown"
_ "k8s.io/ingress-nginx/test/e2e/ingress"
_ "k8s.io/ingress-nginx/test/e2e/leaks"
_ "k8s.io/ingress-nginx/test/e2e/loadbalance"
_ "k8s.io/ingress-nginx/test/e2e/lua"
_ "k8s.io/ingress-nginx/test/e2e/security"
_ "k8s.io/ingress-nginx/test/e2e/servicebackend"
_ "k8s.io/ingress-nginx/test/e2e/settings"
_ "k8s.io/ingress-nginx/test/e2e/settings/ocsp"
_ "k8s.io/ingress-nginx/test/e2e/ssl"
_ "k8s.io/ingress-nginx/test/e2e/status"
_ "k8s.io/ingress-nginx/test/e2e/tcpudp"
)
// RunE2ETests checks configuration parameters (specified through flags) and then runs
// E2E tests using the Ginkgo runner.
func RunE2ETests(t *testing.T) {
logs.InitLogs()
defer logs.FlushLogs()
// Disable skipped tests unless they are explicitly requested.
if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]`
}
if os.Getenv("KUBECTL_PATH") != "" {
framework.KubectlPath = os.Getenv("KUBECTL_PATH")
framework.Logf("Using kubectl path '%s'", framework.KubectlPath)
}
framework.Logf("Starting e2e run %q on Ginkgo node %d", framework.RunID, config.GinkgoConfig.ParallelNode)
ginkgo.RunSpecs(t, "nginx-ingress-controller e2e suite")
}
| [
"\"KUBECTL_PATH\"",
"\"KUBECTL_PATH\""
]
| []
| [
"KUBECTL_PATH"
]
| [] | ["KUBECTL_PATH"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'locallibrary.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
shelltools/backup.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# backup.py
#
# (c) 2015 Mike Chaberski
#
# MIT License
from __future__ import with_statement
import csv
import shutil
import sys
import os.path
import tempfile
import time
import logging
from optparse import OptionParser
import traceback
import platform
_log = logging.getLogger('backup')
ERR_FILE_ALREADY_EXISTS = 200
ERR_TEMP_PATHNAME_USED = 201
ERR_UNDEFINED = 202
_BACKUP_CONF_FILENAME = ".backup.conf"
_SITE_BACKUP_CONF_FILENAME = "backup.conf"
_USER_BACKUP_CONF_FILENAME = "backup.conf"
_OPTS_NOT_IMPLEMENTED = ("keep_extension",)  # one-element tuple of option names not yet implemented
_SITE_CONFIG_DIR = "/etc"
_USER_HOME_DIR = os.getenv('USERPROFILE') or os.getenv('HOME')
_PLATFORM_UNAME = platform.uname()
_PLATFORM_OS = _PLATFORM_UNAME[0]
_STAGE_SITE = 1
_STAGE_USER = 2
_STAGE_DIRECTORY = 3
_STAGE_COMMAND = 4
_CONFIG_STAGES = (_STAGE_SITE, _STAGE_USER, _STAGE_DIRECTORY, _STAGE_COMMAND)
_ALL_OPTS = ("strict", "stacktraces", "destdir", "log_level", "logfile",
"overwrite", "config_file", "stamp", "tag", "temp")
_LOG_LEVEL_CHOICES = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
if _PLATFORM_OS == 'Windows':
raise NotImplementedError("Windows support is not yet implemented")
_USER_CONFIG_DIR = os.path.join(_USER_HOME_DIR, '.config', _BACKUP_CONF_FILENAME)
def _get_progname(parser=None):
progname = os.path.basename(sys.argv[0]) or 'backup'
if parser is not None:
progname = parser.prog or progname
return progname
def _check_src_pathname(src_pathname, parser):
if not os.path.isfile(src_pathname):
#parser.error("source pathname must exist and be a file")
raise IOError("%s: source pathname must exist and be a file: %s" % (_get_progname(parser), src_pathname))
def _create_filename(options, srcname, stampfmt="%Y%m%d-%H%M"):
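# Builds the backup filename from the source name plus an optional tag and/or
# timestamp, e.g. "notes.txt-20240101-1200" or "notes.txt-nightly-20240101-1200"
# (example values; actual output depends on the tag and stampfmt in use).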
tag, stamp = None, None
if options.tag is not None:
tag = options.tag
if options.stamp is not None and options.stamp:
stamp = time.strftime(stampfmt)
if tag is None and (stamp is None or not stamp):
raise ValueError("stamp or tag is required, but neither is specified")
if tag is None and stamp:
filename = "%s-%s" % (srcname, stamp)
elif tag is not None and not stamp:
filename = "%s-%s" % (srcname, tag)
elif tag is not None and stamp:
filename = "%s-%s-%s" % (srcname, tag, stamp)
assert filename is not None
return filename
def _override(defValues, newValues):
oplist = _ALL_OPTS
for k in newValues.__dict__.keys():
v = newValues.__dict__[k]
if k in oplist and v is not None:
defValues.__dict__[k] = v
return defValues
def _eval_level(option, opt_str, value, parser):
parser.values.log_level = eval('logging.' + value)
def _config_logging(options):
"""Configures logging based on an options object. The options object
must be one that was created from a parser passed to the
_add_logging_options function.
"""
if options.log_level is None:
options.log_level = logging.INFO
logging.basicConfig(filename=options.logfile, level=options.log_level)
def _add_logging_options(parser):
"""Add log destination and log level options to a parser. The log
level option sets the log_level attribute of the options object
returned by parser.parse_args() to a logging.LEVEL value (not a
string), and has default value logging.INFO.
"""
parser.add_option("-L", "--logfile", dest="logfile",
metavar="PATHNAME",
action="store",
help="print log messages to specified file instead" +
" of standard error")
parser.add_option("-l", "--log-level", dest="log_level",
metavar="LEVEL",
nargs=1,
action="callback",
type="str",
callback=_eval_level,
help="set log level to one of " +
str(_LOG_LEVEL_CHOICES))
def _check_options(parser, options):
# stamp must be set to False explicitly
if options.stamp is None: options.stamp = True
if not options.stamp and options.tag is None:
parser.error("backup must be stamped or tagged, but neither is specified")
if (options.strict and options.overwrite):
parser.error("overwrite and strict options cannot be used together")
allopts = set(_ALL_OPTS)
flagged = set()
for k in options.__dict__.keys():
if k in allopts and options.__dict__[k] is not None and k in _OPTS_NOT_IMPLEMENTED:
raise NotImplementedError("options not yet implemented: " + str(_OPTS_NOT_IMPLEMENTED))
def _configure(argv):
"""Create a parser and parse command line arguments.
"""
parser = OptionParser(version="%prog 0.3", usage="""
%prog [OPTIONS] PATHNAME
Makes a copy of a file. The filename of the copy contains the current
timestamp by default. This is the 'ideal' destination filename.
The program first creates a temporary file with a guaranteed unique name. It
then copies from the source to the temporary file and tries to rename the temp
file to the ideal destination name.
- In default mode, if a file or directory already exists at the ideal
destination name, the temporary file is left in the destination directory,
so the backup exists even though it's not at the expected filename, and the
program exits with a nonzero code.
- In strict mode, the temporary file is removed and the program exits with a
nonzero code.
- In overwrite mode, the existing file is overwritten with the copied file.
Overwrite mode conflicts with strict mode, so the two options cannot be used
simultaneously.""")
_add_logging_options(parser)
parser.add_option("-s", "--strict", action="store_true",
help="fail if file already exists at target pathname")
parser.add_option("-d", "--dir", action="store", dest="destdir",
help="set destination directory to DIRNAME", metavar="DIRNAME")
parser.add_option("-w", "--overwrite", action="store_true",
help="if file already exists at target pathname, overwrite it")
parser.add_option("-f", "--config-file", metavar="PATHNAME",
help="use options defined in file at PATHNAME")
parser.add_option("-S", "--no-stamp", action="store_false", dest="stamp",
help="do not use a timestamp (requires -t)")
parser.add_option("-t", "--tag", metavar="TAG", action="store",
help="use TAG before timestamp (or in place of: see -S)")
parser.add_option("-m", "--temp", action="store_true",
help="write backup file to temp directory")
parser.add_option("-E", "--stacktraces", action="store_true",
dest="stacktraces", help="print stacktrace on exception")
parser.add_option("-k", "--keep-extension", action="store_true",
help="keep filename extension (insert suffix as infix)")
# First pass parses command line, because some options may direct our
# search for other configuration files.
cmdline_options, args = parser.parse_args(argv)
if len(args) != 1: parser.error("source file argument required")
# Now go through the real configuration stages
stage_options = cmdline_options
for stage in _CONFIG_STAGES:
old_options = stage_options
stage_options, args = _parse_config(stage, argv, parser, old_options,
cmdline_options, args)
options = _override(old_options, stage_options)
#print >> sys.stderr, "STDERR:backup: final options:", str(options)
#~ print >> sys.stderr, "post-override options:", str(options)
#~ _log.debug(" options: %s" % str(options))
return parser, options, args
def _find_backup_conf(stage, parser, cmdline_options, src_pathname):
"""Get the pathname of the configuration file to use. Return None if
no configuration file is specified or present.
The options argument must be the command line options, because those
may direct how the configuration file is found. For example, if it's the
directory stage, the command line options may specify what the name of
the configuration file is. The options are not used for any other stage.
"""
if stage == _STAGE_DIRECTORY:
if cmdline_options.config_file is None:
backup_conf_pathname = os.path.join(os.path.dirname(src_pathname), _BACKUP_CONF_FILENAME)
if not os.path.isfile(backup_conf_pathname):
backup_conf_pathname = None
elif os.path.isdir(cmdline_options.config_file):
backup_conf_pathname = os.path.join(cmdline_options.config_file, _BACKUP_CONF_FILENAME)
elif os.path.isfile(cmdline_options.config_file):
backup_conf_pathname = cmdline_options.config_file
if cmdline_options.config_file is not None and backup_conf_pathname is None:
parser.error("backup: configuration file specified but not present: %s" % backup_conf_pathname)
elif stage == _STAGE_SITE:
backup_conf_pathname = os.path.join(_SITE_CONFIG_DIR, _SITE_BACKUP_CONF_FILENAME)
elif stage == _STAGE_USER:
backup_conf_pathname = os.path.join(_USER_CONFIG_DIR, _USER_BACKUP_CONF_FILENAME)
if backup_conf_pathname is not None and not os.path.isfile(backup_conf_pathname):
backup_conf_pathname = None
return backup_conf_pathname
def _parse_config_file(stage, parser, options, args, cfgfile_pathname):
"""Parse a configuration file. If a file exists at the specified pathname,
then a new options object is returned. Otherwise, the same options object
is returned.
"""
if cfgfile_pathname is None:
return options, args
#print >> sys.stderr, ("STDERR:backup: parsing config file: %s" % cfgfile_pathname)
if os.path.getsize(cfgfile_pathname) == 0: # empty file
return options, args
if stage == _STAGE_DIRECTORY:
allargs = list()
with open(cfgfile_pathname, 'r') as cfile:
reader = csv.reader(cfile, delimiter=' ')
for row in reader:
allargs += row
allargs += args
options, args = parser.parse_args(allargs)
else:
raise NotImplementedError("parsing of site/user config files not yet implemented")
# For all configuration files, the directory argument, if non-absolute,
# is relative to the source file's directory, not the current directory
if options.destdir is not None and not os.path.isabs(options.destdir):
options.destdir = os.path.join(os.path.dirname(args[0]), options.destdir)
return options, args
def _parse_config(stage, argv, parser, options_base, cmdline_options, args):
"""Get the (options, args) tuple for a configuration stage. Either parse
configuration file options or command line options. Assume the
argument options object is the defaults that should be
overridden with new option values defined by this latest stage.
"""
if stage not in _CONFIG_STAGES:
raise ValueError("invalid config stage: " + stage)
src_pathname = args[0]
# No need to re-parse the command line options here
if stage == _STAGE_COMMAND:
# cmdline_options, args = parser.parse_args(argv)
return cmdline_options, args
else:
cfgfile_pathname = _find_backup_conf(stage, parser, cmdline_options, src_pathname)
return _parse_config_file(stage, parser, options_base, args, cfgfile_pathname)
def _do_copy(src_pathname, dest_pathname, options):
_log.debug(" ideal destination: " + dest_pathname)
if options.strict and os.path.exists(dest_pathname):
_log.error(" backup NOT created because file already exists: " + dest_pathname)
sys.exit(ERR_FILE_ALREADY_EXISTS)
#~ print >> sys.stderr, "destdir:", options.destdir
#~ print >> sys.stderr, "dest_pathname:", dest_pathname
dest_basename = os.path.basename(dest_pathname)
fd, temp_pathname = tempfile.mkstemp(prefix=dest_basename, dir=options.destdir)
_log.debug(" created temp file: " + temp_pathname)
with os.fdopen(fd, 'wb') as ofile:
with open(src_pathname, 'rb') as ifile:
shutil.copyfileobj(ifile, ofile)
shutil.copymode(src_pathname, temp_pathname)
_log.debug(" source copied to temp file")
if os.path.exists(dest_pathname):
_log.debug(" file already exists at ideal target pathname")
if options.strict:
os.remove(temp_pathname)
_log.error("backup NOT created because file already exists: " + dest_pathname)
return ERR_FILE_ALREADY_EXISTS
elif not options.overwrite:
_log.info(" temp filename used for backup instead: " + temp_pathname)
return ERR_TEMP_PATHNAME_USED
os.rename(temp_pathname, dest_pathname)
_log.debug(" renamed temp file: " + dest_pathname)
return 0
def main():
parser, options, args = _configure(sys.argv[1:])
ret = ERR_UNDEFINED
try:
_config_logging(options)
_check_options(parser, options)
src_pathname = args[0]
_check_src_pathname(src_pathname, parser)
src_dirname, src_filename = os.path.split(src_pathname)
dest_filename = _create_filename(options, src_filename)
if options.destdir is None and not options.temp:
options.destdir = src_dirname
elif options.temp:
options.destdir = tempfile.gettempdir()
_log.debug(" using backup directory: %s" % options.destdir)
dest_pathname = os.path.join(options.destdir, dest_filename)
ret = _do_copy(src_pathname, dest_pathname, options)
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
if options.stacktraces:
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
else:
print(traceback.format_exc().splitlines()[-1], file=sys.stderr)
return ret
| []
| []
| [
"HOME",
"USERPROFILE"
]
| [] | ["HOME", "USERPROFILE"] | python | 2 | 0 | |
rbd/mirror_test.go | // +build !nautilus
// Initially, we're only providing mirroring related functions for octopus as
// that version of ceph deprecated a number of the functions in nautilus. If
// you need mirroring on an earlier supported version of ceph please file an
// issue in our tracker.
package rbd
import (
"fmt"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestGetMirrorMode(t *testing.T) {
conn := radosConnect(t)
poolName := GetUUID()
err := conn.MakePool(poolName)
require.NoError(t, err)
defer func() {
assert.NoError(t, conn.DeletePool(poolName))
conn.Shutdown()
}()
ioctx, err := conn.OpenIOContext(poolName)
assert.NoError(t, err)
defer func() {
ioctx.Destroy()
}()
t.Run("mirrorModeDisabled", func(t *testing.T) {
m, err := GetMirrorMode(ioctx)
assert.NoError(t, err)
assert.Equal(t, m, MirrorModeDisabled)
})
t.Run("mirrorModeEnabled", func(t *testing.T) {
err = SetMirrorMode(ioctx, MirrorModeImage)
require.NoError(t, err)
m, err := GetMirrorMode(ioctx)
assert.NoError(t, err)
assert.Equal(t, m, MirrorModeImage)
})
t.Run("ioctxNil", func(t *testing.T) {
assert.Panics(t, func() {
GetMirrorMode(nil)
})
})
}
func TestMirroring(t *testing.T) {
conn := radosConnect(t)
poolName := GetUUID()
err := conn.MakePool(poolName)
require.NoError(t, err)
defer func() {
assert.NoError(t, conn.DeletePool(poolName))
conn.Shutdown()
}()
ioctx, err := conn.OpenIOContext(poolName)
assert.NoError(t, err)
defer func() {
ioctx.Destroy()
}()
// verify that mirroring is not enabled on this new pool
m, err := GetMirrorMode(ioctx)
assert.NoError(t, err)
assert.Equal(t, m, MirrorModeDisabled)
// enable per-image mirroring for this pool
err = SetMirrorMode(ioctx, MirrorModeImage)
require.NoError(t, err)
name1 := GetUUID()
options := NewRbdImageOptions()
assert.NoError(t,
options.SetUint64(ImageOptionOrder, uint64(testImageOrder)))
err = CreateImage(ioctx, name1, testImageSize, options)
require.NoError(t, err)
t.Run("enableDisable", func(t *testing.T) {
img, err := OpenImage(ioctx, name1, NoSnapshot)
assert.NoError(t, err)
defer func() {
assert.NoError(t, img.Close())
}()
err = img.MirrorEnable(ImageMirrorModeSnapshot)
assert.NoError(t, err)
mode, err := img.GetImageMirrorMode()
assert.NoError(t, err)
assert.Equal(t, mode, ImageMirrorModeSnapshot)
err = img.MirrorDisable(false)
assert.NoError(t, err)
})
t.Run("enableDisableInvalid", func(t *testing.T) {
img, err := OpenImage(ioctx, name1, NoSnapshot)
assert.NoError(t, err)
assert.NoError(t, img.Close())
err = img.MirrorEnable(ImageMirrorModeSnapshot)
assert.Error(t, err)
err = img.MirrorDisable(false)
assert.Error(t, err)
_, err = img.GetImageMirrorMode()
assert.Error(t, err)
})
t.Run("promoteDemote", func(t *testing.T) {
img, err := OpenImage(ioctx, name1, NoSnapshot)
assert.NoError(t, err)
defer func() {
assert.NoError(t, img.Close())
}()
err = img.MirrorEnable(ImageMirrorModeSnapshot)
assert.NoError(t, err)
err = img.MirrorDemote()
assert.NoError(t, err)
err = img.MirrorPromote(false)
assert.NoError(t, err)
err = img.MirrorDisable(false)
assert.NoError(t, err)
})
t.Run("promoteDemoteInvalid", func(t *testing.T) {
img, err := OpenImage(ioctx, name1, NoSnapshot)
assert.NoError(t, err)
assert.NoError(t, img.Close())
err = img.MirrorDemote()
assert.Error(t, err)
err = img.MirrorPromote(false)
assert.Error(t, err)
})
t.Run("resync", func(t *testing.T) {
img, err := OpenImage(ioctx, name1, NoSnapshot)
assert.NoError(t, err)
defer func() {
assert.NoError(t, img.Close())
}()
err = img.MirrorEnable(ImageMirrorModeSnapshot)
assert.NoError(t, err)
err = img.MirrorDemote()
assert.NoError(t, err)
err = img.MirrorResync()
assert.NoError(t, err)
err = img.MirrorDisable(true)
assert.NoError(t, err)
})
t.Run("resyncInvalid", func(t *testing.T) {
img, err := OpenImage(ioctx, name1, NoSnapshot)
assert.NoError(t, err)
assert.NoError(t, img.Close())
err = img.MirrorResync()
assert.Error(t, err)
})
t.Run("instanceId", func(t *testing.T) {
img, err := OpenImage(ioctx, name1, NoSnapshot)
assert.NoError(t, err)
defer func() {
assert.NoError(t, img.Close())
}()
err = img.MirrorEnable(ImageMirrorModeSnapshot)
assert.NoError(t, err)
miid, err := img.MirrorInstanceID()
// this is not currently testable for the "success" case
// see also the ceph tree where nothing is asserted except
// that the error is raised.
// TODO(?): figure out how to test this
assert.Error(t, err)
assert.Equal(t, "", miid)
err = img.MirrorDisable(false)
assert.NoError(t, err)
})
t.Run("instanceIdInvalid", func(t *testing.T) {
img, err := OpenImage(ioctx, name1, NoSnapshot)
assert.NoError(t, err)
assert.NoError(t, img.Close())
_, err = img.MirrorInstanceID()
assert.Error(t, err)
})
}
func TestGetMirrorImageInfo(t *testing.T) {
conn := radosConnect(t)
poolName := GetUUID()
err := conn.MakePool(poolName)
require.NoError(t, err)
defer func() {
assert.NoError(t, conn.DeletePool(poolName))
conn.Shutdown()
}()
ioctx, err := conn.OpenIOContext(poolName)
assert.NoError(t, err)
defer func() {
ioctx.Destroy()
}()
// enable per-image mirroring for this pool
err = SetMirrorMode(ioctx, MirrorModeImage)
require.NoError(t, err)
imgName := GetUUID()
options := NewRbdImageOptions()
assert.NoError(t, options.SetUint64(ImageOptionOrder, uint64(testImageOrder)))
err = CreateImage(ioctx, imgName, testImageSize, options)
require.NoError(t, err)
t.Run("closedImage", func(t *testing.T) {
img := GetImage(ioctx, imgName)
_, err = img.GetMirrorImageInfo()
assert.Error(t, err)
})
t.Run("getInfo", func(t *testing.T) {
// open image, enable, mirroring.
img, err := OpenImage(ioctx, imgName, NoSnapshot)
assert.NoError(t, err)
defer func() {
assert.NoError(t, img.Close())
}()
err = img.MirrorEnable(ImageMirrorModeSnapshot)
assert.NoError(t, err)
mii, err := img.GetMirrorImageInfo()
assert.NoError(t, err)
assert.NotNil(t, mii.GlobalID)
assert.Equal(t, mii.State, MirrorImageEnabled)
assert.Equal(t, mii.Primary, true)
})
}
func TestMirrorConstantStrings(t *testing.T) {
x := []struct {
s fmt.Stringer
t string
}{
{MirrorModeDisabled, "disabled"},
{MirrorModeImage, "image"},
{MirrorModePool, "pool"},
{MirrorMode(9999), "<unknown>"},
{ImageMirrorModeJournal, "journal"},
{ImageMirrorModeSnapshot, "snapshot"},
{ImageMirrorMode(9999), "<unknown>"},
{MirrorImageDisabling, "disabling"},
{MirrorImageEnabled, "enabled"},
{MirrorImageDisabled, "disabled"},
{MirrorImageState(9999), "<unknown>"},
}
for _, v := range x {
assert.Equal(t, v.s.String(), v.t)
}
}
func TestGetGlobalMirrorStatus(t *testing.T) {
conn := radosConnect(t)
poolName := GetUUID()
err := conn.MakePool(poolName)
require.NoError(t, err)
defer func() {
assert.NoError(t, conn.DeletePool(poolName))
conn.Shutdown()
}()
ioctx, err := conn.OpenIOContext(poolName)
assert.NoError(t, err)
defer func() {
ioctx.Destroy()
}()
// enable per-image mirroring for this pool
err = SetMirrorMode(ioctx, MirrorModeImage)
require.NoError(t, err)
imgName := GetUUID()
options := NewRbdImageOptions()
assert.NoError(t, options.SetUint64(ImageOptionOrder, uint64(testImageOrder)))
err = CreateImage(ioctx, imgName, testImageSize, options)
require.NoError(t, err)
t.Run("closedImage", func(t *testing.T) {
img := GetImage(ioctx, imgName)
_, err = img.GetGlobalMirrorStatus()
assert.Error(t, err)
})
t.Run("getStatus", func(t *testing.T) {
// open image, enable, mirroring.
img, err := OpenImage(ioctx, imgName, NoSnapshot)
assert.NoError(t, err)
defer func() {
assert.NoError(t, img.Close())
}()
err = img.MirrorEnable(ImageMirrorModeSnapshot)
assert.NoError(t, err)
gms, err := img.GetGlobalMirrorStatus()
assert.NoError(t, err)
assert.NotEqual(t, "", gms.Name)
assert.NotEqual(t, "", gms.Info.GlobalID)
assert.Equal(t, gms.Info.State, MirrorImageEnabled)
assert.Equal(t, gms.Info.Primary, true)
if assert.Len(t, gms.SiteStatuses, 1) {
ss := gms.SiteStatuses[0]
assert.Equal(t, "", ss.MirrorUUID)
assert.Equal(t, MirrorImageStatusStateUnknown, ss.State, ss.State)
assert.Equal(t, "status not found", ss.Description)
assert.Equal(t, int64(0), ss.LastUpdate)
assert.False(t, ss.Up)
ls, err := gms.LocalStatus()
assert.NoError(t, err)
assert.Equal(t, ss, ls)
}
})
}
func mirrorConfig() string {
return os.Getenv("MIRROR_CONF")
}
func TestGetGlobalMirrorStatusMirroredPool(t *testing.T) {
mconfig := mirrorConfig()
if mconfig == "" {
t.Skip("no mirror config env var set")
}
conn := radosConnect(t)
// this test assumes the rbd pool already exists and is mirrored
// this must be set up previously by the CI or manually
poolName := "rbd"
ioctx, err := conn.OpenIOContext(poolName)
assert.NoError(t, err)
defer func() {
ioctx.Destroy()
}()
imgName := GetUUID()
options := NewRbdImageOptions()
assert.NoError(t, options.SetUint64(ImageOptionOrder, uint64(testImageOrder)))
err = CreateImage(ioctx, imgName, testImageSize, options)
require.NoError(t, err)
defer func() {
err = RemoveImage(ioctx, imgName)
assert.NoError(t, err)
}()
// this next section is not a t.Run because it must be unconditionally
// executed. It is wrapped in a func to use defer to close the img.
func() {
img, err := OpenImage(ioctx, imgName, NoSnapshot)
assert.NoError(t, err)
defer func() {
assert.NoError(t, img.Close())
}()
err = img.MirrorEnable(ImageMirrorModeSnapshot)
assert.NoError(t, err)
mid, err := img.CreateMirrorSnapshot()
assert.NoError(t, err)
assert.NotEqual(t, 0, mid)
// wait for site statuses to get updated
for i := 0; i < 30; i++ {
gms, err := img.GetGlobalMirrorStatus()
assert.NoError(t, err)
if len(gms.SiteStatuses) > 1 {
break
}
time.Sleep(time.Second)
}
gms, err := img.GetGlobalMirrorStatus()
assert.NoError(t, err)
assert.NotEqual(t, "", gms.Name)
assert.NotEqual(t, "", gms.Info.GlobalID)
assert.Equal(t, gms.Info.State, MirrorImageEnabled)
assert.Equal(t, gms.Info.Primary, true)
if assert.Len(t, gms.SiteStatuses, 2) {
ss1 := gms.SiteStatuses[0]
assert.Equal(t, "", ss1.MirrorUUID)
assert.Equal(t, MirrorImageStatusStateStopped, ss1.State, ss1.State)
assert.Equal(t, "local image is primary", ss1.Description)
assert.Greater(t, ss1.LastUpdate, int64(0))
assert.True(t, ss1.Up)
ls, err := gms.LocalStatus()
assert.NoError(t, err)
assert.Equal(t, ss1, ls)
ss2 := gms.SiteStatuses[1]
assert.NotEqual(t, "", ss2.MirrorUUID)
assert.Equal(t, MirrorImageStatusStateReplaying, ss2.State, ss2.State)
assert.Contains(t, ss2.Description, "replaying,")
assert.Greater(t, ss2.LastUpdate, int64(0))
assert.True(t, ss2.Up)
}
}()
// test the results of GetGlobalMirrorStatus using the "other"
// mirror+pool as a source
t.Run("fromMirror", func(t *testing.T) {
conn := radosConnectConfig(t, mconfig)
ioctx2, err := conn.OpenIOContext(poolName)
assert.NoError(t, err)
defer func() {
ioctx2.Destroy()
}()
img, err := OpenImage(ioctx2, imgName, NoSnapshot)
assert.NoError(t, err)
defer func() {
assert.NoError(t, img.Close())
}()
// wait for site statuses to get updated
for i := 0; i < 30; i++ {
gms, err := img.GetGlobalMirrorStatus()
assert.NoError(t, err)
if len(gms.SiteStatuses) > 1 {
break
}
time.Sleep(time.Second)
}
gms, err := img.GetGlobalMirrorStatus()
assert.NoError(t, err)
assert.NotEqual(t, "", gms.Name)
assert.NotEqual(t, "", gms.Info.GlobalID)
assert.Equal(t, gms.Info.State, MirrorImageEnabled)
assert.Equal(t, gms.Info.Primary, false)
if assert.Len(t, gms.SiteStatuses, 2) {
ls, err := gms.LocalStatus()
assert.NoError(t, err)
assert.Equal(t, "", ls.MirrorUUID)
assert.Equal(t, MirrorImageStatusStateReplaying, ls.State, ls.State)
assert.Contains(t, ls.Description, "replaying,")
assert.Greater(t, ls.LastUpdate, int64(0))
assert.True(t, ls.Up)
assert.Equal(t, ls, gms.SiteStatuses[0])
ss2 := gms.SiteStatuses[1]
assert.NotEqual(t, "", ss2.MirrorUUID)
assert.Equal(t, MirrorImageStatusStateStopped, ss2.State, ss2.State)
assert.Equal(t, "local image is primary", ss2.Description)
assert.Greater(t, ss2.LastUpdate, int64(0))
assert.True(t, ss2.Up)
}
})
}
func TestMirrorImageStatusSummary(t *testing.T) {
t.Run("ioctxNil", func(t *testing.T) {
assert.Panics(t, func() {
MirrorImageStatusSummary(nil)
})
})
t.Run("emptyPool", func(t *testing.T) {
conn := radosConnect(t)
poolName := GetUUID()
err := conn.MakePool(poolName)
require.NoError(t, err)
defer func() {
assert.NoError(t, conn.DeletePool(poolName))
conn.Shutdown()
}()
ioctx, err := conn.OpenIOContext(poolName)
assert.NoError(t, err)
defer func() {
ioctx.Destroy()
}()
ssum, err := MirrorImageStatusSummary(ioctx)
assert.NoError(t, err)
assert.Len(t, ssum, 0)
})
t.Run("mirroredPool", testMirrorImageStatusSummaryMirroredPool)
}
func testMirrorImageStatusSummaryMirroredPool(t *testing.T) {
mconfig := mirrorConfig()
if mconfig == "" {
t.Skip("no mirror config env var set")
}
conn := radosConnect(t)
// this test assumes the rbd pool already exists and is mirrored
// this must be set up previously by the CI or manually
poolName := "rbd"
ioctx, err := conn.OpenIOContext(poolName)
assert.NoError(t, err)
defer func() {
ioctx.Destroy()
}()
imgBase := GetUUID()
imgName1 := imgBase + "a"
imgName2 := imgBase + "b"
imgName3 := imgBase + "c"
imgName4 := imgBase + "d"
options := NewRbdImageOptions()
assert.NoError(t, options.SetUint64(ImageOptionOrder, uint64(testImageOrder)))
for _, n := range []string{imgName1, imgName2, imgName3, imgName4} {
err = CreateImage(ioctx, n, testImageSize, options)
require.NoError(t, err)
defer func(n string) {
err = RemoveImage(ioctx, n)
assert.NoError(t, err)
}(n)
}
mkMirror := func(n string) {
img, err := OpenImage(ioctx, n, NoSnapshot)
assert.NoError(t, err)
defer func() {
assert.NoError(t, img.Close())
}()
err = img.MirrorEnable(ImageMirrorModeSnapshot)
assert.NoError(t, err)
mid, err := img.CreateMirrorSnapshot()
assert.NoError(t, err)
assert.NotEqual(t, 0, mid)
}
checkMirror := func(n string) {
img, err := OpenImage(ioctx, n, NoSnapshot)
assert.NoError(t, err)
defer func() {
assert.NoError(t, img.Close())
}()
// wait for site statuses to get updated
for i := 0; i < 30; i++ {
gms, err := img.GetGlobalMirrorStatus()
assert.NoError(t, err)
if len(gms.SiteStatuses) > 1 {
break
}
time.Sleep(time.Second)
}
}
for _, n := range []string{imgName1, imgName3} {
mkMirror(n)
}
for _, n := range []string{imgName1, imgName3} {
checkMirror(n)
}
ssum, err := MirrorImageStatusSummary(ioctx)
assert.NoError(t, err)
if assert.Len(t, ssum, 1) {
assert.Contains(t, ssum, MirrorImageStatusStateReplaying)
assert.GreaterOrEqual(t, ssum[MirrorImageStatusStateReplaying], uint(2))
}
// immediately going for status right after enabling mirroring and not
// waiting for things to settle should give us one unknown status
mkMirror(imgName2)
ssum, err = MirrorImageStatusSummary(ioctx)
assert.NoError(t, err)
if assert.Len(t, ssum, 2) {
assert.Contains(t, ssum, MirrorImageStatusStateReplaying)
assert.GreaterOrEqual(t, ssum[MirrorImageStatusStateReplaying], uint(2))
assert.Contains(t, ssum, MirrorImageStatusStateUnknown)
assert.GreaterOrEqual(t, ssum[MirrorImageStatusStateUnknown], uint(1))
}
}
| [
"\"MIRROR_CONF\""
]
| []
| [
"MIRROR_CONF"
]
| [] | ["MIRROR_CONF"] | go | 1 | 0 | |
cmd/evergreen/evergreen.go | package main
import (
"os"
"path/filepath"
"runtime"
// this *must* be included in the binary so that the legacy
// plugins are built into the binary.
_ "github.com/evergreen-ci/evergreen/plugin"
"github.com/evergreen-ci/evergreen"
"github.com/evergreen-ci/evergreen/operations"
homedir "github.com/mitchellh/go-homedir"
"github.com/mongodb/grip"
"github.com/mongodb/grip/level"
"github.com/mongodb/grip/send"
"github.com/urfave/cli"
)
func main() {
// this is where the main action of the program starts. The
// command line interface is managed by the cli package and
// its objects/structures. This, plus the basic configuration
// in buildApp(), is all that's necessary for bootstrapping the
// environment.
app := buildApp()
err := app.Run(os.Args)
grip.EmergencyFatal(err)
}
func buildApp() *cli.App {
app := cli.NewApp()
app.Name = "evergreen"
app.Usage = "MongoDB Continuous Integration Platform"
app.Version = evergreen.ClientVersion
// Register sub-commands here.
app.Commands = []cli.Command{
// Version and auto-update
operations.Version(),
operations.Update(),
// Sub-Commands
operations.Service(),
operations.Agent(),
operations.Admin(),
operations.Host(),
operations.Volume(),
operations.Notification(),
operations.Buildlogger(),
// Top-level commands.
operations.Keys(),
operations.Fetch(),
operations.Evaluate(),
operations.Validate(),
operations.List(),
operations.LastGreen(),
operations.Subscriptions(),
operations.CommitQueue(),
operations.Export(),
// Patch creation and management commands (top-level)
operations.Patch(),
operations.PatchFile(),
operations.PatchList(),
operations.PatchSetModule(),
operations.PatchRemoveModule(),
operations.PatchFinalize(),
operations.PatchCancel(),
operations.CreateVersion(),
}
userHome, err := homedir.Dir()
if err != nil {
// workaround for cygwin if we're on windows but couldn't get a homedir
if runtime.GOOS == "windows" && len(os.Getenv("HOME")) > 0 {
userHome = os.Getenv("HOME")
}
}
confPath := filepath.Join(userHome, evergreen.DefaultEvergreenConfig)
// These are global options. Use this to configure logging or
// other options independent from specific sub commands.
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "level",
Value: "info",
Usage: "Specify lowest visible log level as string: 'emergency|alert|critical|error|warning|notice|info|debug|trace'",
},
cli.StringFlag{
Name: "conf, config, c",
Usage: "specify the path for the evergreen CLI config",
Value: confPath,
},
}
app.Before = func(c *cli.Context) error {
return loggingSetup(app.Name, c.String("level"))
}
return app
}
func loggingSetup(name, l string) error {
if err := grip.SetSender(send.MakeErrorLogger()); err != nil {
return err
}
grip.SetName(name)
sender := grip.GetSender()
info := sender.Level()
info.Threshold = level.FromString(l)
return sender.SetLevel(info)
}
| [
"\"HOME\"",
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
internal/config/config.go | package config
import (
"errors"
"os"
"github.com/joho/godotenv"
"github.com/kelseyhightower/envconfig"
)
// List of available cache types.
const (
NoCache = "nocache"
InMemory = "inmemory"
Redis = "redis"
Memcache = "memcache"
)
var caches = map[string]int{NoCache: 1, InMemory: 1, Redis: 1, Memcache: 1}
// Config is the configuration model for the whole malscraper project.
type Config struct {
// Web server config.
Web webConfig `envconfig:"WEB"`
// Clean URL from MyAnimeList.
Clean cleanConfig `envconfig:"CLEAN"`
// Cache config.
Cache cacheConfig `envconfig:"CACHE"`
// Logging config.
Log logConfig `envconfig:"LOG"`
// Elasticsearch config.
ES esConfig `envconfig:"ES"`
}
type webConfig struct {
// HTTP port.
Port string `envconfig:"PORT" default:"8005"`
// Read timeout (in seconds).
ReadTimeout int `envconfig:"READ_TIMEOUT" default:"5"`
// Write timeout (in seconds).
WriteTimeout int `envconfig:"WRITE_TIMEOUT" default:"5"`
// Graceful shutdown timeout (in seconds).
GracefulTimeout int `envconfig:"GRACEFUL_TIMEOUT" default:"10"`
}
type cleanConfig struct {
// Clean image URL.
Image bool `envconfig:"IMAGE" default:"true"`
// Clean video URL.
Video bool `envconfig:"VIDEO" default:"true"`
}
type cacheConfig struct {
// Type of caching (string).
Dialect string `envconfig:"DIALECT" default:"inmemory"`
// Cache address with format `host:port`.
Address string `envconfig:"ADDRESS"`
// Cache password if exists.
Password string `envconfig:"PASSWORD"`
// Caching time duration (in seconds).
Time int `envconfig:"TIME" default:"86400"`
}
type logConfig struct {
// Log level.
Level int `envconfig:"LEVEL" default:"4"`
// Log color.
Color bool `envconfig:"COLOR" default:"true"`
}
type esConfig struct {
// Elasticsearch addresses. Split by comma.
Address string `envconfig:"ADDRESS"`
// Elasticsearch username.
User string `envconfig:"USER"`
// Elasticsearch password.
Password string `envconfig:"PASSWORD"`
}
const envPath = "../../.env"
const envPrefix = "MAL"
// GetConfig reads and parses the config from `.env` and the process environment.
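// With the default "MAL" prefix, envconfig typically resolves nested fields to
// variables such as MAL_WEB_PORT, MAL_CACHE_DIALECT or MAL_LOG_LEVEL (names
// assume standard envconfig prefixing of the struct tags above).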
func GetConfig() (cfg Config, err error) {
// Load .env file.
_ = godotenv.Load(envPath)
// Convert env to struct.
if err = envconfig.Process(envPrefix, &cfg); err != nil {
return cfg, err
}
// Override port.
if port := os.Getenv("PORT"); port != "" {
cfg.Web.Port = port
}
// Validate cache type.
if caches[cfg.Cache.Dialect] == 0 {
return cfg, errors.New("invalid cache type (nocache|inmemory|redis|memcache)")
}
return cfg, nil
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
ilf/symbolic/solidity/solidity_utils.py | from subprocess import Popen, PIPE
from ilf.symbolic.exceptions import CompilerError
import json
import logging
import os
import sys
COMBINED_JSON_FILENAME = 'combined.json'
COMPACT_AST_SUFFIX = '_json.ast'
def solc_exists(version):
solc_binary = os.path.join(os.environ['HOME'], '.py-solc/solc-v' + version, 'bin/solc')
if os.path.exists(solc_binary):
return True
else:
return False
def compile_sol_file(filename, solc_binary):
solc_binary = solc_binary if solc_binary is not None else 'solc'
file_dirname = os.path.dirname(filename) or '.'
file_basename = os.path.basename(filename)
cmd = [solc_binary,
# '--optimize', '--optimize-runs', '1000',
'--evm-version', 'byzantium',
'--combined-json', 'asm,ast,bin,bin-runtime,srcmap-runtime,srcmap,hashes,abi',
'--allow-paths', '.']
cmd.append(file_basename)
try:
p = Popen(cmd, stdout=PIPE, stderr=PIPE, cwd=file_dirname)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise CompilerError('Solc experienced a fatal error (code %d).\n\n%s' % (p.returncode, stderr.decode('UTF-8')))
except FileNotFoundError:
raise CompilerError('Solc not found.')
out = stdout.decode('UTF-8')
assert len(out), 'Compilation failed.'
combined_json_data = json.loads(out)
return combined_json_data
def offset_to_line(source_code, bytecode_offset, source_mapping):
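# Solidity source maps are ';'-separated entries of the form "s:l:f:j"
# (start offset, length, file index, jump type); empty fields reuse the value
# of the previous entry, which is why the loop below walks backwards until a
# non-empty start offset is found.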
srcmap_runtime_mappings = source_mapping[0].split(';')
srcmap_mappings = source_mapping[1].split(';')
mappings = None
if bytecode_offset < 0 or len(srcmap_mappings) <= bytecode_offset:
if bytecode_offset < 0 or len(srcmap_runtime_mappings) <= bytecode_offset:
logging.debug('Bytecode offset is wrong!')
return 0
else:
mappings = srcmap_runtime_mappings
else:
mappings = srcmap_mappings
src_offset = -1
while True:
src_offset = mappings[bytecode_offset].split(':')[0]
bytecode_offset -= 1
if not ((src_offset == '' or int(src_offset) < 0) and bytecode_offset >= 0):
break
if src_offset != '' and int(src_offset) >= 0:
source_line = get_source_line_from_offset(source_code, int(src_offset))
return source_line
def get_source_line_from_offset(source_code, src_offset):
linebreaks = 0
for line, content in enumerate(source_code):
if line > src_offset:
break
if content == '\n':
linebreaks += 1
return linebreaks
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 | |
hack/clean/clean.go | package main
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"flag"
"os"
"strings"
"time"
mgmtfeatures "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features"
"github.com/sirupsen/logrus"
utillog "github.com/Azure/ARO-RP/pkg/util/log"
"github.com/Azure/ARO-RP/pkg/util/purge"
)
var (
dryRun = flag.Bool("dryRun", true, `Dry run`)
)
var denylist = []string{
"aro-v4-shared",
"aro-v4-shared-cluster",
"v4-eastus",
"v4-australiasoutheast",
"v4-westeurope",
"management-westeurope",
"management-eastus",
"management-australiasoutheast",
"images",
"secrets",
"dns",
}
const (
defaultTTL = 48 * time.Hour
defaultCreatedAtTag = "createdAt"
defaultKeepTag = "persist"
)
func main() {
flag.Parse()
ctx := context.Background()
log := utillog.GetLogger()
if err := run(ctx, log); err != nil {
log.Fatal(err)
}
}
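// run reads its settings from the environment: AZURE_SUBSCRIPTION_ID,
// AZURE_PURGE_TTL (defaults to 48h when unset), AZURE_PURGE_CREATED_TAG
// (defaults to "createdAt") and AZURE_PURGE_RESOURCEGROUP_PREFIXES, a
// comma-separated list of resource group name prefixes eligible for purging.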
func run(ctx context.Context, log *logrus.Entry) error {
subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")
var ttl time.Duration
if os.Getenv("AZURE_PURGE_TTL") != "" {
var err error
ttl, err = time.ParseDuration(os.Getenv("AZURE_PURGE_TTL"))
if err != nil {
return err
}
} else {
ttl = defaultTTL
}
var createdTag = defaultCreatedAtTag
if os.Getenv("AZURE_PURGE_CREATED_TAG") != "" {
createdTag = os.Getenv("AZURE_PURGE_CREATED_TAG")
}
deleteGroupPrefixes := []string{}
if os.Getenv("AZURE_PURGE_RESOURCEGROUP_PREFIXES") != "" {
deleteGroupPrefixes = strings.Split(os.Getenv("AZURE_PURGE_RESOURCEGROUP_PREFIXES"), ",")
}
shouldDelete := func(resourceGroup mgmtfeatures.ResourceGroup, log *logrus.Entry) bool {
// if prefix is set we check if we need to evaluate this group for purge
// before we check other fields.
if len(deleteGroupPrefixes) > 0 {
isDeleteGroup := false
for _, deleteGroupPrefix := range deleteGroupPrefixes {
if strings.HasPrefix(*resourceGroup.Name, deleteGroupPrefix) {
isDeleteGroup = true
break
}
}
// return if prefix not matched
if !isDeleteGroup {
return false
}
}
for t := range resourceGroup.Tags {
if strings.ToLower(t) == defaultKeepTag {
log.Debugf("Group %s is to persist. SKIP.", *resourceGroup.Name)
return false
}
}
// azure tags is not consistent with lower/upper cases.
if _, ok := resourceGroup.Tags[createdTag]; !ok {
log.Debugf("Group %s does not have createdAt tag. SKIP.", *resourceGroup.Name)
return false
}
createdAt, err := time.Parse(time.RFC3339Nano, *resourceGroup.Tags[createdTag])
if err != nil {
log.Errorf("%s: %s", *resourceGroup.Name, err)
return false
}
if time.Since(createdAt) < ttl {
log.Debugf("Group %s is still less than TTL. SKIP.", *resourceGroup.Name)
return false
}
// TODO(mj): Fix this!
if contains(denylist, *resourceGroup.Name) {
return false
}
return true
}
log.Infof("Starting the resource cleaner, DryRun: %t", *dryRun)
rc, err := purge.NewResourceCleaner(log, subscriptionID, shouldDelete, *dryRun)
if err != nil {
return err
}
return rc.CleanResourceGroups(ctx)
}
func contains(s []string, e string) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
| [
"\"AZURE_SUBSCRIPTION_ID\"",
"\"AZURE_PURGE_TTL\"",
"\"AZURE_PURGE_TTL\"",
"\"AZURE_PURGE_CREATED_TAG\"",
"\"AZURE_PURGE_CREATED_TAG\"",
"\"AZURE_PURGE_RESOURCEGROUP_PREFIXES\"",
"\"AZURE_PURGE_RESOURCEGROUP_PREFIXES\""
]
| []
| [
"AZURE_PURGE_CREATED_TAG",
"AZURE_PURGE_RESOURCEGROUP_PREFIXES",
"AZURE_SUBSCRIPTION_ID",
"AZURE_PURGE_TTL"
]
| [] | ["AZURE_PURGE_CREATED_TAG", "AZURE_PURGE_RESOURCEGROUP_PREFIXES", "AZURE_SUBSCRIPTION_ID", "AZURE_PURGE_TTL"] | go | 4 | 0 | |
pkg/cmd/upgrade/upgrade_cli_test.go | // +build unit
package upgrade
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/blang/semver"
v1 "github.com/jenkins-x/jx-api/pkg/apis/jenkins.io/v1"
"github.com/jenkins-x/jx-api/pkg/client/clientset/versioned/fake"
"github.com/jenkins-x/jx-logging/pkg/log"
"github.com/olli-ai/jx/v2/pkg/brew"
"github.com/olli-ai/jx/v2/pkg/cmd/create/options"
"github.com/olli-ai/jx/v2/pkg/cmd/opts"
"github.com/olli-ai/jx/v2/pkg/extensions"
"github.com/olli-ai/jx/v2/pkg/version"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var sampleBrewInfo = `[
{
"name":"jx",
"full_name":"jenkins-x/jx/jx",
"oldname":null,
"aliases":[
],
"versioned_formulae":[
],
"desc":"A tool to install and interact with Jenkins X on your Kubernetes cluster.",
"homepage":"https://jenkins-x.github.io/jenkins-x-website/",
"versions":{
"stable":"2.0.181",
"devel":null,
"head":null,
"bottle":false
},
"revision":0,
"version_scheme":0,
"bottle":{
},
"keg_only":false,
"bottle_disabled":false,
"options":[
],
"build_dependencies":[
],
"dependencies":[
],
"recommended_dependencies":[
],
"optional_dependencies":[
],
"requirements":[
],
"conflicts_with":[
],
"caveats":null,
"installed":[
{
"version":"2.0.181",
"used_options":[
],
"built_as_bottle":false,
"poured_from_bottle":false,
"runtime_dependencies":[
],
"installed_as_dependency":false,
"installed_on_request":true
}
],
"linked_keg":"2.0.181",
"pinned":false,
"outdated":false
}
]`
func TestLatestJxBrewVersion(t *testing.T) {
version, err := brew.LatestJxBrewVersion(sampleBrewInfo)
assert.NoError(t, err)
assert.Equal(t, "2.0.181", version)
}
func TestNeedsUpgrade(t *testing.T) {
type testData struct {
current string
latest string
expectedUpgradeNeeded bool
expectedMessage string
}
testCases := []testData{
{
"1.0.0", "1.0.0", false, "You are already on the latest version of jx 1.0.0\n",
},
{
"1.0.0", "1.0.1", true, "",
},
{
"1.0.0", "0.0.99", true, "",
},
}
o := UpgradeCLIOptions{}
for _, data := range testCases {
currentVersion, _ := semver.New(data.current)
latestVersion, _ := semver.New(data.latest)
actualMessage := log.CaptureOutput(func() {
actualUpgradeNeeded := o.needsUpgrade(*currentVersion, *latestVersion)
assert.Equal(t, data.expectedUpgradeNeeded, actualUpgradeNeeded, fmt.Sprintf("Unexpected upgrade flag for %v", data))
})
assert.Equal(t, data.expectedMessage, actualMessage, fmt.Sprintf("Unexpected message for %v", data))
}
}
func TestVersionCheckWhenCurrentVersionIsGreaterThanReleaseVersion(t *testing.T) {
jxVersion := semver.Version{Major: 1, Minor: 3, Patch: 153}
version.Map["version"] = "1.4.0"
opts := &UpgradeCLIOptions{
CreateOptions: options.CreateOptions{
CommonOptions: &opts.CommonOptions{},
},
}
update, err := opts.ShouldUpdate(jxVersion)
assert.NoError(t, err, "should check version without failure")
assert.False(t, update, "should not update")
}
func TestVersionCheckWhenCurrentVersionIsEqualToReleaseVersion(t *testing.T) {
jxVersion := semver.Version{Major: 1, Minor: 2, Patch: 3}
version.Map["version"] = "1.2.3"
opts := &UpgradeCLIOptions{
CreateOptions: options.CreateOptions{
CommonOptions: &opts.CommonOptions{},
},
}
update, err := opts.ShouldUpdate(jxVersion)
assert.NoError(t, err, "should check version without failure")
assert.False(t, update, "should not update")
}
func TestVersionCheckWhenCurrentVersionIsLessThanReleaseVersion(t *testing.T) {
jxVersion := semver.Version{Major: 1, Minor: 3, Patch: 153}
version.Map["version"] = "1.0.0"
opts := &UpgradeCLIOptions{
CreateOptions: options.CreateOptions{
CommonOptions: &opts.CommonOptions{},
},
}
update, err := opts.ShouldUpdate(jxVersion)
assert.NoError(t, err, "should check version without failure")
assert.True(t, update, "should update")
}
func TestVersionCheckWhenCurrentVersionIsEqualToReleaseVersionWithPatch(t *testing.T) {
prVersions := []semver.PRVersion{}
prVersions = append(prVersions, semver.PRVersion{VersionStr: "dev"})
jxVersion := semver.Version{Major: 1, Minor: 2, Patch: 3, Pre: prVersions, Build: []string(nil)}
version.Map["version"] = "1.2.3"
opts := &UpgradeCLIOptions{
CreateOptions: options.CreateOptions{
CommonOptions: &opts.CommonOptions{},
},
}
update, err := opts.ShouldUpdate(jxVersion)
assert.NoError(t, err, "should check version without failure")
assert.False(t, update, "should not update")
}
func TestVersionCheckWhenCurrentVersionWithPatchIsEqualToReleaseVersion(t *testing.T) {
jxVersion := semver.Version{Major: 1, Minor: 2, Patch: 3}
version.Map["version"] = "1.2.3-dev+6a8285f4"
opts := &UpgradeCLIOptions{
CreateOptions: options.CreateOptions{
CommonOptions: &opts.CommonOptions{},
},
}
update, err := opts.ShouldUpdate(jxVersion)
assert.NoError(t, err, "should check version without failure")
assert.False(t, update, "should not update")
}
func TestVersionCheckWhenCurrentVersionWithPatchIsLessThanReleaseVersion(t *testing.T) {
jxVersion := semver.Version{Major: 1, Minor: 2, Patch: 3}
version.Map["version"] = "1.2.2-dev+6a8285f4"
opts := &UpgradeCLIOptions{
CreateOptions: options.CreateOptions{
CommonOptions: &opts.CommonOptions{},
},
}
update, err := opts.ShouldUpdate(jxVersion)
assert.NoError(t, err, "should check version without failure")
assert.False(t, update, "should not update")
}
func TestUpgradeBinaryPlugins(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "")
require.NoError(t, err, "failed to create tmp dir")
opts := &UpgradeCLIOptions{
CreateOptions: options.CreateOptions{
CommonOptions: &opts.CommonOptions{},
},
}
dummyPluginURL := "https://raw.githubusercontent.com/jenkins-x/jx/master/hack/gofmt.sh"
ns := "jx"
pluginName := "jx-my-plugin"
pluginVersion := "1.2.3"
jxClient := fake.NewSimpleClientset(
&v1.Plugin{
ObjectMeta: metav1.ObjectMeta{
Name: pluginName,
Namespace: ns,
Labels: map[string]string{
extensions.PluginCommandLabel: pluginName,
},
},
Spec: v1.PluginSpec{
Name: pluginName,
SubCommand: "my plugin",
Group: "",
Binaries: []v1.Binary{
{
URL: dummyPluginURL,
Goarch: "amd64",
Goos: "Windows",
},
{
URL: dummyPluginURL,
Goarch: "amd64",
Goos: "Darwin",
},
{
URL: dummyPluginURL,
Goarch: "amd64",
Goos: "Linux",
},
},
Description: "my awesome plugin extension",
Version: pluginVersion,
},
})
opts.SetJxClient(jxClient)
opts.SetDevNamespace(ns)
oldJXHome := os.Getenv("JX_HOME")
os.Setenv("JX_HOME", tmpDir)
defer os.Setenv("JX_HOME", oldJXHome)
t.Logf("downloading plugins to JX_HOME %s\n", tmpDir)
err = opts.UpgradeBinaryPlugins()
require.NoError(t, err, "should not fail upgrading the binary plugins")
assert.FileExists(t, filepath.Join(tmpDir, "plugins", ns, "bin", pluginName+"-"+pluginVersion))
}
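// Illustrative sketch (not part of the original test; the helper name is hypothetical):
// the JX_HOME swap above could be factored into a reusable helper so each test
// downloads plugins into an isolated temp dir. Assumes Go 1.14+ for t.Cleanup.
//
//	func withTempJXHome(t *testing.T) string {
//		tmpDir, err := ioutil.TempDir("", "")
//		require.NoError(t, err, "failed to create tmp dir")
//		oldJXHome := os.Getenv("JX_HOME")
//		os.Setenv("JX_HOME", tmpDir)
//		t.Cleanup(func() { os.Setenv("JX_HOME", oldJXHome) })
//		return tmpDir
//	}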
| ["\"JX_HOME\""] | [] | ["JX_HOME"] | [] | ["JX_HOME"] | go | 1 | 0 |
buildserver/integration_test.go | package buildserver_test
import (
"context"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"strings"
"testing"
"github.com/pkg/errors"
"github.com/sourcegraph/ctxvfs"
gobuildserver "github.com/sourcegraph/go-langserver/buildserver"
"github.com/sourcegraph/go-langserver/gituri"
"github.com/sourcegraph/go-langserver/gosrc"
"github.com/sourcegraph/go-langserver/pkg/lsp"
lsext "github.com/sourcegraph/go-langserver/pkg/lspext"
"github.com/sourcegraph/go-langserver/vfsutil"
"github.com/sourcegraph/go-lsp/lspext"
)
func init() {
gosrc.RuntimeVersion = "go1.7.1"
}
func TestIntegration(t *testing.T) {
if testing.Short() {
t.Skip("skip long integration test")
}
tests := map[lsp.DocumentURI]struct { // map key is rootURI
mode string
ciBlacklist bool
pinDepReposToRev map[string]string // so that file:line:col expectations are stable
wantHover map[string]string
wantDefinition map[string]string
wantXDefinition map[string]string
wantReferences map[string][]string
wantSymbols map[string][]string
wantXDependencies string
wantXReferences map[*lsext.WorkspaceReferencesParams][]string
wantXPackages []string
}{
"git://github.com/gorilla/mux?0a192a193177452756c362c20087ddafcf6829c4": {
mode: "go",
pinDepReposToRev: map[string]string{
"https://github.com/gorilla/context": "08b5f424b9271eedf6f9f0ce86cb9396ed337a42",
},
wantHover: map[string]string{
"mux.go:61:38": "type Request struct", // stdlib
},
wantDefinition: map[string]string{
"mux.go:61:38": "git://github.com/golang/go?go1.7.1#src/net/http/request.go:76:6", // stdlib
},
wantXDefinition: map[string]string{
"mux.go:61:38": "git://github.com/golang/go?go1.7.1#src/net/http/request.go:76:6 id:net/http/-/Request name:Request package:net/http packageName:http recv: vendor:false",
},
wantXDependencies: "gorilla-mux.json",
wantXPackages: []string{"github.com/gorilla/mux"},
},
"git://github.com/coreos/fuze?7df4f06041d9daba45e4c68221b9b04203dff1d8": {
mode: "go",
pinDepReposToRev: map[string]string{
"https://github.com/stretchr/testify": "976c720a22c8eb4eb6a0b4348ad85ad12491a506",
"https://github.com/go-check/check": "4f90aeace3a26ad7021961c297b22c42160c7b25",
"https://github.com/go-yaml/yaml": "a5b47d31c556af34a302ce5d659e6fea44d90de0",
},
wantHover: map[string]string{
"config/convert.go:262:26": "func ParseBase2Bytes(s string) (Base2Bytes, error)", // vendored
"config/vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/semver/semver_test.go:287:27": "func Marshal(in interface{}) (out []byte, err error)",
},
wantDefinition: map[string]string{
"config/convert.go:262:26": "git://github.com/coreos/fuze?7df4f06041d9daba45e4c68221b9b04203dff1d8#config/vendor/github.com/alecthomas/units/bytes.go:30:6", // vendored TODO(sqs): really want the below result which has the non-vendored path as well, need to implement that
//"config/convert.go:262:26": "git://github.com/coreos/fuze?7df4f06041d9daba45e4c68221b9b04203dff1d8#config/vendor/github.com/alecthomas/units/bytes.go:30:6 git://github.com/alecthomas/units#bytes.go:30:6", // vendored
"config/vendor/github.com/coreos/ignition/config/vendor/github.com/coreos/go-semver/semver/semver_test.go:287:27": "git://github.com/go-yaml/yaml?v2.2.2#yaml.go:138:6", // diff repo
},
wantXDefinition: map[string]string{
"config/convert.go:262:26": "git://github.com/coreos/fuze?7df4f06041d9daba45e4c68221b9b04203dff1d8#config/vendor/github.com/alecthomas/units/bytes.go:30:6 id:github.com/coreos/fuze/config/vendor/github.com/alecthomas/units/-/ParseBase2Bytes name:ParseBase2Bytes package:github.com/coreos/fuze/config/vendor/github.com/alecthomas/units packageName:units recv: vendor:true",
},
},
"git://github.com/golang/lint?c7bacac2b21ca01afa1dee0acf64df3ce047c28f": {
mode: "go",
pinDepReposToRev: map[string]string{
"https://github.com/golang/tools": "73d2e795b859a48cba2d70040c384dd1cea7e113",
},
wantHover: map[string]string{
"golint/golint.go:91:18": "type Linter struct", // diff pkg, same repo
},
wantDefinition: map[string]string{
"golint/golint.go:91:18": "git://github.com/golang/lint?c7bacac2b21ca01afa1dee0acf64df3ce047c28f#lint.go:31:6", // diff pkg, same repo
},
wantXDefinition: map[string]string{
"golint/golint.go:91:18": "git://github.com/golang/lint?c7bacac2b21ca01afa1dee0acf64df3ce047c28f#lint.go:31:6 id:github.com/golang/lint/-/Linter name:Linter package:github.com/golang/lint packageName:lint recv: vendor:false",
},
},
"git://github.com/gorilla/csrf?a8abe8abf66db8f4a9750d76ba95b4021a354757": {
mode: "go",
pinDepReposToRev: map[string]string{
"https://github.com/gorilla/securecookie": "c13558c2b1c44da35e0eb043053609a5ba3a1f19",
"https://github.com/gorilla/context": "08b5f424b9271eedf6f9f0ce86cb9396ed337a42",
"https://github.com/pkg/errors": "839d9e913e063e28dfd0e6c7b7512793e0a48be9",
},
wantHover: map[string]string{
"csrf.go:57:28": "type SecureCookie struct", // diff repo
},
wantDefinition: map[string]string{
"csrf.go:57:28": "git://github.com/gorilla/securecookie?HEAD#securecookie.go:154:6", // diff repo
},
wantXDefinition: map[string]string{
"csrf.go:57:28": "git://github.com/gorilla/securecookie?HEAD#securecookie.go:154:6 id:github.com/gorilla/securecookie/-/SecureCookie name:SecureCookie package:github.com/gorilla/securecookie packageName:securecookie recv: vendor:false",
},
wantXDependencies: "gorilla-csrf.json",
},
"git://github.com/golang/go?f75aafdf56dd90eab75cfeac8cf69358f73ba171": {
// SHA is equivalent to go1.7.1 tag, but make sure we
// retain the original rev spec in definition results.
mode: "go",
pinDepReposToRev: map[string]string{
"https://github.com/foobar/externalprintf": "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
},
ciBlacklist: true, // skip on CI since the repo is large
wantHover: map[string]string{
"src/encoding/hex/hex.go:70:12": "func fromHexChar(c byte) (byte, bool)", // func decl
"src/encoding/hex/hex.go:104:18": "type Buffer struct", // bytes.Buffer
"src/net/http/server.go:78:32": "type Request struct",
},
wantDefinition: map[string]string{
"src/encoding/hex/hex.go:70:12": "git://github.com/golang/go?f75aafdf56dd90eab75cfeac8cf69358f73ba171#src/encoding/hex/hex.go:70:6", // func decl
"src/encoding/hex/hex.go:104:18": "git://github.com/golang/go?f75aafdf56dd90eab75cfeac8cf69358f73ba171#src/bytes/buffer.go:17:6", // stdlib type
},
wantXDefinition: map[string]string{
"src/encoding/hex/hex.go:70:12": "git://github.com/golang/go?f75aafdf56dd90eab75cfeac8cf69358f73ba171#src/encoding/hex/hex.go:70:6 id:encoding/hex/-/fromHexChar name:fromHexChar package:encoding/hex packageName:hex recv: vendor:false",
"src/encoding/hex/hex.go:104:18": "git://github.com/golang/go?f75aafdf56dd90eab75cfeac8cf69358f73ba171#src/bytes/buffer.go:17:6 id:bytes/-/Buffer name:Buffer package:bytes packageName:bytes recv: vendor:false",
},
wantReferences: map[string][]string{
"src/net/http/httptest/server.go:204:25": []string{ // httptest.Server
"git://github.com/golang/go?f75aafdf56dd90eab75cfeac8cf69358f73ba171#src/net/http/httptest/server.go:204:18",
"git://github.com/golang/go?f75aafdf56dd90eab75cfeac8cf69358f73ba171#src/net/http/httptest/server_test.go:92:5",
"git://github.com/golang/go?f75aafdf56dd90eab75cfeac8cf69358f73ba171#src/net/http/serve_test.go:2625:7",
"git://github.com/golang/go?f75aafdf56dd90eab75cfeac8cf69358f73ba171#src/net/http/transport_test.go:2553:6",
"git://github.com/golang/go?f75aafdf56dd90eab75cfeac8cf69358f73ba171#src/net/http/transport_test.go:478:5",
"git://github.com/golang/go?f75aafdf56dd90eab75cfeac8cf69358f73ba171#src/net/http/transport_test.go:532:5",
},
},
wantSymbols: map[string][]string{
"Sum256": []string{"git://github.com/golang/go?f75aafdf56dd90eab75cfeac8cf69358f73ba171#src/crypto/sha256/sha256.go:function:Sum256:176:5"},
"dir:src/crypto/sha256 Sum256": []string{"git://github.com/golang/go?f75aafdf56dd90eab75cfeac8cf69358f73ba171#src/crypto/sha256/sha256.go:function:Sum256:176:5"},
"dir:crypto/sha256 Sum256": []string{}, // invalid dir
"dir:foo Sum256": []string{}, // invalid dir
},
wantXDependencies: "golang-go.json",
},
"git://github.com/docker/machine?e1a03348ad83d8e8adb19d696bc7bcfb18ccd770": {
mode: "go",
ciBlacklist: true, // skip on CI due to large repo size
wantHover: map[string]string{
"libmachine/provision/provisioner.go:107:50": "func RunSSHCommandFromDriver(...",
},
wantDefinition: map[string]string{
"libmachine/provision/provisioner.go:107:50": "git://github.com/docker/machine?e1a03348ad83d8e8adb19d696bc7bcfb18ccd770#libmachine/drivers/utils.go:36:6",
},
wantXDefinition: map[string]string{
"libmachine/provision/provisioner.go:107:50": "git://github.com/docker/machine?e1a03348ad83d8e8adb19d696bc7bcfb18ccd770#libmachine/drivers/utils.go:36:6 id:github.com/docker/machine/libmachine/drivers/-/RunSSHCommandFromDriver name:RunSSHCommandFromDriver package:github.com/docker/machine/libmachine/drivers packageName:drivers recv: vendor:false",
},
},
"git://github.com/kubernetes/kubernetes?c41c24fbf300cd7ba504ea1ac2e052c4a1bbed33": {
mode: "go",
ciBlacklist: true, // skip on CI due to large repo size
pinDepReposToRev: map[string]string{
"https://github.com/kubernetes/client-go": "5fe6fc56cb38d04ef4af601a03599c984229dea2",
},
wantHover: map[string]string{
"pkg/ssh/ssh.go:49:38": "func NewCounter(...",
"pkg/util/workqueue/queue.go:113:15": "struct field L sync.Locker",
},
wantSymbols: map[string][]string{
"kubectlAnn": []string{"git://github.com/kubernetes/kubernetes?c41c24fbf300cd7ba504ea1ac2e052c4a1bbed33#pkg/kubectl/kubectl.go:constant:kubectlAnnotationPrefix:31:1"},
},
wantXDependencies: "kubernetes-kubernetes.json",
},
"git://github.com/uber-go/atomic?3b8db5e93c4c02efbc313e17b2e796b0914a01fb": {
mode: "go",
wantDefinition: map[string]string{
// glide.lock specifies testify to something other than HEAD
"atomic_test.go:32:12": "git://github.com/stretchr/testify?d77da356e56a7428ad25149ca77381849a6a5232#require/require.go:58:6",
},
},
"git://github.com/sgtest/godep-include?d92076664c875c0134dbd475b81f88d97df2bc41": {
mode: "go",
wantDefinition: map[string]string{
// Godeps.json specifies testify to something other than HEAD
"foo.go:12:12": "git://github.com/stretchr/testify?d77da356e56a7428ad25149ca77381849a6a5232#require/require.go:58:6",
},
},
}
for rootURI, test := range tests {
root, err := gituri.Parse(string(rootURI))
if err != nil {
t.Fatal(err)
}
label := strings.Replace(strings.TrimPrefix(root.Path, "/"), "/", "-", -1)
t.Run(label, func(t *testing.T) {
if os.Getenv("CI") != "" && test.ciBlacklist {
t.Skipf("Skipping the %s integration test in CI", rootURI)
}
cleanup := useGithubForVFS()
defer cleanup()
{
// If integration tests depend on external repos, we
// need to use a pinned, hardcoded revision instead of
// "HEAD", or else any file:line:col expectations we
// have will break if the dep repo's files change.
orig := gobuildserver.NewDepRepoVFS
gobuildserver.NewDepRepoVFS = func(ctx context.Context, cloneURL *url.URL, rev string, zipURLTemplate *string) (ctxvfs.FileSystem, error) {
if pinRev, ok := test.pinDepReposToRev[cloneURL.String()]; ok {
rev = pinRev
} else if len(rev) != 40 && rev != gosrc.RuntimeVersion {
// It's OK to hardcode allowable Git tags
// (such as "goN.N.N") here, since we know
// those to be stable. Branches like "master"
// are not stable and are not OK to hardcode
// here.
// We panic since t.Fatal does not interact nicely with subtests
panic(fmt.Sprintf("TestIntegration/%s: must specify pinDepReposToRev in integration test definition so that test analysis is deterministic/stable (and not dependent on the mutable git rev spec %q for repo %q)", label, rev, cloneURL))
}
return orig(ctx, cloneURL, rev, zipURLTemplate)
}
defer func() {
gobuildserver.NewDepRepoVFS = orig
}()
}
ctx := context.Background()
c, done := connectionToNewBuildServer(string(rootURI), t, true)
defer done()
// Prepare the connection.
if err := c.Call(ctx, "initialize", lspext.InitializeParams{
InitializeParams: lsp.InitializeParams{RootURI: "file:///"},
OriginalRootURI: rootURI,
}, nil); err != nil {
t.Fatal("initialize:", err, rootURI)
}
root, err := gituri.Parse(string(rootURI))
if err != nil {
t.Fatal(err)
}
lspTests(t, ctx, c, root, test.wantHover, test.wantDefinition, test.wantXDefinition, test.wantReferences, test.wantSymbols, test.wantXDependencies, test.wantXReferences, test.wantXPackages)
if err := c.Close(); err != nil {
t.Fatal(err)
}
})
}
}
// useGithubForVFS allows us to serve repository data from codeload.github.com
// for test performance instead of from gitserver. This technically means we
// aren't testing gitserver, but that is well tested separately, and the
// benefit of fast tests here outweighs the benefits of a coarser integration
// test.
func useGithubForVFS() func() {
origRemoteFS := gobuildserver.RemoteFS
gobuildserver.RemoteFS = func(ctx context.Context, initializeParams lspext.InitializeParams) (ctxvfs.FileSystem, io.Closer, error) {
u, err := gituri.Parse(string(initializeParams.OriginalRootURI))
if err != nil {
return nil, ioutil.NopCloser(strings.NewReader("")), errors.Wrap(err, "could not parse workspace URI for remotefs")
}
if u.Rev() == "" {
return nil, ioutil.NopCloser(strings.NewReader("")), errors.Errorf("rev is required in uri: %s", initializeParams.OriginalRootURI)
}
fs, err := vfsutil.NewGitHubRepoVFS(ctx, string(u.Repo()), u.Rev())
return fs, ioutil.NopCloser(strings.NewReader("")), err
}
return func() {
gobuildserver.RemoteFS = origRemoteFS
}
}
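// Illustrative usage sketch (mirrors the call already made in TestIntegration above):
//
//	cleanup := useGithubForVFS()
//	defer cleanup()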
| ["\"CI\""] | [] | ["CI"] | [] | ["CI"] | go | 1 | 0 |
Godeps/_workspace/src/github.com/magiconair/properties/properties_test.go | // Copyright 2013-2014 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package properties
import (
"bytes"
"flag"
"fmt"
"math"
"os"
"strings"
"testing"
"time"
. "github.com/zerobotlabs/nestor-cli/Godeps/_workspace/src/github.com/magiconair/properties/_third_party/gopkg.in/check.v1"
)
func Test(t *testing.T) { TestingT(t) }
type TestSuite struct {
prevHandler ErrorHandlerFunc
}
var (
_ = Suite(&TestSuite{})
verbose = flag.Bool("verbose", false, "Verbose output")
)
// --------------------------------------------------------------------
func (s *TestSuite) SetUpSuite(c *C) {
s.prevHandler = ErrorHandler
ErrorHandler = PanicHandler
}
// --------------------------------------------------------------------
func (s *TestSuite) TearDownSuite(c *C) {
ErrorHandler = s.prevHandler
}
// ----------------------------------------------------------------------------
// define test cases in the form of
// {"input", "key1", "value1", "key2", "value2", ...}
var complexTests = [][]string{
// whitespace prefix
{" key=value", "key", "value"}, // SPACE prefix
{"\fkey=value", "key", "value"}, // FF prefix
{"\tkey=value", "key", "value"}, // TAB prefix
{" \f\tkey=value", "key", "value"}, // mix prefix
// multiple keys
{"key1=value1\nkey2=value2\n", "key1", "value1", "key2", "value2"},
{"key1=value1\rkey2=value2\r", "key1", "value1", "key2", "value2"},
{"key1=value1\r\nkey2=value2\r\n", "key1", "value1", "key2", "value2"},
// blank lines
{"\nkey=value\n", "key", "value"},
{"\rkey=value\r", "key", "value"},
{"\r\nkey=value\r\n", "key", "value"},
// escaped chars in key
{"k\\ ey = value", "k ey", "value"},
{"k\\:ey = value", "k:ey", "value"},
{"k\\=ey = value", "k=ey", "value"},
{"k\\fey = value", "k\fey", "value"},
{"k\\ney = value", "k\ney", "value"},
{"k\\rey = value", "k\rey", "value"},
{"k\\tey = value", "k\tey", "value"},
// escaped chars in value
{"key = v\\ alue", "key", "v alue"},
{"key = v\\:alue", "key", "v:alue"},
{"key = v\\=alue", "key", "v=alue"},
{"key = v\\falue", "key", "v\falue"},
{"key = v\\nalue", "key", "v\nalue"},
{"key = v\\ralue", "key", "v\ralue"},
{"key = v\\talue", "key", "v\talue"},
// silently dropped escape character
{"k\\zey = value", "kzey", "value"},
{"key = v\\zalue", "key", "vzalue"},
// unicode literals
{"key\\u2318 = value", "key⌘", "value"},
{"k\\u2318ey = value", "k⌘ey", "value"},
{"key = value\\u2318", "key", "value⌘"},
{"key = valu\\u2318e", "key", "valu⌘e"},
// multiline values
{"key = valueA,\\\n valueB", "key", "valueA,valueB"}, // SPACE indent
{"key = valueA,\\\n\f\f\fvalueB", "key", "valueA,valueB"}, // FF indent
{"key = valueA,\\\n\t\t\tvalueB", "key", "valueA,valueB"}, // TAB indent
{"key = valueA,\\\n \f\tvalueB", "key", "valueA,valueB"}, // mix indent
// comments
{"# this is a comment\n! and so is this\nkey1=value1\nkey#2=value#2\n\nkey!3=value!3\n# and another one\n! and the final one", "key1", "value1", "key#2", "value#2", "key!3", "value!3"},
// expansion tests
{"key=value\nkey2=${key}", "key", "value", "key2", "value"},
{"key=value\nkey2=aa${key}", "key", "value", "key2", "aavalue"},
{"key=value\nkey2=${key}bb", "key", "value", "key2", "valuebb"},
{"key=value\nkey2=aa${key}bb", "key", "value", "key2", "aavaluebb"},
{"key=value\nkey2=${key}\nkey3=${key2}", "key", "value", "key2", "value", "key3", "value"},
{"key=${USER}", "key", os.Getenv("USER")},
{"key=${USER}\nUSER=value", "key", "value", "USER", "value"},
}
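// Illustrative sketch (not part of the original table): the expansion rows above
// behave like this when loaded directly with the package API used in this file:
//
//	p, _ := Load([]byte("key=value\nkey2=${key}"), ISO_8859_1)
//	v, _ := p.Get("key2") // v == "value"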
// ----------------------------------------------------------------------------
var commentTests = []struct {
input, key, value string
comments []string
}{
{"key=value", "key", "value", nil},
{"#\nkey=value", "key", "value", []string{""}},
{"#comment\nkey=value", "key", "value", []string{"comment"}},
{"# comment\nkey=value", "key", "value", []string{"comment"}},
{"# comment\nkey=value", "key", "value", []string{"comment"}},
{"# comment\n\nkey=value", "key", "value", []string{"comment"}},
{"# comment1\n# comment2\nkey=value", "key", "value", []string{"comment1", "comment2"}},
{"# comment1\n\n# comment2\n\nkey=value", "key", "value", []string{"comment1", "comment2"}},
{"!comment\nkey=value", "key", "value", []string{"comment"}},
{"! comment\nkey=value", "key", "value", []string{"comment"}},
{"! comment\nkey=value", "key", "value", []string{"comment"}},
{"! comment\n\nkey=value", "key", "value", []string{"comment"}},
{"! comment1\n! comment2\nkey=value", "key", "value", []string{"comment1", "comment2"}},
{"! comment1\n\n! comment2\n\nkey=value", "key", "value", []string{"comment1", "comment2"}},
}
// ----------------------------------------------------------------------------
var errorTests = []struct {
input, msg string
}{
// unicode literals
{"key\\u1 = value", "invalid unicode literal"},
{"key\\u12 = value", "invalid unicode literal"},
{"key\\u123 = value", "invalid unicode literal"},
{"key\\u123g = value", "invalid unicode literal"},
{"key\\u123", "invalid unicode literal"},
// circular references
{"key=${key}", "circular reference"},
{"key1=${key2}\nkey2=${key1}", "circular reference"},
// malformed expressions
{"key=${ke", "malformed expression"},
{"key=valu${ke", "malformed expression"},
}
// ----------------------------------------------------------------------------
var writeTests = []struct {
input, output, encoding string
}{
// ISO-8859-1 tests
{"key = value", "key = value\n", "ISO-8859-1"},
{"key = value \\\n continued", "key = value continued\n", "ISO-8859-1"},
{"key⌘ = value", "key\\u2318 = value\n", "ISO-8859-1"},
{"ke\\ \\:y = value", "ke\\ \\:y = value\n", "ISO-8859-1"},
// UTF-8 tests
{"key = value", "key = value\n", "UTF-8"},
{"key = value \\\n continued", "key = value continued\n", "UTF-8"},
{"key⌘ = value⌘", "key⌘ = value⌘\n", "UTF-8"},
{"ke\\ \\:y = value", "ke\\ \\:y = value\n", "UTF-8"},
}
// ----------------------------------------------------------------------------
var writeCommentTests = []struct {
input, output, encoding string
}{
// ISO-8859-1 tests
{"key = value", "key = value\n", "ISO-8859-1"},
{"#\nkey = value", "key = value\n", "ISO-8859-1"},
{"#\n#\n#\nkey = value", "key = value\n", "ISO-8859-1"},
{"# comment\nkey = value", "# comment\nkey = value\n", "ISO-8859-1"},
{"\n# comment\nkey = value", "# comment\nkey = value\n", "ISO-8859-1"},
{"# comment\n\nkey = value", "# comment\nkey = value\n", "ISO-8859-1"},
{"# comment1\n# comment2\nkey = value", "# comment1\n# comment2\nkey = value\n", "ISO-8859-1"},
{"#comment1\nkey1 = value1\n#comment2\nkey2 = value2", "# comment1\nkey1 = value1\n\n# comment2\nkey2 = value2\n", "ISO-8859-1"},
// UTF-8 tests
{"key = value", "key = value\n", "UTF-8"},
{"# comment⌘\nkey = value⌘", "# comment⌘\nkey = value⌘\n", "UTF-8"},
{"\n# comment⌘\nkey = value⌘", "# comment⌘\nkey = value⌘\n", "UTF-8"},
{"# comment⌘\n\nkey = value⌘", "# comment⌘\nkey = value⌘\n", "UTF-8"},
{"# comment1⌘\n# comment2⌘\nkey = value⌘", "# comment1⌘\n# comment2⌘\nkey = value⌘\n", "UTF-8"},
{"#comment1⌘\nkey1 = value1⌘\n#comment2⌘\nkey2 = value2⌘", "# comment1⌘\nkey1 = value1⌘\n\n# comment2⌘\nkey2 = value2⌘\n", "UTF-8"},
}
// ----------------------------------------------------------------------------
var boolTests = []struct {
input, key string
def, value bool
}{
// valid values for TRUE
{"key = 1", "key", false, true},
{"key = on", "key", false, true},
{"key = On", "key", false, true},
{"key = ON", "key", false, true},
{"key = true", "key", false, true},
{"key = True", "key", false, true},
{"key = TRUE", "key", false, true},
{"key = yes", "key", false, true},
{"key = Yes", "key", false, true},
{"key = YES", "key", false, true},
// valid values for FALSE (all other)
{"key = 0", "key", true, false},
{"key = off", "key", true, false},
{"key = false", "key", true, false},
{"key = no", "key", true, false},
// non existent key
{"key = true", "key2", false, false},
}
// ----------------------------------------------------------------------------
var durationTests = []struct {
input, key string
def, value time.Duration
}{
// valid values
{"key = 1", "key", 999, 1},
{"key = 0", "key", 999, 0},
{"key = -1", "key", 999, -1},
{"key = 0123", "key", 999, 123},
// invalid values
{"key = 0xff", "key", 999, 999},
{"key = 1.0", "key", 999, 999},
{"key = a", "key", 999, 999},
// non existent key
{"key = 1", "key2", 999, 999},
}
// ----------------------------------------------------------------------------
var parsedDurationTests = []struct {
input, key string
def, value time.Duration
}{
// valid values
{"key = -1ns", "key", 999, -1 * time.Nanosecond},
{"key = 300ms", "key", 999, 300 * time.Millisecond},
{"key = 5s", "key", 999, 5 * time.Second},
{"key = 3h", "key", 999, 3 * time.Hour},
{"key = 2h45m", "key", 999, 2*time.Hour + 45*time.Minute},
// invalid values
{"key = 0xff", "key", 999, 999},
{"key = 1.0", "key", 999, 999},
{"key = a", "key", 999, 999},
{"key = 1", "key", 999, 999},
{"key = 0", "key", 999, 0},
// non existent key
{"key = 1", "key2", 999, 999},
}
// ----------------------------------------------------------------------------
var floatTests = []struct {
input, key string
def, value float64
}{
// valid values
{"key = 1.0", "key", 999, 1.0},
{"key = 0.0", "key", 999, 0.0},
{"key = -1.0", "key", 999, -1.0},
{"key = 1", "key", 999, 1},
{"key = 0", "key", 999, 0},
{"key = -1", "key", 999, -1},
{"key = 0123", "key", 999, 123},
// invalid values
{"key = 0xff", "key", 999, 999},
{"key = a", "key", 999, 999},
// non existent key
{"key = 1", "key2", 999, 999},
}
// ----------------------------------------------------------------------------
var int64Tests = []struct {
input, key string
def, value int64
}{
// valid values
{"key = 1", "key", 999, 1},
{"key = 0", "key", 999, 0},
{"key = -1", "key", 999, -1},
{"key = 0123", "key", 999, 123},
// invalid values
{"key = 0xff", "key", 999, 999},
{"key = 1.0", "key", 999, 999},
{"key = a", "key", 999, 999},
// non existent key
{"key = 1", "key2", 999, 999},
}
// ----------------------------------------------------------------------------
var uint64Tests = []struct {
input, key string
def, value uint64
}{
// valid values
{"key = 1", "key", 999, 1},
{"key = 0", "key", 999, 0},
{"key = 0123", "key", 999, 123},
// invalid values
{"key = -1", "key", 999, 999},
{"key = 0xff", "key", 999, 999},
{"key = 1.0", "key", 999, 999},
{"key = a", "key", 999, 999},
// non existent key
{"key = 1", "key2", 999, 999},
}
// ----------------------------------------------------------------------------
var stringTests = []struct {
input, key string
def, value string
}{
// valid values
{"key = abc", "key", "def", "abc"},
// non existent key
{"key = abc", "key2", "def", "def"},
}
// ----------------------------------------------------------------------------
var keysTests = []struct {
input string
keys []string
}{
{"", []string{}},
{"key = abc", []string{"key"}},
{"key = abc\nkey2=def", []string{"key", "key2"}},
{"key2 = abc\nkey=def", []string{"key2", "key"}},
{"key = abc\nkey=def", []string{"key"}},
}
// ----------------------------------------------------------------------------
var filterTests = []struct {
input string
pattern string
keys []string
err string
}{
{"", "", []string{}, ""},
{"", "abc", []string{}, ""},
{"key=value", "", []string{"key"}, ""},
{"key=value", "key=", []string{}, ""},
{"key=value\nfoo=bar", "", []string{"foo", "key"}, ""},
{"key=value\nfoo=bar", "f", []string{"foo"}, ""},
{"key=value\nfoo=bar", "fo", []string{"foo"}, ""},
{"key=value\nfoo=bar", "foo", []string{"foo"}, ""},
{"key=value\nfoo=bar", "fooo", []string{}, ""},
{"key=value\nkey2=value2\nfoo=bar", "ey", []string{"key", "key2"}, ""},
{"key=value\nkey2=value2\nfoo=bar", "key", []string{"key", "key2"}, ""},
{"key=value\nkey2=value2\nfoo=bar", "^key", []string{"key", "key2"}, ""},
{"key=value\nkey2=value2\nfoo=bar", "^(key|foo)", []string{"foo", "key", "key2"}, ""},
{"key=value\nkey2=value2\nfoo=bar", "[ abc", nil, "error parsing regexp.*"},
}
// ----------------------------------------------------------------------------
var filterPrefixTests = []struct {
input string
prefix string
keys []string
}{
{"", "", []string{}},
{"", "abc", []string{}},
{"key=value", "", []string{"key"}},
{"key=value", "key=", []string{}},
{"key=value\nfoo=bar", "", []string{"foo", "key"}},
{"key=value\nfoo=bar", "f", []string{"foo"}},
{"key=value\nfoo=bar", "fo", []string{"foo"}},
{"key=value\nfoo=bar", "foo", []string{"foo"}},
{"key=value\nfoo=bar", "fooo", []string{}},
{"key=value\nkey2=value2\nfoo=bar", "key", []string{"key", "key2"}},
}
// ----------------------------------------------------------------------------
var filterStripPrefixTests = []struct {
input string
prefix string
keys []string
}{
{"", "", []string{}},
{"", "abc", []string{}},
{"key=value", "", []string{"key"}},
{"key=value", "key=", []string{}},
{"key=value\nfoo=bar", "", []string{"foo", "key"}},
{"key=value\nfoo=bar", "f", []string{"foo"}},
{"key=value\nfoo=bar", "fo", []string{"foo"}},
{"key=value\nfoo=bar", "foo", []string{"foo"}},
{"key=value\nfoo=bar", "fooo", []string{}},
{"key=value\nkey2=value2\nfoo=bar", "key", []string{"key", "key2"}},
}
// ----------------------------------------------------------------------------
var setTests = []struct {
input string
key, value string
prev string
ok bool
err string
keys []string
}{
{"", "", "", "", false, "", []string{}},
{"", "key", "value", "", false, "", []string{"key"}},
{"key=value", "key2", "value2", "", false, "", []string{"key", "key2"}},
{"key=value", "abc", "value3", "", false, "", []string{"key", "abc"}},
{"key=value", "key", "value3", "value", true, "", []string{"key"}},
}
// ----------------------------------------------------------------------------
// TestBasic tests basic single key/value combinations with all possible
// whitespace, delimiter and newline permutations.
func (s *TestSuite) TestBasic(c *C) {
testWhitespaceAndDelimiterCombinations(c, "key", "")
testWhitespaceAndDelimiterCombinations(c, "key", "value")
testWhitespaceAndDelimiterCombinations(c, "key", "value ")
}
func (s *TestSuite) TestComplex(c *C) {
for _, test := range complexTests {
testKeyValue(c, test[0], test[1:]...)
}
}
func (s *TestSuite) TestErrors(c *C) {
for _, test := range errorTests {
_, err := Load([]byte(test.input), ISO_8859_1)
c.Assert(err, NotNil)
c.Assert(strings.Contains(err.Error(), test.msg), Equals, true, Commentf("Expected %q got %q", test.msg, err.Error()))
}
}
func (s *TestSuite) TestDisableExpansion(c *C) {
input := "key=value\nkey2=${key}"
p, err := parse(input)
p.DisableExpansion = true
c.Assert(err, IsNil)
c.Assert(p.MustGet("key"), Equals, "value")
c.Assert(p.MustGet("key2"), Equals, "${key}")
// with expansion disabled we can introduce circular references
p.Set("keyA", "${keyB}")
p.Set("keyB", "${keyA}")
c.Assert(p.MustGet("keyA"), Equals, "${keyB}")
c.Assert(p.MustGet("keyB"), Equals, "${keyA}")
}
func (s *TestSuite) TestMustGet(c *C) {
input := "key = value\nkey2 = ghi"
p, err := parse(input)
c.Assert(err, IsNil)
c.Assert(p.MustGet("key"), Equals, "value")
c.Assert(func() { p.MustGet("invalid") }, PanicMatches, "unknown property: invalid")
}
func (s *TestSuite) TestGetBool(c *C) {
for _, test := range boolTests {
p, err := parse(test.input)
c.Assert(err, IsNil)
c.Assert(p.Len(), Equals, 1)
c.Assert(p.GetBool(test.key, test.def), Equals, test.value)
}
}
func (s *TestSuite) TestMustGetBool(c *C) {
input := "key = true\nkey2 = ghi"
p, err := parse(input)
c.Assert(err, IsNil)
c.Assert(p.MustGetBool("key"), Equals, true)
c.Assert(func() { p.MustGetBool("invalid") }, PanicMatches, "unknown property: invalid")
}
func (s *TestSuite) TestGetDuration(c *C) {
for _, test := range durationTests {
p, err := parse(test.input)
c.Assert(err, IsNil)
c.Assert(p.Len(), Equals, 1)
c.Assert(p.GetDuration(test.key, test.def), Equals, test.value)
}
}
func (s *TestSuite) TestMustGetDuration(c *C) {
input := "key = 123\nkey2 = ghi"
p, err := parse(input)
c.Assert(err, IsNil)
c.Assert(p.MustGetDuration("key"), Equals, time.Duration(123))
c.Assert(func() { p.MustGetDuration("key2") }, PanicMatches, "strconv.ParseInt: parsing.*")
c.Assert(func() { p.MustGetDuration("invalid") }, PanicMatches, "unknown property: invalid")
}
func (s *TestSuite) TestGetParsedDuration(c *C) {
for _, test := range parsedDurationTests {
p, err := parse(test.input)
c.Assert(err, IsNil)
c.Assert(p.Len(), Equals, 1)
c.Assert(p.GetParsedDuration(test.key, test.def), Equals, test.value)
}
}
func (s *TestSuite) TestMustGetParsedDuration(c *C) {
input := "key = 123ms\nkey2 = ghi"
p, err := parse(input)
c.Assert(err, IsNil)
c.Assert(p.MustGetParsedDuration("key"), Equals, 123*time.Millisecond)
c.Assert(func() { p.MustGetParsedDuration("key2") }, PanicMatches, "time: invalid duration ghi")
c.Assert(func() { p.MustGetParsedDuration("invalid") }, PanicMatches, "unknown property: invalid")
}
func (s *TestSuite) TestGetFloat64(c *C) {
for _, test := range floatTests {
p, err := parse(test.input)
c.Assert(err, IsNil)
c.Assert(p.Len(), Equals, 1)
c.Assert(p.GetFloat64(test.key, test.def), Equals, test.value)
}
}
func (s *TestSuite) TestMustGetFloat64(c *C) {
input := "key = 123\nkey2 = ghi"
p, err := parse(input)
c.Assert(err, IsNil)
c.Assert(p.MustGetFloat64("key"), Equals, float64(123))
c.Assert(func() { p.MustGetFloat64("key2") }, PanicMatches, "strconv.ParseFloat: parsing.*")
c.Assert(func() { p.MustGetFloat64("invalid") }, PanicMatches, "unknown property: invalid")
}
func (s *TestSuite) TestGetInt(c *C) {
for _, test := range int64Tests {
p, err := parse(test.input)
c.Assert(err, IsNil)
c.Assert(p.Len(), Equals, 1)
c.Assert(p.GetInt(test.key, int(test.def)), Equals, int(test.value))
}
}
func (s *TestSuite) TestMustGetInt(c *C) {
input := "key = 123\nkey2 = ghi"
p, err := parse(input)
c.Assert(err, IsNil)
c.Assert(p.MustGetInt("key"), Equals, int(123))
c.Assert(func() { p.MustGetInt("key2") }, PanicMatches, "strconv.ParseInt: parsing.*")
c.Assert(func() { p.MustGetInt("invalid") }, PanicMatches, "unknown property: invalid")
}
func (s *TestSuite) TestGetInt64(c *C) {
for _, test := range int64Tests {
p, err := parse(test.input)
c.Assert(err, IsNil)
c.Assert(p.Len(), Equals, 1)
c.Assert(p.GetInt64(test.key, test.def), Equals, test.value)
}
}
func (s *TestSuite) TestMustGetInt64(c *C) {
input := "key = 123\nkey2 = ghi"
p, err := parse(input)
c.Assert(err, IsNil)
c.Assert(p.MustGetInt64("key"), Equals, int64(123))
c.Assert(func() { p.MustGetInt64("key2") }, PanicMatches, "strconv.ParseInt: parsing.*")
c.Assert(func() { p.MustGetInt64("invalid") }, PanicMatches, "unknown property: invalid")
}
func (s *TestSuite) TestGetUint(c *C) {
for _, test := range uint64Tests {
p, err := parse(test.input)
c.Assert(err, IsNil)
c.Assert(p.Len(), Equals, 1)
c.Assert(p.GetUint(test.key, uint(test.def)), Equals, uint(test.value))
}
}
func (s *TestSuite) TestMustGetUint(c *C) {
input := "key = 123\nkey2 = ghi"
p, err := parse(input)
c.Assert(err, IsNil)
c.Assert(p.MustGetUint("key"), Equals, uint(123))
c.Assert(func() { p.MustGetUint64("key2") }, PanicMatches, "strconv.ParseUint: parsing.*")
c.Assert(func() { p.MustGetUint64("invalid") }, PanicMatches, "unknown property: invalid")
}
func (s *TestSuite) TestGetUint64(c *C) {
for _, test := range uint64Tests {
p, err := parse(test.input)
c.Assert(err, IsNil)
c.Assert(p.Len(), Equals, 1)
c.Assert(p.GetUint64(test.key, test.def), Equals, test.value)
}
}
func (s *TestSuite) TestMustGetUint64(c *C) {
input := "key = 123\nkey2 = ghi"
p, err := parse(input)
c.Assert(err, IsNil)
c.Assert(p.MustGetUint64("key"), Equals, uint64(123))
c.Assert(func() { p.MustGetUint64("key2") }, PanicMatches, "strconv.ParseUint: parsing.*")
c.Assert(func() { p.MustGetUint64("invalid") }, PanicMatches, "unknown property: invalid")
}
func (s *TestSuite) TestGetString(c *C) {
for _, test := range stringTests {
p, err := parse(test.input)
c.Assert(err, IsNil)
c.Assert(p.Len(), Equals, 1)
c.Assert(p.GetString(test.key, test.def), Equals, test.value)
}
}
func (s *TestSuite) TestMustGetString(c *C) {
input := `key = value`
p, err := parse(input)
c.Assert(err, IsNil)
c.Assert(p.MustGetString("key"), Equals, "value")
c.Assert(func() { p.MustGetString("invalid") }, PanicMatches, "unknown property: invalid")
}
func (s *TestSuite) TestComment(c *C) {
for _, test := range commentTests {
p, err := parse(test.input)
c.Assert(err, IsNil)
c.Assert(p.MustGetString(test.key), Equals, test.value)
c.Assert(p.GetComments(test.key), DeepEquals, test.comments)
if test.comments != nil {
c.Assert(p.GetComment(test.key), Equals, test.comments[len(test.comments)-1])
} else {
c.Assert(p.GetComment(test.key), Equals, "")
}
// test setting comments
if len(test.comments) > 0 {
// set single comment
p.ClearComments()
c.Assert(len(p.c), Equals, 0)
p.SetComment(test.key, test.comments[0])
c.Assert(p.GetComment(test.key), Equals, test.comments[0])
// set multiple comments
p.ClearComments()
c.Assert(len(p.c), Equals, 0)
p.SetComments(test.key, test.comments)
c.Assert(p.GetComments(test.key), DeepEquals, test.comments)
// clear comments for a key
p.SetComments(test.key, nil)
c.Assert(p.GetComment(test.key), Equals, "")
c.Assert(p.GetComments(test.key), IsNil)
}
}
}
func (s *TestSuite) TestFilter(c *C) {
for _, test := range filterTests {
p, err := parse(test.input)
c.Assert(err, IsNil)
pp, err := p.Filter(test.pattern)
if err != nil {
c.Assert(err, ErrorMatches, test.err)
continue
}
c.Assert(pp, NotNil)
c.Assert(pp.Len(), Equals, len(test.keys))
for _, key := range test.keys {
v1, ok1 := p.Get(key)
v2, ok2 := pp.Get(key)
c.Assert(ok1, Equals, true)
c.Assert(ok2, Equals, true)
c.Assert(v1, Equals, v2)
}
}
}
func (s *TestSuite) TestFilterPrefix(c *C) {
for _, test := range filterPrefixTests {
p, err := parse(test.input)
c.Assert(err, IsNil)
pp := p.FilterPrefix(test.prefix)
c.Assert(pp, NotNil)
c.Assert(pp.Len(), Equals, len(test.keys))
for _, key := range test.keys {
v1, ok1 := p.Get(key)
v2, ok2 := pp.Get(key)
c.Assert(ok1, Equals, true)
c.Assert(ok2, Equals, true)
c.Assert(v1, Equals, v2)
}
}
}
func (s *TestSuite) TestKeys(c *C) {
for _, test := range keysTests {
p, err := parse(test.input)
c.Assert(err, IsNil)
c.Assert(p.Len(), Equals, len(test.keys))
c.Assert(len(p.Keys()), Equals, len(test.keys))
c.Assert(p.Keys(), DeepEquals, test.keys)
}
}
func (s *TestSuite) TestSet(c *C) {
for _, test := range setTests {
p, err := parse(test.input)
c.Assert(err, IsNil)
prev, ok, err := p.Set(test.key, test.value)
if test.err != "" {
c.Assert(err, ErrorMatches, test.err)
continue
}
c.Assert(err, IsNil)
c.Assert(ok, Equals, test.ok)
if ok {
c.Assert(prev, Equals, test.prev)
}
c.Assert(p.Keys(), DeepEquals, test.keys)
}
}
func (s *TestSuite) TestMustSet(c *C) {
input := "key=${key}"
p, err := parse(input)
c.Assert(err, IsNil)
c.Assert(func() { p.MustSet("key", "${key}") }, PanicMatches, "circular reference .*")
}
func (s *TestSuite) TestWrite(c *C) {
for _, test := range writeTests {
p, err := parse(test.input)
buf := new(bytes.Buffer)
var n int
switch test.encoding {
case "UTF-8":
n, err = p.Write(buf, UTF8)
case "ISO-8859-1":
n, err = p.Write(buf, ISO_8859_1)
}
c.Assert(err, IsNil)
s := string(buf.Bytes())
c.Assert(n, Equals, len(test.output), Commentf("input=%q expected=%q obtained=%q", test.input, test.output, s))
c.Assert(s, Equals, test.output, Commentf("input=%q expected=%q obtained=%q", test.input, test.output, s))
}
}
func (s *TestSuite) TestWriteComment(c *C) {
for _, test := range writeCommentTests {
p, err := parse(test.input)
buf := new(bytes.Buffer)
var n int
switch test.encoding {
case "UTF-8":
n, err = p.WriteComment(buf, "# ", UTF8)
case "ISO-8859-1":
n, err = p.WriteComment(buf, "# ", ISO_8859_1)
}
c.Assert(err, IsNil)
s := string(buf.Bytes())
c.Assert(n, Equals, len(test.output), Commentf("input=%q expected=%q obtained=%q", test.input, test.output, s))
c.Assert(s, Equals, test.output, Commentf("input=%q expected=%q obtained=%q", test.input, test.output, s))
}
}
func (s *TestSuite) TestCustomExpansionExpression(c *C) {
testKeyValuePrePostfix(c, "*[", "]*", "key=value\nkey2=*[key]*", "key", "value", "key2", "value")
}
func (s *TestSuite) TestPanicOn32BitIntOverflow(c *C) {
is32Bit = true
var min, max int64 = math.MinInt32 - 1, math.MaxInt32 + 1
input := fmt.Sprintf("min=%d\nmax=%d", min, max)
p, err := parse(input)
c.Assert(err, IsNil)
c.Assert(p.MustGetInt64("min"), Equals, min)
c.Assert(p.MustGetInt64("max"), Equals, max)
c.Assert(func() { p.MustGetInt("min") }, PanicMatches, ".* out of range")
c.Assert(func() { p.MustGetInt("max") }, PanicMatches, ".* out of range")
}
func (s *TestSuite) TestPanicOn32BitUintOverflow(c *C) {
is32Bit = true
var max uint64 = math.MaxUint32 + 1
input := fmt.Sprintf("max=%d", max)
p, err := parse(input)
c.Assert(err, IsNil)
c.Assert(p.MustGetUint64("max"), Equals, max)
c.Assert(func() { p.MustGetUint("max") }, PanicMatches, ".* out of range")
}
func (s *TestSuite) TestDeleteKey(c *C) {
input := "#comments should also be gone\nkey=to-be-deleted\nsecond=key"
p, err := parse(input)
c.Assert(err, IsNil)
c.Check(len(p.m), Equals, 2)
c.Check(len(p.c), Equals, 1)
c.Check(len(p.k), Equals, 2)
p.Delete("key")
c.Check(len(p.m), Equals, 1)
c.Check(len(p.c), Equals, 0)
c.Check(len(p.k), Equals, 1)
}
func (s *TestSuite) TestDeleteUnknownKey(c *C) {
input := "#comments should also be gone\nkey=to-be-deleted"
p, err := parse(input)
c.Assert(err, IsNil)
c.Check(len(p.m), Equals, 1)
c.Check(len(p.c), Equals, 1)
c.Check(len(p.k), Equals, 1)
p.Delete("wrong-key")
c.Check(len(p.m), Equals, 1)
c.Check(len(p.c), Equals, 1)
c.Check(len(p.k), Equals, 1)
}
// ----------------------------------------------------------------------------
// tests all combinations of delimiters, leading and/or trailing whitespace and newlines.
func testWhitespaceAndDelimiterCombinations(c *C, key, value string) {
whitespace := []string{"", " ", "\f", "\t"}
delimiters := []string{"", " ", "=", ":"}
newlines := []string{"", "\r", "\n", "\r\n"}
for _, dl := range delimiters {
for _, ws1 := range whitespace {
for _, ws2 := range whitespace {
for _, nl := range newlines {
// skip the one case where there is nothing between a key and a value
if ws1 == "" && dl == "" && ws2 == "" && value != "" {
continue
}
input := fmt.Sprintf("%s%s%s%s%s%s", key, ws1, dl, ws2, value, nl)
testKeyValue(c, input, key, value)
}
}
}
}
}
// tests whether key/value pairs exist for a given input.
// keyvalues is expected to be an even number of strings of "key", "value", ...
func testKeyValue(c *C, input string, keyvalues ...string) {
testKeyValuePrePostfix(c, "${", "}", input, keyvalues...)
}
// tests whether key/value pairs exist for a given input.
// keyvalues is expected to be an even number of strings of "key", "value", ...
func testKeyValuePrePostfix(c *C, prefix, postfix, input string, keyvalues ...string) {
printf("%q\n", input)
p, err := Load([]byte(input), ISO_8859_1)
c.Assert(err, IsNil)
p.Prefix = prefix
p.Postfix = postfix
assertKeyValues(c, input, p, keyvalues...)
}
// tests whether key/value pairs exist for a given input.
// keyvalues is expected to be an even number of strings of "key", "value", ...
func assertKeyValues(c *C, input string, p *Properties, keyvalues ...string) {
c.Assert(p, NotNil)
c.Assert(2*p.Len(), Equals, len(keyvalues), Commentf("Odd number of key/value pairs."))
for i := 0; i < len(keyvalues); i += 2 {
key, value := keyvalues[i], keyvalues[i+1]
v, ok := p.Get(key)
c.Assert(ok, Equals, true, Commentf("No key %q found (input=%q)", key, input))
c.Assert(v, Equals, value, Commentf("Value %q does not match %q (input=%q)", v, value, input))
}
}
// prints to stderr if the -verbose flag was given.
func printf(format string, args ...interface{}) {
if *verbose {
fmt.Fprintf(os.Stderr, format, args...)
}
}
| ["\"USER\""] | [] | ["USER"] | [] | ["USER"] | go | 1 | 0 |
repo/repository.go | package repo
import (
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/disiqueira/gotree"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/storer"
ssh2 "github.com/go-git/go-git/v5/plumbing/transport/ssh"
giturls "github.com/whilp/git-urls"
"golang.org/x/crypto/ssh"
"github.com/FalcoSuessgott/gitget/fs"
t "github.com/FalcoSuessgott/gitget/tree"
"github.com/FalcoSuessgott/gitget/ui"
)
// Repository represents a repository.
type Repository struct {
URL string
Repo *git.Repository
Path string
Branches []string
Branch string
Files []string
Tree gotree.Tree
}
// IsGitURL returns true if the given URL is a valid git URL.
func IsGitURL(rawURL string) bool {
parsedURL, err := giturls.Parse(rawURL)
if err == nil && parsedURL.IsAbs() && parsedURL.Hostname() != "" {
return true
}
return false
}
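// Illustrative examples (a sketch, not part of the original file; the SSH-style
// result depends on how giturls parses scp-like URLs):
//
//	IsGitURL("https://github.com/FalcoSuessgott/gitget") // true
//	IsGitURL("git@github.com:FalcoSuessgott/gitget.git") // expected true
//	IsGitURL("not a url")                                // false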
func isSSHURL(rawURL string) bool {
url, err := giturls.Parse(rawURL)
return err == nil && (url.Scheme == "git" || url.Scheme == "ssh")
}
// Name returns the namespace (the path after the host) of a git URL.
func Name(repoURL string) string {
u, _ := giturls.Parse(repoURL)
return u.Path[1:]
}
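// Illustrative example (a sketch; assumes giturls keeps the full path, including
// any trailing ".git", for HTTPS URLs):
//
//	Name("https://github.com/FalcoSuessgott/gitget") // "FalcoSuessgott/gitget"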
func getBranches(repo *git.Repository) ([]string, error) {
var branches []string
bs, _ := remoteBranches(repo.Storer)
err := bs.ForEach(func(b *plumbing.Reference) error {
name := strings.Split(b.Name().String(), "/")[3:]
branches = append(branches, strings.Join(name, ""))
return nil
})
if err != nil {
return nil, err
}
return branches, nil
}
func cloneRepo(url string) (*git.Repository, string, error) {
var r *git.Repository
dir, err := ioutil.TempDir("", "tmp-dir")
if err != nil {
return nil, dir, err
}
if isSSHURL(url) {
s := fmt.Sprintf("%s/.ssh/id_rsa", os.Getenv("HOME"))
sshKey, _ := ioutil.ReadFile(s)
signer, _ := ssh.ParsePrivateKey(sshKey)
auth := &ssh2.PublicKeys{User: "git", Signer: signer}
		r, err = git.PlainClone(dir, false, &git.CloneOptions{
URL: url,
Progress: os.Stdout,
Tags: git.NoTags,
Auth: auth,
})
} else {
r, err = git.PlainClone(dir, false, &git.CloneOptions{
URL: url,
Tags: git.NoTags,
Progress: os.Stdout,
})
}
if err != nil {
return nil, dir, err
}
return r, dir, nil
}
func remoteBranches(s storer.ReferenceStorer) (storer.ReferenceIter, error) {
refs, err := s.IterReferences()
if err != nil {
return nil, err
}
return storer.NewReferenceFilteredIter(func(ref *plumbing.Reference) bool {
return ref.Name().IsRemote()
}, refs), nil
}
func checkoutBranch(repo *git.Repository, branch string) error {
w, err := repo.Worktree()
if err != nil {
return err
}
return w.Checkout(&git.CheckoutOptions{
Branch: plumbing.ReferenceName(fmt.Sprintf("refs/heads/%s", branch)),
Force: true,
})
}
// NewRepository returns a new repository struct.
func NewRepository(url string) Repository {
if !IsGitURL(url) {
fmt.Println("Invalid git url. Exiting.")
os.Exit(1)
}
fmt.Printf("Fetching %s\n\n", url)
repo, path, err := cloneRepo(url)
if err != nil {
fmt.Println("Error while cloning. Exiting.")
os.Exit(1)
}
branches, err := getBranches(repo)
if err != nil {
fmt.Println("Error while receiving Branches. Exiting.")
}
branch := ""
if len(branches) == 1 {
fmt.Println("\nChecking out the only branch: " + branches[0])
branch = branches[0]
} else {
branch = ui.PromptList("Choose the branch to be checked out", "master", branches)
}
err = checkoutBranch(repo, branch)
if err != nil {
fmt.Println("Error while checking out branch " + branch + " .Exiting.")
}
files := fs.ListFiles(path)
tree, err := t.BuildDirectoryTree(url, path)
if err != nil {
fmt.Println(err)
}
return Repository{
URL: url,
Branch: branch,
Branches: branches,
Files: files,
Path: path,
Repo: repo,
Tree: tree,
}
}
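// Illustrative usage sketch (not part of the original file; the URL is a placeholder
// and gotree's Print() is assumed to render the collected tree):
//
//	repo := NewRepository("https://github.com/FalcoSuessgott/gitget")
//	fmt.Println("checked out:", repo.Branch)
//	fmt.Println(repo.Tree.Print())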
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 |
ssh.go | package sup
import (
"fmt"
"io"
"io/ioutil"
"log"
"net"
"os"
"os/user"
"path/filepath"
"strings"
"sync"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
)
// SSHClient is a wrapper over the SSH connection/sessions.
type SSHClient struct {
conn *ssh.Client
sess *ssh.Session
user string
host string
remoteStdin io.WriteCloser
remoteStdout io.Reader
remoteStderr io.Reader
connOpened bool
sessOpened bool
running bool
env string //export FOO="bar"; export BAR="baz";
color string
identityFile string
}
type ErrConnect struct {
User string
Host string
Reason string
}
func (e ErrConnect) Error() string {
return fmt.Sprintf(`Connect("%v@%v"): %v`, e.User, e.Host, e.Reason)
}
// parseHost parses and normalizes <user>@<host:port> from a given string.
func (c *SSHClient) parseHost(host string) error {
c.host = host
// Remove extra "ssh://" schema
if len(c.host) > 6 && c.host[:6] == "ssh://" {
c.host = c.host[6:]
}
if at := strings.Index(c.host, "@"); at != -1 {
c.user = c.host[:at]
c.host = c.host[at+1:]
}
// Add default user, if not set
if c.user == "" {
u, err := user.Current()
if err != nil {
return err
}
c.user = u.Username
}
if strings.Index(c.host, "/") != -1 {
return ErrConnect{c.user, c.host, "unexpected slash in the host URL"}
}
// Add default port, if not set
if strings.Index(c.host, ":") == -1 {
c.host += ":22"
}
return nil
}
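// Illustrative examples of the normalization above (a sketch, not part of the
// original file; example.com is a placeholder):
//
//	c := &SSHClient{}
//	_ = c.parseHost("ssh://deploy@example.com") // c.user == "deploy", c.host == "example.com:22"
//	_ = c.parseHost("example.com:2222")         // user defaults to the current OS user, c.host == "example.com:2222"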
var initAuthMethodOnce sync.Once
var authMethod ssh.AuthMethod
// initAuthMethod initiates SSH authentication method.
func (c *SSHClient) initAuthMethod() {
var signers []ssh.Signer
// If there's a running SSH Agent, try to use its Private keys.
sock, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
if err == nil {
agent := agent.NewClient(sock)
signers, _ = agent.Signers()
}
// Try to read user's SSH private keys form the standard paths.
files, _ := filepath.Glob(os.Getenv("HOME") + "/.ssh/id_*")
for _, file := range append(files, c.identityFile) {
log.Println(fmt.Sprintf("Trying key: %v", file))
if strings.HasSuffix(file, ".pub") {
continue // Skip public keys.
}
data, err := ioutil.ReadFile(file)
if err != nil {
log.Println("Ssh key error: ", err)
continue
}
signer, err := ssh.ParsePrivateKey(data)
if err != nil {
log.Println("Ssh parsing key error: ", err)
continue
}
signers = append(signers, signer)
}
authMethod = ssh.PublicKeys(signers...)
}
// SSHDialFunc can dial an ssh server and return a client
type SSHDialFunc func(net, addr string, config *ssh.ClientConfig) (*ssh.Client, error)
// Connect creates an SSH connection to a specified host.
// It expects the host of the form "[ssh://]host[:port]".
func (c *SSHClient) Connect(host string) error {
return c.ConnectWith(host, ssh.Dial)
}
func (c *SSHClient) SetIdentityFile(path string) {
c.identityFile = path
}
// ConnectWith creates an SSH connection to a specified host. It will use dialer to establish the
// connection.
// TODO: Split Signers to its own method.
func (c *SSHClient) ConnectWith(host string, dialer SSHDialFunc) error {
if c.connOpened {
return fmt.Errorf("Already connected")
}
initAuthMethodOnce.Do(c.initAuthMethod)
err := c.parseHost(host)
if err != nil {
return err
}
config := &ssh.ClientConfig{
User: c.user,
Auth: []ssh.AuthMethod{
authMethod,
},
}
c.conn, err = dialer("tcp", c.host, config)
if err != nil {
return ErrConnect{c.user, c.host, err.Error()}
}
c.connOpened = true
return nil
}
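// Illustrative usage sketch (placeholder host; not part of the original file):
//
//	c := &SSHClient{}
//	if err := c.Connect("ssh://deploy@example.com"); err != nil {
//		log.Fatal(err)
//	}
//	defer c.Close()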
// Run runs the task.Run command remotely on c.host.
func (c *SSHClient) Run(task *Task) error {
if c.running {
return fmt.Errorf("Session already running")
}
if c.sessOpened {
return fmt.Errorf("Session already connected")
}
sess, err := c.conn.NewSession()
if err != nil {
return err
}
c.remoteStdin, err = sess.StdinPipe()
if err != nil {
return err
}
c.remoteStdout, err = sess.StdoutPipe()
if err != nil {
return err
}
c.remoteStderr, err = sess.StderrPipe()
if err != nil {
return err
}
if task.TTY {
// Set up terminal modes
modes := ssh.TerminalModes{
ssh.ECHO: 0, // disable echoing
ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud
ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud
}
// Request pseudo terminal
if err := sess.RequestPty("xterm", 80, 40, modes); err != nil {
return ErrTask{task, fmt.Sprintf("request for pseudo terminal failed: %s", err)}
}
}
// Start the remote command.
if err := sess.Start(c.env + task.Run); err != nil {
return ErrTask{task, err.Error()}
}
c.sess = sess
c.sessOpened = true
c.running = true
return nil
}
// Wait waits until the remote command finishes and exits.
// It closes the SSH session.
func (c *SSHClient) Wait() error {
if !c.running {
return fmt.Errorf("Trying to wait on stopped session")
}
err := c.sess.Wait()
c.sess.Close()
c.running = false
c.sessOpened = false
return err
}
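// Illustrative sketch of running a remote command and streaming its output
// (assumes a connected client; Task is defined elsewhere in this package and
// "uptime" is a placeholder command):
//
//	if err := c.Run(&Task{Run: "uptime"}); err != nil {
//		log.Fatal(err)
//	}
//	go io.Copy(os.Stdout, c.Stdout())
//	go io.Copy(os.Stderr, c.Stderr())
//	if err := c.Wait(); err != nil {
//		log.Println("remote command failed:", err)
//	}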
// DialThrough will create a new connection from the ssh server sc is connected to. DialThrough is an SSHDialer.
func (sc *SSHClient) DialThrough(net, addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
conn, err := sc.conn.Dial(net, addr)
if err != nil {
return nil, err
}
c, chans, reqs, err := ssh.NewClientConn(conn, addr, config)
if err != nil {
return nil, err
}
return ssh.NewClient(c, chans, reqs), nil
}
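// Illustrative sketch of tunnelling through a bastion host (placeholder hosts;
// not part of the original file):
//
//	bastion := &SSHClient{}
//	if err := bastion.Connect("ssh://deploy@bastion.example.com"); err != nil {
//		log.Fatal(err)
//	}
//	inner := &SSHClient{}
//	if err := inner.ConnectWith("deploy@10.0.0.5", bastion.DialThrough); err != nil {
//		log.Fatal(err)
//	}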
// Close closes the underlying SSH connection and session.
func (c *SSHClient) Close() error {
if c.sessOpened {
c.sess.Close()
c.sessOpened = false
}
if !c.connOpened {
return fmt.Errorf("Trying to close the already closed connection")
}
err := c.conn.Close()
c.connOpened = false
c.running = false
return err
}
func (c *SSHClient) Stdin() io.WriteCloser {
return c.remoteStdin
}
func (c *SSHClient) Stderr() io.Reader {
return c.remoteStderr
}
func (c *SSHClient) Stdout() io.Reader {
return c.remoteStdout
}
func (c *SSHClient) Prefix() (string, int) {
host := c.user + "@" + c.host + " | "
return c.color + host + ResetColor, len(host)
}
func (c *SSHClient) Write(p []byte) (n int, err error) {
return c.remoteStdin.Write(p)
}
func (c *SSHClient) WriteClose() error {
return c.remoteStdin.Close()
}
func (c *SSHClient) Signal(sig os.Signal) error {
if !c.sessOpened {
return fmt.Errorf("session is not open")
}
switch sig {
case os.Interrupt:
// TODO: Turns out that .Signal(ssh.SIGHUP) doesn't work for me.
// Instead, sending \x03 to the remote session works for me,
// which sounds like something that should be fixed/resolved
// upstream in the golang.org/x/crypto/ssh pkg.
// https://github.com/golang/go/issues/4115#issuecomment-66070418
c.remoteStdin.Write([]byte("\x03"))
return c.sess.Signal(ssh.SIGINT)
default:
return fmt.Errorf("%v not supported", sig)
}
}
| ["\"SSH_AUTH_SOCK\"", "\"HOME\""] | [] | ["HOME", "SSH_AUTH_SOCK"] | [] | ["HOME", "SSH_AUTH_SOCK"] | go | 2 | 0 |
tests/test_e2e.py | """
Requires the following env var: TEST_XLWINGS_LICENSE_KEY
If you run this on a built/installed package, make sure to cd out of the xlwings source
directory, copy the test folder next to the installed xlwings package, then run:
* all tests (this relies on the settings in pytest.ini):
pytest test_e2e.py
* single test:
pytest test_e2e.py::test_name
"""
import os
from pathlib import Path
import shutil
from shlex import split
import subprocess
import pytest
import xlwings as xw
this_dir = Path(__file__).resolve().parent
@pytest.fixture
def app():
with xw.App(visible=False) as app:
yield app
@pytest.fixture
def clear_user_config():
if (Path.home() / '.backup_xlwings').exists():
shutil.rmtree(Path.home() / '.backup_xlwings')
if (Path.home() / '.xlwings').exists():
shutil.copytree(Path.home() / '.xlwings', Path.home() / '.backup_xlwings')
shutil.rmtree(Path.home() / '.xlwings')
yield
if (Path.home() / '.xlwings').exists():
shutil.rmtree(Path.home() / '.xlwings')
if (Path.home() / '.backup_xlwings').exists():
shutil.copytree(Path.home() / '.backup_xlwings', Path.home() / '.xlwings')
@pytest.fixture
def addin(app):
return app.books.open(Path(xw.__path__[0]) / 'addin' / 'xlwings.xlam')
@pytest.fixture
def quickstart_book(app, tmpdir):
os.chdir(tmpdir)
subprocess.run(split('xlwings quickstart testproject'))
return app.books.open(Path(tmpdir) / 'testproject' / 'testproject.xlsm')
def test_config(clear_user_config, app, addin):
get_config = addin.macro('GetConfig')
assert get_config('PYTHONPATH') == ''
# Workbook sheet config
book = app.books.open(this_dir / 'test book.xlsx')
sheet = book.sheets[0]
sheet.name = 'xlwings.conf'
sheet['A1'].value = ['PYTHONPATH', 'workbook sheet']
# Addin sheet config
addin.sheets[0].name = 'xlwings.conf'
addin.sheets[0]['A1'].value = ['PYTHONPATH', 'addin sheet']
# Config file workbook directory
with open(this_dir / 'xlwings.conf', 'w') as config:
config.write('"PYTHONPATH","directory config"')
# Config file user home directory
os.makedirs(Path.home() / '.xlwings', exist_ok=True)
with open((Path.home() / '.xlwings' / 'xlwings.conf'), 'w') as config:
config.write('"PYTHONPATH","user config"')
assert get_config('PYTHONPATH') == 'workbook sheet'
sheet.name = '_xlwings.conf'
assert get_config('PYTHONPATH') == 'addin sheet'
addin.sheets[0].name = '_xlwings.conf'
assert get_config('PYTHONPATH') == 'directory config'
(this_dir / 'xlwings.conf').unlink()
assert get_config('PYTHONPATH') == 'user config'
def test_runpython(addin, quickstart_book):
sample_call = quickstart_book.macro('Module1.SampleCall')
sample_call()
assert quickstart_book.sheets[0]['A1'].value == 'Hello xlwings!'
sample_call()
assert quickstart_book.sheets[0]['A1'].value == 'Bye xlwings!'
def test_runpython_server(addin, quickstart_book):
sample_call = quickstart_book.macro('Module1.SampleCall')
quickstart_book.sheets['_xlwings.conf'].name = 'xlwings.conf'
quickstart_book.sheets['xlwings.conf']['B8'].value = True
sample_call()
assert quickstart_book.sheets[0]['A1'].value == 'Hello xlwings!'
sample_call()
assert quickstart_book.sheets[0]['A1'].value == 'Bye xlwings!'
def test_runpython_embedded_code(clear_user_config, addin, quickstart_book):
os.makedirs(Path.home() / '.xlwings')
with open((Path.home() / '.xlwings' / 'xlwings.conf'), 'w') as config:
config.write(f'"LICENSE_KEY","{os.getenv("TEST_XLWINGS_LICENSE_KEY")}"')
os.chdir(Path(quickstart_book.fullname).parent)
subprocess.run(split('xlwings code embed'))
(Path(quickstart_book.fullname).parent / 'testproject.py').unlink()
sample_call = quickstart_book.macro('Module1.SampleCall')
sample_call()
assert quickstart_book.sheets[0]['A1'].value == 'Hello xlwings!'
sample_call()
assert quickstart_book.sheets[0]['A1'].value == 'Bye xlwings!'
def test_udf(clear_user_config, addin, quickstart_book):
addin.macro('ImportPythonUDFs')()
quickstart_book.sheets[0]['A1'].value = '=hello("test")'
assert quickstart_book.sheets[0]['A1'].value == 'Hello test!'
def test_udf_embedded_code(clear_user_config, addin, quickstart_book):
os.makedirs(Path.home() / '.xlwings')
with open((Path.home() / '.xlwings' / 'xlwings.conf'), 'w') as config:
config.write(f'"LICENSE_KEY","{os.getenv("TEST_XLWINGS_LICENSE_KEY")}"')
os.chdir(Path(quickstart_book.fullname).parent)
subprocess.run(split('xlwings code embed'))
(Path(quickstart_book.fullname).parent / 'testproject.py').unlink()
addin.macro('ImportPythonUDFs')()
quickstart_book.sheets[0]['A1'].value = '=hello("test")'
assert quickstart_book.sheets[0]['A1'].value == 'Hello test!'
(Path.home() / '.xlwings' / 'xlwings.conf').unlink()
quickstart_book.app.api.CalculateFull()
assert 'xlwings.LicenseError: Embedded code requires a valid LICENSE_KEY.' in quickstart_book.sheets[0]['A1'].value
def test_can_use_xlwings_without_license_key(clear_user_config, tmp_path):
import xlwings
os.chdir(tmp_path)
subprocess.run(split('xlwings quickstart testproject'))
def test_can_use_xlwings_with_wrong_license_key(clear_user_config, tmp_path):
os.makedirs(Path.home() / '.xlwings')
with open((Path.home() / '.xlwings' / 'xlwings.conf'), 'w') as config:
config.write(f'"LICENSE_KEY","xxx"')
import xlwings
os.chdir(tmp_path)
subprocess.run(split('xlwings quickstart testproject'))
def test_cant_use_xlwings_pro_without_license_key(clear_user_config):
with pytest.raises(xw.LicenseError):
import xlwings.pro
def test_addin_installation(app):
assert not (Path(app.startup_path) / 'xlwings.xlam').exists()
subprocess.run(split('xlwings addin install'))
assert (Path(app.startup_path) / 'xlwings.xlam').exists()
subprocess.run(split('xlwings addin remove'))
assert not (Path(app.startup_path) / 'xlwings.xlam').exists()
# Custom file
assert not (Path(app.startup_path) / 'test book.xlsx').exists()
os.chdir(this_dir)
subprocess.run(split('xlwings addin install -f "test book.xlsx"'))
assert (Path(app.startup_path) / 'test book.xlsx').exists()
subprocess.run(split('xlwings addin remove -f "test book.xlsx"'))
assert not (Path(app.startup_path) / 'test book.xlsx').exists()
def test_update_license_key(clear_user_config):
subprocess.run(split('xlwings license update -k test_key'))
with open(Path.home() / '.xlwings' / 'xlwings.conf', 'r') as f:
assert f.read() == '"LICENSE_KEY","test_key"\n'
@pytest.mark.skipif(xw.__version__ == 'dev', reason='requires a built package')
def test_standalone(clear_user_config, app, tmp_path):
os.chdir(tmp_path)
subprocess.run(split('xlwings quickstart testproject --standalone'))
standalone_book = app.books.open(tmp_path / 'testproject' / 'testproject.xlsm')
sample_call = standalone_book.macro('Module1.SampleCall')
sample_call()
assert standalone_book.sheets[0]['A1'].value == 'Hello xlwings!'
sample_call()
assert standalone_book.sheets[0]['A1'].value == 'Bye xlwings!'
@pytest.mark.skipif(xw.__version__ == 'dev', reason='requires a built package')
def test_runpython_embedded_code_standalone(app, clear_user_config, tmp_path):
os.chdir(tmp_path)
subprocess.run(split(f'xlwings quickstart testproject --standalone'))
quickstart_book = app.books.open(tmp_path / 'testproject' / 'testproject.xlsm')
os.makedirs(Path.home() / '.xlwings')
with open((Path.home() / '.xlwings' / 'xlwings.conf'), 'w') as config:
config.write(f'"LICENSE_KEY","{os.getenv("TEST_XLWINGS_LICENSE_KEY")}"')
os.chdir(tmp_path / 'testproject')
subprocess.run(split('xlwings code embed'))
(tmp_path / 'testproject' / f'testproject.py').unlink()
sample_call = quickstart_book.macro('Module1.SampleCall')
sample_call()
assert quickstart_book.sheets[0]['A1'].value == 'Hello xlwings!'
sample_call()
assert quickstart_book.sheets[0]['A1'].value == 'Bye xlwings!'
@pytest.mark.skipif(xw.__version__ == 'dev', reason='requires a built package')
def test_udf_embedded_code_standalone(clear_user_config, app, tmp_path):
os.chdir(tmp_path)
subprocess.run(split(f'xlwings quickstart testproject --standalone'))
quickstart_book = app.books.open(tmp_path / 'testproject' / 'testproject.xlsm')
os.makedirs(Path.home() / '.xlwings')
with open((Path.home() / '.xlwings' / 'xlwings.conf'), 'w') as config:
config.write(f'"LICENSE_KEY","{os.getenv("TEST_XLWINGS_LICENSE_KEY")}"')
os.chdir(tmp_path / 'testproject')
subprocess.run(split('xlwings code embed'))
(tmp_path / 'testproject' / f'testproject.py').unlink()
quickstart_book.macro('ImportPythonUDFs')()
quickstart_book.sheets[0]['A1'].value = '=hello("test")'
assert quickstart_book.sheets[0]['A1'].value == 'Hello test!'
(Path.home() / '.xlwings' / 'xlwings.conf').unlink()
quickstart_book.app.api.CalculateFull()
assert 'xlwings.LicenseError: Embedded code requires a valid LICENSE_KEY.' in quickstart_book.sheets[0]['A1'].value
| []
| []
| [
"TEST_XLWINGS_LICENSE_KEY"
]
| [] | ["TEST_XLWINGS_LICENSE_KEY"] | python | 1 | 0 | |
test/functional/test_runner.py | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
# 20 minutes represented in seconds
TRAVIS_TIMEOUT_DURATION = 20 * 60
BASE_SCRIPTS = [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet_hd.py',
'wallet_backup.py',
# vv Tests less than 5m vv
'feature_block.py',
'rpc_fundrawtransaction.py',
'p2p_compactblocks.py',
'feature_segwit.py',
# vv Tests less than 2m vv
'wallet_basic.py',
'wallet_labels.py',
'p2p_segwit.py',
'wallet_dump.py',
'wallet_listtransactions.py',
# vv Tests less than 60s vv
'p2p_sendheaders.py',
'wallet_zapwallettxes.py',
'wallet_importmulti.py',
'mempool_limit.py',
'rpc_txoutproof.py',
'wallet_listreceivedby.py',
'wallet_abandonconflict.py',
'feature_csv_activation.py',
'rpc_rawtransaction.py',
'wallet_address_types.py',
'feature_reindex.py',
# vv Tests less than 30s vv
'wallet_keypool_topup.py',
'interface_zmq.py',
'interface_bitcoin_cli.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'wallet_txn_clone.py',
'wallet_txn_clone.py --segwit',
'rpc_getchaintips.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'wallet_multiwallet.py',
'wallet_multiwallet.py --usecli',
'interface_http.py',
'rpc_users.py',
'feature_proxy.py',
'rpc_signrawtransaction.py',
'p2p_disconnect_ban.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
'rpc_net.py',
'wallet_keypool.py',
'p2p_mempool.py',
'mining_prioritisetransaction.py',
'p2p_invalid_block.py',
'p2p_invalid_tx.py',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py',
'rpc_signmessage.py',
'feature_nulldummy.py',
'mempool_accept.py',
'wallet_import_rescan.py',
'rpc_bind.py --ipv4',
'rpc_bind.py --ipv6',
'rpc_bind.py --nonloopback',
'mining_basic.py',
'wallet_bumpfee.py',
'rpc_named_arguments.py',
'wallet_listsinceblock.py',
'p2p_leak.py',
'wallet_encryption.py',
'feature_dersig.py',
'feature_cltv.py',
'rpc_uptime.py',
'wallet_resendwallettransactions.py',
'wallet_fallbackfee.py',
'feature_minchainwork.py',
'p2p_fingerprint.py',
'feature_uacomment.py',
'p2p_unrequested_blocks.py',
'feature_logging.py',
'p2p_node_network_limited.py',
'feature_blocksdir.py',
'feature_config_args.py',
'feature_help.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_pruning.py',
# vv Tests less than 20m vv
'feature_fee_estimation.py',
# vv Tests less than 5m vv
'feature_maxuploadtarget.py',
'mempool_packages.py',
'feature_dbcrash.py',
# vv Tests less than 2m vv
'feature_bip68_sequence.py',
'mining_getblocktemplate_longpoll.py',
'p2p_timeouts.py',
# vv Tests less than 60s vv
'p2p_feefilter.py',
# vv Tests less than 30s vv
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
'wallet_txn_clone.py --mineblock',
'feature_notifications.py',
'rpc_invalidateblock.py',
'feature_rbf.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/bitcoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print("No functional tests to run. Wallet, utils, and bitcoind must all be enabled")
print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
test_list = []
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
tests = [re.sub(r"\.py$", "", test) + ".py" for test in tests]
for test in tests:
if test in ALL_SCRIPTS:
test_list.append(test)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
elif args.extended:
# Include extended tests
test_list += ALL_SCRIPTS
else:
# Run base tests only
test_list += BASE_SCRIPTS
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
exclude_tests = [re.sub(r"\.py$", "", test) + ".py" for test in args.exclude.split(',')]
for exclude_test in exclude_tests:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
sys.exit(0)
check_script_list(config["environment"]["SRCDIR"])
check_script_prefixes()
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(
test_list,
config["environment"]["SRCDIR"],
config["environment"]["BUILDDIR"],
tmpdir,
jobs=args.jobs,
enable_coverage=args.coverage,
args=passon_args,
combined_logs_len=args.combinedlogslen,
failfast=args.failfast,
)
def run_tests(test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False):
args = args or []
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "bitcoind"]) is not None:
print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
tests_dir = src_dir + '/test/functional/'
flags = ["--srcdir={}/src".format(build_dir)] + args
flags.append("--cachedir=%s" % cache_dir)
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
try:
subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
#Run Tests
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
start_time = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
for _ in range(len(test_list)):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
if test_result.status == "Passed":
logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
else:
print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs, _ = subprocess.Popen([sys.executable, os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
if failfast:
logging.debug("Early exiting after test failure")
break
print_results(test_results, max_len_name, (int(time.time() - start_time)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
# This will be a no-op unless failfast is True in which case there may be dangling
# processes which need to be killed.
job_queue.kill_and_join()
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=TestResult.sort_key)
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
if not all_passed:
results += RED[1]
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
if not all_passed:
results += RED[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
test = self.test_list.pop(0)
portseed = len(self.test_list) + self.portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = test.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((test,
time.time(),
subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for job in self.jobs:
(name, start_time, proc, testdir, log_out, log_err) = job
if os.getenv('TRAVIS') == 'true' and int(time.time() - start_time) > TRAVIS_TIMEOUT_DURATION:
# In travis, timeout individual tests (to stop tests hanging and not providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(job)
return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
print('.', end='', flush=True)
def kill_and_join(self):
"""Send SIGKILL to all jobs and block until all have ended."""
procs = [i[2] for i in self.jobs]
for proc in procs:
proc.kill()
for proc in procs:
proc.wait()
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def sort_key(self):
if self.status == "Passed":
return 0, self.name.lower()
elif self.status == "Failed":
return 2, self.name.lower()
elif self.status == "Skipped":
return 1, self.name.lower()
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_prefixes():
"""Check that test scripts start with one of the allowed name prefixes."""
good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if bad_script_names:
print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(bad_script_names)))
print(" %s" % ("\n ".join(sorted(bad_script_names))))
raise AssertionError("Some tests are not following naming convention!")
def check_script_list(src_dir):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
not being run by test_runner.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if os.getenv('TRAVIS') == 'true':
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % command) for command in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as coverage_ref_file:
all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as coverage_file:
covered_cmds.update([line.strip() for line in coverage_file.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
| []
| []
| [
"TRAVIS"
]
| [] | ["TRAVIS"] | python | 1 | 0 | |
soracom/generated/cmd/port_mappings_get.go | // Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"fmt"
"net/url"
"os"
"github.com/spf13/cobra"
)
// PortMappingsGetCmdImsi holds value of 'imsi' option
var PortMappingsGetCmdImsi string
func init() {
PortMappingsGetCmd.Flags().StringVar(&PortMappingsGetCmdImsi, "imsi", "", TRAPI("Target subscriber IMSI."))
PortMappingsCmd.AddCommand(PortMappingsGetCmd)
}
// PortMappingsGetCmd defines 'get' subcommand
var PortMappingsGetCmd = &cobra.Command{
Use: "get",
Short: TRAPI("/port_mappings/subscribers/{imsi}:get:summary"),
Long: TRAPI(`/port_mappings/subscribers/{imsi}:get:description`),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
return fmt.Errorf("unexpected arguments passed => %v", args)
}
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
err := authHelper(ac, cmd, args)
if err != nil {
cmd.SilenceUsage = true
return err
}
param, err := collectPortMappingsGetCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
return prettyPrintStringAsJSON(body)
}
return err
},
}
func collectPortMappingsGetCmdParams(ac *apiClient) (*apiParams, error) {
var parsedBody interface{}
var err error
err = checkIfRequiredStringParameterIsSupplied("imsi", "imsi", "path", parsedBody, PortMappingsGetCmdImsi)
if err != nil {
return nil, err
}
return &apiParams{
method: "GET",
path: buildPathForPortMappingsGetCmd("/port_mappings/subscribers/{imsi}"),
query: buildQueryForPortMappingsGetCmd(),
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForPortMappingsGetCmd(path string) string {
escapedImsi := url.PathEscape(PortMappingsGetCmdImsi)
path = strReplace(path, "{"+"imsi"+"}", escapedImsi, -1)
return path
}
func buildQueryForPortMappingsGetCmd() url.Values {
result := url.Values{}
return result
}
| [
"\"SORACOM_VERBOSE\""
]
| []
| [
"SORACOM_VERBOSE"
]
| [] | ["SORACOM_VERBOSE"] | go | 1 | 0 | |
Data/Juliet-Java/Juliet-Java-v103/000/142/789/CWE789_Uncontrolled_Mem_Alloc__Environment_ArrayList_11.java | /* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE789_Uncontrolled_Mem_Alloc__Environment_ArrayList_11.java
Label Definition File: CWE789_Uncontrolled_Mem_Alloc.int.label.xml
Template File: sources-sink-11.tmpl.java
*/
/*
* @description
* CWE: 789 Uncontrolled Memory Allocation
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded non-zero, non-min, non-max, even number
* BadSink: ArrayList Create an ArrayList using data as the initial size
* Flow Variant: 11 Control flow: if(IO.staticReturnsTrue()) and if(IO.staticReturnsFalse())
*
* */
import java.util.logging.Level;
import java.util.ArrayList;
public class CWE789_Uncontrolled_Mem_Alloc__Environment_ArrayList_11 extends AbstractTestCase
{
/* uses badsource and badsink */
public void bad() throws Throwable
{
int data;
if (IO.staticReturnsTrue())
{
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
/* POTENTIAL FLAW: Create an ArrayList using data as the initial size. data may be very large, creating memory issues */
ArrayList intArrayList = new ArrayList(data);
}
/* goodG2B1() - use goodsource and badsink by changing IO.staticReturnsTrue() to IO.staticReturnsFalse() */
private void goodG2B1() throws Throwable
{
int data;
if (IO.staticReturnsFalse())
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
else
{
/* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
data = 2;
}
/* POTENTIAL FLAW: Create an ArrayList using data as the initial size. data may be very large, creating memory issues */
ArrayList intArrayList = new ArrayList(data);
}
/* goodG2B2() - use goodsource and badsink by reversing statements in if */
private void goodG2B2() throws Throwable
{
int data;
if (IO.staticReturnsTrue())
{
/* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
data = 2;
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
/* POTENTIAL FLAW: Create an ArrayList using data as the initial size. data may be very large, creating memory issues */
ArrayList intArrayList = new ArrayList(data);
}
public void good() throws Throwable
{
goodG2B1();
goodG2B2();
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
| [
"\"ADD\""
]
| []
| [
"ADD"
]
| [] | ["ADD"] | java | 1 | 0 | |
pkg/test/testutil/testutil.go | // Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package testutil contains utility functions for runsc tests.
package testutil
import (
"bufio"
"context"
"debug/elf"
"encoding/base32"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"math/rand"
"net/http"
"os"
"os/exec"
"os/signal"
"path"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
"github.com/cenkalti/backoff"
specs "github.com/opencontainers/runtime-spec/specs-go"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/sentry/watchdog"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/specutils"
)
var (
checkpoint = flag.Bool("checkpoint", true, "control checkpoint/restore support")
partition = flag.Int("partition", 1, "partition number, this is 1-indexed")
totalPartitions = flag.Int("total_partitions", 1, "total number of partitions")
isRunningWithHostNet = flag.Bool("hostnet", false, "whether test is running with hostnet")
runscPath = flag.String("runsc", "", "path to runsc binary")
)
// IsCheckpointSupported returns the relevant command line flag.
func IsCheckpointSupported() bool {
return *checkpoint
}
// IsRunningWithHostNet returns the relevant command line flag.
func IsRunningWithHostNet() bool {
return *isRunningWithHostNet
}
// ImageByName mangles the image name used locally. This depends on the image
// build infrastructure in images/ and tools/vm.
func ImageByName(name string) string {
return fmt.Sprintf("gvisor.dev/images/%s", name)
}
// ConfigureExePath configures the executable for runsc in the test environment.
func ConfigureExePath() error {
if *runscPath == "" {
path, err := FindFile("runsc/runsc")
if err != nil {
return err
}
*runscPath = path
}
specutils.ExePath = *runscPath
return nil
}
// TmpDir returns the absolute path to a writable directory that can be used as
// scratch by the test.
func TmpDir() string {
if dir, ok := os.LookupEnv("TEST_TMPDIR"); ok {
return dir
}
return "/tmp"
}
// Logger is a simple logging wrapper.
//
// This is designed to be implemented by *testing.T.
type Logger interface {
Name() string
Logf(fmt string, args ...interface{})
}
// DefaultLogger logs using the log package.
type DefaultLogger string
// Name implements Logger.Name.
func (d DefaultLogger) Name() string {
return string(d)
}
// Logf implements Logger.Logf.
func (d DefaultLogger) Logf(fmt string, args ...interface{}) {
log.Printf(fmt, args...)
}
// multiLogger logs to multiple Loggers.
type multiLogger []Logger
// Name implements Logger.Name.
func (m multiLogger) Name() string {
names := make([]string, len(m))
for i, l := range m {
names[i] = l.Name()
}
return strings.Join(names, "+")
}
// Logf implements Logger.Logf.
func (m multiLogger) Logf(fmt string, args ...interface{}) {
for _, l := range m {
l.Logf(fmt, args...)
}
}
// NewMultiLogger returns a new Logger that logs on multiple Loggers.
func NewMultiLogger(loggers ...Logger) Logger {
return multiLogger(loggers)
}
// Cmd is a simple wrapper.
type Cmd struct {
logger Logger
*exec.Cmd
}
// CombinedOutput returns the output and logs.
func (c *Cmd) CombinedOutput() ([]byte, error) {
out, err := c.Cmd.CombinedOutput()
if len(out) > 0 {
c.logger.Logf("output: %s", string(out))
}
if err != nil {
c.logger.Logf("error: %v", err)
}
return out, err
}
// Command is a simple wrapper around exec.Command, that logs.
func Command(logger Logger, args ...string) *Cmd {
logger.Logf("command: %s", strings.Join(args, " "))
return &Cmd{
logger: logger,
Cmd: exec.Command(args[0], args[1:]...),
}
}
// TestConfig returns the default configuration to use in tests. Note that
// 'RootDir' must be set by caller if required.
func TestConfig(t *testing.T) *config.Config {
logDir := os.TempDir()
if dir, ok := os.LookupEnv("TEST_UNDECLARED_OUTPUTS_DIR"); ok {
logDir = dir + "/"
}
// Only register flags if config is being used. Otherwise anyone that uses
// testutil will get flags registered and they may conflict.
config.RegisterFlags()
conf, err := config.NewFromFlags()
if err != nil {
panic(err)
}
// Change test defaults.
conf.Debug = true
conf.DebugLog = path.Join(logDir, "runsc.log."+t.Name()+".%TIMESTAMP%.%COMMAND%")
conf.LogPackets = true
conf.Network = config.NetworkNone
conf.Strace = true
conf.TestOnlyAllowRunAsCurrentUserWithoutChroot = true
conf.WatchdogAction = watchdog.Panic
return conf
}
// NewSpecWithArgs creates a simple spec with the given args suitable for use
// in tests.
func NewSpecWithArgs(args ...string) *specs.Spec {
return &specs.Spec{
// The host filesystem root is the container root.
Root: &specs.Root{
Path: "/",
Readonly: true,
},
Process: &specs.Process{
Args: args,
Env: []string{
"PATH=" + os.Getenv("PATH"),
},
Capabilities: specutils.AllCapabilities(),
},
Mounts: []specs.Mount{
// Hide the host /etc to avoid any side-effects.
// For example, bash reads /etc/passwd and if it is
// very big, tests can fail by timeout.
{
Type: "tmpfs",
Destination: "/etc",
},
// Root is readonly, but many tests want to write to tmpdir.
// This creates a writable mount inside the root. Also, when tmpdir points
// to "/tmp", it makes the the actual /tmp to be mounted and not a tmpfs
// inside the sentry.
{
Type: "bind",
Destination: TmpDir(),
Source: TmpDir(),
},
},
Hostname: "runsc-test-hostname",
}
}
// SetupRootDir creates a root directory for containers.
func SetupRootDir() (string, func(), error) {
rootDir, err := ioutil.TempDir(TmpDir(), "containers")
if err != nil {
return "", nil, fmt.Errorf("error creating root dir: %v", err)
}
return rootDir, func() { os.RemoveAll(rootDir) }, nil
}
// SetupContainer creates a bundle and root dir for the container, generates a
// test config, and writes the spec to config.json in the bundle dir.
func SetupContainer(spec *specs.Spec, conf *config.Config) (rootDir, bundleDir string, cleanup func(), err error) {
rootDir, rootCleanup, err := SetupRootDir()
if err != nil {
return "", "", nil, err
}
conf.RootDir = rootDir
bundleDir, bundleCleanup, err := SetupBundleDir(spec)
if err != nil {
rootCleanup()
return "", "", nil, err
}
return rootDir, bundleDir, func() {
bundleCleanup()
rootCleanup()
}, err
}
// SetupBundleDir creates a bundle dir and writes the spec to config.json.
func SetupBundleDir(spec *specs.Spec) (string, func(), error) {
bundleDir, err := ioutil.TempDir(TmpDir(), "bundle")
if err != nil {
return "", nil, fmt.Errorf("error creating bundle dir: %v", err)
}
cleanup := func() { os.RemoveAll(bundleDir) }
if err := writeSpec(bundleDir, spec); err != nil {
cleanup()
return "", nil, fmt.Errorf("error writing spec: %v", err)
}
return bundleDir, cleanup, nil
}
// writeSpec writes the spec to disk in the given directory.
func writeSpec(dir string, spec *specs.Spec) error {
b, err := json.Marshal(spec)
if err != nil {
return err
}
return ioutil.WriteFile(filepath.Join(dir, "config.json"), b, 0755)
}
// idRandomSrc is a pseudo random generator used to in RandomID.
var idRandomSrc = rand.New(rand.NewSource(time.Now().UnixNano()))
// idRandomSrcMtx is the mutex protecting idRandomSrc.Read from being used
// concurrently in different goroutines.
var idRandomSrcMtx sync.Mutex
// RandomID returns 20 random bytes following the given prefix.
func RandomID(prefix string) string {
// Read 20 random bytes.
b := make([]byte, 20)
// Rand.Read is not safe for concurrent use. Packetimpact tests can be run in
// parallel now, so we have to protect the Read with a mutex. Otherwise we'll
// run into name conflicts.
// https://golang.org/pkg/math/rand/#Rand.Read
idRandomSrcMtx.Lock()
// "[Read] always returns len(p) and a nil error." --godoc
if _, err := idRandomSrc.Read(b); err != nil {
idRandomSrcMtx.Unlock()
panic("rand.Read failed: " + err.Error())
}
idRandomSrcMtx.Unlock()
if prefix != "" {
prefix = prefix + "-"
}
return fmt.Sprintf("%s%s", prefix, base32.StdEncoding.EncodeToString(b))
}
// RandomContainerID generates a random container id for each test.
//
// The container id is used to create an abstract unix domain socket, which
// must be unique. While the container forbids creating two containers with the
// same name, sometimes between test runs the socket does not get cleaned up
// quickly enough, causing container creation to fail.
func RandomContainerID() string {
return RandomID("test-container")
}
// Copy copies file from src to dst.
func Copy(src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
st, err := in.Stat()
if err != nil {
return err
}
out, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, st.Mode().Perm())
if err != nil {
return err
}
defer out.Close()
// Mirror the local user's permissions across all users. This is
// because as we inject things into the container, the UID/GID will
// change. Also, the build system may generate artifacts with different
// modes. At the top-level (volume mapping) we have a big read-only
// knob that can be applied to prevent modifications.
//
// Note that this must be done via a separate Chmod call, otherwise the
// current process's umask will get in the way.
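	// Illustrative examples of the mirroring: a 0640 source ends up 0666,
	// and a 0750 source ends up 0777.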
var mode os.FileMode
if st.Mode()&0100 != 0 {
mode |= 0111
}
if st.Mode()&0200 != 0 {
mode |= 0222
}
if st.Mode()&0400 != 0 {
mode |= 0444
}
if err := os.Chmod(dst, mode); err != nil {
return err
}
_, err = io.Copy(out, in)
return err
}
// Poll is a shorthand function to poll for something with given timeout.
func Poll(cb func() error, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
return PollContext(ctx, cb)
}
// PollContext is like Poll, but takes a context instead of a timeout.
func PollContext(ctx context.Context, cb func() error) error {
b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)
return backoff.Retry(cb, b)
}
// WaitForHTTP tries GET requests on a port until the call succeeds or timeout.
func WaitForHTTP(ip string, port int, timeout time.Duration) error {
cb := func() error {
c := &http.Client{
// Calculate timeout to be able to do minimum 5 attempts.
Timeout: timeout / 5,
}
url := fmt.Sprintf("http://%s:%d/", ip, port)
resp, err := c.Get(url)
if err != nil {
log.Printf("Waiting %s: %v", url, err)
return err
}
resp.Body.Close()
return nil
}
return Poll(cb, timeout)
}
// Reaper reaps child processes.
type Reaper struct {
// mu protects ch, which will be nil if the reaper is not running.
mu sync.Mutex
ch chan os.Signal
}
// Start starts reaping child processes.
func (r *Reaper) Start() {
r.mu.Lock()
defer r.mu.Unlock()
if r.ch != nil {
panic("reaper.Start called on a running reaper")
}
r.ch = make(chan os.Signal, 1)
signal.Notify(r.ch, unix.SIGCHLD)
go func() {
for {
r.mu.Lock()
ch := r.ch
r.mu.Unlock()
if ch == nil {
return
}
_, ok := <-ch
if !ok {
// Channel closed.
return
}
for {
cpid, _ := unix.Wait4(-1, nil, unix.WNOHANG, nil)
if cpid < 1 {
break
}
}
}
}()
}
// Stop stops reaping child processes.
func (r *Reaper) Stop() {
r.mu.Lock()
defer r.mu.Unlock()
if r.ch == nil {
panic("reaper.Stop called on a stopped reaper")
}
signal.Stop(r.ch)
close(r.ch)
r.ch = nil
}
// StartReaper is a helper that starts a new Reaper and returns a function to
// stop it.
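// Typical usage (illustrative): defer StartReaper()()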
func StartReaper() func() {
r := &Reaper{}
r.Start()
return r.Stop
}
// WaitUntilRead reads from the given reader until the wanted string is found
// or until timeout.
func WaitUntilRead(r io.Reader, want string, timeout time.Duration) error {
sc := bufio.NewScanner(r)
// doneCh signals completion: the scanner goroutine sends true when the wanted
// string is found and false when the reader is exhausted; the deferred close
// tells the goroutine to stop once this function returns.
doneCh := make(chan bool)
defer close(doneCh)
go func() {
for sc.Scan() {
t := sc.Text()
if strings.Contains(t, want) {
doneCh <- true
return
}
select {
case <-doneCh:
return
default:
}
}
doneCh <- false
}()
select {
case <-time.After(timeout):
return fmt.Errorf("timeout waiting to read %q", want)
case res := <-doneCh:
if !res {
return fmt.Errorf("reader closed while waiting to read %q", want)
}
return nil
}
}
// KillCommand kills the process running cmd unless it hasn't been started. It
// returns an error if it cannot kill the process unless the reason is that the
// process has already exited.
//
// KillCommand will also reap the process.
func KillCommand(cmd *exec.Cmd) error {
if cmd.Process == nil {
return nil
}
if err := cmd.Process.Kill(); err != nil {
if !strings.Contains(err.Error(), "process already finished") {
return fmt.Errorf("failed to kill process %v: %v", cmd, err)
}
}
return cmd.Wait()
}
// WriteTmpFile writes text to a temporary file, closes the file, and returns
// the name of the file. A cleanup function is also returned.
func WriteTmpFile(pattern, text string) (string, func(), error) {
file, err := ioutil.TempFile(TmpDir(), pattern)
if err != nil {
return "", nil, err
}
defer file.Close()
if _, err := file.Write([]byte(text)); err != nil {
return "", nil, err
}
return file.Name(), func() { os.RemoveAll(file.Name()) }, nil
}
// IsStatic returns true iff the given file is a static binary.
func IsStatic(filename string) (bool, error) {
f, err := elf.Open(filename)
if err != nil {
return false, err
}
for _, prog := range f.Progs {
if prog.Type == elf.PT_INTERP {
return false, nil // Has interpreter.
}
}
return true, nil
}
// TouchShardStatusFile indicates to Bazel that the test runner supports
// sharding by creating or updating the last modified date of the file
// specified by TEST_SHARD_STATUS_FILE.
//
// See https://docs.bazel.build/versions/master/test-encyclopedia.html#role-of-the-test-runner.
func TouchShardStatusFile() error {
if statusFile, ok := os.LookupEnv("TEST_SHARD_STATUS_FILE"); ok {
cmd := exec.Command("touch", statusFile)
if b, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("touch %q failed:\n output: %s\n error: %s", statusFile, string(b), err.Error())
}
}
return nil
}
// TestIndicesForShard returns indices for this test shard based on the
// TEST_SHARD_INDEX and TEST_TOTAL_SHARDS environment vars, as well as
// the passed partition flags.
//
// If either of the env vars are not present, then the function will return all
// tests. If there are more shards than there are tests, then the returned list
// may be empty.
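//
// Illustrative example: with TEST_SHARD_INDEX=1, TEST_TOTAL_SHARDS=2 and the
// default partition flags, TestIndicesForShard(5) returns [1 3].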
func TestIndicesForShard(numTests int) ([]int, error) {
var (
shardIndex = 0
shardTotal = 1
)
indexStr, indexOk := os.LookupEnv("TEST_SHARD_INDEX")
totalStr, totalOk := os.LookupEnv("TEST_TOTAL_SHARDS")
if indexOk && totalOk {
// Parse index and total to ints.
var err error
shardIndex, err = strconv.Atoi(indexStr)
if err != nil {
return nil, fmt.Errorf("invalid TEST_SHARD_INDEX %q: %v", indexStr, err)
}
shardTotal, err = strconv.Atoi(totalStr)
if err != nil {
return nil, fmt.Errorf("invalid TEST_TOTAL_SHARDS %q: %v", totalStr, err)
}
}
// Combine with the partitions.
partitionSize := shardTotal
shardTotal = (*totalPartitions) * shardTotal
shardIndex = partitionSize*(*partition-1) + shardIndex
// Calculate!
var indices []int
numBlocks := int(math.Ceil(float64(numTests) / float64(shardTotal)))
for i := 0; i < numBlocks; i++ {
pick := i*shardTotal + shardIndex
if pick < numTests {
indices = append(indices, pick)
}
}
return indices, nil
}
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
pkg/config/config_test.go | package config
import (
"encoding/json"
"os"
"testing"
"github.com/openware/kaigara/pkg/vault"
"github.com/openware/kaigara/types"
"github.com/stretchr/testify/assert"
)
var scopes []string = []string{"secret"}
var vaultAddr string = os.Getenv("KAIGARA_VAULT_ADDR")
var vaultToken string = os.Getenv("KAIGARA_VAULT_TOKEN")
var deploymentID string = "kaigara_test"
var secretStore types.SecretStore = vault.NewService(vaultAddr, vaultToken, deploymentID)
func TestBuildCmdEnvFromSecretStore(t *testing.T) {
appName := "test1"
appNames := []string{"test1"}
env := []string{
"ANYTHING=must_be_kept",
"KAIGARA_ANYTHING=must_be_ignored",
}
err := secretStore.LoadSecrets(appName, scopes[0])
assert.NoError(t, err)
err = secretStore.SetSecret(appName, "key_"+scopes[0], "value_"+scopes[0], scopes[0])
assert.NoError(t, err)
err = secretStore.SaveSecrets(appName, scopes[0])
assert.NoError(t, err)
err = secretStore.LoadSecrets("global", "secret")
assert.NoError(t, err)
err = secretStore.SetSecret("global", "key_global", "value_global", scopes[0])
assert.NoError(t, err)
err = secretStore.SaveSecrets("global", scopes[0])
assert.NoError(t, err)
r := BuildCmdEnv(appNames, secretStore, env, scopes)
assert.Equal(t, map[string]*File{}, r.Files)
assert.ElementsMatch(t, []string{
"ANYTHING=must_be_kept",
"KEY_SECRET=value_secret",
"KEY_GLOBAL=value_global",
}, r.Vars)
}
func TestLoadNumberAndBool(t *testing.T) {
appName := "test2"
appNames := []string{"test2"}
scopes = []string{"public"}
env := []string{}
err := secretStore.LoadSecrets(appName, scopes[0])
assert.NoError(t, err)
err = secretStore.SetSecret(appName, "key_number", json.Number("1337"), scopes[0])
assert.NoError(t, err)
err = secretStore.SetSecret(appName, "key_bool", true, scopes[0])
assert.NoError(t, err)
err = secretStore.SaveSecrets(appName, scopes[0])
assert.NoError(t, err)
r := BuildCmdEnv(appNames, secretStore, env, scopes)
assert.Equal(t, map[string]*File{}, r.Files)
assert.ElementsMatch(t, []string{
"KEY_NUMBER=1337",
"KEY_BOOL=true",
}, r.Vars)
}
func TestBuildCmdEnvFileUpperCase(t *testing.T) {
appName := "test3"
appNames := []string{"test3"}
err := secretStore.LoadSecrets(appName, scopes[0])
assert.NoError(t, err)
err = secretStore.SetSecret(appName, "ANYTHING", "must_be_set", scopes[0])
assert.NoError(t, err)
err = secretStore.SetSecret(appName, "KFILE_NAME_PATH", "config/config.json", scopes[0])
assert.NoError(t, err)
err = secretStore.SetSecret(appName, "KFILE_NAME_CONTENT", `{"app":"example"}`, scopes[0])
assert.NoError(t, err)
err = secretStore.SaveSecrets(appName, scopes[0])
assert.NoError(t, err)
env := []string{}
assert.Equal(t, &Env{
Vars: []string{
"ANYTHING=must_be_set",
},
Files: map[string]*File{
"NAME": {
Path: "config/config.json",
Content: `{"app":"example"}`,
},
},
}, BuildCmdEnv(appNames, secretStore, env, scopes))
}
func TestBuildCmdEnvFileLowerCase(t *testing.T) {
appName := "test4"
appNames := []string{"test4"}
err := secretStore.LoadSecrets(appName, scopes[0])
assert.NoError(t, err)
err = secretStore.SetSecret(appName, "anything", "must_be_set", scopes[0])
assert.NoError(t, err)
err = secretStore.SetSecret(appName, "kfile_name_path", "config/config.json", scopes[0])
assert.NoError(t, err)
err = secretStore.SetSecret(appName, "kfile_name_content", `{"app":"example"}`, scopes[0])
assert.NoError(t, err)
err = secretStore.SaveSecrets(appName, scopes[0])
assert.NoError(t, err)
env := []string{}
assert.Equal(t, &Env{
Vars: []string{
"ANYTHING=must_be_set",
},
Files: map[string]*File{
"NAME": {
Path: "config/config.json",
Content: `{"app":"example"}`,
},
},
}, BuildCmdEnv(appNames, secretStore, env, scopes))
}
func TestBuildCmdEnvSeveralAppNames(t *testing.T) {
appNameFirst := "test5"
appNameSecond := "test6"
appNames := []string{"test5", "test6"}
err := secretStore.LoadSecrets(appNameFirst, scopes[0])
assert.NoError(t, err)
err = secretStore.SetSecret(appNameFirst, "anything_5", "must_be_set", scopes[0])
assert.NoError(t, err)
err = secretStore.SaveSecrets(appNameFirst, scopes[0])
assert.NoError(t, err)
err = secretStore.LoadSecrets(appNameSecond, scopes[0])
assert.NoError(t, err)
err = secretStore.SetSecret(appNameSecond, "anything_6", "must_be_set", scopes[0])
assert.NoError(t, err)
err = secretStore.SaveSecrets(appNameSecond, scopes[0])
assert.NoError(t, err)
env := []string{}
assert.Equal(t, &Env{
Vars: []string{
"ANYTHING_5=must_be_set",
"ANYTHING_6=must_be_set",
},
Files: map[string]*File{},
}, BuildCmdEnv(appNames, secretStore, env, scopes))
}
| [
"\"KAIGARA_VAULT_ADDR\"",
"\"KAIGARA_VAULT_TOKEN\""
]
| []
| [
"KAIGARA_VAULT_ADDR",
"KAIGARA_VAULT_TOKEN"
]
| [] | ["KAIGARA_VAULT_ADDR", "KAIGARA_VAULT_TOKEN"] | go | 2 | 0 | |
dynaml/eval.go | package dynaml
func func_eval(arguments []interface{}, binding Binding, locally bool) (interface{}, EvaluationInfo, bool) {
info := DefaultInfo()
if len(arguments) != 1 {
return info.Error("one argument required for 'eval'")
}
str, ok := arguments[0].(string)
if !ok {
return info.Error("string argument required for 'eval'")
}
expr, err := Parse(str, binding.Path(), binding.StubPath())
if err != nil {
return info.Error("cannot parse expression '%s'", str)
}
return expr.Evaluate(binding, locally)
}
| []
| []
| []
| [] | [] | go | null | null | null |
qa/pull-tester/rpc-tests.py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
sys.path.append("qa/pull-tester/")
from tests_config import *
BOLD = ("","")
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/'
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passon string
opts = set()
passon_args = []
PASSON_REGEX = re.compile("^--")
PARALLEL_REGEX = re.compile('^-parallel=')
print_help = False
run_parallel = 4
for arg in sys.argv[1:]:
if arg == "--help" or arg == "-h" or arg == "-?":
print_help = True
break
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif PASSON_REGEX.match(arg):
passon_args.append(arg)
elif PARALLEL_REGEX.match(arg):
run_parallel = int(arg.split(sep='=', maxsplit=1)[1])
else:
opts.add(arg)
#Set env vars
if "BITCOIND" not in os.environ:
os.environ["BITCOIND"] = BUILDDIR + '/src/bitcoind' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Win tests currently disabled by default. Use -win option to enable")
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled")
sys.exit(0)
# python3-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or "
"to run zmq tests, see dependency info in /qa/README.md.")
# ENABLE_ZMQ=0
raise
testScripts = [
# longest test should go first, to favor running tests in parallel
'wallet-hd.py',
'walletbackup.py',
# vv Tests less than 5m vv
'p2p-fullblocktest.py',
'fundrawtransaction.py',
#'p2p-compactblocks.py',
# 'segwit.py',
# vv Tests less than 2m vv
'auxpow.py',
'getauxblock.py',
'wallet.py',
'wallet-accounts.py',
# 'p2p-segwit.py',
'wallet-dump.py',
'listtransactions.py',
# vv Tests less than 60s vv
# 'sendheaders.py',
'zapwallettxes.py',
'importmulti.py',
'mempool_limit.py',
'merkle_blocks.py',
'receivedby.py',
'abandonconflict.py',
# 'bip68-112-113-p2p.py',
'rawtransactions.py',
'reindex.py',
# vv Tests less than 30s vv
'mempool_resurrect_test.py',
#'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'httpbasics.py',
'multi_rpc.py',
'proxy_test.py',
'signrawtransactions.py',
'nodehandling.py',
'decodescript.py',
'blockchain.py',
'disablewallet.py',
'keypool.py',
'p2p-mempool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
# 'invalidtxrequest.py',
# 'p2p-versionbits-warning.py',
'preciousblock.py',
'importprunedfunds.py',
'signmessages.py',
# 'nulldummy.py',
'import-rescan.py',
# While fee bumping should work in Doge, these tests depend on free transactions, which we don't support.
# Disable until we can do a full rewrite of the tests (possibly upstream), or revise fee schedule, or something
# 'bumpfee.py',
'rpcnamedargs.py',
'listsinceblock.py',
'p2p-leaktests.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
'pruning.py',
# vv Tests less than 20m vv
'smartfees.py',
# vv Tests less than 5m vv
'maxuploadtarget.py',
'mempool_packages.py',
# vv Tests less than 2m vv
# 'bip68-sequence.py',
'getblocktemplate_longpoll.py',
'p2p-timeouts.py',
# vv Tests less than 60s vv
# 'bip9-softforks.py',
'p2p-feefilter.py',
'rpcbind_test.py',
# vv Tests less than 30s vv
'bip65-cltv.py',
# 'bip65-cltv-p2p.py',
# 'bipdersig-p2p.py',
'bipdersig.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
'maxblocksinflight.py',
'p2p-acceptblock.py',
'replace-by-fee.py',
]
def runtests():
test_list = []
if '-extended' in opts:
test_list = testScripts + testScriptsExt
elif len(opts) == 0 or (len(opts) == 1 and "-win" in opts):
test_list = testScripts
else:
for t in testScripts + testScriptsExt:
if t in opts or re.sub(".py$", "", t) in opts:
test_list.append(t)
if print_help:
# Only print help of the first script and exit
subprocess.check_call((RPC_TESTS_DIR + test_list[0]).split() + ['-h'])
sys.exit(0)
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
flags = ["--srcdir=%s/src" % BUILDDIR] + passon_args
flags.append("--cachedir=%s/qa/cache" % BUILDDIR)
if coverage:
flags.append(coverage.flag)
if len(test_list) > 1 and run_parallel > 1:
# Populate cache
subprocess.check_output([RPC_TESTS_DIR + 'create_cache.py'] + flags)
#Run Tests
max_len_name = len(max(test_list, key=len))
time_sum = 0
time0 = time.time()
job_queue = RPCTestHandler(run_parallel, test_list, flags)
results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
all_passed = True
for _ in range(len(test_list)):
(name, stdout, stderr, passed, duration) = job_queue.get_next()
all_passed = all_passed and passed
time_sum += duration
print('\n' + BOLD[1] + name + BOLD[0] + ":")
print('' if passed else stdout + '\n', end='')
print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='')
results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
print(results)
print("\nRuntime: %s s" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
sys.exit(not all_passed)
class RPCTestHandler:
"""
    Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
self.jobs.append((t,
time.time(),
subprocess.Popen(['python3.6']+(RPC_TESTS_DIR + t).split() + self.flags + port_seed,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, log_out, log_err) = j
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
passed = stderr == "" and proc.returncode == 0
self.num_running -= 1
self.jobs.remove(j)
return name, stdout, stderr, passed, int(time.time() - time0)
print('.', end='', flush=True)
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test-framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
| []
| []
| [
"BITCOIND"
]
| [] | ["BITCOIND"] | python | 1 | 0 | |
libcst/tool.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Usage:
#
# python -m libcst.tool --help
# python -m libcst.tool print python_file.py
import argparse
import dataclasses
import distutils.spawn
import importlib
import inspect
import os
import os.path
import sys
import textwrap
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Sequence, Tuple, Type
import yaml
from libcst import (
LIBCST_VERSION,
CSTNode,
IndentedBlock,
Module,
PartialParserConfig,
parse_module,
)
from libcst._nodes.deep_equals import deep_equals
from libcst.codemod import (
CodemodCommand,
CodemodContext,
diff_code,
exec_transform_with_prettyprint,
gather_files,
parallel_exec_transform_with_prettyprint,
)
_DEFAULT_INDENT: str = " "
def _node_repr_recursive( # noqa: C901
node: object,
*,
indent: str = _DEFAULT_INDENT,
show_defaults: bool = False,
show_syntax: bool = False,
show_whitespace: bool = False,
) -> List[str]:
if isinstance(node, CSTNode):
# This is a CSTNode, we must pretty-print it.
tokens: List[str] = [node.__class__.__name__]
fields: Sequence["dataclasses.Field[object]"] = dataclasses.fields(node)
# Hide all fields prefixed with "_"
fields = [f for f in fields if f.name[0] != "_"]
# Filter whitespace nodes if needed
if not show_whitespace:
def _is_whitespace(field: "dataclasses.Field[object]") -> bool:
if "whitespace" in field.name:
return True
if "leading_lines" in field.name:
return True
if "lines_after_decorators" in field.name:
return True
if isinstance(node, (IndentedBlock, Module)) and field.name in [
"header",
"footer",
]:
return True
if isinstance(node, IndentedBlock) and field.name == "indent":
return True
return False
fields = [f for f in fields if not _is_whitespace(f)]
# Filter values which aren't changed from their defaults
if not show_defaults:
def _get_default(fld: "dataclasses.Field[object]") -> object:
if fld.default_factory is not dataclasses.MISSING:
return fld.default_factory()
return fld.default
fields = [
f
for f in fields
if not deep_equals(getattr(node, f.name), _get_default(f))
]
# Filter out values which aren't interesting if needed
if not show_syntax:
def _is_syntax(field: "dataclasses.Field[object]") -> bool:
if isinstance(node, Module) and field.name in [
"encoding",
"default_indent",
"default_newline",
"has_trailing_newline",
]:
return True
type_str = repr(field.type)
if (
"Sentinel" in type_str
and field.name not in ["star_arg", "star", "posonly_ind"]
and "whitespace" not in field.name
):
                # This is a value that can optionally be specified, so it's
                # definitely syntax.
return True
for name in ["Semicolon", "Colon", "Comma", "Dot", "AssignEqual"]:
# These are all nodes that exist for separation syntax
if name in type_str:
return True
return False
fields = [f for f in fields if not _is_syntax(f)]
if len(fields) == 0:
tokens.append("()")
else:
tokens.append("(\n")
for field in fields:
child_tokens: List[str] = [field.name, "="]
value = getattr(node, field.name)
if isinstance(value, (str, bytes)) or not isinstance(value, Sequence):
# Render out the node contents
child_tokens.extend(
_node_repr_recursive(
value,
show_whitespace=show_whitespace,
show_defaults=show_defaults,
show_syntax=show_syntax,
)
)
elif isinstance(value, Sequence):
# Render out a list of individual nodes
if len(value) > 0:
child_tokens.append("[\n")
list_tokens: List[str] = []
last_value = len(value) - 1
for j, v in enumerate(value):
list_tokens.extend(
_node_repr_recursive(
v,
show_whitespace=show_whitespace,
show_defaults=show_defaults,
show_syntax=show_syntax,
)
)
if j != last_value:
list_tokens.append(",\n")
else:
list_tokens.append(",")
split_by_line = "".join(list_tokens).split("\n")
child_tokens.append(
"\n".join(f"{indent}{t}" for t in split_by_line)
)
child_tokens.append("\n]")
else:
child_tokens.append("[]")
else:
raise Exception("Logic error!")
# Handle indentation and trailing comma.
split_by_line = "".join(child_tokens).split("\n")
tokens.append("\n".join(f"{indent}{t}" for t in split_by_line))
tokens.append(",\n")
tokens.append(")")
return tokens
else:
# This is a python value, just return the repr
return [repr(node)]
def dump(
node: CSTNode,
*,
indent: str = _DEFAULT_INDENT,
show_defaults: bool = False,
show_syntax: bool = False,
show_whitespace: bool = False,
) -> str:
"""
Returns a string representation of the node that contains minimal differences
    from the default construction of the node while also hiding whitespace and
    syntax fields.
    Setting ``show_defaults`` to ``True`` will add fields regardless of whether
    their value differs from the default value.
    Setting ``show_whitespace`` will add whitespace fields and setting
    ``show_syntax`` will add syntax fields while respecting the value of
    ``show_defaults``.
    When all keyword args are set to true, the output of this function is
    identical to the __repr__ method of the node.
"""
return "".join(
_node_repr_recursive(
node,
indent=indent,
show_defaults=show_defaults,
show_syntax=show_syntax,
show_whitespace=show_whitespace,
)
)
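# Illustrative usage of dump() (output abridged; exact rendering depends on the node):
#   import libcst as cst
#   print(dump(cst.parse_expression("x + 1")))
#   # BinaryOperation(
#   #     left=Name(
#   #         value='x',
#   #     ),
#   #     ...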
def _print_tree_impl(proc_name: str, command_args: List[str]) -> int:
parser = argparse.ArgumentParser(
description="Print the LibCST tree representation of a file.",
prog=f"{proc_name} print",
fromfile_prefix_chars="@",
)
parser.add_argument(
"infile",
metavar="INFILE",
help='File to print tree for. Use "-" for stdin',
type=str,
)
parser.add_argument(
"--show-whitespace",
action="store_true",
help="Show whitespace nodes in printed tree",
)
parser.add_argument(
"--show-defaults",
action="store_true",
help="Show values that are unchanged from the default",
)
parser.add_argument(
"--show-syntax",
action="store_true",
help="Show values that exist only for syntax, like commas or semicolons",
)
parser.add_argument(
"-p",
"--python-version",
metavar="VERSION",
help=(
"Override the version string used for parsing Python source files. Defaults "
+ "to the version of python used to run this tool."
),
type=str,
default=None,
)
args = parser.parse_args(command_args)
infile = args.infile
# Grab input file
if infile == "-":
code = sys.stdin.read()
else:
with open(infile, "rb") as fp:
code = fp.read()
tree = parse_module(
code,
config=(
PartialParserConfig(python_version=args.python_version)
if args.python_version is not None
else PartialParserConfig()
),
)
print(
dump(
tree,
show_defaults=args.show_defaults,
show_syntax=args.show_syntax,
show_whitespace=args.show_whitespace,
)
)
return 0
def _default_config() -> Dict[str, Any]:
return {
"generated_code_marker": f"@gen{''}erated",
"formatter": ["black", "-"],
"blacklist_patterns": [],
"modules": ["libcst.codemod.commands"],
"repo_root": ".",
}
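# A .libcst.codemod.yaml equivalent to the defaults above would look roughly like
# (the generated_code_marker line is omitted here; see _initialize_impl for how it
# is serialized):
#   formatter: ['black', '-']
#   blacklist_patterns: []
#   modules: ['libcst.codemod.commands']
#   repo_root: '.'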
CONFIG_FILE_NAME = ".libcst.codemod.yaml"
def _find_and_load_config(proc_name: str) -> Dict[str, Any]:
# Initialize with some sane defaults.
config = _default_config()
# Walk up the filesystem looking for a config file.
current_dir = os.path.abspath(os.getcwd())
previous_dir = None
found_config = False
while current_dir != previous_dir:
# See if the config file exists
config_file = os.path.join(current_dir, CONFIG_FILE_NAME)
if os.path.isfile(config_file):
# Load it, override defaults with what is in the config.
with open(config_file, "r") as fp:
possible_config = yaml.safe_load(fp.read())
            # Let's be careful with all user input so we don't crash.
if isinstance(possible_config, dict):
# Grab the generated code marker.
for str_setting in ["generated_code_marker"]:
if str_setting in possible_config and isinstance(
possible_config[str_setting], str
):
config[str_setting] = possible_config[str_setting]
# Grab the formatter, blacklisted patterns and module directories.
for list_setting in ["formatter", "blacklist_patterns", "modules"]:
if (
list_setting in possible_config
and isinstance(possible_config[list_setting], list)
and all(
isinstance(s, str) for s in possible_config[list_setting]
)
):
config[list_setting] = possible_config[list_setting]
# Grab the repo root config.
for path_setting in ["repo_root"]:
if path_setting in possible_config and isinstance(
possible_config[path_setting], str
):
config[path_setting] = os.path.abspath(
os.path.join(current_dir, possible_config[path_setting]),
)
# We successfully located a file, stop traversing.
found_config = True
break
# Try the parent directory.
previous_dir = current_dir
current_dir = os.path.abspath(os.path.join(current_dir, os.pardir))
requires_config = bool(os.environ.get("LIBCST_TOOL_REQUIRE_CONFIG", ""))
if requires_config and not found_config:
raise Exception(
f"Did not find a {CONFIG_FILE_NAME} in current directory or any "
+ "parent directory! Perhaps you meant to run this command from a "
+ "configured subdirectory, or you need to initialize a new project "
+ f'using "{proc_name} initialize"?'
)
# Make sure that the formatter is findable.
if config["formatter"]:
exe = (
distutils.spawn.find_executable(config["formatter"][0])
or config["formatter"][0]
)
config["formatter"] = [os.path.abspath(exe), *config["formatter"][1:]]
return config
def _codemod_impl(proc_name: str, command_args: List[str]) -> int: # noqa: C901
    # Grab the configuration for running this, if it exists.
config = _find_and_load_config(proc_name)
# First, try to grab the command with a first pass. We aren't going to react
# to user input here, so refuse to add help. Help will be parsed in the
# full parser below once we know the command and have added its arguments.
parser = argparse.ArgumentParser(add_help=False, fromfile_prefix_chars="@")
parser.add_argument("command", metavar="COMMAND", type=str, nargs="?", default=None)
args, _ = parser.parse_known_args(command_args)
# Now, try to load the class and get its arguments for help purposes.
if args.command is not None:
command_path = args.command.split(".")
if len(command_path) < 2:
print(f"{args.command} is not a valid codemod command", file=sys.stderr)
return 1
command_module_name, command_class_name = (
".".join(command_path[:-1]),
command_path[-1],
)
command_class = None
for module in config["modules"]:
try:
command_class = getattr(
importlib.import_module(f"{module}.{command_module_name}"),
command_class_name,
)
break
# Only swallow known import errors, show the rest of the exceptions
# to the user who is trying to run the codemod.
except AttributeError:
continue
except ModuleNotFoundError:
continue
if command_class is None:
print(
f"Could not find {command_module_name} in any configured modules",
file=sys.stderr,
)
return 1
else:
# Dummy, specifically to allow for running --help with no arguments.
command_class = CodemodCommand
# Now, construct the full parser, parse the args and run the class.
parser = argparse.ArgumentParser(
description=(
"Execute a codemod against a series of files."
if command_class is CodemodCommand
else command_class.DESCRIPTION
),
prog=f"{proc_name} codemod",
fromfile_prefix_chars="@",
)
parser.add_argument(
"command",
metavar="COMMAND",
type=str,
help=(
"The name of the file (minus the path and extension) and class joined with "
+ "a '.' that defines your command (e.g. strip_strings_from_types.StripStringsCommand)"
),
)
parser.add_argument(
"path",
metavar="PATH",
nargs="+",
help=(
"Path to codemod. Can be a directory, file, or multiple of either. To "
+ 'instead read from stdin and write to stdout, use "-"'
),
)
parser.add_argument(
"-j",
"--jobs",
metavar="JOBS",
help="Number of jobs to use when processing files. Defaults to number of cores",
type=int,
default=None,
)
parser.add_argument(
"-p",
"--python-version",
metavar="VERSION",
help=(
"Override the version string used for parsing Python source files. Defaults "
+ "to the version of python used to run this tool."
),
type=str,
default=None,
)
parser.add_argument(
"-u",
"--unified-diff",
metavar="CONTEXT",
help="Output unified diff instead of contents. Implies outputting to stdout",
type=int,
nargs="?",
default=None,
const=5,
)
parser.add_argument(
"--include-generated", action="store_true", help="Codemod generated files."
)
parser.add_argument(
"--include-stubs", action="store_true", help="Codemod typing stub files."
)
parser.add_argument(
"--no-format",
action="store_true",
help="Don't format resulting codemod with configured formatter.",
)
parser.add_argument(
"--show-successes",
action="store_true",
help="Print files successfully codemodded with no warnings.",
)
parser.add_argument(
"--hide-generated-warnings",
action="store_true",
help="Do not print files that are skipped for being autogenerated.",
)
parser.add_argument(
"--hide-blacklisted-warnings",
action="store_true",
help="Do not print files that are skipped for being blacklisted.",
)
parser.add_argument(
"--hide-progress",
action="store_true",
help="Do not print progress indicator. Useful if calling from a script.",
)
command_class.add_args(parser)
args = parser.parse_args(command_args)
codemod_args = {
k: v
for k, v in vars(args).items()
if k
not in [
"command",
"path",
"unified_diff",
"jobs",
"python_version",
"include_generated",
"include_stubs",
"no_format",
"show_successes",
"hide_generated_warnings",
"hide_blacklisted_warnings",
"hide_progress",
]
}
command_instance = command_class(CodemodContext(), **codemod_args)
# Special case for allowing stdin/stdout. Note that this does not allow for
# full-repo metadata since there is no path.
if any(p == "-" for p in args.path):
if len(args.path) > 1:
raise Exception("Cannot specify multiple paths when reading from stdin!")
print("Codemodding from stdin", file=sys.stderr)
oldcode = sys.stdin.read()
newcode = exec_transform_with_prettyprint(
command_instance,
oldcode,
include_generated=args.include_generated,
generated_code_marker=config["generated_code_marker"],
format_code=not args.no_format,
formatter_args=config["formatter"],
python_version=args.python_version,
)
if not newcode:
print("Failed to codemod from stdin", file=sys.stderr)
return 1
# Now, either print or diff the code
if args.unified_diff:
print(diff_code(oldcode, newcode, args.unified_diff, filename="stdin"))
else:
print(newcode)
return 0
# Let's run it!
files = gather_files(args.path, include_stubs=args.include_stubs)
try:
result = parallel_exec_transform_with_prettyprint(
command_instance,
files,
jobs=args.jobs,
unified_diff=args.unified_diff,
include_generated=args.include_generated,
generated_code_marker=config["generated_code_marker"],
format_code=not args.no_format,
formatter_args=config["formatter"],
show_successes=args.show_successes,
hide_generated=args.hide_generated_warnings,
hide_blacklisted=args.hide_blacklisted_warnings,
hide_progress=args.hide_progress,
blacklist_patterns=config["blacklist_patterns"],
python_version=args.python_version,
repo_root=config["repo_root"],
)
except KeyboardInterrupt:
print("Interrupted!", file=sys.stderr)
return 2
# Print a fancy summary at the end.
print(
f"Finished codemodding {result.successes + result.skips + result.failures} files!",
file=sys.stderr,
)
print(f" - Transformed {result.successes} files successfully.", file=sys.stderr)
print(f" - Skipped {result.skips} files.", file=sys.stderr)
print(f" - Failed to codemod {result.failures} files.", file=sys.stderr)
print(f" - {result.warnings} warnings were generated.", file=sys.stderr)
return 1 if result.failures > 0 else 0
class _SerializerBase(ABC):
def __init__(self, comment: str) -> None:
self.comment = comment
def serialize(self, key: str, value: object) -> str:
comments = os.linesep.join(
f"# {comment}" for comment in textwrap.wrap(self.comment)
)
return f"{comments}{os.linesep}{self._serialize_impl(key, value)}{os.linesep}"
@abstractmethod
def _serialize_impl(self, key: str, value: object) -> str:
...
class _StrSerializer(_SerializerBase):
def _serialize_impl(self, key: str, value: object) -> str:
return f"{key}: {value!r}"
class _ListSerializer(_SerializerBase):
def __init__(self, comment: str, *, newlines: bool = False) -> None:
super().__init__(comment)
self.newlines = newlines
def _serialize_impl(self, key: str, value: object) -> str:
if not isinstance(value, list):
raise Exception("Can only serialize lists!")
if self.newlines:
values = [f"- {v!r}" for v in value]
return f"{key}:{os.linesep}{os.linesep.join(values)}"
else:
values = [repr(v) for v in value]
return f"{key}: [{', '.join(values)}]"
def _initialize_impl(proc_name: str, command_args: List[str]) -> int:
# Now, construct the full parser, parse the args and run the class.
parser = argparse.ArgumentParser(
description="Initialize a directory by writing a default LibCST config to it.",
prog=f"{proc_name} initialize",
fromfile_prefix_chars="@",
)
parser.add_argument(
"path",
metavar="PATH",
type=str,
help="Path to initialize with a default LibCST codemod configuration",
)
args = parser.parse_args(command_args)
# Get default configuration file, write it to the YAML file we
# recognize as our config.
default_config = _default_config()
# We serialize for ourselves here, since PyYAML doesn't allow
# us to control comments in the default file.
serializers: Dict[str, _SerializerBase] = {
"generated_code_marker": _StrSerializer(
"String that LibCST should look for in code which indicates "
+ "that the module is generated code."
),
"formatter": _ListSerializer(
"Command line and arguments for invoking a code formatter. "
+ "Anything specified here must be capable of taking code via "
+ "stdin and returning formatted code via stdout."
),
"blacklist_patterns": _ListSerializer(
"List of regex patterns which LibCST will evaluate against "
+ "filenames to determine if the module should be touched."
),
"modules": _ListSerializer(
"List of modules that contain codemods inside of them.", newlines=True
),
"repo_root": _StrSerializer(
"Absolute or relative path of the repository root, used for "
+ "providing full-repo metadata. Relative paths should be "
+ "specified with this file location as the base."
),
}
config_str = "".join(
serializers[key].serialize(key, val) for key, val in default_config.items()
)
# For safety, verify that it parses to the identical file.
actual_config = yaml.safe_load(config_str)
if actual_config != default_config:
raise Exception("Logic error, serialization is invalid!")
config_file = os.path.abspath(os.path.join(args.path, CONFIG_FILE_NAME))
with open(config_file, "w") as fp:
fp.write(config_str)
print(f"Successfully wrote default config file to {config_file}")
return 0
def _recursive_find(base_dir: str, base_module: str) -> List[Tuple[str, object]]:
"""
Given a base directory and a base module, recursively walk the directory looking
for importable python modules, returning them and their relative module name
based off of the base_module.
"""
modules: List[Tuple[str, object]] = []
for path in os.listdir(base_dir):
full_path = os.path.join(base_dir, path)
if os.path.isdir(full_path):
# Recursively add files in subdirectories.
additions = _recursive_find(full_path, f"{base_module}.{path}")
for module_name, module_object in additions:
modules.append((f"{path}.{module_name}", module_object))
continue
if not os.path.isfile(full_path) or not path.endswith(".py"):
continue
try:
module_name = path[:-3]
potential_codemod = importlib.import_module(f"{base_module}.{module_name}")
modules.append((module_name, potential_codemod))
except Exception:
# Unlike running a codemod, listing shouldn't crash with exceptions.
continue
return modules
def _list_impl(proc_name: str, command_args: List[str]) -> int: # noqa: C901
# Grab the configuration so we can determine which modules to list from
config = _find_and_load_config(proc_name)
parser = argparse.ArgumentParser(
description="List all codemods available to run.",
prog=f"{proc_name} list",
fromfile_prefix_chars="@",
)
_ = parser.parse_args(command_args)
# Now, import each of the modules to determine their paths.
codemods: Dict[Type[CodemodCommand], str] = {}
for module in config["modules"]:
try:
imported_module = importlib.import_module(module)
except Exception:
# Unlike running a codemod, listing shouldn't crash with exceptions.
imported_module = None
if not imported_module:
print(
f"Could not import {module}, cannot list codemods inside it",
file=sys.stderr,
)
continue
# Grab the path, try to import all of the files inside of it.
path = os.path.dirname(os.path.abspath(imported_module.__file__))
for name, imported_module in _recursive_find(path, module):
for objname in dir(imported_module):
try:
obj = getattr(imported_module, objname)
if not issubclass(obj, CodemodCommand):
continue
if inspect.isabstract(obj):
continue
# isabstract is broken for direct subclasses of ABC which
                    # don't themselves define any abstract methods, so let's
# check for that here.
if any(cls[0] is ABC for cls in inspect.getclasstree([obj])):
continue
# Deduplicate any codemods that were referenced in other
# codemods. Always take the shortest name.
fullname = f"{name}.{obj.__name__}"
if obj in codemods:
if len(fullname) < len(codemods[obj]):
codemods[obj] = fullname
else:
codemods[obj] = fullname
except TypeError:
continue
printable_codemods: List[str] = [
f"{name} - {obj.DESCRIPTION}" for obj, name in codemods.items()
]
print("\n".join(sorted(printable_codemods)))
return 0
def main(proc_name: str, cli_args: List[str]) -> int:
# Hack to allow "--help" to print out generic help, but also allow subcommands
# to customize their parsing and help messages.
first_arg = cli_args[0] if cli_args else "--help"
add_help = first_arg in {"--help", "-h"}
# Create general parser to determine which command we are invoking.
parser: argparse.ArgumentParser = argparse.ArgumentParser(
description="Collection of utilities that ship with LibCST.",
add_help=add_help,
prog=proc_name,
fromfile_prefix_chars="@",
)
parser.add_argument(
"--version",
help="Print current version of LibCST toolset.",
action="version",
version=f"LibCST version {LIBCST_VERSION}", # pyre-ignore[16] pyre bug?
)
parser.add_argument(
"action",
help="Action to take. Valid options include: print, codemod, list, initialize.",
choices=["print", "codemod", "list", "initialize"],
)
args, command_args = parser.parse_known_args(cli_args)
# Create a dummy command in case the user manages to get into
# this state.
def _invalid_command(proc_name: str, command_args: List[str]) -> int:
print("Please specify a command!\n", file=sys.stderr)
parser.print_help(sys.stderr)
return 1
# Look up the command and delegate parsing/running.
lookup: Dict[str, Callable[[str, List[str]], int]] = {
"print": _print_tree_impl,
"codemod": _codemod_impl,
"initialize": _initialize_impl,
"list": _list_impl,
}
return lookup.get(args.action or None, _invalid_command)(proc_name, command_args)
if __name__ == "__main__":
sys.exit(
main(os.environ.get("LIBCST_TOOL_COMMAND_NAME", "libcst.tool"), sys.argv[1:])
)
| []
| []
| [
"LIBCST_TOOL_REQUIRE_CONFIG",
"LIBCST_TOOL_COMMAND_NAME"
]
| [] | ["LIBCST_TOOL_REQUIRE_CONFIG", "LIBCST_TOOL_COMMAND_NAME"] | python | 2 | 0 | |
pkg/agent/main.go | /*
SPDX-License-Identifier: Apache-2.0
Copyright Contributors to the Submariner project.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"errors"
"flag"
"fmt"
"net/http"
"os"
"github.com/kelseyhightower/envconfig"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/submariner-io/admiral/pkg/syncer/broker"
"github.com/submariner-io/admiral/pkg/util"
"github.com/submariner-io/lighthouse/pkg/agent/controller"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
mcsv1a1 "sigs.k8s.io/mcs-api/pkg/apis/v1alpha1"
)
var (
masterURL string
kubeConfig string
)
func main() {
agentSpec := controller.AgentSpecification{}
// Handle environment variables:
// SUBMARINER_VERBOSITY determines the verbosity level (1 by default)
// SUBMARINER_DEBUG, if set to true, sets the verbosity level to 3
if debug := os.Getenv("SUBMARINER_DEBUG"); debug == "true" {
os.Args = append(os.Args, "-v=3")
} else if verbosity := os.Getenv("SUBMARINER_VERBOSITY"); verbosity != "" {
os.Args = append(os.Args, fmt.Sprintf("-v=%s", verbosity))
} else {
os.Args = append(os.Args, "-v=2")
}
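	// Illustrative outcomes of the branches above:
	//   SUBMARINER_DEBUG=true  -> "-v=3"
	//   SUBMARINER_VERBOSITY=4 -> "-v=4"
	//   neither variable set   -> "-v=2"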
klog.InitFlags(nil)
flag.Parse()
err := envconfig.Process("submariner", &agentSpec)
if err != nil {
klog.Fatal(err)
}
klog.Infof("Arguments: %v", os.Args)
klog.Infof("AgentSpec: %v", agentSpec)
err = mcsv1a1.AddToScheme(scheme.Scheme)
if err != nil {
klog.Exitf("Error adding Multicluster v1alpha1 to the scheme: %v", err)
}
cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeConfig)
if err != nil {
klog.Fatalf("Error building kubeconfig: %s", err.Error())
}
kubeClientSet, err := kubernetes.NewForConfig(cfg)
if err != nil {
klog.Fatalf("Error building clientset: %s", err.Error())
}
restMapper, err := util.BuildRestMapper(cfg)
if err != nil {
klog.Fatal(err.Error())
}
localClient, err := dynamic.NewForConfig(cfg)
if err != nil {
klog.Fatalf("error creating dynamic client: %v", err)
}
klog.Infof("Starting submariner-lighthouse-agent %v", agentSpec)
// set up signals so we handle the first shutdown signal gracefully
stopCh := signals.SetupSignalHandler()
httpServer := startHTTPServer()
lightHouseAgent, err := controller.New(&agentSpec, broker.SyncerConfig{
LocalRestConfig: cfg,
LocalClient: localClient,
RestMapper: restMapper,
Scheme: scheme.Scheme,
}, kubeClientSet,
controller.AgentConfig{
ServiceImportCounterName: "submariner_service_import",
ServiceExportCounterName: "submariner_service_export",
})
if err != nil {
klog.Fatalf("Failed to create lighthouse agent: %v", err)
}
if err := lightHouseAgent.Start(stopCh); err != nil {
klog.Fatalf("Failed to start lighthouse agent: %v", err)
}
<-stopCh
klog.Info("All controllers stopped or exited. Stopping main loop")
if err := httpServer.Shutdown(context.TODO()); err != nil {
klog.Errorf("Error shutting down metrics HTTP server: %v", err)
}
}
func init() {
flag.StringVar(&kubeConfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&masterURL, "master", "",
"The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
}
func startHTTPServer() *http.Server {
srv := &http.Server{Addr: ":8082"}
http.Handle("/metrics", promhttp.Handler())
go func() {
if err := srv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
klog.Errorf("Error starting metrics server: %v", err)
}
}()
return srv
}
| [
"\"SUBMARINER_DEBUG\"",
"\"SUBMARINER_VERBOSITY\""
]
| []
| [
"SUBMARINER_DEBUG",
"SUBMARINER_VERBOSITY"
]
| [] | ["SUBMARINER_DEBUG", "SUBMARINER_VERBOSITY"] | go | 2 | 0 | |
components/function-controller/pkg/utils/build_utils.go | /*
Copyright 2019 The Kyma Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"os"
"time"
tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/gogo/protobuf/proto"
serverlessv1alpha1 "github.com/kyma-project/kyma/components/function-controller/pkg/apis/serverless/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var buildTimeout = os.Getenv("BUILD_TIMEOUT")
const (
// Timeout after which a TaskRun gets canceled. Can be overridden by the BUILD_TIMEOUT env
// var.
defaultBuildTimeout = 30 * time.Minute
// Kaniko executor image used to build Function container images.
// https://github.com/GoogleContainerTools/kaniko/blob/master/deploy/Dockerfile
kanikoExecutorImage = "gcr.io/kaniko-project/executor:v0.12.0"
// Standard volume names required during a Function build.
sourceVolName = "source"
dockerfileVolName = "dockerfile"
// https://github.com/tektoncd/pipeline/blob/v0.10.1/docs/auth.md#least-privilege
tektonDockerVolume = "/tekton/home/.docker/"
	// Default mode of files mounted from ConfigMap volumes.
defaultFileMode int32 = 420
)
// GetBuildTaskRunSpec generates a TaskRun spec from a RuntimeInfo.
func GetBuildTaskRunSpec(rnInfo *RuntimeInfo, fn *serverlessv1alpha1.Function, imageName string) *tektonv1alpha1.TaskRunSpec {
// find Dockerfile name for runtime
var dockerfileName string
for _, rt := range rnInfo.AvailableRuntimes {
if fn.Spec.Runtime == rt.ID {
dockerfileName = rt.DockerfileName
break
}
}
vols, volMounts := makeConfigMapVolumes(
configmapVolumeSpec{
name: sourceVolName,
path: "/src",
cmap: fn.Name,
},
configmapVolumeSpec{
name: dockerfileVolName,
path: "/workspace",
cmap: dockerfileName,
},
)
steps := []tektonv1alpha1.Step{
{Container: corev1.Container{
Name: "build-and-push",
Image: kanikoExecutorImage,
Args: []string{
fmt.Sprintf("--destination=%s", imageName),
},
Env: []corev1.EnvVar{{
// Environment variable read by Kaniko to locate the container
// registry credentials.
// The Tekton credentials initializer sources container registry
// credentials from the Secrets referenced in TaskRun's
// ServiceAccounts, and makes them available in this directory.
// https://github.com/tektoncd/pipeline/blob/master/docs/auth.md
// https://github.com/GoogleContainerTools/kaniko/blob/v0.17.1/deploy/Dockerfile#L45
Name: "DOCKER_CONFIG",
Value: tektonDockerVolume,
}},
VolumeMounts: volMounts,
}},
}
timeout, err := time.ParseDuration(buildTimeout)
if err != nil {
timeout = defaultBuildTimeout
}
return &tektonv1alpha1.TaskRunSpec{
ServiceAccountName: rnInfo.ServiceAccount,
Timeout: &metav1.Duration{Duration: timeout},
TaskSpec: &tektonv1alpha1.TaskSpec{
Steps: steps,
Volumes: vols,
},
}
}
// configmapVolumeSpec is a succinct description of a ConfigMap Volume.
type configmapVolumeSpec struct {
name string
path string
cmap string
}
// makeConfigMapVolumes returns a combination of Volumes and VolumeMounts for
// the given volume specs.
func makeConfigMapVolumes(vspecs ...configmapVolumeSpec) ([]corev1.Volume, []corev1.VolumeMount) {
vols := make([]corev1.Volume, len(vspecs))
vmounts := make([]corev1.VolumeMount, len(vspecs))
for i, vspec := range vspecs {
vols[i] = corev1.Volume{
Name: vspec.name,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
DefaultMode: proto.Int32(defaultFileMode),
LocalObjectReference: corev1.LocalObjectReference{
Name: vspec.cmap,
},
},
},
}
vmounts[i] = corev1.VolumeMount{
Name: vspec.name,
MountPath: vspec.path,
}
}
return vols, vmounts
}
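// Illustrative call (the ConfigMap name is hypothetical):
//
//	vols, mounts := makeConfigMapVolumes(configmapVolumeSpec{name: "source", path: "/src", cmap: "my-function"})
//	// vols[0] is backed by the "my-function" ConfigMap and mounts[0] mounts it at /src.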
| [
"\"BUILD_TIMEOUT\""
]
| []
| [
"BUILD_TIMEOUT"
]
| [] | ["BUILD_TIMEOUT"] | go | 1 | 0 | |
cmd/genbootstrap/genbootstrap.go | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
The genbootstrap command prepares GO_BOOTSTRAP tarballs suitable for
use on builders. It's a wrapper around bootstrap.bash. After
bootstrap.bash produces the full output, genbootstrap trims it up,
removing unnecessary and unwanted files.
Usage: genbootstrap GOOS/GOARCH
*/
package main
import (
"flag"
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
)
var skipBuild = flag.Bool("skip_build", false, "skip bootstrap.bash step; useful during development of cleaning code")
func usage() {
fmt.Fprintln(os.Stderr, "Usage: genbootstrap GOOS/GOARCH")
flag.PrintDefaults()
}
func main() {
flag.Usage = usage
flag.Parse()
if flag.NArg() != 1 {
flag.Usage()
os.Exit(2)
}
f := strings.Split(flag.Arg(0), "/")
if len(f) != 2 {
flag.Usage()
os.Exit(2)
}
goos, goarch := f[0], f[1]
if os.Getenv("GOROOT") == "" {
log.Fatalf("GOROOT not set in environment")
}
tgz := filepath.Join(os.Getenv("GOROOT"), "src", "..", "..", "gobootstrap-"+goos+"-"+goarch+".tar.gz")
os.Remove(tgz)
outDir := filepath.Join(os.Getenv("GOROOT"), "src", "..", "..", "go-"+goos+"-"+goarch+"-bootstrap")
if !*skipBuild {
os.RemoveAll(outDir)
cmd := exec.Command(filepath.Join(os.Getenv("GOROOT"), "src", "bootstrap.bash"))
cmd.Dir = filepath.Join(os.Getenv("GOROOT"), "src")
cmd.Env = append(os.Environ(), "GOOS="+goos, "GOARCH="+goarch)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
log.Fatal(err)
}
// bootstrap.bash makes a bzipped tar file too, but it's fat and full of stuff we
		// don't need. Delete it.
os.Remove(outDir + ".tbz")
}
if err := filepath.Walk(outDir, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
rel := strings.TrimPrefix(strings.TrimPrefix(path, outDir), "/")
base := filepath.Base(path)
var pkgrel string // relative to pkg/<goos>_<goarch>/, or empty
if strings.HasPrefix(rel, "pkg/") && strings.Count(rel, "/") >= 2 {
pkgrel = strings.TrimPrefix(rel, "pkg/")
pkgrel = pkgrel[strings.Index(pkgrel, "/")+1:]
log.Printf("rel %q => %q", rel, pkgrel)
}
remove := func() error {
if err := os.RemoveAll(path); err != nil {
return err
}
if fi.IsDir() {
return filepath.SkipDir
}
return nil
}
switch pkgrel {
case "cmd":
return remove()
}
switch rel {
case "api",
"bin/gofmt",
"doc",
"misc/android",
"misc/cgo",
"misc/chrome",
"misc/swig",
"test":
return remove()
}
if base == "testdata" {
return remove()
}
if strings.HasPrefix(rel, "pkg/tool/") {
switch base {
case "addr2line", "api", "cgo", "cover",
"dist", "doc", "fix", "nm",
"objdump", "pack", "pprof",
"trace", "vet", "yacc":
return remove()
}
}
if fi.IsDir() {
return nil
}
if isEditorJunkFile(path) {
return remove()
}
if !fi.Mode().IsRegular() {
return remove()
}
if strings.HasSuffix(path, "_test.go") {
return remove()
}
log.Printf("keeping: %s\n", rel)
return nil
}); err != nil {
log.Fatal(err)
}
log.Printf("Running: tar zcf %s .", tgz)
cmd := exec.Command("tar", "zcf", tgz, ".")
cmd.Dir = outDir
if err := cmd.Run(); err != nil {
log.Fatalf("tar zf failed: %v", err)
}
log.Printf("Done. Output is %s", tgz)
}
func isEditorJunkFile(path string) bool {
path = filepath.Base(path)
if strings.HasPrefix(path, "#") && strings.HasSuffix(path, "#") {
return true
}
if strings.HasSuffix(path, "~") {
return true
}
return false
}
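// Illustrative matches for the checks above: "#main.go#" and "main.go~" count as
// editor junk, while "main.go" is kept.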
| [
"\"GOROOT\"",
"\"GOROOT\"",
"\"GOROOT\"",
"\"GOROOT\"",
"\"GOROOT\""
]
| []
| [
"GOROOT"
]
| [] | ["GOROOT"] | go | 1 | 0 | |
gentlecoffee/wsgi.py | """
WSGI config for gentlecoffee project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gentlecoffee.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/client/factory.go | /*
Copyright 2017, 2019 the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"os"
apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
k8scheme "k8s.io/client-go/kubernetes/scheme"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
velerov1api "github.com/adi-bhardwaj/velero-modified/pkg/apis/velero/v1"
clientset "github.com/adi-bhardwaj/velero-modified/pkg/generated/clientset/versioned"
)
// Factory knows how to create a VeleroClient and Kubernetes client.
type Factory interface {
// BindFlags binds common flags (--kubeconfig, --namespace) to the passed-in FlagSet.
BindFlags(flags *pflag.FlagSet)
// Client returns a VeleroClient. It uses the following priority to specify the cluster
// configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration.
Client() (clientset.Interface, error)
// KubeClient returns a Kubernetes client. It uses the following priority to specify the cluster
// configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration.
KubeClient() (kubernetes.Interface, error)
// DynamicClient returns a Kubernetes dynamic client. It uses the following priority to specify the cluster
// configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration.
DynamicClient() (dynamic.Interface, error)
// KubebuilderClient returns a client for the controller runtime framework. It adds Kubernetes and Velero
// types to its scheme. It uses the following priority to specify the cluster
// configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration.
KubebuilderClient() (kbclient.Client, error)
// SetBasename changes the basename for an already-constructed client.
// This is useful for generating clients that require a different user-agent string below the root `velero`
// command, such as the server subcommand.
SetBasename(string)
// SetClientQPS sets the Queries Per Second for a client.
SetClientQPS(float32)
// SetClientBurst sets the Burst for a client.
SetClientBurst(int)
// ClientConfig returns a rest.Config struct used for client-go clients.
ClientConfig() (*rest.Config, error)
// Namespace returns the namespace which the Factory will create clients for.
Namespace() string
}
type factory struct {
flags *pflag.FlagSet
kubeconfig string
kubecontext string
baseName string
namespace string
clientQPS float32
clientBurst int
}
// NewFactory returns a Factory.
func NewFactory(baseName string, config VeleroConfig) Factory {
f := &factory{
flags: pflag.NewFlagSet("", pflag.ContinueOnError),
baseName: baseName,
}
f.namespace = os.Getenv("VELERO_NAMESPACE")
if config.Namespace() != "" {
f.namespace = config.Namespace()
}
// We didn't get the namespace via env var or config file, so use the default.
// Command line flags will override when BindFlags is called.
if f.namespace == "" {
f.namespace = velerov1api.DefaultNamespace
}
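	// Net precedence (lowest to highest): velerov1api.DefaultNamespace, the
	// VELERO_NAMESPACE env var, the client config file, then the --namespace
	// flag bound below and applied via BindFlags.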
f.flags.StringVar(&f.kubeconfig, "kubeconfig", "", "Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration")
f.flags.StringVarP(&f.namespace, "namespace", "n", f.namespace, "The namespace in which Velero should operate")
f.flags.StringVar(&f.kubecontext, "kubecontext", "", "The context to use to talk to the Kubernetes apiserver. If unset defaults to whatever your current-context is (kubectl config current-context)")
return f
}
func (f *factory) BindFlags(flags *pflag.FlagSet) {
flags.AddFlagSet(f.flags)
}
func (f *factory) ClientConfig() (*rest.Config, error) {
return Config(f.kubeconfig, f.kubecontext, f.baseName, f.clientQPS, f.clientBurst)
}
func (f *factory) Client() (clientset.Interface, error) {
clientConfig, err := f.ClientConfig()
if err != nil {
return nil, err
}
veleroClient, err := clientset.NewForConfig(clientConfig)
if err != nil {
return nil, errors.WithStack(err)
}
return veleroClient, nil
}
func (f *factory) KubeClient() (kubernetes.Interface, error) {
clientConfig, err := f.ClientConfig()
if err != nil {
return nil, err
}
kubeClient, err := kubernetes.NewForConfig(clientConfig)
if err != nil {
return nil, errors.WithStack(err)
}
return kubeClient, nil
}
func (f *factory) DynamicClient() (dynamic.Interface, error) {
clientConfig, err := f.ClientConfig()
if err != nil {
return nil, err
}
dynamicClient, err := dynamic.NewForConfig(clientConfig)
if err != nil {
return nil, errors.WithStack(err)
}
return dynamicClient, nil
}
func (f *factory) KubebuilderClient() (kbclient.Client, error) {
clientConfig, err := f.ClientConfig()
if err != nil {
return nil, err
}
scheme := runtime.NewScheme()
velerov1api.AddToScheme(scheme)
k8scheme.AddToScheme(scheme)
apiextv1beta1.AddToScheme(scheme)
kubebuilderClient, err := kbclient.New(clientConfig, kbclient.Options{
Scheme: scheme,
})
if err != nil {
return nil, err
}
return kubebuilderClient, nil
}
func (f *factory) SetBasename(name string) {
f.baseName = name
}
func (f *factory) SetClientQPS(qps float32) {
f.clientQPS = qps
}
func (f *factory) SetClientBurst(burst int) {
f.clientBurst = burst
}
func (f *factory) Namespace() string {
return f.namespace
}
| [
"\"VELERO_NAMESPACE\""
]
| []
| [
"VELERO_NAMESPACE"
]
| [] | ["VELERO_NAMESPACE"] | go | 1 | 0 | |
tests/end-to-end/tools/example-index-generator/main.go | package main
import (
"fmt"
"io/ioutil"
"os"
"strings"
"text/template"
)
func main() {
dir := os.Getenv("EXAMPLES_DIRECTORY")
if dir == "" {
panic("Missing `EXAMPLES_DIRECTORY` environment variable")
}
files, err := ioutil.ReadDir(dir)
if err != nil {
panic(err)
}
data := make([]Data, 0)
for _, f := range files {
if !strings.HasSuffix(f.Name(), ".graphql") {
continue
}
withoutExt := strings.Replace(f.Name(), ".graphql", "", -1)
withoutDash := strings.Replace(withoutExt, "-", " ", -1)
data = append(data, Data{Description: withoutDash, FileName: f.Name()})
}
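	// e.g. a hypothetical "create-application.graphql" becomes
	// Data{Description: "create application", FileName: "create-application.graphql"}.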
t, err := template.ParseFiles("./md.tpl")
if err != nil {
panic(err)
}
dest, err := os.Create(fmt.Sprintf("%s/README.md", dir))
if err != nil {
panic(err)
}
defer func() {
err := dest.Close()
if err != nil {
panic(err)
}
}()
err = t.Execute(dest, data)
if err != nil {
panic(err)
}
}
type Data struct {
FileName string
Description string
}
| [
"\"EXAMPLES_DIRECTORY\""
]
| []
| [
"EXAMPLES_DIRECTORY"
]
| [] | ["EXAMPLES_DIRECTORY"] | go | 1 | 0 | |
cloudformation/s3outposts/aws-s3outposts-bucket_rule.go | package s3outposts
import (
"github.com/awslabs/goformation/v5/cloudformation/policies"
)
// Bucket_Rule AWS CloudFormation Resource (AWS::S3Outposts::Bucket.Rule)
// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3outposts-bucket-rule.html
type Bucket_Rule struct {
// AbortIncompleteMultipartUpload AWS CloudFormation Property
// Required: false
// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3outposts-bucket-rule.html#cfn-s3outposts-bucket-rule-abortincompletemultipartupload
AbortIncompleteMultipartUpload *Bucket_AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"`
// ExpirationDate AWS CloudFormation Property
// Required: false
// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3outposts-bucket-rule.html#cfn-s3outposts-bucket-rule-expirationdate
ExpirationDate string `json:"ExpirationDate,omitempty"`
// ExpirationInDays AWS CloudFormation Property
// Required: false
// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3outposts-bucket-rule.html#cfn-s3outposts-bucket-rule-expirationindays
ExpirationInDays int `json:"ExpirationInDays,omitempty"`
// Filter AWS CloudFormation Property
// Required: false
// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3outposts-bucket-rule.html#cfn-s3outposts-bucket-rule-filter
Filter interface{} `json:"Filter,omitempty"`
// Id AWS CloudFormation Property
// Required: false
// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3outposts-bucket-rule.html#cfn-s3outposts-bucket-rule-id
Id string `json:"Id,omitempty"`
// Status AWS CloudFormation Property
// Required: false
// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3outposts-bucket-rule.html#cfn-s3outposts-bucket-rule-status
Status string `json:"Status,omitempty"`
// AWSCloudFormationDeletionPolicy represents a CloudFormation DeletionPolicy
AWSCloudFormationDeletionPolicy policies.DeletionPolicy `json:"-"`
// AWSCloudFormationUpdateReplacePolicy represents a CloudFormation UpdateReplacePolicy
AWSCloudFormationUpdateReplacePolicy policies.UpdateReplacePolicy `json:"-"`
// AWSCloudFormationDependsOn stores the logical ID of the resources to be created before this resource
AWSCloudFormationDependsOn []string `json:"-"`
// AWSCloudFormationMetadata stores structured data associated with this resource
AWSCloudFormationMetadata map[string]interface{} `json:"-"`
// AWSCloudFormationCondition stores the logical ID of the condition that must be satisfied for this resource to be created
AWSCloudFormationCondition string `json:"-"`
}
// AWSCloudFormationType returns the AWS CloudFormation resource type
func (r *Bucket_Rule) AWSCloudFormationType() string {
return "AWS::S3Outposts::Bucket.Rule"
}
| []
| []
| []
| [] | [] | go | null | null | null |
ldap/connection.go | package ldap
import (
"crypto/tls"
"crypto/x509"
"fmt"
"log"
"os"
"strings"
l "github.com/go-ldap/ldap"
"github.com/vmwarepivotallabs/cf-mgmt/config"
"github.com/xchapter7x/lo"
)
type Connection interface {
Close()
Search(*l.SearchRequest) (*l.SearchResult, error)
IsClosing() bool
}
type RefreshableConnection struct {
Connection
refreshConnection func() (Connection, error)
}
func (r *RefreshableConnection) Search(searchRequest *l.SearchRequest) (*l.SearchResult, error) {
if r.Connection.IsClosing() {
err := r.RefreshConnection()
if err != nil {
return nil, err
}
}
return r.Connection.Search(searchRequest)
}
func (r *RefreshableConnection) RefreshConnection() error {
connection, err := r.refreshConnection()
if err != nil {
lo.G.Error("Could not re-establish LDAP connection")
return err
}
r.Connection = connection
return nil
}
// NewRefreshableConnection creates a connection that will use the function
// `createConnection` to refresh the connection if it has been closed.
func NewRefreshableConnection(createConnection func() (Connection, error)) (*RefreshableConnection, error) {
connection, err := createConnection()
if err != nil {
return nil, err
}
return &RefreshableConnection{
Connection: connection,
refreshConnection: createConnection,
}, nil
}
func setMaxTLSVersion(tlsMaxVersion int, tlsConfig *tls.Config) {
switch tlsMaxVersion {
case 1:
tlsConfig.MaxVersion = tls.VersionTLS11
case 2:
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
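// Illustrative mapping: setMaxTLSVersion(1, cfg) caps cfg.MaxVersion at TLS 1.1 and
// setMaxTLSVersion(2, cfg) at TLS 1.2; any other value leaves MaxVersion unset.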
func createConnection(config *config.LdapConfig) (Connection, error) {
var connection *l.Conn
var err error
ldapURL := fmt.Sprintf("%s:%d", config.LdapHost, config.LdapPort)
lo.G.Debug("Connecting to", ldapURL)
if config.TLS {
		tlsConfig := &tls.Config{}
		if config.TLSMaxVersion != 0 {
			setMaxTLSVersion(config.TLSMaxVersion, tlsConfig)
		}
if config.InsecureSkipVerify == "" || strings.EqualFold(config.InsecureSkipVerify, "true") {
tlsConfig.InsecureSkipVerify = true
connection, err = l.DialTLS("tcp", ldapURL, tlsConfig)
} else {
// Get the SystemCertPool, continue with an empty pool on error
rootCAs, _ := x509.SystemCertPool()
if rootCAs == nil {
rootCAs = x509.NewCertPool()
}
// Append our cert to the system pool
if ok := rootCAs.AppendCertsFromPEM([]byte(config.CACert)); !ok {
log.Println("No certs appended, using system certs only")
}
// Trust the augmented cert pool in our client
tlsConfig.RootCAs = rootCAs
tlsConfig.ServerName = config.LdapHost
connection, err = l.DialTLS("tcp", ldapURL, tlsConfig)
}
} else {
connection, err = l.Dial("tcp", ldapURL)
}
if err != nil {
return nil, err
}
if connection != nil {
if strings.EqualFold(os.Getenv("LOG_LEVEL"), "debug") {
connection.Debug = true
}
if err = connection.Bind(config.BindDN, config.BindPassword); err != nil {
connection.Close()
return nil, fmt.Errorf("cannot bind with %s: %v", config.BindDN, err)
}
}
return connection, err
}
| [
"\"LOG_LEVEL\""
]
| []
| [
"LOG_LEVEL"
]
| [] | ["LOG_LEVEL"] | go | 1 | 0 | |
fastfood_env/Lib/site-packages/py/_path/local.py | """
local path implementation.
"""
from __future__ import with_statement
from contextlib import contextmanager
import sys, os, atexit, io, uuid
import py
from py._path import common
from py._path.common import iswin32, fspath
from stat import S_ISLNK, S_ISDIR, S_ISREG
from os.path import abspath, normpath, isabs, exists, isdir, isfile, islink, dirname
if sys.version_info > (3,0):
def map_as_list(func, iter):
return list(map(func, iter))
else:
map_as_list = map
class Stat(object):
def __getattr__(self, name):
return getattr(self._osstatresult, "st_" + name)
def __init__(self, path, osstatresult):
self.path = path
self._osstatresult = osstatresult
@property
def owner(self):
if iswin32:
raise NotImplementedError("XXX win32")
import pwd
entry = py.error.checked_call(pwd.getpwuid, self.uid)
return entry[0]
@property
def group(self):
""" return group name of file. """
if iswin32:
raise NotImplementedError("XXX win32")
import grp
entry = py.error.checked_call(grp.getgrgid, self.gid)
return entry[0]
def isdir(self):
return S_ISDIR(self._osstatresult.st_mode)
def isfile(self):
return S_ISREG(self._osstatresult.st_mode)
def islink(self):
st = self.path.lstat()
return S_ISLNK(self._osstatresult.st_mode)
class PosixPath(common.PathBase):
def chown(self, user, group, rec=0):
""" change ownership to the given user and group.
user and group may be specified by a number or
by a name. if rec is True change ownership
recursively.
"""
uid = getuserid(user)
gid = getgroupid(group)
if rec:
for x in self.visit(rec=lambda x: x.check(link=0)):
if x.check(link=0):
py.error.checked_call(os.chown, str(x), uid, gid)
py.error.checked_call(os.chown, str(self), uid, gid)
def readlink(self):
""" return value of a symbolic link. """
return py.error.checked_call(os.readlink, self.strpath)
def mklinkto(self, oldname):
""" posix style hard link to another name. """
py.error.checked_call(os.link, str(oldname), str(self))
def mksymlinkto(self, value, absolute=1):
""" create a symbolic link with the given value (pointing to another name). """
if absolute:
py.error.checked_call(os.symlink, str(value), self.strpath)
else:
base = self.common(value)
# with posix local paths '/' is always a common base
relsource = self.__class__(value).relto(base)
reldest = self.relto(base)
n = reldest.count(self.sep)
target = self.sep.join(('..', )*n + (relsource, ))
py.error.checked_call(os.symlink, target, self.strpath)
def getuserid(user):
import pwd
if not isinstance(user, int):
user = pwd.getpwnam(user)[2]
return user
def getgroupid(group):
import grp
if not isinstance(group, int):
group = grp.getgrnam(group)[2]
return group
FSBase = not iswin32 and PosixPath or common.PathBase
class LocalPath(FSBase):
""" object oriented interface to os.path and other local filesystem
related information.
"""
class ImportMismatchError(ImportError):
""" raised on pyimport() if there is a mismatch of __file__'s"""
sep = os.sep
class Checkers(common.Checkers):
def _stat(self):
try:
return self._statcache
except AttributeError:
try:
self._statcache = self.path.stat()
except py.error.ELOOP:
self._statcache = self.path.lstat()
return self._statcache
def dir(self):
return S_ISDIR(self._stat().mode)
def file(self):
return S_ISREG(self._stat().mode)
def exists(self):
return self._stat()
def link(self):
st = self.path.lstat()
return S_ISLNK(st.mode)
def __init__(self, path=None, expanduser=False):
""" Initialize and return a local Path instance.
Path can be relative to the current directory.
If path is None it defaults to the current working directory.
If expanduser is True, tilde-expansion is performed.
Note that Path instances always carry an absolute path.
Note also that passing in a local path object will simply return
the exact same path object. Use new() to get a new copy.
"""
if path is None:
self.strpath = py.error.checked_call(os.getcwd)
else:
try:
path = fspath(path)
except TypeError:
raise ValueError("can only pass None, Path instances "
"or non-empty strings to LocalPath")
if expanduser:
path = os.path.expanduser(path)
self.strpath = abspath(path)
def __hash__(self):
return hash(self.strpath)
def __eq__(self, other):
s1 = fspath(self)
try:
s2 = fspath(other)
except TypeError:
return False
if iswin32:
s1 = s1.lower()
try:
s2 = s2.lower()
except AttributeError:
return False
return s1 == s2
def __ne__(self, other):
return not (self == other)
def __lt__(self, other):
return fspath(self) < fspath(other)
def __gt__(self, other):
return fspath(self) > fspath(other)
def samefile(self, other):
""" return True if 'other' references the same file as 'self'.
"""
other = fspath(other)
if not isabs(other):
other = abspath(other)
if self == other:
return True
if iswin32:
return False # there is no samefile
return py.error.checked_call(
os.path.samefile, self.strpath, other)
def remove(self, rec=1, ignore_errors=False):
""" remove a file or directory (or a directory tree if rec=1).
if ignore_errors is True, errors while removing directories will
be ignored.
"""
if self.check(dir=1, link=0):
if rec:
# force remove of readonly files on windows
if iswin32:
self.chmod(0o700, rec=1)
import shutil
py.error.checked_call(
shutil.rmtree, self.strpath,
ignore_errors=ignore_errors)
else:
py.error.checked_call(os.rmdir, self.strpath)
else:
if iswin32:
self.chmod(0o700)
py.error.checked_call(os.remove, self.strpath)
def computehash(self, hashtype="md5", chunksize=524288):
""" return hexdigest of hashvalue for this file. """
try:
try:
import hashlib as mod
except ImportError:
if hashtype == "sha1":
hashtype = "sha"
mod = __import__(hashtype)
hash = getattr(mod, hashtype)()
except (AttributeError, ImportError):
raise ValueError("Don't know how to compute %r hash" %(hashtype,))
f = self.open('rb')
try:
while 1:
buf = f.read(chunksize)
if not buf:
return hash.hexdigest()
hash.update(buf)
finally:
f.close()
def new(self, **kw):
""" create a modified version of this path.
the following keyword arguments modify various path parts::
a:/some/path/to/a/file.ext
xx drive
xxxxxxxxxxxxxxxxx dirname
xxxxxxxx basename
xxxx purebasename
xxx ext
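            Illustrative examples (paths assumed for the example):
              local('/tmp/report.txt').new(ext='.csv')      -> local('/tmp/report.csv')
              local('/tmp/report.txt').new(basename='x.md') -> local('/tmp/x.md')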
"""
obj = object.__new__(self.__class__)
if not kw:
obj.strpath = self.strpath
return obj
drive, dirname, basename, purebasename,ext = self._getbyspec(
"drive,dirname,basename,purebasename,ext")
if 'basename' in kw:
if 'purebasename' in kw or 'ext' in kw:
raise ValueError("invalid specification %r" % kw)
else:
pb = kw.setdefault('purebasename', purebasename)
try:
ext = kw['ext']
except KeyError:
pass
else:
if ext and not ext.startswith('.'):
ext = '.' + ext
kw['basename'] = pb + ext
if ('dirname' in kw and not kw['dirname']):
kw['dirname'] = drive
else:
kw.setdefault('dirname', dirname)
kw.setdefault('sep', self.sep)
obj.strpath = normpath(
"%(dirname)s%(sep)s%(basename)s" % kw)
return obj
def _getbyspec(self, spec):
""" see new for what 'spec' can be. """
res = []
parts = self.strpath.split(self.sep)
args = filter(None, spec.split(',') )
append = res.append
for name in args:
if name == 'drive':
append(parts[0])
elif name == 'dirname':
append(self.sep.join(parts[:-1]))
else:
basename = parts[-1]
if name == 'basename':
append(basename)
else:
i = basename.rfind('.')
if i == -1:
purebasename, ext = basename, ''
else:
purebasename, ext = basename[:i], basename[i:]
if name == 'purebasename':
append(purebasename)
elif name == 'ext':
append(ext)
else:
raise ValueError("invalid part specification %r" % name)
return res
def dirpath(self, *args, **kwargs):
""" return the directory path joined with any given path arguments. """
if not kwargs:
path = object.__new__(self.__class__)
path.strpath = dirname(self.strpath)
if args:
path = path.join(*args)
return path
return super(LocalPath, self).dirpath(*args, **kwargs)
def join(self, *args, **kwargs):
""" return a new path by appending all 'args' as path
components. if abs=1 is used restart from root if any
of the args is an absolute path.
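        Illustrative (paths assumed): local('/tmp').join('a', 'b') yields
        local('/tmp/a/b'), while local('/tmp').join('/etc', abs=1) restarts
        at the absolute argument and yields local('/etc').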
"""
sep = self.sep
strargs = [fspath(arg) for arg in args]
strpath = self.strpath
if kwargs.get('abs'):
newargs = []
for arg in reversed(strargs):
if isabs(arg):
strpath = arg
strargs = newargs
break
newargs.insert(0, arg)
# special case for when we have e.g. strpath == "/"
actual_sep = "" if strpath.endswith(sep) else sep
for arg in strargs:
arg = arg.strip(sep)
if iswin32:
# allow unix style paths even on windows.
arg = arg.strip('/')
arg = arg.replace('/', sep)
strpath = strpath + actual_sep + arg
actual_sep = sep
obj = object.__new__(self.__class__)
obj.strpath = normpath(strpath)
return obj
def open(self, mode='r', ensure=False, encoding=None):
""" return an opened file with the given mode.
If ensure is True, create parent directories if needed.
"""
if ensure:
self.dirpath().ensure(dir=1)
if encoding:
return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding)
return py.error.checked_call(open, self.strpath, mode)
def _fastjoin(self, name):
child = object.__new__(self.__class__)
child.strpath = self.strpath + self.sep + name
return child
def islink(self):
return islink(self.strpath)
def check(self, **kw):
if not kw:
return exists(self.strpath)
if len(kw) == 1:
if "dir" in kw:
return not kw["dir"] ^ isdir(self.strpath)
if "file" in kw:
return not kw["file"] ^ isfile(self.strpath)
return super(LocalPath, self).check(**kw)
_patternchars = set("*?[" + os.path.sep)
def listdir(self, fil=None, sort=None):
""" list directory contents, possibly filter by the given fil func
and possibly sorted.
"""
if fil is None and sort is None:
names = py.error.checked_call(os.listdir, self.strpath)
return map_as_list(self._fastjoin, names)
if isinstance(fil, py.builtin._basestring):
if not self._patternchars.intersection(fil):
child = self._fastjoin(fil)
if exists(child.strpath):
return [child]
return []
fil = common.FNMatcher(fil)
names = py.error.checked_call(os.listdir, self.strpath)
res = []
for name in names:
child = self._fastjoin(name)
if fil is None or fil(child):
res.append(child)
self._sortlist(res, sort)
return res
def size(self):
""" return size of the underlying file object """
return self.stat().size
def mtime(self):
""" return last modification time of the path. """
return self.stat().mtime
def copy(self, target, mode=False, stat=False):
""" copy path to target.
If mode is True, will copy copy permission from path to target.
If stat is True, copy permission, last modification
time, last access time, and flags from path to target.
"""
if self.check(file=1):
if target.check(dir=1):
target = target.join(self.basename)
assert self!=target
copychunked(self, target)
if mode:
copymode(self.strpath, target.strpath)
if stat:
copystat(self, target)
else:
def rec(p):
return p.check(link=0)
for x in self.visit(rec=rec):
relpath = x.relto(self)
newx = target.join(relpath)
newx.dirpath().ensure(dir=1)
if x.check(link=1):
newx.mksymlinkto(x.readlink())
continue
elif x.check(file=1):
copychunked(x, newx)
elif x.check(dir=1):
newx.ensure(dir=1)
if mode:
copymode(x.strpath, newx.strpath)
if stat:
copystat(x, newx)
def rename(self, target):
""" rename this path to target. """
target = fspath(target)
return py.error.checked_call(os.rename, self.strpath, target)
def dump(self, obj, bin=1):
""" pickle object into path location"""
f = self.open('wb')
import pickle
try:
py.error.checked_call(pickle.dump, obj, f, bin)
finally:
f.close()
def mkdir(self, *args):
""" create & return the directory joined with args. """
p = self.join(*args)
py.error.checked_call(os.mkdir, fspath(p))
return p
def write_binary(self, data, ensure=False):
""" write binary data into path. If ensure is True create
missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
with self.open('wb') as f:
f.write(data)
def write_text(self, data, encoding, ensure=False):
""" write text data into path using the specified encoding.
If ensure is True create missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
with self.open('w', encoding=encoding) as f:
f.write(data)
def write(self, data, mode='w', ensure=False):
""" write data into path. If ensure is True create
missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
if 'b' in mode:
if not py.builtin._isbytes(data):
raise ValueError("can only process bytes")
else:
if not py.builtin._istext(data):
if not py.builtin._isbytes(data):
data = str(data)
else:
data = py.builtin._totext(data, sys.getdefaultencoding())
f = self.open(mode)
try:
f.write(data)
finally:
f.close()
def _ensuredirs(self):
parent = self.dirpath()
if parent == self:
return self
if parent.check(dir=0):
parent._ensuredirs()
if self.check(dir=0):
try:
self.mkdir()
except py.error.EEXIST:
# race condition: file/dir created by another thread/process.
# complain if it is not a dir
if self.check(dir=0):
raise
return self
def ensure(self, *args, **kwargs):
""" ensure that an args-joined path exists (by default as
a file). if you specify a keyword argument 'dir=True'
then the path is forced to be a directory path.
"""
p = self.join(*args)
if kwargs.get('dir', 0):
return p._ensuredirs()
else:
p.dirpath()._ensuredirs()
if not p.check(file=1):
p.open('w').close()
return p
def stat(self, raising=True):
""" Return an os.stat() tuple. """
if raising == True:
return Stat(self, py.error.checked_call(os.stat, self.strpath))
try:
return Stat(self, os.stat(self.strpath))
except KeyboardInterrupt:
raise
except Exception:
return None
def lstat(self):
""" Return an os.lstat() tuple. """
return Stat(self, py.error.checked_call(os.lstat, self.strpath))
def setmtime(self, mtime=None):
""" set modification time for the given path. if 'mtime' is None
(the default) then the file's mtime is set to current time.
Note that the resolution for 'mtime' is platform dependent.
"""
if mtime is None:
return py.error.checked_call(os.utime, self.strpath, mtime)
try:
return py.error.checked_call(os.utime, self.strpath, (-1, mtime))
except py.error.EINVAL:
return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime))
def chdir(self):
""" change directory to self and return old current directory """
try:
old = self.__class__()
except py.error.ENOENT:
old = None
py.error.checked_call(os.chdir, self.strpath)
return old
@contextmanager
def as_cwd(self):
""" return context manager which changes to current dir during the
managed "with" context. On __enter__ it returns the old dir.
"""
old = self.chdir()
try:
yield old
finally:
old.chdir()
def realpath(self):
""" return a new path which contains no symbolic links."""
return self.__class__(os.path.realpath(self.strpath))
def atime(self):
""" return last access time of the path. """
return self.stat().atime
def __repr__(self):
return 'local(%r)' % self.strpath
def __str__(self):
""" return string representation of the Path. """
return self.strpath
def chmod(self, mode, rec=0):
""" change permissions to the given mode. If mode is an
integer it directly encodes the os-specific modes.
if rec is True perform recursively.
"""
if not isinstance(mode, int):
raise TypeError("mode %r must be an integer" % (mode,))
if rec:
for x in self.visit(rec=rec):
py.error.checked_call(os.chmod, str(x), mode)
py.error.checked_call(os.chmod, self.strpath, mode)
def pypkgpath(self):
""" return the Python package path by looking for the last
directory upwards which still contains an __init__.py.
Return None if a pkgpath can not be determined.
"""
pkgpath = None
for parent in self.parts(reverse=True):
if parent.isdir():
if not parent.join('__init__.py').exists():
break
if not isimportable(parent.basename):
break
pkgpath = parent
return pkgpath
def _ensuresyspath(self, ensuremode, path):
if ensuremode:
s = str(path)
if ensuremode == "append":
if s not in sys.path:
sys.path.append(s)
else:
if s != sys.path[0]:
sys.path.insert(0, s)
def pyimport(self, modname=None, ensuresyspath=True):
""" return path as an imported python module.
If modname is None, look for the containing package
and construct an according module name.
The module will be put/looked up in sys.modules.
if ensuresyspath is True then the root dir for importing
the file (taking __init__.py files into account) will
be prepended to sys.path if it isn't there already.
If ensuresyspath=="append" the root dir will be appended
if it isn't already contained in sys.path.
if ensuresyspath is False no modification of syspath happens.
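        Illustrative (paths assumed): local('/proj/pkg/mod.py').pyimport()
        imports 'pkg.mod' and prepends '/proj' to sys.path, assuming
        /proj/pkg/__init__.py exists; ensuresyspath="append" appends instead.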
"""
if not self.check():
raise py.error.ENOENT(self)
pkgpath = None
if modname is None:
pkgpath = self.pypkgpath()
if pkgpath is not None:
pkgroot = pkgpath.dirpath()
names = self.new(ext="").relto(pkgroot).split(self.sep)
if names[-1] == "__init__":
names.pop()
modname = ".".join(names)
else:
pkgroot = self.dirpath()
modname = self.purebasename
self._ensuresyspath(ensuresyspath, pkgroot)
__import__(modname)
mod = sys.modules[modname]
if self.basename == "__init__.py":
return mod # we don't check anything as we might
# be in a namespace package ... too icky to check
modfile = mod.__file__
if modfile[-4:] in ('.pyc', '.pyo'):
modfile = modfile[:-1]
elif modfile.endswith('$py.class'):
modfile = modfile[:-9] + '.py'
if modfile.endswith(os.path.sep + "__init__.py"):
if self.basename != "__init__.py":
modfile = modfile[:-12]
try:
issame = self.samefile(modfile)
except py.error.ENOENT:
issame = False
if not issame:
raise self.ImportMismatchError(modname, modfile, self)
return mod
else:
try:
return sys.modules[modname]
except KeyError:
# we have a custom modname, do a pseudo-import
import types
mod = types.ModuleType(modname)
mod.__file__ = str(self)
sys.modules[modname] = mod
try:
py.builtin.execfile(str(self), mod.__dict__)
except:
del sys.modules[modname]
raise
return mod
def sysexec(self, *argv, **popen_opts):
""" return stdout text from executing a system child process,
where the 'self' path points to executable.
The process is directly invoked and not through a system shell.
"""
from subprocess import Popen, PIPE
argv = map_as_list(str, argv)
popen_opts['stdout'] = popen_opts['stderr'] = PIPE
proc = Popen([str(self)] + argv, **popen_opts)
stdout, stderr = proc.communicate()
ret = proc.wait()
if py.builtin._isbytes(stdout):
stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
if ret != 0:
if py.builtin._isbytes(stderr):
stderr = py.builtin._totext(stderr, sys.getdefaultencoding())
raise py.process.cmdexec.Error(ret, ret, str(self),
stdout, stderr,)
return stdout
def sysfind(cls, name, checker=None, paths=None):
""" return a path object found by looking at the systems
underlying PATH specification. If the checker is not None
it will be invoked to filter matching paths. If a binary
cannot be found, None is returned
Note: This is probably not working on plain win32 systems
but may work on cygwin.
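        Illustrative (binary name assumed): LocalPath.sysfind('git') returns
        e.g. local('/usr/bin/git') on a typical Linux PATH, or None when the
        binary cannot be found.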
"""
if isabs(name):
p = py.path.local(name)
if p.check(file=1):
return p
else:
if paths is None:
if iswin32:
paths = os.environ['Path'].split(';')
if '' not in paths and '.' not in paths:
paths.append('.')
try:
systemroot = os.environ['SYSTEMROOT']
except KeyError:
pass
else:
paths = [path.replace('%SystemRoot%', systemroot)
for path in paths]
else:
paths = os.environ['PATH'].split(':')
tryadd = []
if iswin32:
tryadd += os.environ['PATHEXT'].split(os.pathsep)
tryadd.append("")
for x in paths:
for addext in tryadd:
p = py.path.local(x).join(name, abs=True) + addext
try:
if p.check(file=1):
if checker:
if not checker(p):
continue
return p
except py.error.EACCES:
pass
return None
sysfind = classmethod(sysfind)
def _gethomedir(cls):
try:
x = os.environ['HOME']
except KeyError:
try:
x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH']
except KeyError:
return None
return cls(x)
_gethomedir = classmethod(_gethomedir)
# """
# special class constructors for local filesystem paths
# """
@classmethod
def get_temproot(cls):
""" return the system's temporary directory
(where tempfiles are usually created in)
"""
import tempfile
return py.path.local(tempfile.gettempdir())
@classmethod
def mkdtemp(cls, rootdir=None):
""" return a Path object pointing to a fresh new temporary directory
(which we created ourself).
"""
import tempfile
if rootdir is None:
rootdir = cls.get_temproot()
return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir)))
def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
lock_timeout=172800): # two days
""" return unique directory with a number greater than the current
maximum one. The number is assumed to start directly after prefix.
if keep is true directories with a number less than (maxnum-keep)
will be removed. If .lock files are used (lock_timeout non-zero),
algorithm is multi-process safe.
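        Illustrative: successive calls with prefix='session-' create session-0,
        session-1, ... under rootdir, while numbered directories more than
        `keep` behind the newest are pruned (subject to the .lock safeguards).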
"""
if rootdir is None:
rootdir = cls.get_temproot()
nprefix = prefix.lower()
def parse_num(path):
""" parse the number out of a path (if it matches the prefix) """
nbasename = path.basename.lower()
if nbasename.startswith(nprefix):
try:
return int(nbasename[len(nprefix):])
except ValueError:
pass
def create_lockfile(path):
""" exclusively create lockfile. Throws when failed """
mypid = os.getpid()
lockfile = path.join('.lock')
if hasattr(lockfile, 'mksymlinkto'):
lockfile.mksymlinkto(str(mypid))
else:
fd = py.error.checked_call(os.open, str(lockfile), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
with os.fdopen(fd, 'w') as f:
f.write(str(mypid))
return lockfile
def atexit_remove_lockfile(lockfile):
""" ensure lockfile is removed at process exit """
mypid = os.getpid()
def try_remove_lockfile():
# in a fork() situation, only the last process should
# remove the .lock, otherwise the other processes run the
# risk of seeing their temporary dir disappear. For now
# we remove the .lock in the parent only (i.e. we assume
# that the children finish before the parent).
if os.getpid() != mypid:
return
try:
lockfile.remove()
except py.error.Error:
pass
atexit.register(try_remove_lockfile)
# compute the maximum number currently in use with the prefix
lastmax = None
while True:
maxnum = -1
for path in rootdir.listdir():
num = parse_num(path)
if num is not None:
maxnum = max(maxnum, num)
# make the new directory
try:
udir = rootdir.mkdir(prefix + str(maxnum+1))
if lock_timeout:
lockfile = create_lockfile(udir)
atexit_remove_lockfile(lockfile)
except (py.error.EEXIST, py.error.ENOENT, py.error.EBUSY):
# race condition (1): another thread/process created the dir
# in the meantime - try again
# race condition (2): another thread/process spuriously acquired
# lock treating empty directory as candidate
# for removal - try again
# race condition (3): another thread/process tried to create the lock at
# the same time (happened in Python 3.3 on Windows)
# https://ci.appveyor.com/project/pytestbot/py/build/1.0.21/job/ffi85j4c0lqwsfwa
if lastmax == maxnum:
raise
lastmax = maxnum
continue
break
def get_mtime(path):
""" read file modification time """
try:
return path.lstat().mtime
except py.error.Error:
pass
garbage_prefix = prefix + 'garbage-'
def is_garbage(path):
""" check if path denotes directory scheduled for removal """
bn = path.basename
return bn.startswith(garbage_prefix)
# prune old directories
udir_time = get_mtime(udir)
if keep and udir_time:
for path in rootdir.listdir():
num = parse_num(path)
if num is not None and num <= (maxnum - keep):
try:
# try acquiring lock to remove directory as exclusive user
if lock_timeout:
create_lockfile(path)
except (py.error.EEXIST, py.error.ENOENT, py.error.EBUSY):
path_time = get_mtime(path)
if not path_time:
# assume directory doesn't exist now
continue
if abs(udir_time - path_time) < lock_timeout:
# assume directory with lockfile exists
# and lock timeout hasn't expired yet
continue
# path dir locked for exclusive use
# and scheduled for removal to avoid another thread/process
# treating it as a new directory or removal candidate
garbage_path = rootdir.join(garbage_prefix + str(uuid.uuid4()))
try:
path.rename(garbage_path)
garbage_path.remove(rec=1)
except KeyboardInterrupt:
raise
except: # this might be py.error.Error, WindowsError ...
pass
if is_garbage(path):
try:
path.remove(rec=1)
except KeyboardInterrupt:
raise
except: # this might be py.error.Error, WindowsError ...
pass
# make link...
try:
username = os.environ['USER'] #linux, et al
except KeyError:
try:
username = os.environ['USERNAME'] #windows
except KeyError:
username = 'current'
src = str(udir)
dest = src[:src.rfind('-')] + '-' + username
try:
os.unlink(dest)
except OSError:
pass
try:
os.symlink(src, dest)
except (OSError, AttributeError, NotImplementedError):
pass
return udir
make_numbered_dir = classmethod(make_numbered_dir)
def copymode(src, dest):
""" copy permission from src to dst. """
import shutil
shutil.copymode(src, dest)
def copystat(src, dest):
""" copy permission, last modification time,
last access time, and flags from src to dst."""
import shutil
shutil.copystat(str(src), str(dest))
def copychunked(src, dest):
chunksize = 524288 # half a meg of bytes
fsrc = src.open('rb')
try:
fdest = dest.open('wb')
try:
while 1:
buf = fsrc.read(chunksize)
if not buf:
break
fdest.write(buf)
finally:
fdest.close()
finally:
fsrc.close()
def isimportable(name):
if name and (name[0].isalpha() or name[0] == '_'):
name = name.replace("_", '')
return not name or name.isalnum()
| []
| []
| [
"USERNAME",
"HOMEPATH",
"PATHEXT",
"HOMEDRIVE",
"SYSTEMROOT",
"Path",
"USER",
"HOME",
"PATH"
]
| [] | ["USERNAME", "HOMEPATH", "PATHEXT", "HOMEDRIVE", "SYSTEMROOT", "Path", "USER", "HOME", "PATH"] | python | 9 | 0 | |
tfx/components/example_gen/big_query_example_gen/executor_test.py | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.example_gen.big_query_example_gen.executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import apache_beam as beam
from apache_beam.testing import util
import mock
import tensorflow as tf
from google.cloud import bigquery
from google.protobuf import json_format
from tfx.components.example_gen.big_query_example_gen import executor
from tfx.proto import example_gen_pb2
from tfx.types import standard_artifacts
@beam.ptransform_fn
def _MockReadFromBigQuery(pipeline, query): # pylint: disable=invalid-name, unused-argument
mock_query_results = []
for i in range(10000):
mock_query_result = {
'i': None if random.randrange(10) == 0 else i,
'f': None if random.randrange(10) == 0 else float(i),
's': None if random.randrange(10) == 0 else str(i)
}
mock_query_results.append(mock_query_result)
return pipeline | beam.Create(mock_query_results)
@beam.ptransform_fn
def _MockReadFromBigQuery2(pipeline, query): # pylint: disable=invalid-name, unused-argument
mock_query_results = [{
'i': 1,
'f': 2.0,
's': 'abc',
}]
return pipeline | beam.Create(mock_query_results)
class ExecutorTest(tf.test.TestCase):
def setUp(self):
# Mock BigQuery result schema.
self._schema = [
bigquery.SchemaField('i', 'INTEGER', mode='REQUIRED'),
bigquery.SchemaField('f', 'FLOAT', mode='REQUIRED'),
bigquery.SchemaField('s', 'STRING', mode='REQUIRED'),
]
super(ExecutorTest, self).setUp()
@mock.patch.multiple(
executor,
_ReadFromBigQuery=_MockReadFromBigQuery2, # pylint: disable=invalid-name, unused-argument
)
@mock.patch.object(bigquery, 'Client')
def testBigQueryToExample(self, mock_client):
# Mock query result schema for _BigQueryConverter.
mock_client.return_value.query.return_value.result.return_value.schema = self._schema
with beam.Pipeline() as pipeline:
examples = (
pipeline | 'ToTFExample' >> executor._BigQueryToExample(
input_dict={},
exec_properties={},
split_pattern='SELECT i, f, s FROM `fake`'))
feature = {}
feature['i'] = tf.train.Feature(int64_list=tf.train.Int64List(value=[1]))
feature['f'] = tf.train.Feature(
float_list=tf.train.FloatList(value=[2.0]))
feature['s'] = tf.train.Feature(
bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes('abc')]))
example_proto = tf.train.Example(
features=tf.train.Features(feature=feature))
util.assert_that(examples, util.equal_to([example_proto]))
@mock.patch.multiple(
executor,
_ReadFromBigQuery=_MockReadFromBigQuery, # pylint: disable=invalid-name, unused-argument
)
@mock.patch.object(bigquery, 'Client')
def testDo(self, mock_client):
# Mock query result schema for _BigQueryConverter.
mock_client.return_value.query.return_value.result.return_value.schema = self._schema
output_data_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
# Create output dict.
train_examples = standard_artifacts.Examples(split='train')
train_examples.uri = os.path.join(output_data_dir, 'train')
eval_examples = standard_artifacts.Examples(split='eval')
eval_examples.uri = os.path.join(output_data_dir, 'eval')
output_dict = {'examples': [train_examples, eval_examples]}
    # Create exec properties.
exec_properties = {
'input_config':
json_format.MessageToJson(
example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(
name='bq', pattern='SELECT i, f, s FROM `fake`'),
]),
preserving_proto_field_name=True),
'output_config':
json_format.MessageToJson(
example_gen_pb2.Output(
split_config=example_gen_pb2.SplitConfig(splits=[
example_gen_pb2.SplitConfig.Split(
name='train', hash_buckets=2),
example_gen_pb2.SplitConfig.Split(
name='eval', hash_buckets=1)
])),
preserving_proto_field_name=True)
}
# Run executor.
big_query_example_gen = executor.Executor()
big_query_example_gen.Do({}, output_dict, exec_properties)
# Check BigQuery example gen outputs.
train_output_file = os.path.join(train_examples.uri,
'data_tfrecord-00000-of-00001.gz')
eval_output_file = os.path.join(eval_examples.uri,
'data_tfrecord-00000-of-00001.gz')
self.assertTrue(tf.io.gfile.exists(train_output_file))
self.assertTrue(tf.io.gfile.exists(eval_output_file))
self.assertGreater(
tf.io.gfile.GFile(train_output_file).size(),
tf.io.gfile.GFile(eval_output_file).size())
if __name__ == '__main__':
tf.test.main()
| []
| []
| [
"TEST_UNDECLARED_OUTPUTS_DIR"
]
| [] | ["TEST_UNDECLARED_OUTPUTS_DIR"] | python | 1 | 0 | |
dependency/django_models.py | import os
from micro_framework.dependencies import Dependency
class DjangoModels(Dependency):
def bind(self, worker):
"""Initialize the dependency"""
import django
django.setup()
def before_call(self, worker):
# import django
# django.setup()
if os.environ.get('DJANGO_NAMEKO_STANDALONE_SETTINGS_MODULE'):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", os.environ.get('DJANGO_NAMEKO_STANDALONE_SETTINGS_MODULE'))
elif not os.environ.get('DJANGO_SETTINGS_MODULE'):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
def get_dependency(self, worker):
"""Get the dependency for the concrete service"""
from django.apps import apps
from django.conf import settings
from django.contrib.auth.models import User
apps_config = map(apps.get_app_config, settings.DJANGO_NAMEKO_STANDALONE_APPS)
models = type('NonExistingClass_', (), {})
for config in apps_config:
for model in config.get_models():
setattr(models, model.__name__, model)
setattr(models, User.__name__, User)
return models
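        # Illustrative sketch (service wiring depends on the surrounding framework):
        # a worker that received this dependency as ``models`` could call, e.g.,
        #   models.User.objects.filter(is_active=True).count()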
def after_call(self, worker, result, exc):
"""Close all the connections on teardown
TODO: Autocommit??
"""
from django.db import connections
connections.close_all()
__all__ = ["DjangoModels"]
| []
| []
| [
"DJANGO_NAMEKO_STANDALONE_SETTINGS_MODULE",
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_NAMEKO_STANDALONE_SETTINGS_MODULE", "DJANGO_SETTINGS_MODULE"] | python | 2 | 0 | |
scripts/extract_findings_to_csv.py | #!/usr/bin/env python3
#
# Extract a CSV of findings for a particular bucket
#
import boto3
from botocore.exceptions import ClientError
import json
import os
import time
import csv
from time import sleep
from datetime import datetime
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
CSV_HEADER = ['AccountId', 'BucketName', 'Region', 'FileExtension', 'Severity', 'FindingType',
'FindingCount', 'Details', 'ObjectKey', 'S3Path', 'URLPath', 'FindingConsoleURL', 'Finding Creation Date', 'Object-level Public ACL']
def main(args, logger):
    # Macie is regional even though buckets aren't, so we need to iterate across
    # regions to find our buckets, unless a specific region was given.
if args.region:
regions = [args.region]
else:
regions = get_regions()
# Store bucket results
results = {
"Low": 0,
"Medium": 0,
"High": 0
}
with open(args.filename, 'w') as csvoutfile:
writer = csv.writer(csvoutfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
writer.writerow(CSV_HEADER)
for r in regions:
macie_client = boto3.client('macie2', region_name=r)
# Build a Findings criteria dictionary to pass to Macie2
findingCriteria = {'criterion': {'category': {'eq': ['CLASSIFICATION']}}}
if args.bucket:
findingCriteria['criterion']['resourcesAffected.s3Bucket.name'] = {'eq': [args.bucket]}
if args.severity:
if args.severity == "High":
findingCriteria['criterion']['severity.description'] = {'eq': ["High"]}
elif args.severity == "Medium":
findingCriteria['criterion']['severity.description'] = {'eq': ["High", "Medium"]}
else:
# No need to add a severity filter
pass
if args.since:
end_time = datetime.now()
start_time = datetime.strptime(args.since, "%Y-%m-%d")
findingCriteria['criterion']['createdAt'] = {
'gte': int(start_time.timestamp())*1000,
'lte': int(end_time.timestamp())*1000
}
logger.debug(f"findingCriteria: {json.dumps(findingCriteria, indent=2)}")
            # Macie is annoying in that we have to list the finding ids first, then pass
            # them to the get_findings() API to get any useful details.
list_response = macie_client.list_findings(
findingCriteria=findingCriteria,
maxResults=40
)
findings = list_response['findingIds']
logger.debug(f"Found {len(findings)} findings in {r}")
if len(findings) == 0:
# No findings in this region, move along
continue
# Now get the meat of these findings
get_response = macie_client.get_findings(findingIds=findings)
for f in get_response['findings']:
bucket_name = f['resourcesAffected']['s3Bucket']['name']
key = f['resourcesAffected']['s3Object']['key']
summary, count = get_summary(f)
obj_publicAccess = "Unknown"
if 'publicAccess' in f['resourcesAffected']['s3Object']:
obj_publicAccess = f['resourcesAffected']['s3Object']['publicAccess']
writer.writerow([f['accountId'], bucket_name, r,
f['resourcesAffected']['s3Object']['extension'],
f['severity']['description'], f['type'],
count, summary, key,
f"s3://{bucket_name}/{key}",
f"https://{bucket_name}.s3.amazonaws.com/{key}",
f"https://{r}.console.aws.amazon.com/macie/home?region={r}#findings?search=resourcesAffected.s3Bucket.name%3D{bucket_name}¯os=current&itemId={f['id']}",
f['createdAt'], obj_publicAccess
])
results[f['severity']['description']] += 1
# pagination is a pita. Here we continue to the List pagination
while 'nextToken' in list_response:
sleep(0.5)
list_response = macie_client.list_findings(
findingCriteria=findingCriteria,
maxResults=40,
nextToken=list_response['nextToken']
)
findings = list_response['findingIds']
logger.debug(f"Found {len(findings)} more findings in {r}")
get_response = macie_client.get_findings(findingIds=findings)
for f in get_response['findings']:
bucket_name = f['resourcesAffected']['s3Bucket']['name']
key = f['resourcesAffected']['s3Object']['key']
summary, count = get_summary(f)
obj_publicAccess = "Unknown"
if 'publicAccess' in f['resourcesAffected']['s3Object']:
obj_publicAccess = f['resourcesAffected']['s3Object']['publicAccess']
writer.writerow([f['accountId'], bucket_name, r,
f['resourcesAffected']['s3Object']['extension'],
f['severity']['description'], f['type'],
count, summary, key,
f"s3://{bucket_name}/{key}",
f"https://{bucket_name}.s3.amazonaws.com/{key}",
f"https://{r}.console.aws.amazon.com/macie/home?region={r}#findings?search=resourcesAffected.s3Bucket.name%3D{bucket_name}¯os=current&itemId={f['id']}",
f['createdAt'], obj_publicAccess
])
results[f['severity']['description']] += 1
print(f"Exported High: {results['High']} Medium: {results['Medium']} Low: {results['Low']} ")
csvoutfile.close()
def get_summary(finding):
summary = []
count = 0
for data_type in finding['classificationDetails']['result']['sensitiveData']:
summary.append(f"{data_type['category']}: {data_type['totalCount']}")
count += data_type['totalCount']
return("\n".join(summary), count)
def get_regions():
"""Return an array of the regions this account is active in. Ordered with us-east-1 in the front."""
ec2 = boto3.client('ec2')
response = ec2.describe_regions()
output = ['us-east-1']
for r in response['Regions']:
if r['RegionName'] == "us-east-1":
continue
output.append(r['RegionName'])
return(output)
def do_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--debug", help="print debugging info", action='store_true')
parser.add_argument("--error", help="print error info only", action='store_true')
parser.add_argument("--region", help="Only Process this region")
parser.add_argument("--bucket", help="Only price out this bucket")
parser.add_argument("--filename", help="Save to filename", required=True)
parser.add_argument("--since", help="Only output findings after this date - specified as YYYY-MM-DD")
parser.add_argument("--severity", help="Filter on this severity and higher",
choices=['High', 'Medium', 'Low'], default='Medium')
args = parser.parse_args()
return(args)
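# Illustrative invocation (values assumed):
#   python extract_findings_to_csv.py --filename findings.csv --severity High \
#       --since 2021-01-01 --region us-east-1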
if __name__ == '__main__':
args = do_args()
# Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging
# create console handler and set level to debug
ch = logging.StreamHandler()
if args.error:
logger.setLevel(logging.ERROR)
elif args.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# create formatter
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
# # Sanity check region
# if args.region:
# os.environ['AWS_DEFAULT_REGION'] = args.region
# if 'AWS_DEFAULT_REGION' not in os.environ:
# logger.error("AWS_DEFAULT_REGION Not set. Aborting...")
# exit(1)
try:
main(args, logger)
except KeyboardInterrupt:
exit(1)
| []
| []
| [
"AWS_DEFAULT_REGION"
]
| [] | ["AWS_DEFAULT_REGION"] | python | 1 | 0 | |
everything_else/djfrontend/django-1.0.2/core/management/__init__.py | import os
import sys
from optparse import OptionParser
import imp
import django
from django.core.management.base import BaseCommand, CommandError, handle_default_options
# For backwards compatibility: get_version() used to be in this module.
get_version = django.get_version
# A cache of loaded commands, so that call_command
# doesn't have to reload every time it's called.
_commands = None
def find_commands(management_dir):
"""
Given a path to a management directory, returns a list of all the command
names that are available.
Returns an empty list if no commands are defined.
"""
command_dir = os.path.join(management_dir, 'commands')
try:
return [f[:-3] for f in os.listdir(command_dir)
if not f.startswith('_') and f.endswith('.py')]
except OSError:
return []
def find_management_module(app_name):
"""
Determines the path to the management module for the given app_name,
without actually importing the application or the management module.
Raises ImportError if the management module cannot be found for any reason.
"""
parts = app_name.split('.')
parts.append('management')
parts.reverse()
part = parts.pop()
path = None
# When using manage.py, the project module is added to the path,
# loaded, then removed from the path. This means that
# testproject.testapp.models can be loaded in future, even if
# testproject isn't in the path. When looking for the management
# module, we need look for the case where the project name is part
# of the app_name but the project directory itself isn't on the path.
try:
f, path, descr = imp.find_module(part,path)
except ImportError,e:
if os.path.basename(os.getcwd()) != part:
raise e
while parts:
part = parts.pop()
f, path, descr = imp.find_module(part, path and [path] or None)
return path
def load_command_class(app_name, name):
"""
Given a command name and an application name, returns the Command
class instance. All errors raised by the import process
(ImportError, AttributeError) are allowed to propagate.
"""
return getattr(__import__('%s.management.commands.%s' % (app_name, name),
{}, {}, ['Command']), 'Command')()
def get_commands():
"""
Returns a dictionary mapping command names to their callback applications.
This works by looking for a management.commands package in django.core, and
in each installed application -- if a commands package exists, all commands
in that package are registered.
Core commands are always included. If a settings module has been
specified, user-defined commands will also be included, the
startproject command will be disabled, and the startapp command
will be modified to use the directory in which the settings module appears.
The dictionary is in the format {command_name: app_name}. Key-value
pairs from this dictionary can then be used in calls to
load_command_class(app_name, command_name)
If a specific version of a command must be loaded (e.g., with the
startapp command), the instantiated module can be placed in the
dictionary in place of the application name.
The dictionary is cached on the first call and reused on subsequent
calls.
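    Illustrative shape of the result (exact command names vary by installation):
        {'runserver': 'django.core', 'syncdb': 'django.core', ...}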
"""
global _commands
if _commands is None:
_commands = dict([(name, 'django.core') for name in find_commands(__path__[0])])
# Find the installed apps
try:
from django.conf import settings
apps = settings.INSTALLED_APPS
except (AttributeError, EnvironmentError, ImportError):
apps = []
# Find the project directory
try:
from django.conf import settings
project_directory = setup_environ(
__import__(
settings.SETTINGS_MODULE, {}, {},
(settings.SETTINGS_MODULE.split(".")[-1],)
), settings.SETTINGS_MODULE
)
except (AttributeError, EnvironmentError, ImportError):
project_directory = None
# Find and load the management module for each installed app.
for app_name in apps:
try:
path = find_management_module(app_name)
_commands.update(dict([(name, app_name)
for name in find_commands(path)]))
except ImportError:
pass # No management module - ignore this app
if project_directory:
# Remove the "startproject" command from self.commands, because
# that's a django-admin.py command, not a manage.py command.
del _commands['startproject']
# Override the startapp command so that it always uses the
# project_directory, not the current working directory
# (which is default).
from django.core.management.commands.startapp import ProjectCommand
_commands['startapp'] = ProjectCommand(project_directory)
return _commands
def call_command(name, *args, **options):
"""
Calls the given command, with the given options and args/kwargs.
This is the primary API you should use for calling specific commands.
Some examples:
call_command('syncdb')
call_command('shell', plain=True)
call_command('sqlall', 'myapp')
"""
try:
app_name = get_commands()[name]
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, name)
except KeyError:
raise CommandError, "Unknown command: %r" % name
return klass.execute(*args, **options)
class LaxOptionParser(OptionParser):
"""
An option parser that doesn't raise any errors on unknown options.
This is needed because the --settings and --pythonpath options affect
the commands (and thus the options) that are available to the user.
"""
def error(self, msg):
pass
def print_help(self):
"""Output nothing.
The lax options are included in the normal option parser, so under
normal usage, we don't need to print the lax options.
"""
pass
def print_lax_help(self):
"""Output the basic options available to every command.
This just redirects to the default print_help() behaviour.
"""
OptionParser.print_help(self)
def _process_args(self, largs, rargs, values):
"""
Overrides OptionParser._process_args to exclusively handle default
options and ignore args and other options.
This overrides the behavior of the super class, which stop parsing
at the first unrecognized option.
"""
while rargs:
arg = rargs[0]
try:
if arg[0:2] == "--" and len(arg) > 2:
# process a single long option (possibly with value(s))
# the superclass code pops the arg off rargs
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
# the superclass code pops the arg off rargs
self._process_short_opts(rargs, values)
else:
# it's either a non-default option or an arg
# either way, add it to the args list so we can keep
# dealing with options
del rargs[0]
raise error
except:
largs.append(arg)
class ManagementUtility(object):
"""
Encapsulates the logic of the django-admin.py and manage.py utilities.
A ManagementUtility has a number of commands, which can be manipulated
by editing the self.commands dictionary.
"""
def __init__(self, argv=None):
self.argv = argv or sys.argv[:]
self.prog_name = os.path.basename(self.argv[0])
def main_help_text(self):
"""
Returns the script's main help text, as a string.
"""
usage = ['',"Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,'']
usage.append('Available subcommands:')
commands = get_commands().keys()
commands.sort()
for cmd in commands:
usage.append(' %s' % cmd)
return '\n'.join(usage)
def fetch_command(self, subcommand):
"""
Tries to fetch the given subcommand, printing a message with the
appropriate command called from the command line (usually
"django-admin.py" or "manage.py") if it can't be found.
"""
try:
app_name = get_commands()[subcommand]
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, subcommand)
except KeyError:
sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\n" % \
(subcommand, self.prog_name))
sys.exit(1)
return klass
def execute(self):
"""
Given the command-line arguments, this figures out which subcommand is
being run, creates a parser appropriate to that command, and runs it.
"""
# Preprocess options to extract --settings and --pythonpath.
# These options could affect the commands that are available, so they
# must be processed early.
parser = LaxOptionParser(usage="%prog subcommand [options] [args]",
version=get_version(),
option_list=BaseCommand.option_list)
try:
options, args = parser.parse_args(self.argv)
handle_default_options(options)
except:
pass # Ignore any option errors at this point.
try:
subcommand = self.argv[1]
except IndexError:
sys.stderr.write("Type '%s help' for usage.\n" % self.prog_name)
sys.exit(1)
if subcommand == 'help':
if len(args) > 2:
self.fetch_command(args[2]).print_help(self.prog_name, args[2])
else:
parser.print_lax_help()
sys.stderr.write(self.main_help_text() + '\n')
sys.exit(1)
# Special-cases: We want 'django-admin.py --version' and
# 'django-admin.py --help' to work, for backwards compatibility.
elif self.argv[1:] == ['--version']:
# LaxOptionParser already takes care of printing the version.
pass
elif self.argv[1:] == ['--help']:
parser.print_lax_help()
sys.stderr.write(self.main_help_text() + '\n')
else:
self.fetch_command(subcommand).run_from_argv(self.argv)
def setup_environ(settings_mod, original_settings_path=None):
"""
Configures the runtime environment. This can also be used by external
scripts wanting to set up a similar environment to manage.py.
Returns the project directory (assuming the passed settings module is
directly in the project directory).
The "original_settings_path" parameter is optional, but recommended, since
trying to work out the original path from the module can be problematic.
"""
# Add this project to sys.path so that it's importable in the conventional
# way. For example, if this file (manage.py) lives in a directory
# "myproject", this code would add "/path/to/myproject" to sys.path.
project_directory, settings_filename = os.path.split(settings_mod.__file__)
if project_directory == os.curdir or not project_directory:
project_directory = os.getcwd()
project_name = os.path.basename(project_directory)
settings_name = os.path.splitext(settings_filename)[0]
sys.path.append(os.path.join(project_directory, os.pardir))
project_module = __import__(project_name, {}, {}, [''])
sys.path.pop()
# Set DJANGO_SETTINGS_MODULE appropriately.
if original_settings_path:
os.environ['DJANGO_SETTINGS_MODULE'] = original_settings_path
else:
os.environ['DJANGO_SETTINGS_MODULE'] = '%s.%s' % (project_name, settings_name)
return project_directory
def execute_from_command_line(argv=None):
"""
A simple method that runs a ManagementUtility.
"""
utility = ManagementUtility(argv)
utility.execute()
def execute_manager(settings_mod, argv=None):
"""
Like execute_from_command_line(), but for use by manage.py, a
project-specific django-admin.py utility.
"""
setup_environ(settings_mod)
utility = ManagementUtility(argv)
utility.execute()
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
replay.py | import logging
import gzip
import re
import os
from os import environ
import requests
import time
import sys
import boto
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
from multiprocessing import Pool
if environ.get('AWS_ACCESS_KEY') is None:
sys.exit("No AWS_ACCESS_KEY defined, stopping execution.")
if environ.get('AWS_SECRET_ACCESS_KEY') is None:
sys.exit("No AWS_SECRET_ACCESS_KEY defined, stopping execution.")
if environ.get('S3_BUCKET') is None:
os.environ['S3_BUCKET'] = "cold-storage.s3.example.com"
if environ.get('S3_BUCKET_REGION') is None:
os.environ['S3_BUCKET_REGION'] = "eu-west-1"
if environ.get('S3_BUCKET_PATH') is None:
os.environ['S3_BUCKET_PATH'] = "daily"
if environ.get('REPLAY_TO_DOMAIN') is None:
os.environ['REPLAY_TO_DOMAIN'] = "http://localhost:8080"
if environ.get('LOG_FILE_INPUT_DIR') is None:
os.environ['LOG_FILE_INPUT_DIR'] = "/tmp"
if environ.get('LOG_FILE_PATTERN') is None:
os.environ['LOG_FILE_PATTERN'] = "2017"
if environ.get('LOG_FILE_CHUNK_LINES') is None:
os.environ['LOG_FILE_CHUNK_LINES'] = "4"
if environ.get('OUTPUT_LOGGING_FILE') is None:
os.environ['OUTPUT_LOGGING_FILE'] = os.environ['LOG_FILE_INPUT_DIR'] + "/" \
+ str(int(time.time())) + "_replay.log"
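# Illustrative invocation (values assumed):
#   AWS_ACCESS_KEY=... AWS_SECRET_ACCESS_KEY=... S3_BUCKET=logs.example.com \
#   LOG_FILE_PATTERN=2017-01 REPLAY_TO_DOMAIN=http://localhost:8080 python replay.py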
logging.basicConfig(format='%(asctime)s %(message)s',
filename=os.environ['OUTPUT_LOGGING_FILE'],
filemode='w', level=logging.INFO)
headers = {
'User-Agent': "cold-storage-replay",
'X-Original-Timestamp': "",
}
fmt = re.compile(r"""(?P<ipaddress>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) - - \[(?P<dateandtime>\d{2}\/[a-z]{3}\/\d{4}:\d{2}:\d{2}:\d{2} (\+|\-)\d{4})\] ((\"(GET|POST) )(?P<url>.+)(http\/1\.1")) (?P<statuscode>\d{3}) (?P<bytessent>\d+) - (?P<body>.+)(["](?P<refferer>(\-)|(.+))["]) (["](?P<useragent>.+)["])""", re.IGNORECASE)
pattern = '%d/%b/%Y:%H:%M:%S +0000'
def process_log_file(l):
data = re.search(fmt, l)
if data:
datadict = data.groupdict()
ip = datadict["ipaddress"]
datetimestring = datadict["dateandtime"]
url = datadict["url"]
bytessent = datadict["bytessent"]
referrer = datadict["refferer"]
useragent = datadict["useragent"]
status = datadict["statuscode"]
method = data.group(6)
body = datadict["body"]
if method == "POST":
headers['X-Original-Timestamp'] = str(int(time.mktime(time.strptime(datetimestring, pattern))))
r = requests.post(os.environ['REPLAY_TO_DOMAIN'] + url.replace(" ", ""),
data=body.decode('string_escape'), headers=headers)
logging.info(str(r.status_code) + " " + datetimestring + " " + url)
try:
if '.' in os.environ['S3_BUCKET']:
conn = boto.s3.connect_to_region(region_name=os.environ['S3_BUCKET_REGION'],
aws_access_key_id=os.environ['AWS_ACCESS_KEY'],
aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],
calling_format=OrdinaryCallingFormat())
else:
conn = S3Connection(os.environ['AWS_ACCESS_KEY'], os.environ['AWS_SECRET_ACCESS_KEY'])
bucket = conn.get_bucket(os.environ['S3_BUCKET'])
for l in bucket.list():
if os.environ['LOG_FILE_PATTERN'] in str(l.key):
l.get_contents_to_filename(os.environ['LOG_FILE_INPUT_DIR'] + "/"
+ str(l.key).replace(os.environ['S3_BUCKET_PATH'] + "/", ""))
for f in os.listdir(os.environ['LOG_FILE_INPUT_DIR']):
if f.endswith(".gz"):
logfile = gzip.open(os.environ['LOG_FILE_INPUT_DIR'] + "/" + f, "r")
elif f.endswith(".log"):
logfile = open(os.environ['LOG_FILE_INPUT_DIR'] + "/" + f, "r")
else:
continue
        pool = Pool()
        pool.map(process_log_file, logfile,
                 chunksize=int(os.environ['LOG_FILE_CHUNK_LINES']))
        pool.close()
        pool.join()
        logfile.close()
except Exception as e:
logging.error(e)
| []
| []
| [
"S3_BUCKET_PATH",
"S3_BUCKET",
"LOG_FILE_INPUT_DIR",
"OUTPUT_LOGGING_FILE",
"AWS_SECRET_ACCESS_KEY",
"S3_BUCKET_REGION",
"AWS_ACCESS_KEY",
"LOG_FILE_PATTERN",
"REPLAY_TO_DOMAIN",
"LOG_FILE_CHUNK_LINES"
]
| [] | ["S3_BUCKET_PATH", "S3_BUCKET", "LOG_FILE_INPUT_DIR", "OUTPUT_LOGGING_FILE", "AWS_SECRET_ACCESS_KEY", "S3_BUCKET_REGION", "AWS_ACCESS_KEY", "LOG_FILE_PATTERN", "REPLAY_TO_DOMAIN", "LOG_FILE_CHUNK_LINES"] | python | 10 | 0 | |
efmq.go | // Package efmq provides basic MQTT like functionality for message
// publishing and subscriptions within a local area network
package efmq
import (
"encoding/json"
"errors"
"log"
"net"
"github.com/mdlayher/ethernet"
"github.com/mdlayher/raw"
)
// EFQM represents a connection
type EFMQ struct {
netInterface *net.Interface
connection *net.PacketConn
subscription []string
listening bool
Message chan Message
}
type Message struct {
Topic string `json:"tpc"`
Payload string `json:"pyld"`
}
const etherType = 0xcccc
// NewEFMQ is a factory function to create a value of EFMQ type
func NewEFMQ(networkInterface string) (*EFMQ, error) {
mq := new(EFMQ)
mq.Message = make(chan Message)
// set network interface
ni, err := net.InterfaceByName(networkInterface)
if err != nil {
return mq, errors.New("NewEFMQ: could not detect interface " + networkInterface)
}
// create connection/listener
conn, err := connect(ni)
if err != nil {
return mq, err
}
// store in struct
mq.netInterface = ni
mq.connection = &conn
return mq, nil
}
// connect opens network interface to create connection for listening
func connect(ni *net.Interface) (net.PacketConn, error) {
var conn net.PacketConn
conn, err := raw.ListenPacket(ni, etherType)
if err != nil {
return conn, err
}
return conn, nil
}
// Subscribe takes a new subscription and stores it to slice
func (mq *EFMQ) Subscribe(topic string) {
// add topic to subscriptions and start listener
mq.subscription = append(mq.subscription, topic)
}
// Unsubscribe removes subscription from slice store
func (mq *EFMQ) Unsubscribe(topic string) error {
// remove topic from subscriptions
for i, v := range mq.subscription {
if v == topic {
mq.subscription = append(mq.subscription[:i], mq.subscription[i+1:]...)
}
}
return nil
}
// Publish broadcasts a message on the network which comprises topic
// and payload
func (mq *EFMQ) Publish(topic string, payload string) error {
// build a JSON object
message := Message{
Topic: topic,
Payload: payload,
}
// marshal to byte slice of JSON
content, err := json.Marshal(&message)
if err != nil {
return errors.New("Publish: failed to marshal JSON")
}
// pass to despatcher
if err := mq.despatcher(content); err != nil {
return err
}
return nil
}
// despatcher handles the transmission of message over ethernet frames
func (mq *EFMQ) despatcher(content []byte) error {
// configure frame
f := ðernet.Frame{
Destination: ethernet.Broadcast,
Source: mq.netInterface.HardwareAddr,
EtherType: etherType,
Payload: content,
}
	// required on Linux, following mdlayher's ethecho example
addr := &raw.Addr{
HardwareAddr: ethernet.Broadcast,
}
// prepare
binary, err := f.MarshalBinary()
if err != nil {
return errors.New("despatcher: failed to marshal ethernet frame")
}
// send
conn := *mq.connection
if _, err := conn.WriteTo(binary, addr); err != nil {
return errors.New("despatcher: failed to send message")
}
return nil
}
// Subscriptions returns list of topics currently subscribed to
func (mq *EFMQ) Subscriptions() []string {
return mq.subscription
}
// Listen announces the subscriptions to which we are subscribed
// and then starts listener func in goroutine
func (mq *EFMQ) Listen() {
var subs string
subsLen := len(mq.subscription)
for i, v := range mq.subscription {
subs += v
if i < subsLen-1 {
subs += ", "
} else {
subs += "."
}
}
// listen & log
log.Println("Subscribed to topic(s):", subs, "Now listening...")
go mq.listener()
}
// listener filters messages before presenting to client using topic
func (mq *EFMQ) listener() {
var f ethernet.Frame
var conn net.PacketConn
var subs []string
conn = *mq.connection
subs = mq.subscription
b := make([]byte, mq.netInterface.MTU)
// handle messages indefinitely
for {
n, _, err := conn.ReadFrom(b)
if err != nil {
log.Printf("listener: failed to receive message: %v", err)
}
if err := (&f).UnmarshalBinary(b[:n]); err != nil {
log.Printf("listener: failed to unmarshal ethernet frame: %v", err)
}
// f.Payload could be padded with zeros, need to deal before unmarshal
var payload []byte
for _, v := range f.Payload {
if v != 0 {
payload = append(payload, v)
}
}
// unmarshal JSON
message := new(Message)
err = json.Unmarshal(payload, message)
if err != nil {
log.Println(err)
}
for _, v := range subs {
if message.Topic == v {
// put message on channel if matches a subscription
mq.Message <- *message
}
}
}
}
| []
| []
| []
| [] | [] | go | null | null | null |
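A minimal usage sketch for the EFMQ API above, showing a publish/subscribe round trip. It is illustrative only: it assumes the constructor is NewEFMQ(interfaceName string) (*EFMQ, error) (inferred from the error string in the constructor tail above), that the struct exposes a Message channel of Message values as used by listener(), and an invented import path.

package main

import (
	"log"

	efmq "github.com/example/efmq" // hypothetical import path for the package above
)

func main() {
	// bind the queue to a local network interface (the name is machine-specific)
	mq, err := efmq.NewEFMQ("eth0")
	if err != nil {
		log.Fatal(err)
	}
	// subscribe to a topic and start the background listener goroutine
	mq.Subscribe("sensors/temperature")
	mq.Listen()
	// broadcast a message on the same topic; subscribers on the same LAN segment receive it
	if err := mq.Publish("sensors/temperature", "21.5"); err != nil {
		log.Println("publish failed:", err)
	}
	// consume messages delivered by the listener goroutine
	for msg := range mq.Message {
		log.Printf("topic=%s payload=%s", msg.Topic, msg.Payload)
	}
}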
pkg/mysql/connect_mysql.go | package mysql
import (
"fmt"
"github.com/SbstnErhrdt/go-gorm-all-sql/pkg/environment"
log "github.com/sirupsen/logrus"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"os"
)
var requiredEnvironmentVariablesForMySQL = []string{
"SQL_HOST",
"SQL_USER",
"SQL_PASSWORD",
"SQL_PORT",
"SQL_DATABASE",
}
func ConnectToMysql(config *gorm.Config) (client *gorm.DB, err error) {
// check env variables
environment.CheckEnvironmentVariables(requiredEnvironmentVariablesForMySQL)
// env variables
host := os.Getenv("SQL_HOST")
user := os.Getenv("SQL_USER")
pw := os.Getenv("SQL_PASSWORD")
port := os.Getenv("SQL_PORT")
dbName := os.Getenv("SQL_DATABASE")
// build connection string
dsn := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?charset=utf8mb4&parseTime=True&loc=Local", user, pw, host, port, dbName)
// connect to db
client, err = gorm.Open(mysql.Open(dsn), config)
if err != nil {
log.Error(err)
return nil, err
}
log.Info("MySQL Client: connected", err)
return
}
| [
"\"SQL_HOST\"",
"\"SQL_USER\"",
"\"SQL_PASSWORD\"",
"\"SQL_PORT\"",
"\"SQL_DATABASE\""
]
| []
| [
"SQL_PASSWORD",
"SQL_DATABASE",
"SQL_USER",
"SQL_PORT",
"SQL_HOST"
]
| [] | ["SQL_PASSWORD", "SQL_DATABASE", "SQL_USER", "SQL_PORT", "SQL_HOST"] | go | 5 | 0 | |
cmd/internal/cli/actions_linux.go | // Copyright (c) 2019-2021, Sylabs Inc. All rights reserved.
// This software is licensed under a 3-clause BSD license. Please consult the
// LICENSE.md file distributed with the sources of this project regarding your
// rights to use or distribute this software.
package cli
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/RyaxTech/singularity/internal/pkg/buildcfg"
"github.com/RyaxTech/singularity/internal/pkg/image/unpacker"
"github.com/RyaxTech/singularity/internal/pkg/instance"
"github.com/RyaxTech/singularity/internal/pkg/plugin"
"github.com/RyaxTech/singularity/internal/pkg/runtime/engine/config/oci"
"github.com/RyaxTech/singularity/internal/pkg/runtime/engine/config/oci/generate"
"github.com/RyaxTech/singularity/internal/pkg/security"
"github.com/RyaxTech/singularity/internal/pkg/util/bin"
"github.com/RyaxTech/singularity/internal/pkg/util/env"
"github.com/RyaxTech/singularity/internal/pkg/util/fs"
"github.com/RyaxTech/singularity/internal/pkg/util/gpu"
"github.com/RyaxTech/singularity/internal/pkg/util/shell/interpreter"
"github.com/RyaxTech/singularity/internal/pkg/util/starter"
"github.com/RyaxTech/singularity/internal/pkg/util/user"
imgutil "github.com/RyaxTech/singularity/pkg/image"
clicallback "github.com/RyaxTech/singularity/pkg/plugin/callback/cli"
singularitycallback "github.com/RyaxTech/singularity/pkg/plugin/callback/runtime/engine/singularity"
"github.com/RyaxTech/singularity/pkg/runtime/engine/config"
singularityConfig "github.com/RyaxTech/singularity/pkg/runtime/engine/singularity/config"
"github.com/RyaxTech/singularity/pkg/sylog"
"github.com/RyaxTech/singularity/pkg/util/capabilities"
"github.com/RyaxTech/singularity/pkg/util/cryptkey"
"github.com/RyaxTech/singularity/pkg/util/fs/proc"
"github.com/RyaxTech/singularity/pkg/util/namespaces"
"github.com/RyaxTech/singularity/pkg/util/rlimit"
"github.com/RyaxTech/singularity/pkg/util/singularityconf"
"github.com/spf13/cobra"
"golang.org/x/sys/unix"
)
// convertImage extracts the image found at filename into a root directory (imageDir) inside a
// temporary directory (tempDir). If the unpacker cannot locate the unsquashfs binary itself, the
// binary at unsquashfsPath is used. It is the caller's responsibility to remove tempDir when no
// longer needed.
func convertImage(filename string, unsquashfsPath string) (tempDir, imageDir string, err error) {
img, err := imgutil.Init(filename, false)
if err != nil {
return "", "", fmt.Errorf("could not open image %s: %s", filename, err)
}
defer img.File.Close()
part, err := img.GetRootFsPartition()
if err != nil {
return "", "", fmt.Errorf("while getting root filesystem in %s: %s", filename, err)
}
// Nice message if we have been given an older ext3 image, which cannot be extracted due to lack of privilege
// to loopback mount.
if part.Type == imgutil.EXT3 {
sylog.Errorf("File %q is an ext3 format continer image.", filename)
sylog.Errorf("Only SIF and squashfs images can be extracted in unprivileged mode.")
sylog.Errorf("Use `singularity build` to convert this image to a SIF file using a setuid install of Singularity.")
}
// Only squashfs can be extracted
if part.Type != imgutil.SQUASHFS {
return "", "", fmt.Errorf("not a squashfs root filesystem")
}
// create a reader for rootfs partition
reader, err := imgutil.NewPartitionReader(img, "", 0)
if err != nil {
return "", "", fmt.Errorf("could not extract root filesystem: %s", err)
}
s := unpacker.NewSquashfs()
if !s.HasUnsquashfs() && unsquashfsPath != "" {
s.UnsquashfsPath = unsquashfsPath
}
// keep compatibility with v2
tmpdir := os.Getenv("SINGULARITY_TMPDIR")
if tmpdir == "" {
tmpdir = os.Getenv("SINGULARITY_LOCALCACHEDIR")
if tmpdir == "" {
tmpdir = os.Getenv("SINGULARITY_CACHEDIR")
}
}
// create temporary sandbox
tempDir, err = ioutil.TempDir(tmpdir, "rootfs-")
if err != nil {
return "", "", fmt.Errorf("could not create temporary sandbox: %s", err)
}
defer func() {
if err != nil {
os.RemoveAll(tempDir)
}
}()
// create an inner dir to extract to, so we don't clobber the secure permissions on the tmpDir.
imageDir = filepath.Join(tempDir, "root")
if err := os.Mkdir(imageDir, 0o755); err != nil {
return "", "", fmt.Errorf("could not create root directory: %s", err)
}
// extract root filesystem
if err := s.ExtractAll(reader, imageDir); err != nil {
return "", "", fmt.Errorf("root filesystem extraction failed: %s", err)
}
return tempDir, imageDir, err
}
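// Usage note (illustrative, not part of the original file): callers of convertImage are
// responsible for removing tempDir once the sandbox is no longer needed, for example:
//
//	tempDir, imageDir, err := convertImage(image, "")
//	if err != nil {
//		return err
//	}
//	defer os.RemoveAll(tempDir)
//	sylog.Debugf("extracted rootfs at %s", imageDir)
//
// Passing an empty unsquashfsPath assumes the unpacker can locate unsquashfs on its own.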
// hidepidProc checks whether the hidepid option is set on the /proc mount point. When it is,
// an instance started with the setuid workflow cannot be joined later or stopped correctly.
func hidepidProc() bool {
entries, err := proc.GetMountInfoEntry("/proc/self/mountinfo")
if err != nil {
sylog.Warningf("while reading /proc/self/mountinfo: %s", err)
return false
}
for _, e := range entries {
if e.Point == "/proc" {
for _, o := range e.SuperOptions {
if strings.HasPrefix(o, "hidepid=") {
return true
}
}
}
}
return false
}
// Set engine flags to disable mounts, to allow overriding them if they are set true
// in the singularity.conf
func setNoMountFlags(c *singularityConfig.EngineConfig) {
for _, v := range NoMount {
switch v {
case "proc":
c.SetNoProc(true)
case "sys":
c.SetNoSys(true)
case "dev":
c.SetNoDev(true)
case "devpts":
c.SetNoDevPts(true)
case "home":
c.SetNoHome(true)
case "tmp":
c.SetNoTmp(true)
case "hostfs":
c.SetNoHostfs(true)
case "cwd":
c.SetNoCwd(true)
default:
sylog.Warningf("Ignoring unknown mount type '%s'", v)
}
}
}
// TODO: Let's stick this in another file so that the CLI is just CLI
func execStarter(cobraCmd *cobra.Command, image string, args []string, name string) {
var err error
targetUID := 0
targetGID := make([]int, 0)
procname := ""
uid := uint32(os.Getuid())
gid := uint32(os.Getgid())
insideUserNs, _ := namespaces.IsInsideUserNamespace(os.Getpid())
// Are we running from a privileged account?
isPrivileged := uid == 0
checkPrivileges := func(cond bool, desc string, fn func()) {
if !cond {
return
}
if !isPrivileged {
sylog.Fatalf("%s requires root privileges", desc)
}
fn()
}
engineConfig := singularityConfig.NewConfig()
imageArg := os.Getenv("IMAGE_ARG")
os.Unsetenv("IMAGE_ARG")
engineConfig.SetImageArg(imageArg)
engineConfig.File = singularityconf.GetCurrentConfig()
if engineConfig.File == nil {
sylog.Fatalf("Unable to get singularity configuration")
}
ociConfig := &oci.Config{}
generator := generate.New(&ociConfig.Spec)
engineConfig.OciConfig = ociConfig
generator.SetProcessArgs(args)
currMask := syscall.Umask(0o022)
if !NoUmask {
// Save the current umask, to be set for the process run in the container
// https://github.com/RyaxTech/singularity/issues/5214
sylog.Debugf("Saving umask %04o for propagation into container", currMask)
engineConfig.SetUmask(currMask)
engineConfig.SetRestoreUmask(true)
}
uidParam := security.GetParam(Security, "uid")
gidParam := security.GetParam(Security, "gid")
// handle target UID/GID for root user
checkPrivileges(uidParam != "", "uid security feature", func() {
u, err := strconv.ParseUint(uidParam, 10, 32)
if err != nil {
sylog.Fatalf("failed to parse provided UID")
}
targetUID = int(u)
uid = uint32(targetUID)
engineConfig.SetTargetUID(targetUID)
})
checkPrivileges(gidParam != "", "gid security feature", func() {
gids := strings.Split(gidParam, ":")
for _, id := range gids {
g, err := strconv.ParseUint(id, 10, 32)
if err != nil {
sylog.Fatalf("failed to parse provided GID")
}
targetGID = append(targetGID, int(g))
}
if len(gids) > 0 {
gid = uint32(targetGID[0])
}
engineConfig.SetTargetGID(targetGID)
})
if strings.HasPrefix(image, "instance://") {
if name != "" {
sylog.Fatalf("Starting an instance from another is not allowed")
}
instanceName := instance.ExtractName(image)
file, err := instance.Get(instanceName, instance.SingSubDir)
if err != nil {
sylog.Fatalf("%s", err)
}
UserNamespace = file.UserNs
generator.AddProcessEnv("SINGULARITY_CONTAINER", file.Image)
generator.AddProcessEnv("SINGULARITY_NAME", filepath.Base(file.Image))
engineConfig.SetImage(image)
engineConfig.SetInstanceJoin(true)
} else {
abspath, err := filepath.Abs(image)
generator.AddProcessEnv("SINGULARITY_CONTAINER", abspath)
generator.AddProcessEnv("SINGULARITY_NAME", filepath.Base(abspath))
if err != nil {
sylog.Fatalf("Failed to determine image absolute path for %s: %s", image, err)
}
engineConfig.SetImage(abspath)
}
// privileged installation by default
useSuid := true
// singularity was compiled with '--without-suid' option
if buildcfg.SINGULARITY_SUID_INSTALL == 0 {
useSuid = false
if !UserNamespace && uid != 0 {
sylog.Verbosef("Unprivileged installation: using user namespace")
UserNamespace = true
}
}
// use non privileged starter binary:
// - if running as root
// - if already running inside a user namespace
// - if user namespace is requested
// - if running as user and 'allow setuid = no' is set in singularity.conf
if uid == 0 || insideUserNs || UserNamespace || !engineConfig.File.AllowSetuid {
useSuid = false
// fallback to user namespace:
// - for non root user with setuid installation and 'allow setuid = no'
// - for root user without effective capability CAP_SYS_ADMIN
if uid != 0 && buildcfg.SINGULARITY_SUID_INSTALL == 1 && !engineConfig.File.AllowSetuid {
sylog.Verbosef("'allow setuid' set to 'no' by configuration, fallback to user namespace")
UserNamespace = true
} else if uid == 0 && !UserNamespace {
caps, err := capabilities.GetProcessEffective()
if err != nil {
sylog.Fatalf("Could not get process effective capabilities: %s", err)
}
if caps&uint64(1<<unix.CAP_SYS_ADMIN) == 0 {
sylog.Verbosef("Effective capability CAP_SYS_ADMIN is missing, fallback to user namespace")
UserNamespace = true
}
}
}
// early check for key material before we start engine so we can fail fast if missing
// we do not need this check when joining a running instance, just for starting a container
if !engineConfig.GetInstanceJoin() {
sylog.Debugf("Checking for encrypted system partition")
img, err := imgutil.Init(engineConfig.GetImage(), false)
if err != nil {
sylog.Fatalf("could not open image %s: %s", engineConfig.GetImage(), err)
}
part, err := img.GetRootFsPartition()
if err != nil {
sylog.Fatalf("while getting root filesystem in %s: %s", engineConfig.GetImage(), err)
}
// ensure we have decryption material
if part.Type == imgutil.ENCRYPTSQUASHFS {
sylog.Debugf("Encrypted container filesystem detected")
keyInfo, err := getEncryptionMaterial(cobraCmd)
if err != nil {
sylog.Fatalf("Cannot load key for decryption: %v", err)
}
plaintextKey, err := cryptkey.PlaintextKey(keyInfo, engineConfig.GetImage())
if err != nil {
sylog.Errorf("Cannot decrypt %s: %v", engineConfig.GetImage(), err)
sylog.Fatalf("Please check you are providing the correct key for decryption")
}
engineConfig.SetEncryptionKey(plaintextKey)
}
// don't defer this call as in all cases it won't be
// called before execing starter, so it would leak the
// image file descriptor to the container process
img.File.Close()
}
// First get binds from -B/--bind and env var
binds, err := singularityConfig.ParseBindPath(BindPaths)
if err != nil {
sylog.Fatalf("while parsing bind path: %s", err)
}
// Now add binds from one or more --mount and env var.
for _, m := range Mounts {
bps, err := singularityConfig.ParseMountString(m)
if err != nil {
sylog.Fatalf("while parsing mount %q: %s", m, err)
}
binds = append(binds, bps...)
}
engineConfig.SetBindPath(binds)
generator.AddProcessEnv("SINGULARITY_BIND", strings.Join(BindPaths, ","))
if len(FuseMount) > 0 {
/* If --fusemount is given, imply --pid */
PidNamespace = true
if err := engineConfig.SetFuseMount(FuseMount); err != nil {
sylog.Fatalf("while setting fuse mount: %s", err)
}
}
engineConfig.SetNetwork(Network)
engineConfig.SetDNS(DNS)
engineConfig.SetNetworkArgs(NetworkArgs)
engineConfig.SetOverlayImage(OverlayPath)
engineConfig.SetWritableImage(IsWritable)
engineConfig.SetNoHome(NoHome)
setNoMountFlags(engineConfig)
if err := SetGPUConfig(engineConfig); err != nil {
// We must fatal on error, as we are checking for correct ownership of nvidia-container-cli,
// which is important to maintain security.
sylog.Fatalf("while setting GPU configuration: %s", err)
}
engineConfig.SetAddCaps(AddCaps)
engineConfig.SetDropCaps(DropCaps)
engineConfig.SetConfigurationFile(configurationFile)
checkPrivileges(AllowSUID, "--allow-setuid", func() {
engineConfig.SetAllowSUID(AllowSUID)
})
checkPrivileges(KeepPrivs, "--keep-privs", func() {
engineConfig.SetKeepPrivs(KeepPrivs)
})
engineConfig.SetNoPrivs(NoPrivs)
engineConfig.SetSecurity(Security)
engineConfig.SetShell(ShellPath)
engineConfig.AppendLibrariesPath(ContainLibsPath...)
engineConfig.SetFakeroot(IsFakeroot)
if ShellPath != "" {
generator.AddProcessEnv("SINGULARITY_SHELL", ShellPath)
}
checkPrivileges(CgroupsPath != "", "--apply-cgroups", func() {
engineConfig.SetCgroupsPath(CgroupsPath)
})
if IsWritable && IsWritableTmpfs {
sylog.Warningf("Disabling --writable-tmpfs flag, mutually exclusive with --writable")
engineConfig.SetWritableTmpfs(false)
} else {
engineConfig.SetWritableTmpfs(IsWritableTmpfs)
}
homeFlag := cobraCmd.Flag("home")
engineConfig.SetCustomHome(homeFlag.Changed)
// If we have fakeroot & the home flag has not been used then we have the standard
// /root location for the root user $HOME in the container.
// This doesn't count as a SetCustomHome(true), as we are mounting from the real
// user's standard $HOME -> /root and we want to respect --contain not mounting
// the $HOME in this case.
// See https://github.com/RyaxTech/singularity/pull/5227
if !homeFlag.Changed && IsFakeroot {
HomePath = fmt.Sprintf("%s:/root", HomePath)
}
// set home directory for the targeted UID if it exists on host system
if !homeFlag.Changed && targetUID != 0 {
if targetUID > 500 {
if pwd, err := user.GetPwUID(uint32(targetUID)); err == nil {
sylog.Debugf("Target UID requested, set home directory to %s", pwd.Dir)
HomePath = pwd.Dir
engineConfig.SetCustomHome(true)
} else {
sylog.Verbosef("Home directory for UID %d not found, home won't be mounted", targetUID)
engineConfig.SetNoHome(true)
HomePath = "/"
}
} else {
sylog.Verbosef("System UID %d requested, home won't be mounted", targetUID)
engineConfig.SetNoHome(true)
HomePath = "/"
}
}
if Hostname != "" {
UtsNamespace = true
engineConfig.SetHostname(Hostname)
}
checkPrivileges(IsBoot, "--boot", func() {})
if IsContained || IsContainAll || IsBoot {
engineConfig.SetContain(true)
if IsContainAll {
PidNamespace = true
IpcNamespace = true
IsCleanEnv = true
}
}
engineConfig.SetScratchDir(ScratchPath)
engineConfig.SetWorkdir(WorkdirPath)
homeSlice := strings.Split(HomePath, ":")
if len(homeSlice) > 2 || len(homeSlice) == 0 {
sylog.Fatalf("home argument has incorrect number of elements: %v", len(homeSlice))
}
engineConfig.SetHomeSource(homeSlice[0])
if len(homeSlice) == 1 {
engineConfig.SetHomeDest(homeSlice[0])
} else {
engineConfig.SetHomeDest(homeSlice[1])
}
if IsFakeroot {
UserNamespace = true
}
/* if name submitted, run as instance */
if name != "" {
PidNamespace = true
engineConfig.SetInstance(true)
engineConfig.SetBootInstance(IsBoot)
if useSuid && !UserNamespace && hidepidProc() {
sylog.Fatalf("hidepid option set on /proc mount, require 'hidepid=0' to start instance with setuid workflow")
}
_, err := instance.Get(name, instance.SingSubDir)
if err == nil {
sylog.Fatalf("instance %s already exists", name)
}
if IsBoot {
UtsNamespace = true
NetNamespace = true
if Hostname == "" {
engineConfig.SetHostname(name)
}
if !KeepPrivs {
engineConfig.SetDropCaps("CAP_SYS_BOOT,CAP_SYS_RAWIO")
}
generator.SetProcessArgs([]string{"/sbin/init"})
}
pwd, err := user.GetPwUID(uint32(os.Getuid()))
if err != nil {
sylog.Fatalf("failed to retrieve user information for UID %d: %s", os.Getuid(), err)
}
procname, err = instance.ProcName(name, pwd.Name)
if err != nil {
sylog.Fatalf("%s", err)
}
} else {
generator.SetProcessArgs(args)
procname = "Singularity runtime parent"
}
if NetNamespace {
if IsFakeroot && Network != "none" {
engineConfig.SetNetwork("fakeroot")
			// an unprivileged installation cannot use the fakeroot
			// network because it requires a setuid installation,
			// so we fall back to the none network
if buildcfg.SINGULARITY_SUID_INSTALL == 0 || !engineConfig.File.AllowSetuid {
sylog.Warningf(
"fakeroot with unprivileged installation or 'allow setuid = no' " +
"could not use 'fakeroot' network, fallback to 'none' network",
)
engineConfig.SetNetwork("none")
}
}
generator.AddOrReplaceLinuxNamespace("network", "")
}
if UtsNamespace {
generator.AddOrReplaceLinuxNamespace("uts", "")
}
if PidNamespace {
generator.AddOrReplaceLinuxNamespace("pid", "")
engineConfig.SetNoInit(NoInit)
}
if IpcNamespace {
generator.AddOrReplaceLinuxNamespace("ipc", "")
}
if UserNamespace {
generator.AddOrReplaceLinuxNamespace("user", "")
if !IsFakeroot {
generator.AddLinuxUIDMapping(uid, uid, 1)
generator.AddLinuxGIDMapping(gid, gid, 1)
}
}
if SingularityEnvFile != "" {
currentEnv := append(
os.Environ(),
"SINGULARITY_IMAGE="+engineConfig.GetImage(),
)
content, err := ioutil.ReadFile(SingularityEnvFile)
if err != nil {
sylog.Fatalf("Could not read %q environment file: %s", SingularityEnvFile, err)
}
env, err := interpreter.EvaluateEnv(content, args, currentEnv)
if err != nil {
sylog.Fatalf("While processing %s: %s", SingularityEnvFile, err)
}
// --env variables will take precedence over variables
// defined by the environment file
sylog.Debugf("Setting environment variables from file %s", SingularityEnvFile)
SingularityEnv = append(env, SingularityEnv...)
}
// process --env and --env-file variables for injection
// into the environment by prefixing them with SINGULARITYENV_
for _, env := range SingularityEnv {
e := strings.SplitN(env, "=", 2)
if len(e) != 2 {
sylog.Warningf("Ignore environment variable %q: '=' is missing", env)
continue
}
os.Setenv("SINGULARITYENV_"+e[0], e[1])
}
// Copy and cache environment
environment := os.Environ()
// Clean environment
singularityEnv := env.SetContainerEnv(generator, environment, IsCleanEnv, engineConfig.GetHomeDest())
engineConfig.SetSingularityEnv(singularityEnv)
if pwd, err := os.Getwd(); err == nil {
engineConfig.SetCwd(pwd)
if PwdPath != "" {
generator.SetProcessCwd(PwdPath)
} else {
if engineConfig.GetContain() {
generator.SetProcessCwd(engineConfig.GetHomeDest())
} else {
generator.SetProcessCwd(pwd)
}
}
} else {
sylog.Warningf("can't determine current working directory: %s", err)
}
// starter will force the loading of kernel overlay module
loadOverlay := false
if !UserNamespace && buildcfg.SINGULARITY_SUID_INSTALL == 1 {
loadOverlay = true
}
generator.AddProcessEnv("SINGULARITY_APPNAME", AppName)
// convert image file to sandbox if we are using user
// namespace or if we are currently running inside a
// user namespace
if (UserNamespace || insideUserNs) && fs.IsFile(image) {
convert := true
if engineConfig.File.ImageDriver != "" {
// load image driver plugins
callbackType := (singularitycallback.RegisterImageDriver)(nil)
callbacks, err := plugin.LoadCallbacks(callbackType)
if err != nil {
sylog.Debugf("Loading plugins callbacks '%T' failed: %s", callbackType, err)
} else {
for _, callback := range callbacks {
if err := callback.(singularitycallback.RegisterImageDriver)(true); err != nil {
sylog.Debugf("While registering image driver: %s", err)
}
}
}
driver := imgutil.GetDriver(engineConfig.File.ImageDriver)
if driver != nil && driver.Features()&imgutil.ImageFeature != 0 {
// the image driver indicates support for image so let's
// proceed with the image driver without conversion
convert = false
}
}
if convert {
unsquashfsPath, err := bin.FindBin("unsquashfs")
if err != nil {
sylog.Fatalf("while extracting %s: %s", image, err)
}
sylog.Verbosef("User namespace requested, convert image %s to sandbox", image)
sylog.Infof("Converting SIF file to temporary sandbox...")
tempDir, imageDir, err := convertImage(image, unsquashfsPath)
if err != nil {
sylog.Fatalf("while extracting %s: %s", image, err)
}
engineConfig.SetImage(imageDir)
engineConfig.SetDeleteTempDir(tempDir)
generator.AddProcessEnv("SINGULARITY_CONTAINER", imageDir)
// if '--disable-cache' flag, then remove original SIF after converting to sandbox
if disableCache {
sylog.Debugf("Removing tmp image: %s", image)
err := os.Remove(image)
if err != nil {
sylog.Errorf("unable to remove tmp image: %s: %v", image, err)
}
}
}
}
// setuid workflow set RLIMIT_STACK to its default value,
// get the original value to restore it before executing
// container process
if useSuid {
soft, hard, err := rlimit.Get("RLIMIT_STACK")
if err != nil {
sylog.Warningf("can't retrieve stack size limit: %s", err)
}
generator.AddProcessRlimits("RLIMIT_STACK", hard, soft)
}
cfg := &config.Common{
EngineName: singularityConfig.Name,
ContainerID: name,
EngineConfig: engineConfig,
}
callbackType := (clicallback.SingularityEngineConfig)(nil)
callbacks, err := plugin.LoadCallbacks(callbackType)
if err != nil {
sylog.Fatalf("While loading plugins callbacks '%T': %s", callbackType, err)
}
for _, c := range callbacks {
c.(clicallback.SingularityEngineConfig)(cfg)
}
if engineConfig.GetInstance() {
stdout, stderr, err := instance.SetLogFile(name, int(uid), instance.LogSubDir)
if err != nil {
sylog.Fatalf("failed to create instance log files: %s", err)
}
start, err := stderr.Seek(0, io.SeekEnd)
if err != nil {
sylog.Warningf("failed to get standard error stream offset: %s", err)
}
cmdErr := starter.Run(
procname,
cfg,
starter.UseSuid(useSuid),
starter.WithStdout(stdout),
starter.WithStderr(stderr),
starter.LoadOverlayModule(loadOverlay),
)
if sylog.GetLevel() != 0 {
			// starter can exit a bit before all errors have been reported
			// by the instance process, so wait a bit to catch all errors
time.Sleep(100 * time.Millisecond)
end, err := stderr.Seek(0, io.SeekEnd)
if err != nil {
sylog.Warningf("failed to get standard error stream offset: %s", err)
}
if end-start > 0 {
output := make([]byte, end-start)
stderr.ReadAt(output, start)
fmt.Println(string(output))
}
}
if cmdErr != nil {
sylog.Fatalf("failed to start instance: %s", cmdErr)
} else {
sylog.Verbosef("you will find instance output here: %s", stdout.Name())
sylog.Verbosef("you will find instance error here: %s", stderr.Name())
sylog.Infof("instance started successfully")
}
} else {
err := starter.Exec(
procname,
cfg,
starter.UseSuid(useSuid),
starter.LoadOverlayModule(loadOverlay),
)
sylog.Fatalf("%s", err)
}
}
// SetGPUConfig sets up EngineConfig entries for NV / ROCm usage, if requested.
func SetGPUConfig(engineConfig *singularityConfig.EngineConfig) error {
if engineConfig.File.AlwaysUseNv && !NoNvidia {
Nvidia = true
sylog.Verbosef("'always use nv = yes' found in singularity.conf")
}
if engineConfig.File.AlwaysUseRocm && !NoRocm {
Rocm = true
sylog.Verbosef("'always use rocm = yes' found in singularity.conf")
}
if Nvidia && Rocm {
sylog.Warningf("--nv and --rocm cannot be used together. Only --nv will be applied.")
}
if Nvidia {
// If nvccli was not enabled by flag or config, drop down to legacy binds immediately
if !engineConfig.File.UseNvCCLI && !NvCCLI {
return setNVLegacyConfig(engineConfig)
}
// TODO: In privileged fakeroot mode we don't have the correct namespace context to run nvidia-container-cli
// from starter, so fall back to legacy NV handling until that workflow is refactored heavily.
fakeRootPriv := IsFakeroot && engineConfig.File.AllowSetuid && (buildcfg.SINGULARITY_SUID_INSTALL == 1)
if !fakeRootPriv {
return setNvCCLIConfig(engineConfig)
}
return fmt.Errorf("--fakeroot does not support --nvccli in set-uid installations")
}
if Rocm {
return setRocmConfig(engineConfig)
}
return nil
}
// setNvCCLIConfig sets up EngineConfig entries for NVIDIA GPU configuration via nvidia-container-cli
func setNvCCLIConfig(engineConfig *singularityConfig.EngineConfig) (err error) {
sylog.Debugf("Using nvidia-container-cli for GPU setup")
engineConfig.SetNvCCLI(true)
	// When we use --contain we don't mount the NV devices by default in the nvidia-container-cli
	// flow; they must be requested by setting `NVIDIA_VISIBLE_DEVICES`. This differs from the
	// legacy flow, which always mounts all GPU devices.
if (IsContained || IsContainAll) && os.Getenv("NVIDIA_VISIBLE_DEVICES") == "" {
sylog.Warningf("When using nvidia-container-cli with --contain NVIDIA_VISIBLE_DEVICES must be set or no GPUs will be available in container.")
}
// Pass NVIDIA_ env vars that will be converted to nvidia-container-cli options
nvCCLIEnv := []string{}
for _, e := range os.Environ() {
if strings.HasPrefix(e, "NVIDIA_") {
nvCCLIEnv = append(nvCCLIEnv, e)
}
}
engineConfig.SetNvCCLIEnv(nvCCLIEnv)
if UserNamespace && !IsWritable {
return fmt.Errorf("nvidia-container-cli requires --writable with user namespace/fakeroot")
}
if !IsWritable && !IsWritableTmpfs {
sylog.Infof("Setting --writable-tmpfs (required by nvidia-container-cli)")
IsWritableTmpfs = true
}
return nil
}
// setNVLegacyConfig sets up EngineConfig entries for NVIDIA GPU configuration via direct binds of configured bins/libs.
func setNVLegacyConfig(engineConfig *singularityConfig.EngineConfig) error {
sylog.Debugf("Using legacy binds for nv GPU setup")
engineConfig.SetNvLegacy(true)
gpuConfFile := filepath.Join(buildcfg.SINGULARITY_CONFDIR, "nvliblist.conf")
// bind persistenced socket if found
ipcs, err := gpu.NvidiaIpcsPath()
if err != nil {
sylog.Warningf("While finding nv ipcs: %v", err)
}
libs, bins, err := gpu.NvidiaPaths(gpuConfFile)
if err != nil {
sylog.Warningf("While finding nv bind points: %v", err)
}
setGPUBinds(engineConfig, libs, bins, ipcs, "nv")
return nil
}
// setRocmConfig sets up EngineConfig entries for ROCm GPU configuration via direct binds of configured bins/libs.
func setRocmConfig(engineConfig *singularityConfig.EngineConfig) error {
sylog.Debugf("Using rocm GPU setup")
engineConfig.SetRocm(true)
gpuConfFile := filepath.Join(buildcfg.SINGULARITY_CONFDIR, "rocmliblist.conf")
libs, bins, err := gpu.RocmPaths(gpuConfFile)
if err != nil {
sylog.Warningf("While finding ROCm bind points: %v", err)
}
setGPUBinds(engineConfig, libs, bins, []string{}, "nv")
return nil
}
// setGPUBinds sets EngineConfig entries to bind the provided list of libs, bins, ipc files.
func setGPUBinds(engineConfig *singularityConfig.EngineConfig, libs, bins, ipcs []string, gpuPlatform string) {
files := make([]string, len(bins)+len(ipcs))
if len(files) == 0 {
sylog.Warningf("Could not find any %s files on this host!", gpuPlatform)
} else {
if IsWritable {
sylog.Warningf("%s files may not be bound with --writable", gpuPlatform)
}
for i, binary := range bins {
usrBinBinary := filepath.Join("/usr/bin", filepath.Base(binary))
files[i] = strings.Join([]string{binary, usrBinBinary}, ":")
}
for i, ipc := range ipcs {
files[i+len(bins)] = ipc
}
engineConfig.SetFilesPath(files)
}
if len(libs) == 0 {
sylog.Warningf("Could not find any %s libraries on this host!", gpuPlatform)
} else {
engineConfig.SetLibrariesPath(libs)
}
}
| [
"\"SINGULARITY_TMPDIR\"",
"\"SINGULARITY_LOCALCACHEDIR\"",
"\"SINGULARITY_CACHEDIR\"",
"\"IMAGE_ARG\"",
"\"NVIDIA_VISIBLE_DEVICES\""
]
| []
| [
"SINGULARITY_LOCALCACHEDIR",
"SINGULARITY_CACHEDIR",
"NVIDIA_VISIBLE_DEVICES",
"SINGULARITY_TMPDIR",
"IMAGE_ARG"
]
| [] | ["SINGULARITY_LOCALCACHEDIR", "SINGULARITY_CACHEDIR", "NVIDIA_VISIBLE_DEVICES", "SINGULARITY_TMPDIR", "IMAGE_ARG"] | go | 5 | 0 | |
security_analysis_web_app/security_analysis_web_app/wsgi.py | """
WSGI config for security_analysis_web_app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'security_analysis_web_app.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
digitaloxford/settings/__init__.py | import os
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# Set hosting environment, if not set, default to production for security
HOSTING_ENV = os.getenv("HOSTING_ENV", "production")
if HOSTING_ENV == "dev":
from .dev import *
else:
from .production import *
| []
| []
| [
"HOSTING_ENV"
]
| [] | ["HOSTING_ENV"] | python | 1 | 0 | |
vendor/github.com/urfave/cli/v2/help.go | package cli
import (
"fmt"
"io"
"os"
"strings"
"text/tabwriter"
"text/template"
"unicode/utf8"
)
var helpCommand = &Command{
Name: "help",
Aliases: []string{"h"},
Usage: "Shows a list of commands or help for one command",
ArgsUsage: "[command]",
Action: func(c *Context) error {
args := c.Args()
if args.Present() {
return ShowCommandHelp(c, args.First())
}
_ = ShowAppHelp(c)
return nil
},
}
var helpSubcommand = &Command{
Name: "help",
Aliases: []string{"h"},
Usage: "Shows a list of commands or help for one command",
ArgsUsage: "[command]",
Action: func(c *Context) error {
args := c.Args()
if args.Present() {
return ShowCommandHelp(c, args.First())
}
return ShowSubcommandHelp(c)
},
}
// Prints help for the App or Command
type helpPrinter func(w io.Writer, templ string, data interface{})
// Prints help for the App or Command with custom template function.
type helpPrinterCustom func(w io.Writer, templ string, data interface{}, customFunc map[string]interface{})
// HelpPrinter is a function that writes the help output. If not set a default
// is used. The function signature is:
// func(w io.Writer, templ string, data interface{})
var HelpPrinter helpPrinter = printHelp
// HelpPrinterCustom is same as HelpPrinter but
// takes a custom function for template function map.
var HelpPrinterCustom helpPrinterCustom = printHelpCustom
// VersionPrinter prints the version for the App
var VersionPrinter = printVersion
// ShowAppHelpAndExit - Prints the list of subcommands for the app and exits with exit code.
func ShowAppHelpAndExit(c *Context, exitCode int) {
_ = ShowAppHelp(c)
os.Exit(exitCode)
}
// ShowAppHelp is an action that displays the help.
func ShowAppHelp(c *Context) (err error) {
if c.App.CustomAppHelpTemplate == "" {
HelpPrinter(c.App.Writer, AppHelpTemplate, c.App)
return
}
customAppData := func() map[string]interface{} {
if c.App.ExtraInfo == nil {
return nil
}
return map[string]interface{}{
"ExtraInfo": c.App.ExtraInfo,
}
}
HelpPrinterCustom(c.App.Writer, c.App.CustomAppHelpTemplate, c.App, customAppData())
return nil
}
// DefaultAppComplete prints the list of subcommands as the default app completion method
func DefaultAppComplete(c *Context) {
DefaultCompleteWithFlags(nil)(c)
}
func printCommandSuggestions(commands []*Command, writer io.Writer) {
for _, command := range commands {
if command.Hidden {
continue
}
if os.Getenv("_CLI_ZSH_AUTOCOMPLETE_HACK") == "1" {
for _, name := range command.Names() {
_, _ = fmt.Fprintf(writer, "%s:%s\n", name, command.Usage)
}
} else {
for _, name := range command.Names() {
_, _ = fmt.Fprintf(writer, "%s\n", name)
}
}
}
}
func cliArgContains(flagName string) bool {
for _, name := range strings.Split(flagName, ",") {
name = strings.TrimSpace(name)
count := utf8.RuneCountInString(name)
if count > 2 {
count = 2
}
flag := fmt.Sprintf("%s%s", strings.Repeat("-", count), name)
for _, a := range os.Args {
if a == flag {
return true
}
}
}
return false
}
func printFlagSuggestions(lastArg string, flags []Flag, writer io.Writer) {
cur := strings.TrimPrefix(lastArg, "-")
cur = strings.TrimPrefix(cur, "-")
for _, flag := range flags {
if bflag, ok := flag.(*BoolFlag); ok && bflag.Hidden {
continue
}
		for _, name := range flag.Names() {
name = strings.TrimSpace(name)
// this will get total count utf8 letters in flag name
count := utf8.RuneCountInString(name)
if count > 2 {
				count = 2 // reuse this count to generate a single - or -- in flag completion
}
// if flag name has more than one utf8 letter and last argument in cli has -- prefix then
// skip flag completion for short flags example -v or -x
if strings.HasPrefix(lastArg, "--") && count == 1 {
continue
}
// match if last argument matches this flag and it is not repeated
if strings.HasPrefix(name, cur) && cur != name && !cliArgContains(name) {
flagCompletion := fmt.Sprintf("%s%s", strings.Repeat("-", count), name)
_, _ = fmt.Fprintln(writer, flagCompletion)
}
}
}
}
func DefaultCompleteWithFlags(cmd *Command) func(c *Context) {
return func(c *Context) {
if len(os.Args) > 2 {
lastArg := os.Args[len(os.Args)-2]
if strings.HasPrefix(lastArg, "-") {
printFlagSuggestions(lastArg, c.App.Flags, c.App.Writer)
if cmd != nil {
printFlagSuggestions(lastArg, cmd.Flags, c.App.Writer)
}
return
}
}
if cmd != nil {
printCommandSuggestions(cmd.Subcommands, c.App.Writer)
} else {
printCommandSuggestions(c.App.Commands, c.App.Writer)
}
}
}
// ShowCommandHelpAndExit - exits with code after showing help
func ShowCommandHelpAndExit(c *Context, command string, code int) {
_ = ShowCommandHelp(c, command)
os.Exit(code)
}
// ShowCommandHelp prints help for the given command
func ShowCommandHelp(ctx *Context, command string) error {
// show the subcommand help for a command with subcommands
if command == "" {
HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App)
return nil
}
for _, c := range ctx.App.Commands {
if c.HasName(command) {
if c.CustomHelpTemplate != "" {
HelpPrinterCustom(ctx.App.Writer, c.CustomHelpTemplate, c, nil)
} else {
HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c)
}
return nil
}
}
if ctx.App.CommandNotFound == nil {
return Exit(fmt.Sprintf("No help topic for '%v'", command), 3)
}
ctx.App.CommandNotFound(ctx, command)
return nil
}
// ShowSubcommandHelp prints help for the given subcommand
func ShowSubcommandHelp(c *Context) error {
if c == nil {
return nil
}
if c.Command != nil {
return ShowCommandHelp(c, c.Command.Name)
}
return ShowCommandHelp(c, "")
}
// ShowVersion prints the version number of the App
func ShowVersion(c *Context) {
VersionPrinter(c)
}
func printVersion(c *Context) {
_, _ = fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version)
}
// ShowCompletions prints the lists of commands within a given context
func ShowCompletions(c *Context) {
a := c.App
if a != nil && a.BashComplete != nil {
a.BashComplete(c)
}
}
// ShowCommandCompletions prints the custom completions for a given command
func ShowCommandCompletions(ctx *Context, command string) {
c := ctx.App.Command(command)
if c != nil {
if c.BashComplete != nil {
c.BashComplete(ctx)
} else {
DefaultCompleteWithFlags(c)(ctx)
}
}
}
func printHelpCustom(out io.Writer, templ string, data interface{}, customFunc map[string]interface{}) {
funcMap := template.FuncMap{
"join": strings.Join,
}
for key, value := range customFunc {
funcMap[key] = value
}
w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0)
t := template.Must(template.New("help").Funcs(funcMap).Parse(templ))
err := t.Execute(w, data)
if err != nil {
// If the writer is closed, t.Execute will fail, and there's nothing
// we can do to recover.
if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" {
_, _ = fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err)
}
return
}
_ = w.Flush()
}
func printHelp(out io.Writer, templ string, data interface{}) {
printHelpCustom(out, templ, data, nil)
}
func checkVersion(c *Context) bool {
found := false
for _, name := range VersionFlag.Names() {
if c.Bool(name) {
found = true
}
}
return found
}
func checkHelp(c *Context) bool {
found := false
for _, name := range HelpFlag.Names() {
if c.Bool(name) {
found = true
}
}
return found
}
func checkCommandHelp(c *Context, name string) bool {
if c.Bool("h") || c.Bool("help") {
_ = ShowCommandHelp(c, name)
return true
}
return false
}
func checkSubcommandHelp(c *Context) bool {
if c.Bool("h") || c.Bool("help") {
_ = ShowSubcommandHelp(c)
return true
}
return false
}
func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) {
if !a.EnableBashCompletion {
return false, arguments
}
pos := len(arguments) - 1
lastArg := arguments[pos]
if lastArg != "--generate-bash-completion" {
return false, arguments
}
return true, arguments[:pos]
}
func checkCompletions(c *Context) bool {
if !c.shellComplete {
return false
}
if args := c.Args(); args.Present() {
name := args.First()
if cmd := c.App.Command(name); cmd != nil {
// let the command handle the completion
return false
}
}
ShowCompletions(c)
return true
}
func checkCommandCompletions(c *Context, name string) bool {
if !c.shellComplete {
return false
}
ShowCommandCompletions(c, name)
return true
}
| [
"\"_CLI_ZSH_AUTOCOMPLETE_HACK\"",
"\"CLI_TEMPLATE_ERROR_DEBUG\""
]
| []
| [
"CLI_TEMPLATE_ERROR_DEBUG",
"_CLI_ZSH_AUTOCOMPLETE_HACK"
]
| [] | ["CLI_TEMPLATE_ERROR_DEBUG", "_CLI_ZSH_AUTOCOMPLETE_HACK"] | go | 2 | 0 | |
EasySurveyDesigner/src/com/baconbanana/easysurveydesigner/functionalCore/dbops/old/DBOperationOld.java | //package com.baconbanana.easysurveydesigner.functionalCore.dbops.old;
//
//import java.sql.Connection;
//import java.sql.DriverManager;
//import java.sql.PreparedStatement;
//import java.sql.ResultSet;
//import java.sql.ResultSetMetaData;
//import java.sql.SQLException;
//import java.sql.Statement;
//import java.util.ArrayList;
//
//
//public class DBOperationOld {
//
// private static Connection con;
//
// public static Connection getConnect(){
// String osName = System.getProperty("os.name");
// String systemDir = "";
// if(osName.contains("Windows")){
// systemDir = System.getenv("USERPROFILE");
// }else if(osName.contains("Mac")){
// systemDir = System.getenv("HOME");
// }
// try{
// Class.forName("org.sqlite.JDBC");
// //That is the lane that creates the database.
// if(osName.contains("Windows")){
// con = DriverManager.getConnection("jdbc:sqlite:"+ systemDir +"\\My Documents\\SQLite\\easysurvey.db");
// }else if(osName.contains("Mac")){
// con = DriverManager.getConnection("jdbc:sqlite:"+ systemDir +"/Documents/SQLite/easysurvey.db");
// }
// Statement s = con.createStatement();
// s.execute("PRAGMA foreign_keys = ON");
// s.close();
// }catch (Exception e){
// e.printStackTrace();
// System.err.println(e.getClass().getName() + " : " + e.getMessage());
// System.exit(0);
// }
// return con;
// }
// private static void executeStatement(String stmt)throws SQLException{
// Connection c = getConnect();
// Statement s = null;
// s = c.createStatement();
// s.executeUpdate(stmt);
// s.close();
// //c.close();
//
// }
// //considering not that
// public static boolean createTable(String sql){
// try{
// executeStatement("CREATE TABLE " + sql);
// return true;
// }catch(SQLException e){
// return false;
// }
// }
// public static boolean insertRecord(String sql){
// try{
// executeStatement("INSERT INTO " + sql);
// return true;
// }catch(SQLException e){
// e.printStackTrace();
// System.err.println(e.getClass().getName() + " : " + e.getMessage());
// return false;
// }
// }
//
// public static boolean deleteRecord(String sql){
// try{
// executeStatement("DELETE from " + sql);
// return true;
// }catch(SQLException e){
// e.printStackTrace();
// System.err.println(e.getClass().getName() + " : " + e.getMessage());
// return false;
// }
// }
//
// public static boolean onUpdate(String sql){
// try{
// executeStatement(sql);
// return true;
// }catch(SQLException e){
// e.printStackTrace();
// System.err.println(e.getClass().getName() + " : " + e.getMessage());
// return false;
// }
// }
//
// public static int insertRecordReturnID(String sql){
// try{
// executeStatement(sql);
// sql = "SELECT last_insert_rowID()";
// ArrayList<String[]> lastRow = selectRecord(sql);
// return Integer.parseInt(lastRow.get(0)[0]);
// }catch(SQLException e){
// e.printStackTrace();
// System.err.println(e.getClass().getName() + " : " + e.getMessage());
// return -1;
// }
// }
// //questionable output
// public static ArrayList<String[]> selectRecord(String sql){
// Connection c = getConnect();
// Statement s;
// ResultSet rs;
// ResultSetMetaData rsmd;
// ArrayList<String[]> results = new ArrayList<String[]>();
// try{
// c.setAutoCommit(false);
// s = c.createStatement();
// rs = s.executeQuery(sql);
// rsmd = rs.getMetaData();
// while(rs.next()){
// int colCount = rsmd.getColumnCount();
// String[] row = new String[colCount];
// System.out.println(colCount);
// for(int i = 1; i < colCount; i++){
// row[i] = rs.getString(i);
// }
// results.add(row);
// }
// }catch (SQLException e){
// e.printStackTrace();
// System.err.println(e.getClass().getName() + " : " + e.getMessage());
// System.exit(0);
// }
// return results;
// }
//
// public static boolean exists(String table){
// Connection c = getConnect();
// Statement s = null;
// try{
// s = c.createStatement();
// s.executeUpdate("SELECT * FROM " + table);
// s.close();
// }catch(SQLException e){
// return false;
// }
// return true;
// }
// //--------------------------------------------------------------------------------------------------------------------------
// //I created these methods to compare the username with data from the database, but they did not work perfectly.
// //They are not in use at the moment, but I left them here in case they are useful in the future.
// public static ArrayList<String> selectRecord2(String sql, String colName){
// Connection c = getConnect();
// Statement s;
// ResultSet rs;
// ArrayList<String> results = new ArrayList<String>();
// try{
// c.setAutoCommit(false);
// s = c.createStatement();
// rs = s.executeQuery(sql);
//
// while(rs.next()){
// String data = rs.getString(colName);
// results.add(data);
// System.out.println(data);
//
// }
// }catch (SQLException e){
// e.printStackTrace();
// System.err.println(e.getClass().getName() + " : " + e.getMessage());
// System.exit(0);
// }
// return results;
// }
//
// public static String checkPassword2(){
// Connection c = getConnect();
// String s = null;
// try {
// PreparedStatement st = con.prepareStatement("SELECT Username FROM Login WHERE Username = 'Barry'");
// ResultSet rs = st.executeQuery();
// while (rs.next()){
// String s1 = rs.getString(1);
// return s = s1 ;
// }
// } catch (SQLException e) {
// // TODO Auto-generated catch block
// e.printStackTrace();
// System.err.println(e.getClass().getName() + " : " + e.getMessage());
// System.exit(0);
// }
//
// return s;
//
// }
// //TO DO change to throws SQL exception
// public static boolean existsRecord(String sql){
// ArrayList<String[]> result = selectRecord(sql);
// if(result.size() > 0){
// return true;
//
// }
// else{
// return false;
// }
// }
// //----------------------------------------------------------------------------------------
// public static void raf() throws SQLException{
// try {
// Class.forName("org.sqlite.JDBC");
// } catch (ClassNotFoundException e1) {
// // TODO Auto-generated catch block
// e1.printStackTrace();
// }
// Connection c = getConnect();
//
// SqlTask tasks[] = { new SqlTask(c, "Gandhi", "politics"),
// new SqlTask(c, "Turing", "computers"),
// new SqlTask(c, "Picaso", "artist"),
// new SqlTask(c, "shakespeare", "writer"),
// new SqlTask(c, "tesla", "inventor"), };
//
// System.out.println("Sequential DB access:");
//
// Thread threads[] = new Thread[tasks.length];
// for (int i = 0; i < tasks.length; i++)
// threads[i] = new Thread(tasks[i]);
//
// for (int i = 0; i < tasks.length; i++)
// {
// threads[i].start();
// try {
// threads[i].join();
// } catch (InterruptedException e) {
// // TODO Auto-generated catch block
// e.printStackTrace();
// }
// }
//
// stat = c.createStatement();
// ResultSet rs = stat.executeQuery("SELECT * FROM people");
// ResultSetMetaData rsmd = rs.getMetaData();
// while (rs.next())
// {
// System.out.println("name = " + rs.getString("name"));
// System.out.println("job = " + rs.getString("occupation"));
// }
// stat.close();
// c.close();
// }
//
// public static class SqlTask implements Runnable
// {
// Connection conn;
// String name, occupation;
//
// public SqlTask(Connection conn, String name, String occupation)
// {
// this.conn = conn;
// this.name = name;
// this.occupation = occupation;
// }
//
// public void run()
// {
// PreparedStatement prep = null;
// long startTime = System.currentTimeMillis();
//
// try
// {
// try
// {
// prep = conn
// .prepareStatement("insert into people values (?, ?)");
//
// prep.setString(1, name);
// prep.setString(2, occupation);
// prep.executeUpdate();
//
// long duration = System.currentTimeMillis() - startTime;
// System.out.println("SQL Insert completed in :" + duration);
// }
// finally
// {
// if (prep != null)
// prep.close();
// }
// }
// catch (SQLException e)
// {
// long duration = System.currentTimeMillis() - startTime;
// System.out.print(" SQL Insert failed: " + duration);
// System.out.println(" SQLException: " + e);
// }
// }
// }
//
//} | [
"\"USERPROFILE\"",
"\"HOME\""
]
| []
| [
"HOME",
"USERPROFILE"
]
| [] | ["HOME", "USERPROFILE"] | java | 2 | 0 | |
pystrometry/pystrometry.py | """
Classes and functions for high-precision astrometry timeseries analysis.
Authors
-------
- Johannes Sahlmann
Notes
-----
- should support python 2.7 and 3.5 (for the time being)
"""
from __future__ import print_function
import copy
import os
import numpy as np
from matplotlib import pyplot as plt
import pylab as pl
from astropy import constants as const
from astropy.table import Table, Column
import astropy.units as u
from scipy.interpolate import *
import pdb
from astropy.time import Time, TimeDelta
from astropy.table import vstack as tablevstack
from astropy.table import hstack as tablehstack
from astroquery.simbad import Simbad
import sys
if sys.version_info[0] == 3:
# import urllib.request as urllib
from urllib.request import urlopen
from urllib.error import HTTPError
import pickle
import sympy as sp
from scipy.optimize import fmin as scipyfmin
from linearfit import linearfit
try:
import pyslalib as sla
except (ImportError):
pass
from .utils import mcmc_helpers, acceleration
#***********************************CONSTANTS***********************************
global MS_kg, MJ_kg
MS_kg = const.M_sun.value
# MJ_kg = const.M_jup.value
Ggrav = const.G.value
day2sec = u.day.to(u.second)
AU_m = const.au.value
pc_m = const.pc.value # parsec in meters
MJ_kg = const.M_jup.value # jupiter mass in kg
ME_kg = const.M_earth.value
deg2rad = u.deg.to(u.rad)
rad2mas = u.rad.to(u.arcsec)*1000.
deg2as = u.deg.to(u.arcsec)
year2day = u.year.to(u.day)
MJ2MS = MJ_kg/MS_kg
DEFAULT_EPHEMERIS_DICTIONARY = {'Spitzer': 'horizons_XYZ_2003-2020_EQUATORIAL_Spitzer_1day_csv',
'HST' : 'horizons_XYZ_1990-2016_EQUATORIAL_HST_1day_csv',
'WISE' : 'horizons_XYZ_2009-2016_EQUATORIAL_WISE_1day_csv',
'JWST' : 'horizons_XYZ_2012-2023_EQUATORIAL_JWST_1day_csv',
'L2' : 'horizons_XYZ_1990-2035_EQUATORIAL_L2_1day_csv',
'Earth' : 'horizons_XYZ_1990-2035_EQUATORIAL_Eart1day_csv'}
local_dir = os.path.dirname(os.path.abspath(__file__))
global ephemeris_dir
try:
ephemeris_dir = os.environ['EPHEMERIS_DIRECTORY']
except KeyError:
ephemeris_dir = os.path.join(local_dir, 'data')
def fractional_luminosity(mag1, mag2):
"""
    defining fractional luminosity of masses M1 and M2 as beta = L2/(L1+L2) and
    mag1 - mag2 = -2.5 log10(L1/L2), we find
    beta = 1/(1 + 10^(0.4*(mag2-mag1)))
:param mag1:
:param mag2:
:return:
"""
return 1./(1. + 10.**(0.4*(mag2-mag1)))
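# Note added for clarity: the expression above follows from Pogson's relation.
# With mag2 - mag1 = -2.5*log10(L2/L1) one has L1/L2 = 10**(0.4*(mag2 - mag1)),
# and therefore
#     beta = L2/(L1 + L2) = 1/(1 + L1/L2) = 1/(1 + 10**(0.4*(mag2 - mag1))),
# which is exactly what fractional_luminosity() returns.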
def luminosity_ratio(fractional_lum):
"""Return luminosity ratio S=L2/L1."""
return fractional_lum / (1 - fractional_lum)
def fractional_mass(m1, m2):
"""
    computes the fractional mass
    fractional_mass(m1, m2) returns m2/(m1+m2)
:param m1:
:param m2:
:return:
"""
return m2/(m1+m2)
def periastron_time(lambda_ref_deg, omega_deg, t_ref_mjd, p_day):
"""Return time of periastron passage.
Parameters
----------
lambda_ref_deg : float
mean_longitude_at_reference_time
omega_deg : float
argument of periastron
t_ref_mjd : float
reference time in MJD (e.g. mid-time of observations)
p_day : float
orbital period
Returns
-------
"""
# mean anomaly at reference date
m_ref_deg = lambda_ref_deg - omega_deg
# phase at pericentre passage
# phi0_1 = - np.deg2rad(m_ref_deg)/2./np.pi
# Tp_day = phi0_1 * P_day + TRef_MJD
# time at periastron
t_periastron_mjd = t_ref_mjd - p_day * np.deg2rad(m_ref_deg) / (2*np.pi)
return t_periastron_mjd
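# Note added for clarity: periastron_time() inverts the mean-motion relation.
# The mean anomaly at the reference epoch is M_ref = lambda_ref - omega (in degrees),
# and since M = 2*pi*(t - T_p)/P the periastron passage follows as
#     T_p = T_ref - P * deg2rad(M_ref) / (2*pi) = T_ref - P * (lambda_ref - omega) / 360,
# with P and the epochs in days; this is exactly the expression returned above.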
def mean_longitude(t_periastron_mjd, omega_deg, t_mjd, p_day):
"""Return mean longitude at time t_mjd.
Parameters
----------
t_periastron_mjd : float
time of periastron passage in MJD
omega_deg : float
argument of periastron
    t_mjd : float
        time in MJD at which the mean longitude is evaluated
p_day : float
orbital period
Returns
-------
lambda_deg
"""
# mean anomaly
# m_deg = np.rad2deg((t_mjd - t_periastron_mjd) * (2 * np.pi)/p_day)
m_deg = mean_anomaly(t_mjd, t_periastron_mjd, p_day)
# mean longitude
lambda_deg = m_deg + omega_deg
return lambda_deg
class OrbitSystem(object):
"""Representation of a binary system following Keplerian motion.
The primary (m1) is typically the brighter component, i.e.
delta_mag = mag2-mag1 is positive. For cases, where the
secondary is more massive, the mass ratio q=m2/m1 > 1.
Notes
-----
These features are supported:
- Differential chromatic refraction
- Hipparcos and Gaia scan angle definitions
References
----------
- Started by JSA 2014-01-29
- Streamlined init by OJO
"""
def __init__(self, attribute_dict={}):
"""The default attribute values are stored in the hardcoded
dictionary below, which also defines the list of acceptable
attributes.
The content of attribute_dict is transferred to the instance.
Parameters
----------
attribute_dict : dict
"""
self.attribute_dict = attribute_dict
default_dict = {'P_day': 100, 'ecc': 0, 'm1_MS': 1, 'm2_MJ': 1,
'omega_deg': 0., 'OMEGA_deg': 0., 'i_deg': 90.,
'Tp_day': 0., 'RA_deg': 0., 'DE_deg': 0.,
'absolute_plx_mas': 25.,
'parallax_correction_mas': 0.,
'muRA_mas': 20., 'muDE_mas': 50.,
'accel_ra': None, # like Gaia DR3 datamodel: Acceleration in RA (double, Misc[mas/year**2])
'accel_dec': None, # like Gaia DR3 datamodel: Acceleration in Dec (double, Misc[mas/year**2])
'deriv_accel_ra': None, # like Gaia DR3 datamodel: Time derivative of the accel. in RA (double, Misc[mas/year**3])
'deriv_accel_dec': None, # like Gaia DR3 datamodel: Time derivative of the accel. in Dec (double, Misc[mas/year**3])
'solution_type': None, # like Gaia DR3 datamodel, when possible
'gamma_ms': 0., 'rvLinearDrift_mspyr': None,
'rvQuadraticDrift_mspyr': None,
'rvCubicDrift_mspyr': None, 'Tref_MJD': None,
'scan_angle_definition': 'hipparcos',
'rho_mas': None, # DCR coefficient
'd_mas': None, # DCR coefficient (if DCR corrector is used)
'a_mas': None,
'offset_alphastar_mas': 0.,
'offset_delta_mas': 0.,
'alpha_mas': None, # photocenter semimajor axis,
'delta_mag': None, # magnitude difference between components
'nuisance_x': None, # nuisance parameters used when performing MCMC analyses
'nuisance_y': None, # nuisance parameters used when performing MCMC analyses
'esinw': None, # sqrt(ecc) * sin(omega), alternative variable set for MCMC
                        'ecosw': None,  # sqrt(ecc) * cos(omega), alternative variable set for MCMC
'm2sini': None, # sqrt(m2_MJ) * sin(inclination), alternative variable set for MCMC
'm2cosi': None, # sqrt(m2_MJ) * cos(inclination)
'lambda_ref': None # mean longitude at reference time, substitute for time of periastron
}
# Assign user values as attributes when present, use defaults if not
attribute_keys = attribute_dict.keys()
for key, val in default_dict.items():
if key in attribute_keys:
if key == 'm2_MJ':
setattr(self, '_' + key, attribute_dict[key])
else:
setattr(self, key, attribute_dict[key])
else:
if key == 'm2_MJ':
key = '_' + key
setattr(self, key, val)
# Warn users if a key in attribute_dict isn't a default attribute
mismatch = [key for key in attribute_dict.keys()
if key not in default_dict.keys()]
if mismatch:
raise KeyError('Key{0} {1} {2} absent in default OrbitClass'
.format('s' if len(mismatch) > 1 else '',
mismatch,
'are' if len(mismatch) > 1 else 'is'))
# decode alternative parameter sets
if ('esinw' in attribute_keys) and (self.esinw is not None):
self.ecc, self.omega_deg = mcmc_helpers.decode_eccentricity_omega(self.esinw, self.ecosw)
if ('m2sini' in attribute_keys) and (self.m2sini is not None):
self.m2_MJ, self.i_deg = mcmc_helpers.decode_eccentricity_omega(self.m2sini, self.m2cosi)
self._m2_MJ = self.m2_MJ
if ('lambda_ref' in attribute_keys) and (self.lambda_ref is not None):
if self.Tref_MJD is None:
raise AttributeError('When lambda_ref is used, the reference time Tref_MJD needs to be set!')
self.Tp_day = periastron_time(self.lambda_ref, self.omega_deg, self.Tref_MJD, self.P_day)
# treatment of diluted systems
if ('delta_mag' in attribute_keys) and (self.delta_mag is not None) and (self.delta_mag != 0.):
# set photocenter orbit size
beta = fractional_luminosity(0., self.delta_mag)
f = fractional_mass(self.m1_MS, self.m2_MS)
a_rel_mas = self.a_relative_angular()
self.alpha_mas = (f - beta) * a_rel_mas
if self.alpha_mas < 0:
self.alpha_mas = 0.
else:
self.alpha_mas = self.a_barycentre_angular()
self.a_mas = self.alpha_mas
    # OJO: Assign m2_MJ and m2_MS to properties so their values will be linked
@property
def m2_MJ(self):
return self._m2_MJ
@m2_MJ.setter
def m2_MJ(self, val):
self._m2_MJ = val
@property
def m2_MS(self):
return self._m2_MJ * MJ_kg / MS_kg
@m2_MS.setter
def m2_MS(self, val):
self._m2_MJ = val * MS_kg / MJ_kg
def __repr__(self):
d_pc = 1. / (self.absolute_plx_mas / 1000.)
description = '+'*30 + '\n'
description += 'System parameters:\n'
description += "Distance is {:2.1f} pc \t Parallax = {:2.1f} mas\n".format(d_pc, self.absolute_plx_mas)
description += "Primary mass = {:4.3f} Msol \t = {:4.3f} Mjup\n".format(self.m1_MS, self.m1_MS * MS_kg / MJ_kg)
description += "Secondary mass = {:4.3f} Msol \t = {:4.3f} Mjup \t = {:4.3f} MEarth\n".format(self.m2_MS, self.m2_MJ, self.m2_MJ * MJ_kg / ME_kg)
description += "Mass ratio q=m2/m1 = {:4.6f}\n".format(self.m2_MS / self.m1_MS)
description += 'a1_mas = {:2.3f}, a_rel_mas = {:2.3f}\n'.format(self.a_barycentre_angular(), self.a_relative_angular())
if self.delta_mag is not None:
description += 'alpha_mas = {:2.3f}, delta_mag = {:2.3f}\n'.format(self.alpha_mas, self.delta_mag)
description += 'fract.lum beta = {:2.4f}, lum.ratio=L2/L1 = {:2.4f}\n'.format(fractional_luminosity(0, self.delta_mag), luminosity_ratio(fractional_luminosity(0, self.delta_mag)))
description += "Inclination {:2.1f} deg\n".format(self.i_deg)
description += "Period is {:2.1f} day \t Eccentricity = {:2.3f}\n".format(self.P_day, self.ecc)
description += "omega = {:2.1f} deg, OMEGA = {:2.1f} deg, T_periastron = {:2.1f} day\n".format(self.omega_deg, self.OMEGA_deg, self.Tp_day)
description += "RV semi-amplitude of primary = {:2.3f} m/s\n".format(self.rv_semiamplitude_mps())
return description
def pjGetOrbit(self, N, Norbit=None, t_MJD=None, psi_deg=None,
verbose=0, returnMeanAnomaly=0, returnTrueAnomaly=0):
"""
DOCUMENT ARV -- simulate simultaneous 2D-astrometric and RV observations
written: J. Sahlmann 27.07.2009 ObsGe
updated: J. Sahlmann 25.01.2016 STScI/ESA
:param N:
:param Norbit:
:param t_MJD:
:param psi_deg:
:param verbose:
:param returnMeanAnomaly:
:param returnTrueAnomaly:
:return:
"""
#**************************SYSTEM*PARAMETERS***************************
# Get companion mass in units of solar mass
m2_MS = self.m2_MS
#m2_MS = self.m2_MJ * MJ_kg/MS_kg # #companion mass in units of SOLAR mass
#gamma_ms = 0. #systemic velocity / m s^-1
d_pc = 1./ (self.absolute_plx_mas/1000.)
if verbose:
print("%s " % "++++++++++++++++++++")
print("Primary mass = %1.3f Msol \t = %4.3f Mjup "
% (self.m1_MS, self.m1_MS*MS_kg/MJ_kg))
print("Secondary mass = %1.3f Msol \t = %4.3f Mjup \t = %4.3f MEarth " % ( m2_MS, self.m2_MJ, self.m2_MJ*MJ_kg/ME_kg))
print("Inclination %1.3f deg " % self.i_deg)
print("Mass ratio q = %4.6f " %( m2_MS/self.m1_MS))
print("Period is %3.1f day \t Eccentricity = %2.1f " % (self.P_day,self.ecc))
print("Distance is %3.1f pc \t Parallax = %3.1f mas " % (d_pc, self.absolute_plx_mas))
print("omega = %2.1f deg, OMEGA = %2.1f deg, T0 = %2.1f day " % (self.omega_deg, self.OMEGA_deg,self.Tp_day))
omega_rad = np.deg2rad(self.omega_deg)
OMEGA_rad = np.deg2rad(self.OMEGA_deg)
i_rad = np.deg2rad(self.i_deg)
#*************************SIMULATION*PARAMETERS*************************
if Norbit is not None:
t_day = np.linspace(0, self.P_day*Norbit, N) + self.Tref_MJD
elif t_MJD is not None:
t_day = t_MJD
N = len(t_MJD)
#****************************RADIAL*VELOCITY****************************
E_rad = eccentric_anomaly(self.ecc, t_day, self.Tp_day, self.P_day) # eccentric anomaly
M = (Ggrav * (self.m2_MJ * MJ_kg)**3.
/ (self.m1_MS * MS_kg + self.m2_MJ * MJ_kg)**2.) # mass term for the barycentric orbit of the primary mass
#M = G * ( m1_MS*MS + m2_MJ*MJ ) #relative orbit
a_m = (M / (4. * np.pi**2.) * (self.P_day * day2sec)**2.)**(1./3.) # semimajor axis of the primary mass in m
a_AU = a_m / AU_m # in AU
if 0:
THETA_rad = 2 * np.arctan(np.sqrt((1 + self.ecc) / (1 - self.ecc))
* np.tan(E_rad/2)) #position angle between radius vector and ref
THETA_rad = np.arctan2(np.cos(THETA_rad), np.sin(THETA_rad))
k1 = (2. * np.pi * a_m * np.sin(i_rad)
/ (self.P_day * day2sec * (1. - self.ecc**2)**(1./2.))) #RV semiamplitude
rv_ms = k1 * (np.cos( THETA_rad + omega_rad ) +
self.ecc * np.cos(omega_rad)) + self.gamma_ms #radial velocity in m/s.
else: # damien's method
THETA_rad = TrueAnomaly(self.ecc, E_rad)
k1 = (2. * np.pi * a_m * np.sin(i_rad)
/ ( self.P_day * day2sec * (1. - self.ecc**2)**(1./2.))) #RV semiamplitude
a_mps = RadialVelocitiesConstants(k1, omega_rad, self.ecc)
#print(a_mps)
rv_ms = (RadialVelocitiesKepler(a_mps[0], a_mps[1],
a_mps[2], THETA_rad)
+ self.gamma_ms)
if self.rvLinearDrift_mspyr is not None:
drift_ms = ((t_day - self.Tref_MJD)
/ year2day * self.rvLinearDrift_mspyr)
rv_ms += drift_ms
if self.rvQuadraticDrift_mspyr is not None:
drift_ms = (((t_day - self.Tref_MJD) / year2day)**2
* self.rvQuadraticDrift_mspyr)
rv_ms += drift_ms
if self.rvCubicDrift_mspyr is not None:
drift_ms = (((t_day - self.Tref_MJD) / year2day)**3
* self.rvCubicDrift_mspyr)
rv_ms += drift_ms
a_rel_AU = (Ggrav * (self.m1_MS * MS_kg + self.m2_MJ * MJ_kg) / 4.
/ (np.pi**2.) * (self.P_day * day2sec)**2.)**(1./3.) / AU_m
if verbose:
print("Astrometric semimajor axis of Primary: a = %3.3f AU \t %6.3f muas " % (a_AU, a_AU / d_pc * 1.e6))
print("Relative semimajor axis of Primary: a = %3.3f AU \t %6.2f mas " %(a_rel_AU, a_rel_AU / d_pc * 1.e3))
print("Radial velocity semi-amplitude: K1 = %4.2f m/s " % k1)
#******************************ASTROMETRY*******************************
a_rad = np.arctan2(a_m, d_pc * pc_m)
a_mas = a_rad * rad2mas # semimajor axis in mas
aRel_mas = np.arctan2(a_rel_AU * AU_m, d_pc * pc_m) * rad2mas # relative semimajor axis in mas
TIC = thiele_innes_constants([a_mas, self.omega_deg, self.OMEGA_deg, self.i_deg]) #Thiele-Innes constants
TIC_rel = thiele_innes_constants([aRel_mas, self.omega_deg + 180.,
self.OMEGA_deg, self.i_deg]) #Thiele-Innes constants
#A = TIC[0] B = TIC[1] F = TIC[2] G = TIC[3]
if psi_deg is not None:
# psi_rad = np.deg2rad(psi_deg)
phi1 = astrom_signal(t_day, psi_deg, self.ecc,
self.P_day, self.Tp_day, TIC)
phi1_rel = astrom_signal(t_day, psi_deg, self.ecc,
self.P_day, self.Tp_day, TIC_rel)
phi2 = np.nan
phi2_rel = np.nan
else:
#first baseline second baseline
#bspread1 = 0.; bspread2 = 0. #baseline spread around offset in deg
bstart1 = 0.
bstart2 = 90. #baseline offset in deg
# for FORS aric + CRIRES RV simulation, the aric measurement gives both axis simultaneously
psi_deg1 = np.ones(N) * bstart1 #array(bstart1,N)
# psi_rad1 = psi_deg1*deg2rad
psi_deg2 = np.ones(N) * bstart2
# psi_rad2 = psi_deg2*deg2rad
phi1 = astrom_signal(t_day, psi_deg1, self.ecc,
self.P_day, self.Tp_day, TIC)
phi2 = astrom_signal(t_day, psi_deg2, self.ecc,
self.P_day, self.Tp_day, TIC)
phi1_rel = astrom_signal(t_day, psi_deg1, self.ecc,
self.P_day, self.Tp_day, TIC_rel)
phi2_rel = astrom_signal(t_day, psi_deg2, self.ecc,
self.P_day, self.Tp_day, TIC_rel)
if returnMeanAnomaly:
m_deg = mean_anomaly(t_day, self.Tp_day, self.P_day)
M_rad = np.deg2rad(m_deg)
return [phi1, phi2, t_day, rv_ms, phi1_rel, phi2_rel, M_rad]
elif returnTrueAnomaly:
#M_rad = mean_anomaly(t_day,self.Tp_day,self.P_day)
return [phi1, phi2, t_day, rv_ms, phi1_rel, phi2_rel, THETA_rad, TIC_rel]
return [phi1, phi2, t_day, rv_ms, phi1_rel, phi2_rel]
# 0J0: Added a function to calculate apparent proper motion given two times
def get_inter_epoch_accel(self, t0, t1):
"""
Get the apparent proper motion of a source from one epoch to another.
Estimated by using the parameters of the current `OrbitSystem` class
instance to calculate the difference in proper motions of the source
from its position at each time, then subtracting one proper motion from
the other. (Proxy for acceleration.)
Parameters
----------
t0 : `float`
The time (in MJD) of the initial astrometric observation.
t1 : `float`
The time (in MJD) of the final astrometric observation.
Returns
----------
accel_a : `float`
The proper motion difference on the Delta alpha axis of motion.
accel_d : `float`
The proper motion difference on the Delta delta axis of motion.
accel_mag : `float`
The magnitude of the previous two proper motion differences.
"""
# The amount of time over which to calculate the derivative of position
step = TimeDelta(60 * u.second)
# Make sure user-given times are interpreted in *JD units
assert (t0 + step).format.endswith('jd', -2), 't0/t1 not in *JD units'
# Get the values of the user-provided times plus the time step
t0_plus_step = (t0 + step).value
t1_plus_step = (t1 + step).value
# see about editing get_spsi with better indexing instead of xi/yi
t1D, cpsi, spsi, xi, yi = get_cpsi_spsi_for_2Dastrometry(
[t0, t0_plus_step, t1, t1_plus_step])
# Return coordinates of the source at the desired 4 times
phis = self.pjGetBarycentricAstrometricOrbitFast(t1D, spsi, cpsi)
# Separate the result into specific ra/dec arrays
del_alpha = phis[yi]; del_delta = phis[xi]
#del_alpha = phis[1::2]; del_delta = phis[::2]
# Calculate change in Delta alpha after the time step at both t0 and t1
shift_a0 = del_alpha[1] - del_alpha[0]
shift_a1 = del_alpha[3] - del_alpha[2]
# Differentiate over time to get proper motions in this coordinate
# (units of mas/yr)
pm_a0 = shift_a0 / ((t0_plus_step - t0) / year2day)
pm_a1 = shift_a1 / ((t1_plus_step - t1) / year2day)
# Do the same for Delta delta
shift_d0 = del_delta[1] - del_delta[0]
shift_d1 = del_delta[3] - del_delta[2]
pm_d0 = shift_d0 / ((t0_plus_step - t0) / year2day)
pm_d1 = shift_d1 / ((t1_plus_step - t1) / year2day)
# Estimate acceleration in each coord by subtracting PM @t0 from PM @t1
accel_a = pm_a1 - pm_a0
accel_d = pm_d1 - pm_d0
# Get the magnitude of acceleration by taking both coords into account
accel_mag = np.sqrt(accel_a**2 + accel_d**2)
return accel_a, accel_d, accel_mag
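# Illustrative sketch for get_inter_epoch_accel (not part of the original docs): because the
# epochs are combined with an astropy TimeDelta above, passing astropy Time objects in an
# *JD-like format is assumed here; `orbit` is a configured OrbitSystem instance.
#   >>> from astropy.time import Time
#   >>> accel_a, accel_d, accel_mag = orbit.get_inter_epoch_accel(Time(57000., format='mjd'),
#   ...                                                           Time(57365., format='mjd'))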
def a_barycentre_angular(self):
"""Get the semi-major axis, in milliarcseconds, of the primary object's
orbit around the system barycenter. Relies on parameter values from the
current OrbitSystem instance.
Returns
----------
a_barycentre : `float`
The apparent semi-major axis of the primary, in milliarcseconds.
"""
return semimajor_axis_barycentre_angular(self.m1_MS, self.m2_MJ, self.P_day, self.absolute_plx_mas)
# M = (Ggrav * (self.m2_MJ * MJ_kg)**3.
# / (self.m1_MS * MS_kg + self.m2_MJ * MJ_kg)**2.) # mass term for the barycentric orbit of the primary mass
# a_m = (M / (4. * np.pi**2.) * (self.P_day * day2sec)**2.)**(1./3.) # semimajor axis of the primary mass in m
# d_pc = 1. / (self.absolute_plx_mas / 1000.)
# a_rad = np.arctan2(a_m, d_pc*pc_m)
# a_mas = a_rad * rad2mas # semimajor axis in mas
# return a_mas
def a_barycentre_linear(self):
"""Get the semi-major axis, in meters, of the primary object's orbit
around the system barycenter. Relies on parameter values from the
current OrbitSystem instance.
Returns
----------
a_m_barycentre : `float`
The physical semi-major axis of the primary, in meters.
"""
return semimajor_axis_barycentre_linear(self.m1_MS, self.m2_MJ, self.P_day)
# M = (Ggrav * (self.m2_MJ * MJ_kg)**3.
# / (self.m1_MS * MS_kg + self.m2_MJ * MJ_kg)**2.) # mass term for the barycentric orbit of the primary mass
# a_m = (M / (4. * np.pi**2.) * (self.P_day * day2sec)**2.)**(1./3.) # semimajor axis of the primary mass in m
# return a_m
def a_relative_angular(self):
"""Get the semi-major axis, in milliarcseconds, of the secondary object's
orbit around the primary. Relies on parameter values from the current
OrbitSystem instance.
Returns
----------
a_relative : `float`
The apparent semi-major axis of the secondary, in milliarcseconds.
"""
return semimajor_axis_relative_angular(self.m1_MS, self.m2_MJ, self.P_day, self.absolute_plx_mas)
# a_rel_m = ((Ggrav * (self.m1_MS * MS_kg + self.m2_MJ * MJ_kg)
# / 4. / (np.pi**2.)
# * (self.P_day * day2sec)**2.)**(1./3.))
# #M = Ggrav * (self.m2_MJ * MJ_kg)**3. / ( m1_MS*MS_kg + m2_MJ*MJ_kg )**2. # mass term for the barycentric orbit of the primary mass
# #a_m = ( M / (4. * np.pi**2.) * (P_day*day2sec)**2. )**(1./3.) # semimajor axis of the primary mass in m
# d_pc = 1./ (self.absolute_plx_mas / 1000.)
# a_rel_rad = np.arctan2(a_rel_m, d_pc * pc_m)
# a_rel_mas = a_rel_rad * rad2mas # semimajor axis in mas
# return a_rel_mas
def a_relative_linear(self):
"""Get the semi-major axis, in meters, of the secondary object's orbit
around the primary. Relies on parameter values from the current
OrbitSystem instance.
Returns
----------
a_m_relative : `float`
The physical semi-major axis of the secondary, in meters.
"""
return semimajor_axis_relative_linear(self.m1_MS, self.m2_MJ, self.P_day)
# a_rel_m = ((Ggrav * (self.m1_MS * MS_kg + self.m2_MJ * MJ_kg)
# / 4. / (np.pi**2.)
# * (self.P_day * day2sec)**2.)**(1./3.))
# return a_rel_m
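# Consistency sketch for the semimajor-axis helpers above (hypothetical instance `orbit`):
# the barycentric and relative linear semimajor axes differ by the mass ratio m2/(m1+m2).
#   >>> m2_MS = orbit.m2_MJ * MJ_kg / MS_kg
#   >>> np.isclose(orbit.a_barycentre_linear(),
#   ...            orbit.a_relative_linear() * m2_MS / (orbit.m1_MS + m2_MS))
#   True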
def astrometric_acceleration(self, t_MJD, spsi, cpsi):
"""Compute acceleration offset along abscissa."""
total_offset_ra = 0
total_offset_dec = 0
if self.solution_type in ['Acceleration7', 'Acceleration9']:
tau = t_MJD - self.Tref_MJD
total_offset_ra = acceleration.offset_7p(self.accel_ra, tau)
total_offset_dec = acceleration.offset_7p(self.accel_dec, tau)
if self.solution_type in ['Acceleration9']:
total_offset_ra += acceleration.offset_9p(self.deriv_accel_ra, tau)
total_offset_dec += acceleration.offset_9p(self.deriv_accel_dec, tau)
# see Equation 1 in Sahlmann+2011
if self.scan_angle_definition == 'hipparcos':
phi = total_offset_ra*cpsi + total_offset_dec*spsi
elif self.scan_angle_definition == 'gaia':
phi = total_offset_ra*spsi + total_offset_dec*cpsi
else:
raise ValueError('Unsupported scan_angle_definition: {}'.format(self.scan_angle_definition))
return phi
def rv_semiamplitude_mps(self, component='primary'):
"""Return semi-amplitude of radial velocity orbit."""
if component=='primary':
M = Ggrav * (self.m2_MJ * MJ_kg)**3. / ( self.m1_MS*MS_kg + self.m2_MJ*MJ_kg )**2. # mass term for the barycentric orbit of the primary mass
elif component == 'secondary':
M = Ggrav * (self.m1_MS * MS_kg)**3. / ( self.m1_MS*MS_kg + self.m2_MJ*MJ_kg )**2. # mass term for the barycentric orbit of the secondary mass
a_m = ( M / (4. * np.pi**2.) * (self.P_day*day2sec)**2. )**(1./3.) # semimajor axis of the component mass in m
k_mps = 2. * np.pi * a_m * np.sin(np.deg2rad(self.i_deg)) / (
self.P_day * day2sec * (1. - self.ecc ** 2) ** (1. / 2.)) # RV semiamplitude
return k_mps
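# Order-of-magnitude sketch for rv_semiamplitude_mps (hypothetical configuration, assuming
# unspecified OrbitSystem parameters keep their defaults): a Jupiter-mass companion on a
# circular, edge-on 11.86-year orbit around a solar-mass primary yields K1 of roughly 12.5 m/s.
#   >>> jup = OrbitSystem(attribute_dict={'m1_MS': 1.0, 'm2_MJ': 1.0, 'P_day': 4332.6,
#   ...                                   'ecc': 0.0, 'i_deg': 90.})
#   >>> jup.rv_semiamplitude_mps()   # expected to be close to 12.5 m/s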
# def pjGetRV(self,t_day):
def compute_radial_velocity(self, t_day, component='primary'):
"""Compute radial velocity of primary or secondary component in m/s.
updated: J. Sahlmann 25.01.2016 STScI/ESA
updated: J. Sahlmann 13.07.2018 STScI/AURA
Parameters
----------
t_day
component
Returns
-------
rv_ms : ndarray
RV in m/s
"""
# m2_MS = self.m2_MJ * MJ_kg/MS_kg# #companion mass in units of SOLAR mass
# i_rad = np.deg2rad(self.i_deg)
#**************RADIAL*VELOCITY**************************************************
E_rad = eccentric_anomaly(self.ecc, t_day, self.Tp_day, self.P_day) # eccentric anomaly
if component=='primary':
# M = Ggrav * (self.m2_MJ * MJ_kg)**3. / ( self.m1_MS*MS_kg + self.m2_MJ*MJ_kg )**2. # mass term for the barycentric orbit of the primary mass
omega_rad = np.deg2rad(self.omega_deg)
elif component == 'secondary':
# M = Ggrav * (self.m1_MS * MS_kg)**3. / ( self.m1_MS*MS_kg + self.m2_MJ*MJ_kg )**2. # mass term for the barycentric orbit of the secondary mass
omega_rad = np.deg2rad(self.omega_deg + 180.)
# a_m = ( M / (4. * np.pi**2.) * (self.P_day*day2sec)**2. )**(1./3.) # semimajor axis of the component mass in m
# a_AU = a_m / AU_m # in AU
# damien's method
THETA_rad = TrueAnomaly(self.ecc, E_rad)
# k_m = 2. * np.pi * a_m * np.sin(i_rad) / ( self.P_day*day2sec * (1.-self.ecc**2)**(1./2.) ) #RV semiamplitude
k_m = self.rv_semiamplitude_mps(component=component)
a_mps = RadialVelocitiesConstants(k_m, omega_rad, self.ecc)
rv_ms = RadialVelocitiesKepler(a_mps[0], a_mps[1], a_mps[2], THETA_rad) + self.gamma_ms
if self.rvLinearDrift_mspyr is not None:
drift_ms = (t_day - self.Tref_MJD)/year2day * self.rvLinearDrift_mspyr
rv_ms += drift_ms
if self.rvQuadraticDrift_mspyr is not None:
drift_ms = ((t_day - self.Tref_MJD)/year2day)**2 * self.rvQuadraticDrift_mspyr
rv_ms += drift_ms
if self.rvCubicDrift_mspyr is not None:
drift_ms = ((t_day - self.Tref_MJD)/year2day)**3 * self.rvCubicDrift_mspyr
rv_ms += drift_ms
return rv_ms
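# Minimal sketch for compute_radial_velocity (hypothetical epochs): after subtracting
# gamma_ms, the primary and secondary curves are anti-phased and scale as -m2/m1.
#   >>> t_mjd = np.linspace(57000., 57000. + orbit.P_day, 50)
#   >>> rv1_mps = orbit.compute_radial_velocity(t_mjd, component='primary')
#   >>> rv2_mps = orbit.compute_radial_velocity(t_mjd, component='secondary')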
def get_t_plot(self, time_offset_day=0., n_curve=100, n_orbit=1, format='jyear'):
"""Return an array of times to use for plotting the timeseries
Parameters
----------
time_offset_day
Returns
-------
"""
t_day = np.linspace(0, self.P_day * n_orbit, n_curve) - self.P_day/2 + self.Tp_day + time_offset_day
t_plot = getattr(Time(t_day, format='mjd'), format)
return t_plot
def plot_rv_orbit(self, component='primary', n_curve=100, n_orbit=1, line_color='k',
line_style='-', line_width=1, rv_unit='kmps', time_offset_day=0.,
gamma_mps=None, axis=None, plot_parameters_ensemble=None):
"""Plot the radial velocity orbit of the primary
Returns
-------
"""
# if gamma_mps is None:
# gamma_mps = self.gamma_ms
if axis is None:
axis = pl.gca()
if rv_unit == 'kmps':
rv_factor = 1/1000.
else:
rv_factor = 1.
t_day = np.linspace(0, self.P_day * n_orbit, n_curve) - self.P_day/2 + self.Tp_day + time_offset_day
t_plot = Time(t_day, format='mjd').jyear
if component=='primary':
rv_mps = (self.compute_radial_velocity(t_day, component=component)) * rv_factor
axis.plot(t_plot, rv_mps, ls=line_style, color=line_color, lw=line_width)
# if plot_parameters_ensemble is not None:
# rv_mps = (self.compute_radial_velocity(t_day, component=component)) * rv_factor
# 1/0
elif component=='secondary':
rv_mps = (self.compute_radial_velocity(t_day, component=component)) * rv_factor
axis.plot(t_plot, rv_mps, ls=line_style, color=line_color, lw=line_width)
elif component=='both':
rv_mps_1 = (self.compute_radial_velocity(t_day, component='primary')) * rv_factor
rv_mps_2 = (self.compute_radial_velocity(t_day, component='secondary')) * rv_factor
axis.plot(t_plot, rv_mps_1, ls=line_style, color=line_color, lw=line_width+2, label='primary')
axis.plot(t_plot, rv_mps_2, ls=line_style, color=line_color, lw=line_width, label='secondary')
elif component=='difference':
rv_mps_1 = self.compute_radial_velocity(t_day, component='primary') * rv_factor
rv_mps_2 = self.compute_radial_velocity(t_day, component='secondary') * rv_factor
axis.plot(t_plot, rv_mps_1-rv_mps_2, ls=line_style, color=line_color, lw=line_width+2, label='difference')
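# Usage sketch for plot_rv_orbit (hypothetical instance `orbit`; matplotlib is imported as pl
# elsewhere in this module):
#   >>> fig, ax = pl.subplots()
#   >>> orbit.plot_rv_orbit(component='both', n_curve=200, rv_unit='kmps', axis=ax)
#   >>> ax.set_ylabel('RV (km/s)')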
def pjGetOrbitFast(self, N, Norbit=None, t_MJD=None, psi_deg=None, verbose=0):
# /* DOCUMENT ARV -- simulate fast 1D astrometry for planet detection limits
# written: J. Sahlmann 18 May 2015 ESAC
# */
m2_MS = self.m2_MJ * MJ_kg/MS_kg# #companion mass in units of SOLAR mass
d_pc = 1./ (self.absolute_plx_mas/1000.)
omega_rad = np.deg2rad(self.omega_deg)
OMEGA_rad = np.deg2rad(self.OMEGA_deg)
i_rad = np.deg2rad(self.i_deg)
t_day = t_MJD
N = len(t_MJD)
#**************ASTROMETRY********************************************************
M = Ggrav * (self.m2_MJ * MJ_kg)**3. / ( self.m1_MS*MS_kg + self.m2_MJ*MJ_kg )**2. # mass term for the barycentric orbit of the primary mass
a_m = ( M / (4. * np.pi**2.) * (self.P_day*day2sec)**2. )**(1./3.) # semimajor axis of the primary mass in m
a_AU = a_m / AU_m # in AU
a_rel_AU = (Ggrav*(self.m1_MS*MS_kg+self.m2_MJ*MJ_kg) / 4. /(np.pi**2.) *(self.P_day*day2sec)**2.)**(1./3.)/AU_m
a_rad = np.arctan2(a_m,d_pc*pc_m)
a_mas = a_rad * rad2mas # semimajor axis in mas
aRel_mas = np.arctan2(a_rel_AU*AU_m,d_pc*pc_m) * rad2mas # relative semimajor axis in mas
TIC = thiele_innes_constants([a_mas , self.omega_deg , self.OMEGA_deg, self.i_deg]) #Thiele-Innes constants
phi1 = astrom_signal(t_day, psi_deg, self.ecc, self.P_day, self.Tp_day, TIC)
phi1_rel = np.nan #astrom_signal(t_day,psi_deg,self.ecc,self.P_day,self.Tp_day,TIC_rel)
phi2 = np.nan
phi2_rel = np.nan
rv_ms=np.nan
return [phi1 ,phi2, t_day, rv_ms, phi1_rel ,phi2_rel]
def pjGetBarycentricAstrometricOrbitFast(self, t_MJD, spsi, cpsi):
"""Simulate fast 1D astrometry for planet detection limits.
written: J. Sahlmann 18 May 2015 ESAC
updated: J. Sahlmann 25.01.2016 STScI/ESA
updated: J. Sahlmann 14.01.2021 RHEA for ESA
Parameters
----------
t_MJD
spsi
cpsi
Returns
-------
"""
# semimajor axis in mas
a_mas = self.a_barycentre_angular()
# Thiele-Innes constants
TIC = thiele_innes_constants([a_mas, self.omega_deg, self.OMEGA_deg, self.i_deg])
phi1 = astrom_signalFast(t_MJD, spsi, cpsi, self.ecc, self.P_day, self.Tp_day, TIC,
scan_angle_definition=self.scan_angle_definition)
return phi1
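# Call-pattern sketch for pjGetBarycentricAstrometricOrbitFast (hypothetical epochs): for 2D
# astrometry the psi bookkeeping is prepared with get_cpsi_spsi_for_2Dastrometry.
#   >>> t_1d, cpsi, spsi, xi, yi = get_cpsi_spsi_for_2Dastrometry(np.array([57000., 57100., 57200.]))
#   >>> phi = orbit.pjGetBarycentricAstrometricOrbitFast(t_1d, spsi, cpsi)
#   >>> phi[xi], phi[yi]   # the two orthogonal along-scan components in mas (see plot_orbits for the axis assignment)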
def photocenter_orbit(self, t_MJD, spsi, cpsi):
"""Return the photocenter displacement at the input times.
Parameters
----------
t_MJD
spsi
cpsi
Returns
-------
"""
if (self.delta_mag is None) or (self.delta_mag == 0):
return self.pjGetBarycentricAstrometricOrbitFast(t_MJD, spsi, cpsi)
else:
relative_orbit_mas = self.relative_orbit_fast(t_MJD, spsi, cpsi, shift_omega_by_pi=False)
beta = fractional_luminosity(0., self.delta_mag)
f = fractional_mass(self.m1_MS, self.m2_MS)
photocentric_orbit_mas = (f - beta) * relative_orbit_mas
return photocentric_orbit_mas
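# Numerical illustration of the (f - beta) scaling used in photocenter_orbit (made-up values):
#   >>> beta = fractional_luminosity(0., 2.5)   # secondary 2.5 mag fainter than the primary
#   >>> f = fractional_mass(1.0, 0.5)           # m1 = 1.0 MS, m2 = 0.5 MS
# The photocentre orbit is (f - beta) times the relative orbit and vanishes for an
# equal-mass, equal-luminosity pair (f == beta == 0.5).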
def relative_orbit_fast(self, t_MJD, spsi, cpsi, unit='mas', shift_omega_by_pi=True,
coordinate_system='cartesian'):
"""
Simulate fast 1D orbital astrometry
written: J. Sahlmann 18 May 2015 ESAC
updated: J. Sahlmann 25.01.2016 STScI/ESA
updated: J. Sahlmann 27 February 2017 STScI/AURA
returns relative orbit in linear or angular units
Parameters
----------
t_MJD
spsi
cpsi
unit
shift_omega_by_pi
coordinate_system
Returns
-------
"""
#mass term of relative orbit
M_rel = Ggrav*(self.m1_MS*MS_kg+self.m2_MJ*MJ_kg)
# semimajor axis of the relative orbit in m
a_rel_m = ( M_rel / (4. * np.pi**2.) * (self.P_day*day2sec)**2. )**(1./3.)
# shift argument of periastron relative to barycentric orbit of primary mass M1
if shift_omega_by_pi:
omega_rel_deg = self.omega_deg + 180.
else:
omega_rel_deg = self.omega_deg
if unit == 'mas':
d_pc = 1./ (self.absolute_plx_mas/1000.)
a_rad = np.arctan2(a_rel_m,d_pc*pc_m)
# semimajor axis in mas
a_rel_mas = a_rad * rad2mas
a_rel = a_rel_mas
elif unit == 'meter':
a_rel = a_rel_m
#Thiele-Innes constants
TIC = thiele_innes_constants([a_rel, omega_rel_deg, self.OMEGA_deg, self.i_deg])
# by default these are cartesian coordinates
phi1 = astrom_signalFast(t_MJD, spsi, cpsi, self.ecc, self.P_day, self.Tp_day, TIC)
# convert to polar coordinates if requested
if coordinate_system=='polar':
xi = np.where(cpsi==1)[0]
yi = np.where(cpsi==0)[0]
rho = np.sqrt(phi1[xi]**2 + phi1[yi]**2)
phi_deg = np.rad2deg(np.arctan2(phi1[xi], phi1[yi]))%360.
phi1[xi] = rho
phi1[yi] = phi_deg
return phi1
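# Sketch for relative_orbit_fast with polar output (hypothetical epochs):
#   >>> t_1d, cpsi, spsi, xi, yi = get_cpsi_spsi_for_2Dastrometry(np.array([57000., 57180.]))
#   >>> rel = orbit.relative_orbit_fast(t_1d, spsi, cpsi, coordinate_system='polar')
# Entries of `rel` where cpsi == 1 hold the separation rho in mas; entries where
# cpsi == 0 hold the position angle in degrees.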
def ppm(self, t_MJD, psi_deg=None, offsetRA_mas=0, offsetDE_mas=0, externalParallaxFactors=None,
horizons_file_seed=None, instrument=None, verbose=False):
"""Compute parallax and proper motion.
Parameters
----------
t_MJD
psi_deg
offsetRA_mas
offsetDE_mas
externalParallaxFactors
horizons_file_seed
instrument
verbose
Returns
-------
"""
assert isinstance(t_MJD, (list, np.ndarray))
# check that t_MJD is sorted and increasing
if sorted(list(t_MJD)) != list(t_MJD):
raise RuntimeError('Please sort the input timestamps first.')
if t_MJD[0] > t_MJD[-1]:
raise RuntimeError('Please sort the input timestamps in increasing order.')
Nframes = len(t_MJD)
t_JD = t_MJD + 2400000.5
if externalParallaxFactors is not None:
parf = externalParallaxFactors
else:
parf = get_parallax_factors(self.RA_deg, self.DE_deg, t_JD, horizons_file_seed=horizons_file_seed,
verbose=verbose, instrument=instrument, overwrite=False)
self.parf = parf
if self.Tref_MJD is None:
self.Tref_MJD = np.mean(t_MJD)
trel_year = (t_MJD - self.Tref_MJD)/year2day
# % sin(psi) and cos(psi)
if psi_deg is not None:
psi_rad = np.deg2rad(psi_deg)
spsi = np.sin(psi_rad)
cpsi = np.cos(psi_rad)
t = trel_year
else:
t, cpsi, spsi, xi, yi = get_cpsi_spsi_for_2Dastrometry(trel_year, scan_angle_definition=self.scan_angle_definition)
tspsi = t*spsi
tcpsi = t*cpsi
if psi_deg is not None:
if externalParallaxFactors is None:
ppfact = parf[0] * cpsi + parf[1] * spsi # see Sahlmann+11 Eq. 1 / 8
else:
ppfact = parf
else:
ppfact = np.zeros(2*Nframes)
ppfact[xi] = parf[0]
ppfact[yi] = parf[1]
self.xi = np.where(xi)[0]
self.yi = np.where(yi)[0]
if self.scan_angle_definition == 'hipparcos':
C = np.array([cpsi, spsi, ppfact, tcpsi, tspsi])
elif self.scan_angle_definition == 'gaia':
C = np.array([spsi, cpsi, ppfact, tspsi, tcpsi])
self.coeffMatrix = C
self.timeUsedInTcspsi = np.array(t)
if psi_deg is not None:
self.MjdUsedInTcspsi = t_MJD
else:
self.MjdUsedInTcspsi = np.array(np.sort(np.tile(t_MJD, 2)))
parallax_for_ppm_mas = self.absolute_plx_mas - self.parallax_correction_mas
inVec = np.array([offsetRA_mas, offsetDE_mas, parallax_for_ppm_mas, self.muRA_mas, self.muDE_mas])
# inVec = np.array([offsetRA_mas, offsetDE_mas, parallax_for_ppm_mas, 0, 0])
ppm = np.dot(C.T, inVec)
if psi_deg is not None:
return ppm
else:
ppm2d = [ppm[xi],ppm[yi]]
return ppm2d
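# Usage sketch for ppm (hypothetical epochs; RA_deg, DE_deg, absolute_plx_mas, muRA_mas,
# muDE_mas and parallax_correction_mas are assumed to be set on the instance):
#   >>> t_obs_mjd = np.linspace(57000., 58000., 20)
#   >>> ppm_x_mas, ppm_y_mas = orbit.ppm(t_obs_mjd)   # 2D case (psi_deg=None): components along xi / yi
# The coefficient matrix of the linear model is stored in orbit.coeffMatrix as a side effect.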
def plot_orbits(self, timestamps_curve_2D=None, timestamps_probe_2D=None, timestamps_probe_2D_label=None,
delta_mag=None, N_orbit=1., N_curve=100, save_plot=False, plot_dir=None,
new_figure=True, line_color='k', line_style='-', line_width=1, share_axes=False,
show_orientation=False, arrow_offset_x=0, invert_xaxis=True, show_time=True,
timeformat='jyear', name_seed='', verbose=False):
"""Plot barycentric, photocentric, and relative orbits in two panels.
Parameters
----------
timestamps_curve_2D : MJD
timestamps_probe_2D : MJD
timestamps_probe_2D_label
delta_mag
N_orbit
N_curve
save_plot
plot_dir
new_figure
line_color
line_style
line_width
share_axes
show_orientation
arrow_offset_x
invert_xaxis
show_time
timeformat
name_seed
verbose
Returns
-------
"""
if self.delta_mag is not None:
delta_mag = self.delta_mag
if timestamps_curve_2D is None:
timestamps_curve_2D = np.linspace(self.Tp_day - self.P_day, self.Tp_day + N_orbit * self.P_day, N_curve)  # span N_orbit periods beyond periastron
timestamps_curve_1D, cpsi_curve, spsi_curve, xi_curve, yi_curve = get_cpsi_spsi_for_2Dastrometry(timestamps_curve_2D)
# relative orbit
phi0_curve_relative = self.relative_orbit_fast(timestamps_curve_1D, spsi_curve, cpsi_curve, shift_omega_by_pi = True)
if timestamps_probe_2D is not None:
timestamps_probe_1D, cpsi_probe, spsi_probe, xi_probe, yi_probe = get_cpsi_spsi_for_2Dastrometry(timestamps_probe_2D)
phi0_probe_relative = self.relative_orbit_fast(timestamps_probe_1D, spsi_probe, cpsi_probe, shift_omega_by_pi = True)
if delta_mag is not None:
# fractional luminosity
beta = fractional_luminosity( 0. , 0.+delta_mag )
# fractional mass
f = fractional_mass(self.m1_MS, self.m2_MS)
# photocentre orbit about the system's barycentre
phi0_curve_photocentre = (f - beta) * self.relative_orbit_fast(timestamps_curve_1D, spsi_curve, cpsi_curve, shift_omega_by_pi = False)
if timestamps_probe_2D is not None:
phi0_probe_photocentre = (f - beta) * self.relative_orbit_fast(timestamps_probe_1D, spsi_probe, cpsi_probe, shift_omega_by_pi = False)
# barycentric orbit of M1
phi0_curve_barycentre = self.pjGetBarycentricAstrometricOrbitFast(timestamps_curve_1D, spsi_curve, cpsi_curve)
if timestamps_probe_2D is not None:
phi0_probe_barycentre = self.pjGetBarycentricAstrometricOrbitFast(timestamps_probe_1D, spsi_probe, cpsi_probe)
n_figure_columns = 2
n_figure_rows = 1
# fig, axes = pl.subplots(n_figure_rows, n_figure_columns, figsize=(n_figure_columns*6, n_figure_rows*5), facecolor='w', edgecolor='k', sharex=True, sharey=True)
if new_figure:
fig, axes = pl.subplots(n_figure_rows, n_figure_columns, figsize=(n_figure_columns*6, n_figure_rows*5), facecolor='w', edgecolor='k', sharex=share_axes, sharey=share_axes)
else:
axes = pl.gcf().axes
# plot smooth orbit curve
axes[0].plot(phi0_curve_barycentre[xi_curve], phi0_curve_barycentre[yi_curve],'k--',lw=line_width, color=line_color, ls=line_style) #, label='Barycentre'
# plot individual epochs
if timestamps_probe_2D is not None:
axes[0].plot(phi0_probe_barycentre[xi_probe], phi0_probe_barycentre[yi_probe],'bo',mfc='0.7', label=timestamps_probe_2D_label)
if delta_mag is not None:
axes[0].plot(phi0_curve_photocentre[xi_curve], phi0_curve_photocentre[yi_curve],'k--',lw=1, label='Photocentre')
if timestamps_probe_2D is not None:
axes[0].plot(phi0_probe_photocentre[xi_probe],phi0_probe_photocentre[yi_probe],'bo')
if show_orientation:
# arrow_index_1 = np.int(N_curve/3.3)
arrow_index_1 = 3 * int(N_curve/5)  # np.int is removed in recent numpy versions
arrow_index_2 = arrow_index_1 + 10
length_factor = 1
arrow_factor = 2
# ax = pl.axes()
arrow_base_x = phi0_curve_barycentre[xi_curve][arrow_index_1]
arrow_base_y = phi0_curve_barycentre[yi_curve][arrow_index_1]
arrow_delta_x = phi0_curve_barycentre[xi_curve][arrow_index_2] - arrow_base_x
arrow_delta_y = phi0_curve_barycentre[yi_curve][arrow_index_2] - arrow_base_y
axes[0].arrow(arrow_base_x+arrow_offset_x, arrow_base_y, arrow_delta_x*length_factor, arrow_delta_y*length_factor, head_width=0.05*arrow_factor, head_length=0.1*arrow_factor, fc=line_color, ec=line_color) #, head_width=0.05, head_length=0.1
# plot origin = position of barycentre
axes[0].plot(0,0,'kx')
axes[0].axhline(y=0,color='0.7',ls='--',zorder=-50)
axes[0].axvline(x=0,color='0.7',ls='--',zorder=-50)
axes[0].set_xlabel('Offset in Right Ascension (mas)')
axes[0].set_ylabel('Offset in Declination (mas)')
axes[0].axis('equal')
if invert_xaxis:
axes[0].invert_xaxis()
axes[0].legend(loc='best')
axes[0].set_title('Bary-/photocentric orbit of M1')
# second panel
# plot smooth orbit curve
axes[1].plot(phi0_curve_relative[xi_curve],phi0_curve_relative[yi_curve],'k-',lw=line_width, color=line_color, ls=line_style)
# plot individual epochs
if timestamps_probe_2D is not None:
axes[1].plot(phi0_probe_relative[xi_probe],phi0_probe_relative[yi_probe], 'bo', label=timestamps_probe_2D_label)
if verbose:
print('relative separation: {}'.format(np.linalg.norm([phi0_probe_relative[xi_probe],phi0_probe_relative[yi_probe]], axis=0)))
if show_orientation:
# ax = pl.axes()
arrow_base_x = phi0_curve_relative[xi_curve][arrow_index_1]
arrow_base_y = phi0_curve_relative[yi_curve][arrow_index_1]
arrow_delta_x = phi0_curve_relative[xi_curve][arrow_index_2] - arrow_base_x
arrow_delta_y = phi0_curve_relative[yi_curve][arrow_index_2] - arrow_base_y
axes[1].arrow(arrow_base_x+arrow_offset_x, arrow_base_y, arrow_delta_x*length_factor, arrow_delta_y*length_factor, head_width=0.05*arrow_factor, head_length=0.1*arrow_factor, fc=line_color, ec=line_color)
# plot origin = position of primary
axes[1].plot(0,0,'kx')
axes[1].axhline(y=0,color='0.7',ls='--',zorder=-50)
axes[1].axvline(x=0,color='0.7',ls='--',zorder=-50)
axes[1].set_xlabel('Offset in Right Ascension (mas)')
axes[1].axis('equal')
axes[1].legend(loc='best')
axes[1].set_title('Relative orbit of M2 about M1')
if (not axes[1]._sharex) and (invert_xaxis):
axes[1].invert_xaxis()
pl.show()
if save_plot:
fig_name = os.path.join(plot_dir, '{}_orbits_sky.pdf'.format(name_seed))
plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)
# show barycentric offsets as function of time
if show_time:
t_plot_curve = getattr(Time(timestamps_curve_2D, format='mjd'), timeformat)
n_figure_columns = 2
n_figure_rows = 1
fig, axes = pl.subplots(n_figure_rows, n_figure_columns,
figsize=(n_figure_columns * 8, n_figure_rows * 4),
facecolor='w', edgecolor='k', sharex=share_axes,
sharey=share_axes)
# plot smooth orbit curve
axes[0].plot(t_plot_curve, phi0_curve_barycentre[xi_curve],
lw=line_width, color=line_color, ls=line_style)
axes[1].plot(t_plot_curve, phi0_curve_barycentre[yi_curve],
lw=line_width, color=line_color, ls=line_style)
axes[0].set_ylabel('Offset in Right Ascension (mas)')
axes[1].set_ylabel('Offset in Declination (mas)')
axes[0].set_xlabel('Time ({})'.format(timeformat))
axes[1].set_xlabel('Time ({})'.format(timeformat))
pl.suptitle('Barycentre orbit')
# plot individual epochs
if timestamps_probe_2D is not None:
axes[0].plot(Time(timestamps_probe_1D[xi_probe], format='mjd').jyear, phi0_probe_barycentre[xi_probe], 'bo',
mfc='0.7', label=timestamps_probe_2D_label)
axes[1].plot(Time(timestamps_probe_1D[yi_probe], format='mjd').jyear, phi0_probe_barycentre[yi_probe], 'bo',
mfc='0.7', label=timestamps_probe_2D_label)
pl.show()
if save_plot:
fig_name = os.path.join(plot_dir, '{}_barycentre_orbit_time.pdf'.format(name_seed))
plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)
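# Usage sketch for plot_orbits (hypothetical inputs): draw the smooth model orbits and mark
# a set of observation epochs on top of them.
#   >>> t_probe_mjd = np.array([57010., 57123., 57290., 57355.])
#   >>> orbit.plot_orbits(timestamps_probe_2D=t_probe_mjd,
#   ...                   timestamps_probe_2D_label='simulated epochs', show_time=True)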
def plot_ppm(self, timestamps_curve_2D=None, timestamps_probe_2D=None,
timestamps_probe_2D_label=None,
delta_mag=None, N_orbit=1., N_curve=100, save_plot=False, plot_dir=None,
new_figure=True, line_color='k', line_style='-', line_width=1, share_axes=False,
show_orientation=False, arrow_offset_x=0, invert_xaxis=True, show_time=True,
show_difference_to=None, timeformat='jyear',
title=None, show_sky=False, name_seed='',
**kwargs):
"""Plot the parallax and proper motion of the instance.
"""
if timestamps_curve_2D is None:
timestamps_curve_2D = np.linspace(self.Tp_day - self.P_day, self.Tp_day + N_orbit * self.P_day, N_curve)  # span N_orbit periods beyond periastron
else:
N_curve = len(timestamps_curve_2D)
ppm_curve_mas = self.ppm(timestamps_curve_2D, offsetRA_mas=0, offsetDE_mas=0, externalParallaxFactors=None, horizons_file_seed=None, instrument=None, verbose=0)
if timestamps_probe_2D is not None:
ppm_probe_mas = self.ppm(timestamps_probe_2D, offsetRA_mas=0, offsetDE_mas=0, externalParallaxFactors=None, horizons_file_seed=None, instrument=None, verbose=0)
if show_difference_to is not None:
# expect OrbitSystem instance as input
ppm_curve_mas_2 = show_difference_to.ppm(timestamps_curve_2D, offsetRA_mas=0, offsetDE_mas=0, externalParallaxFactors=None, horizons_file_seed=None, instrument=None, verbose=0)
ppm_probe_mas_2 = show_difference_to.ppm(timestamps_probe_2D, offsetRA_mas=0, offsetDE_mas=0, externalParallaxFactors=None, horizons_file_seed=None, instrument=None, verbose=0)
ppm_curve_mas = [ppm_curve_mas[i] - ppm_curve_mas_2[i] for i in range(len(ppm_curve_mas))]
ppm_probe_mas = [ppm_probe_mas[i] - ppm_probe_mas_2[i] for i in range(len(ppm_probe_mas))]
if show_sky:
n_figure_columns = 1
n_figure_rows = 1
if new_figure:
# fig = pl.figure(figsize=(n_figure_columns * 6, n_figure_rows * 6), facecolor='w', edgecolor='k')
# axes = pl.gca()
fig, axes = pl.subplots(n_figure_rows, n_figure_columns,
figsize=(n_figure_columns * 6, n_figure_rows * 6),
facecolor='w', edgecolor='k', sharex=share_axes,
sharey=share_axes)
axes = [axes]
else:
axes = pl.gcf().axes
# plot smooth orbit curve
axes[0].plot(ppm_curve_mas[0], ppm_curve_mas[1], 'k--',
lw=line_width, color=line_color, ls=line_style)
# plot individual epochs
if timestamps_probe_2D is not None:
axes[0].plot(ppm_probe_mas[0], ppm_probe_mas[1], 'bo', label=timestamps_probe_2D_label, **kwargs)
axes[0].set_xlabel('Offset in Right Ascension (mas)')
axes[0].set_ylabel('Offset in Declination (mas)')
axes[0].axis('equal')
if invert_xaxis:
axes[0].invert_xaxis()
if show_orientation:
arrow_index_1 = int(N_curve / 5)  # np.int is removed in recent numpy versions
arrow_index_2 = arrow_index_1 + 10
length_factor = 10
arrow_factor = 1000
arrow_base_x = ppm_curve_mas[0][arrow_index_1]
arrow_base_y = ppm_curve_mas[1][arrow_index_1]
arrow_delta_x = ppm_curve_mas[0][arrow_index_2] - arrow_base_x
arrow_delta_y = ppm_curve_mas[1][arrow_index_2] - arrow_base_y
axes[0].arrow(arrow_base_x + arrow_offset_x, arrow_base_y,
arrow_delta_x * length_factor, arrow_delta_y * length_factor,
head_width=0.05 * arrow_factor, head_length=0.1 * arrow_factor,
fc=line_color,
ec=line_color) # , head_width=0.05, head_length=0.1
pl.show()
if save_plot:
fig_name = os.path.join(plot_dir, '{}_ppm_sky.pdf'.format(name_seed))
plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)
if show_time:
n_figure_columns = 2
n_figure_rows = 1
if new_figure:
fig, axes = pl.subplots(n_figure_rows, n_figure_columns,
figsize=(n_figure_columns * 8, n_figure_rows * 4),
facecolor='w', edgecolor='k', sharex=share_axes,
sharey=share_axes)
else:
axes = pl.gcf().axes
t_plot_curve = getattr(Time(timestamps_curve_2D, format='mjd'), timeformat)
# plot smooth PPM curve
axes[0].plot(t_plot_curve, ppm_curve_mas[0], lw=line_width, color=line_color, ls=line_style) # , label='Barycentre'
axes[1].plot(t_plot_curve, ppm_curve_mas[1], lw=line_width, color=line_color, ls=line_style) # , label='Barycentre'
axes[0].axhline(y=0, color='0.7', ls='--', zorder=-50)
axes[1].axhline(y=0, color='0.7', ls='--', zorder=-50)
axes[0].set_ylabel('Offset in Right Ascension (mas)')
axes[1].set_ylabel('Offset in Declination (mas)')
axes[0].set_xlabel('Time ({})'.format(timeformat))
axes[1].set_xlabel('Time ({})'.format(timeformat))
if title is not None:
pl.suptitle(title)
# plot individual epochs
if timestamps_probe_2D is not None:
t_plot_probe = getattr(Time(timestamps_probe_2D, format='mjd'), timeformat)
axes[0].plot(t_plot_probe, ppm_probe_mas[0], 'bo', label=timestamps_probe_2D_label, **kwargs)
axes[1].plot(t_plot_probe, ppm_probe_mas[1], 'bo', label=timestamps_probe_2D_label, **kwargs)
if timestamps_probe_2D_label is not None:
axes[0].legend(loc='best')
pl.show()
if save_plot:
fig_name = os.path.join(plot_dir, '{}_ppm_time.pdf'.format(name_seed))
plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)
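# Usage sketch for plot_ppm (hypothetical inputs): show the parallax + proper-motion signal
# on sky and versus time for the same probe epochs.
#   >>> orbit.plot_ppm(timestamps_probe_2D=t_probe_mjd, show_sky=True,
#   ...                timestamps_probe_2D_label='simulated epochs')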
class PpmPlotter(object):
"""
A class to plot results of astrometric fitting of parallax + proper motion
Attributes
----------
p : array
    holds the best-fit parameters of the linear fit (usually positions, parallax, proper motion);
    part of what linfit returns
C : matrix
    numpy matrix holding the coefficients of the linear model
Methods
-------
ppm_plot()
    make figures showing the results of the parallax + proper-motion fit
print_residual_stats()
    print summary statistics of the epoch residuals
"""
def __init__(self, p, C, T, xi, yi, omc, noParallaxFit=0, psi_deg=None, epoch_outlier_dir=None,
outlier_sigma_threshold=2., absolute_threshold=None):
self.p = p
self.C = C
self.T = T
self.xi = xi
self.yi = yi
self.omc = omc
self.noParallaxFit = noParallaxFit
self.psi_deg = psi_deg
# compute positions at measurement dates according to best-fit model p (no DCR)
inVec = p.flatten()[0:5]
self.ppm_model = np.dot(C[0:len(inVec), :].T, inVec)
DCR = None
# compute measured positions (DCR-corrected)
if C.shape[0] == 7:
DCR = np.dot(C[5:7, :].T, p.flatten()[5:7])
elif (C.shape[0] == 5) & (self.noParallaxFit == 1):
DCR = (np.array(C[4, :]) * p[4]).flatten()
elif C.shape[0] == 6:
DCR = (np.array(C[5, :]) * p[5]).flatten()
elif C.shape[0] == 9:
DCR = np.dot(C[7:9, :].T, p.flatten()[7:9])
ACC = np.dot(C[5:7, :].T, p.flatten()[5:7])
self.ACC = ACC
elif (C.shape[0] == 5) & (self.noParallaxFit == 0):
DCR = np.zeros(len(T['da_mas']))
self.DCR = DCR
self.ppm_meas = self.T['da_mas'] - self.DCR
if self.psi_deg is not None:
# compute epoch averages
medi = np.unique(T['OB'])
self.medi = medi
self.t_MJD_epoch = np.zeros(len(medi))
self.stdResidualX = np.zeros(len(medi))
self.errResidualX = np.zeros(len(medi))
self.Xmean = np.zeros(len(medi))
self.parfXmean = np.zeros(len(medi))
self.DCR_Xmean = np.zeros(len(medi))
self.ACC_Xmean = np.zeros(len(medi))
self.meanResidualX = np.zeros(len(medi))
self.x_e_laz = np.zeros(len(medi))
self.sx_star_laz = np.zeros(len(medi))
for jj, epoch in enumerate(self.medi):
tmpidx = np.where(self.T['OB'] == epoch)[0]
tmpIndexX = tmpidx
self.t_MJD_epoch[jj] = np.mean(self.T['MJD'][tmpIndexX])
self.Xmean[jj] = np.average(self.ppm_meas[tmpIndexX],
weights=1. / (self.T['sigma_da_mas'][tmpIndexX] ** 2.))
self.DCR_Xmean[jj] = np.average(self.DCR[tmpIndexX])
self.meanResidualX[jj] = np.average(omc[tmpIndexX],
weights=1. / (self.T['sigma_da_mas'][tmpIndexX] ** 2.))
self.parfXmean[jj] = np.average(self.T['ppfact'][tmpIndexX])
self.stdResidualX[jj] = np.std(omc[tmpIndexX])
if len(tmpIndexX) == 1:
self.stdResidualX[jj] = self.T['sigma_da_mas'][tmpIndexX]
self.errResidualX[jj] = self.stdResidualX[jj] / np.sqrt(len(tmpIndexX))
# % from Lazorenko writeup:
self.x_e_laz[jj] = np.sum(omc[tmpIndexX] / (self.T['sigma_da_mas'][tmpIndexX] ** 2.)) / np.sum(
1 / (self.T['sigma_da_mas'][tmpIndexX] ** 2.))
self.sx_star_laz[jj] = 1 / np.sqrt(np.sum(1 / (self.T['sigma_da_mas'][tmpIndexX] ** 2.)))
self.chi2_naive = np.sum([self.meanResidualX ** 2 / self.errResidualX ** 2])
self.chi2_laz = np.sum([self.x_e_laz ** 2 / self.errResidualX ** 2])
self.chi2_star_laz = np.sum([self.x_e_laz ** 2 / self.sx_star_laz ** 2])
self.nFree_ep = len(medi) * 2 - C.shape[0]
self.chi2_laz_red = self.chi2_laz / self.nFree_ep
self.chi2_star_laz_red = self.chi2_star_laz / self.nFree_ep
self.chi2_naive_red = self.chi2_naive / self.nFree_ep
self.epoch_omc_std_X = np.std(self.meanResidualX)
self.epoch_omc_std = np.std([self.meanResidualX])
else:
# compute epoch averages
medi = np.unique(T['OB'])
self.medi = medi
self.t_MJD_epoch = np.zeros(len(medi))
self.stdResidualX = np.zeros(len(medi))
self.stdResidualY = np.zeros(len(medi))
self.errResidualX = np.zeros(len(medi))
self.errResidualY = np.zeros(len(medi))
self.Xmean = np.zeros(len(medi))
self.Ymean = np.zeros(len(medi))
self.parfXmean = np.zeros(len(medi))
self.parfYmean = np.zeros(len(medi))
self.DCR_Xmean = np.zeros(len(medi))
self.DCR_Ymean = np.zeros(len(medi))
self.ACC_Xmean = np.zeros(len(medi))
self.ACC_Ymean = np.zeros(len(medi))
self.meanResidualX = np.zeros(len(medi))
self.meanResidualY = np.zeros(len(medi))
self.x_e_laz = np.zeros(len(medi))
self.y_e_laz = np.zeros(len(medi))
self.sx_star_laz = np.zeros(len(medi))
self.sy_star_laz = np.zeros(len(medi))
outlier_1D_index = np.array([])
# loop through epochs
for jj, epoch in enumerate(self.medi):
tmpidx = np.where(self.T['OB'] == epoch)[0]
tmpIndexX = np.intersect1d(self.xi, tmpidx)
tmpIndexY = np.intersect1d(self.yi, tmpidx)
self.t_MJD_epoch[jj] = np.mean(self.T['MJD'][tmpIndexX])
# print 'epoch %1.0f' % epoch
# print self.T['MJD'][tmpIndexX]
# pdb.set_trace()
# print jj,tmpIndexX
self.Xmean[jj] = np.average(self.ppm_meas[tmpIndexX],
weights=1. / (self.T['sigma_da_mas'][tmpIndexX] ** 2.))
self.Ymean[jj] = np.average(self.ppm_meas[tmpIndexY],
weights=1. / (self.T['sigma_da_mas'][tmpIndexY] ** 2.))
# pdb.set_trace()
self.DCR_Xmean[jj] = np.average(self.DCR[tmpIndexX])
self.DCR_Ymean[jj] = np.average(self.DCR[tmpIndexY])
try:
self.ACC_Xmean[jj] = np.average(self.ACC[tmpIndexX])
self.ACC_Ymean[jj] = np.average(self.ACC[tmpIndexY])
except AttributeError:
pass
# pdb.set_trace()
self.meanResidualX[jj] = np.average(omc[tmpIndexX],
weights=1. / (self.T['sigma_da_mas'][tmpIndexX] ** 2.))
self.meanResidualY[jj] = np.average(omc[tmpIndexY],
weights=1. / (self.T['sigma_da_mas'][tmpIndexY] ** 2.))
self.parfXmean[jj] = np.average(self.T['ppfact'][tmpIndexX])
self.parfYmean[jj] = np.average(self.T['ppfact'][tmpIndexY])
self.stdResidualX[jj] = np.std(omc[tmpIndexX])
self.stdResidualY[jj] = np.std(omc[tmpIndexY])
if absolute_threshold is not None:
outliers_x = (np.abs(omc[tmpIndexX] - np.mean(omc[tmpIndexX])) > outlier_sigma_threshold * self.stdResidualX[jj]) | (np.abs(omc[tmpIndexX] - np.mean(omc[tmpIndexX])) > absolute_threshold)
outliers_y = (np.abs(omc[tmpIndexY] - np.mean(omc[tmpIndexY])) > outlier_sigma_threshold * self.stdResidualY[jj]) | (np.abs(omc[tmpIndexY] - np.mean(omc[tmpIndexY])) > absolute_threshold)
else:
outliers_x = np.abs(omc[tmpIndexX] - np.mean(omc[tmpIndexX])) > outlier_sigma_threshold * \
self.stdResidualX[jj]
outliers_y = np.abs(omc[tmpIndexY] - np.mean(omc[tmpIndexY])) > outlier_sigma_threshold * \
self.stdResidualY[jj]
if any(outliers_x):
tmp_1D_index_x = np.where(outliers_x)[0]
print('Detected %d X-residual outliers (%2.1f sigma) in epoch %d (1-indexed) ' % (
len(tmp_1D_index_x), outlier_sigma_threshold, epoch), end='')
print(np.abs(omc[tmpIndexX] - np.mean(omc[tmpIndexX]))[tmp_1D_index_x], end='')
for ii in tmp_1D_index_x:
print(' {:.12f}'.format(self.T['MJD'][tmpIndexX[ii]]), end=',')
print()
outlier_1D_index = np.hstack((outlier_1D_index, tmpIndexX[tmp_1D_index_x]))
# outlier_1D_index.append(tmpIndexX[tmp_1D_index_x].tolist())
if any(outliers_y):
tmp_1D_index_y = np.where(outliers_y)[0]
print('Detected %d Y-residual outliers (%2.1f sigma) in epoch %d (1-indexed) ' % (
len(tmp_1D_index_y), outlier_sigma_threshold, epoch), end='')
print(np.abs(omc[tmpIndexY] - np.mean(omc[tmpIndexY]))[tmp_1D_index_y], end='')
for ii in tmp_1D_index_y:
print(' {:.12f}'.format(self.T['MJD'][tmpIndexY[ii]]), end=',')
print()
outlier_1D_index = np.hstack((outlier_1D_index, tmpIndexY[tmp_1D_index_y]))
# outlier_1D_index.append(tmpIndexY[tmp_1D_index_y].tolist())
if len(tmpIndexX) == 1:
self.stdResidualX[jj] = self.T['sigma_da_mas'][tmpIndexX]
if len(tmpIndexY) == 1:
self.stdResidualY[jj] = self.T['sigma_da_mas'][tmpIndexY]
self.errResidualX[jj] = self.stdResidualX[jj] / np.sqrt(len(tmpIndexX))
self.errResidualY[jj] = self.stdResidualY[jj] / np.sqrt(len(tmpIndexY))
# % from Lazorenko writeup:
self.x_e_laz[jj] = np.sum(omc[tmpIndexX] / (self.T['sigma_da_mas'][tmpIndexX] ** 2.)) / np.sum(
1 / (self.T['sigma_da_mas'][tmpIndexX] ** 2.))
self.y_e_laz[jj] = np.sum(omc[tmpIndexY] / (self.T['sigma_da_mas'][tmpIndexY] ** 2.)) / np.sum(
1 / (self.T['sigma_da_mas'][tmpIndexY] ** 2.))
self.sx_star_laz[jj] = 1 / np.sqrt(np.sum(1 / (self.T['sigma_da_mas'][tmpIndexX] ** 2.)))
self.sy_star_laz[jj] = 1 / np.sqrt(np.sum(1 / (self.T['sigma_da_mas'][tmpIndexY] ** 2.)))
if len(outlier_1D_index) != 0:
print('MJD of outliers:')
for ii in np.unique(outlier_1D_index.astype(int)):
print('{:.12f}'.format(self.T['MJD'][ii]), end=',')
print()
# print(np.unique(self.T['MJD'][outlier_1D_index.astype(np.int)].data))
# write outliers to file
if epoch_outlier_dir is not None:
out_file = os.path.join(epoch_outlier_dir, 'epoch_1D_outliers.txt')
# T = Table([outlier_1D_index.astype(np.int)], names=['index_1D'])
# write outlier epoch to file
T = Table([self.T['MJD'][outlier_1D_index.astype(int)]], names=['MJD_1D'])
T.write(out_file, format='ascii.basic')
self.outlier_1D_index = outlier_1D_index
self.chi2_naive = np.sum(
[self.meanResidualX ** 2 / self.errResidualX ** 2, self.meanResidualY ** 2 / self.errResidualY ** 2])
self.chi2_laz = np.sum(
[self.x_e_laz ** 2 / self.errResidualX ** 2, self.y_e_laz ** 2 / self.errResidualY ** 2])
self.chi2_star_laz = np.sum(
[self.x_e_laz ** 2 / self.sx_star_laz ** 2, self.y_e_laz ** 2 / self.sy_star_laz ** 2])
self.nFree_ep = len(medi) * 2 - C.shape[0]
self.chi2_laz_red = self.chi2_laz / self.nFree_ep
self.chi2_star_laz_red = self.chi2_star_laz / self.nFree_ep
self.chi2_naive_red = self.chi2_naive / self.nFree_ep
self.epoch_omc_std_X = np.std(self.meanResidualX)
self.epoch_omc_std_Y = np.std(self.meanResidualY)
self.epoch_omc_std = np.std([self.meanResidualX, self.meanResidualY])
def ppm_plot(self, save_plot=0, plot_dir=None, name_seed='', descr=None, omc2D=0, arrowOffsetX=0, arrowOffsetY=0,
horizons_file_seed=None, psi_deg=None, instrument=None, separate_residual_panels=0,
residual_y_axis_limit=None, individual_frame_figure=False, omc_description=None):
"""Make figures showing results of PPM fitting.
Parameters
----------
save_plot
plot_dir
name_seed
descr
omc2D
arrowOffsetX
arrowOffsetY
horizons_file_seed
psi_deg
instrument
separate_residual_panels
residual_y_axis_limit
individual_frame_figure
"""
if self.noParallaxFit != 1:
# orb = OrbitSystem(P_day=1., ecc=0.0, m1_MS=1.0, m2_MJ=0.0, omega_deg=0., OMEGA_deg=0., i_deg=0., Tp_day=0,
# RA_deg=self.RA_deg, DE_deg=self.DE_deg, plx_mas=self.p[2], muRA_mas=self.p[3],
# muDE_mas=self.p[4], Tref_MJD=self.tref_MJD)
argument_dict = {'m2_MJ' : 0, 'RA_deg': self.RA_deg, 'DE_deg': self.DE_deg,
'absolute_plx_mas' : self.p[2], 'muRA_mas': self.p[3], 'muDE_mas': self.p[4],
'Tref_MJD': self.tref_MJD, }
orb = OrbitSystem(argument_dict)
else:
orb = OrbitSystem(P_day=1., ecc=0.0, m1_MS=1.0, m2_MJ=0.0, omega_deg=0., OMEGA_deg=0., i_deg=0., Tp_day=0,
RA_deg=self.RA_deg, DE_deg=self.DE_deg, plx_mas=0, muRA_mas=self.p[2],
muDE_mas=self.p[3])
if separate_residual_panels:
n_subplots = 3
else:
n_subplots = 2
##################################################################
# Figure with on-sky motion only, showing individual frames
if individual_frame_figure:
fig = pl.figure(figsize=(6, 6), facecolor='w', edgecolor='k')
pl.clf()
if instrument is None:
if psi_deg is None:
ppm_curve = orb.ppm(self.tmodel_MJD, offsetRA_mas=self.p[0], offsetDE_mas=self.p[1],
horizons_file_seed=horizons_file_seed, psi_deg=psi_deg)
ppm_meas = orb.ppm(self.t_MJD_epoch, offsetRA_mas=self.p[0], offsetDE_mas=self.p[1],
horizons_file_seed=horizons_file_seed, psi_deg=psi_deg)
if psi_deg is None:
pl.plot(ppm_curve[0], ppm_curve[1], 'k-')
pl.plot(self.Xmean, self.Ymean, 'ko')
pl.plot(self.ppm_meas[self.xi], self.ppm_meas[self.yi], 'b.')
pl.axis('equal')
ax = plt.gca()
ax.invert_xaxis()
pl.xlabel('Offset in Right Ascension (mas)')
pl.ylabel('Offset in Declination (mas)')
if self.title is not None:
pl.title(self.title)
if save_plot:
fig_name = os.path.join(plot_dir, 'PPM_{}_frames.pdf'.format(name_seed.replace('.', 'p')))
plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)
##################################################################
# Figure with on-sky motion and residuals
fig = pl.figure(figsize=(6, 8), facecolor='w', edgecolor='k')
pl.clf()
pl.subplot(n_subplots, 1, 1)
if instrument is None:
if psi_deg is None:
ppm_curve = orb.ppm(self.tmodel_MJD, offsetRA_mas=self.p[0], offsetDE_mas=self.p[1], horizons_file_seed=horizons_file_seed, psi_deg=psi_deg)
# ppm_meas = orb.ppm(self.t_MJD_epoch, offsetRA_mas=self.p[0], offsetDE_mas=self.p[1], horizons_file_seed=horizons_file_seed, psi_deg=psi_deg)
if psi_deg is None:
pl.plot(ppm_curve[0], ppm_curve[1], 'k-')
pl.plot(self.Xmean, self.Ymean, 'ko')
else:
instr = np.unique(instrument)
myColours = np.array(['k', 'b', 'g', '0.7', 'g'])
for jjj, ins in enumerate(instr):
tmpInstrument = np.array([ins] * len(self.tmodel_MJD))
idx = np.where(instrument == ins)[0]
if psi_deg is None:
ppm_curve = orb.ppm(self.tmodel_MJD, offsetRA_mas=self.p[0], offsetDE_mas=self.p[1],
instrument=tmpInstrument, psi_deg=psi_deg)
pl.plot(ppm_curve[0], ppm_curve[1], c=myColours[jjj], ls='-')
pl.plot(self.Xmean[idx], self.Ymean[idx], marker='o', mfc=myColours[jjj], mec=myColours[jjj],
ls='None')
ppm_meas = orb.ppm(self.t_MJD_epoch, offsetRA_mas=self.p[0], offsetDE_mas=self.p[1],
instrument=instrument, psi_deg=psi_deg)
# arrowOffsetY = 0.
# plt.annotate('', xy=(self.p[3][0], self.p[4][0]+arrowOffsetY), xytext=(0, 0+arrowOffsetY), arrowprops=dict(arrowstyle="->",facecolor='black'), size=30 )
plt.annotate('', xy=(float(self.p[3]) + arrowOffsetX, float(self.p[4]) + arrowOffsetY),
xytext=(0. + arrowOffsetX, 0. + arrowOffsetY), arrowprops=dict(arrowstyle="->", facecolor='black'),
size=30)
pl.axis('equal')
ax = plt.gca()
ax.invert_xaxis()
pl.xlabel('Offset in Right Ascension (mas)')
pl.ylabel('Offset in Declination (mas)')
if self.title is not None:
pl.title(self.title)
if descr is not None:
pl.text(0.01, 0.99, descr, horizontalalignment='left', verticalalignment='top', transform=ax.transAxes)
pl.subplot(n_subplots, 1, 2)
epochTime = self.t_MJD_epoch - self.tref_MJD
epochOrdinateLabel = 'MJD - %3.1f' % self.tref_MJD
pl.plot(epochTime, self.meanResidualX, 'ko', color='0.7', label='RA')
pl.errorbar(epochTime, self.meanResidualX, yerr=self.errResidualX, fmt='none', ecolor='0.7')
plt.axhline(y=0, color='0.5', ls='--', zorder=-50)
pl.ylabel('O-C (mas)')
if residual_y_axis_limit is not None:
pl.ylim((-residual_y_axis_limit, residual_y_axis_limit))
if psi_deg is None:
if separate_residual_panels:
pl.subplot(n_subplots, 1, 3)
pl.plot(epochTime, self.meanResidualY, 'ko', label='Dec')
pl.errorbar(epochTime, self.meanResidualY, yerr=self.errResidualY, fmt='none', ecolor='k')
plt.axhline(y=0, color='0.5', ls='--', zorder=-50)
pl.ylabel('O-C (mas)')
if residual_y_axis_limit is not None:
pl.ylim((-residual_y_axis_limit, residual_y_axis_limit))
if not separate_residual_panels:
# pl.legend(loc='best')
pl.legend(loc=3)
if omc_description is not None:
ax=pl.gca()
pl.text(0.01, 0.99, omc_description, horizontalalignment='left', verticalalignment='top',
transform=ax.transAxes)
if instrument is not None:
for jjj, ins in enumerate(instr):
idx = np.where(instrument == ins)[0]
pl.plot(epochTime[idx], self.meanResidualY[idx], marker='o', mfc=myColours[jjj], mec=myColours[jjj],
ls='None', label=ins)
pl.legend(loc='best')
pl.xlabel(epochOrdinateLabel)
fig.tight_layout(h_pad=0.0)
pl.show()
if save_plot:
fig_name = os.path.join(plot_dir, 'PPM_%s.pdf' % (name_seed.replace('.', 'p')))
plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)
if self.C.shape[0] > 7:
pl.figure(figsize=(6, 8), facecolor='w', edgecolor='k')
pl.clf()
pl.subplot(2, 1, 1)
# pl.plot(self.Xmean - ppm_meas[0],self.Ymean-ppm_meas[1],'ko')
pl.plot(self.ACC_Xmean, self.ACC_Ymean, 'ko')
pl.axis('equal')
ax = plt.gca()
ax.invert_xaxis()
pl.xlabel('Offset in Right Ascension (mas)')
pl.ylabel('Offset in Declination (mas)')
pl.title('Acceleration')
pl.subplot(2, 1, 2)
pl.plot(self.t_MJD_epoch, self.ACC_Xmean, 'ko', color='0.7')
pl.plot(self.t_MJD_epoch, self.ACC_Ymean, 'ko')
pl.xlabel('MJD')
pl.show()
if save_plot:
fig_name = os.path.join(plot_dir, 'ACCEL_%s.pdf' % (name_seed.replace('.', 'p')))
plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)
if omc2D == 1:
pl.figure(figsize=(6, 6), facecolor='w', edgecolor='k')
pl.clf()
pl.plot(self.meanResidualX, self.meanResidualY, 'ko')
pl.errorbar(self.meanResidualX, self.meanResidualY, xerr=self.errResidualX, yerr=self.errResidualY,
fmt='none', ecolor='k')
pl.axis('equal')
ax = plt.gca()
ax.invert_xaxis()
pl.xlabel('Residual in Right Ascension (mas)')
pl.ylabel('Residual in Declination (mas)')
pl.show()
if save_plot:
fig_name = '%sPPM_omc2D_%s.pdf' % (plot_dir, name_seed.replace('.', 'p'))
plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)
elif omc2D == 2: # for LUH16 referee
pl.figure(figsize=(6, 8), facecolor='w', edgecolor='k')
pl.clf()
pl.subplot(3, 1, 1)
pl.plot(epochTime, self.Xmean, 'ko', color='0.7')
pl.plot(epochTime, self.Ymean, 'ko')
pl.ylabel('Offset in RA/Dec (mas)')  # label the offset panel before switching to the residual panel
pl.subplot(3, 1, 2)
pl.plot(self.T['MJD'][self.xi] - self.tref_MJD, self.omc[self.xi], 'ko', color='0.7')
pl.plot(self.T['MJD'][self.yi] - self.tref_MJD, self.omc[self.yi], 'ko')
pl.ylabel('Frame O-C (mas)')
pl.subplot(3, 1, 3)
# epochOrdinateLabel = 'MJD - %3.1f' % self.tref_MJD
pl.plot(epochTime, self.meanResidualX, 'ko', color='0.7')
pl.errorbar(epochTime, self.meanResidualX, yerr=self.errResidualX, fmt='none', ecolor='0.7')
pl.plot(epochTime, self.meanResidualY, 'ko')
pl.errorbar(epochTime, self.meanResidualY, yerr=self.errResidualY, fmt='none', ecolor='k')
plt.axhline(y=0, color='0.5', ls='--', zorder=-50)
pl.ylabel('Epoch O-C (mas)')
pl.xlabel(epochOrdinateLabel)
pl.show()
if save_plot:
fig_name = os.path.join(plot_dir, 'PPM_%s_referee.pdf' % (name_seed.replace('.', 'p')))
plt.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)
def print_residual_stats(self):
print('Epoch residual RMS X %3.3f mas' % (self.epoch_omc_std_X))
if self.psi_deg is None:
print('Epoch residual RMS Y %3.3f mas' % (self.epoch_omc_std_Y))
print('Epoch residual RMS %3.3f mas' % (self.epoch_omc_std))
print('Degrees of freedom %d' % (self.nFree_ep))
for elm in ['chi2_laz_red', 'chi2_star_laz_red', 'chi2_naive_red']:
print('reduced chi^2 : %3.2f (%s)' % (getattr(self, elm), elm))
if self.psi_deg is None:
print('Epoch precision (naive)')
print(np.mean([self.errResidualX, self.errResidualY], axis=0))
# print('Epoch precision (x_e_laz)'),
# print((np.mean([self.sx_star_laz, self.sy_star_laz], axis=0)))
print('Average precision (naive) %3.3f mas' % (np.mean([self.errResidualX, self.errResidualY])))
print('Average precision (x_e_laz) %3.3f mas' % (np.mean([self.sx_star_laz, self.sy_star_laz])))
else:
print('Epoch precision (naive)')
print(np.mean([self.errResidualX], axis=0))
# print('Epoch precision (x_e_laz)'),
# print((np.mean([self.sx_star_laz], axis=0)))
print('Average precision (naive) %3.3f mas' % (np.mean([self.errResidualX])))
print('Average precision (x_e_laz) %3.3f mas' % (np.mean([self.sx_star_laz])))
class AstrometricOrbitPlotter():
"""Class to plot results of astrometric fitting of parallax + proper motion + orbit.
That is, this class primarily supports plotting of barycentric and photocentric orbits.
Attributes
----------
p : array
holding best fit parameters of linear fit (usually positions,parallax,proper motion)
part of what linfit returns
C : matrix
Numpy Matrix holding the parameters of the linear model
Methods
-------
"""
def __init__(self, attribute_dict=None):
"""
Parameters
----------
attribute_dict : dict
    Dictionary defining the instance attributes. Required keys are
    'model_parameters' (theta: list of dictionaries, one per companion),
    'linear_coefficients' (dict with 'matrix' and 'table' entries) and
    'data'. Optional keys include the 2d indices 'xi'/'yi', 'data_type'
    ('1d', '2d', 'mixed'), and the defaults listed below (e.g.
    outlier_sigma_threshold, absolute_threshold, scan_angle_definition).
"""
# model_parameters dict (theta)
# linear_coefficients dict ('matrix', 'table')
# 2d_indices dict 'xi', 'yi'
# data_type str '1d', '2d', 'mixed'
if attribute_dict is not None:
for key, value in attribute_dict.items():
setattr(self, key, value)
# set defaults
default_dict = {'outlier_sigma_threshold': 3.,
'absolute_threshold': 10.,
'residuals': None,
'scan_angle_definition': 'hipparcos',
'include_ppm': True,
'title': None,
'relative_orbit': False,
'verbose': False,
}
for key, value in default_dict.items():
if key not in attribute_dict.keys():
setattr(self, key, value)
required_attributes = ['linear_coefficients', 'model_parameters', 'data']
for attribute_name in required_attributes:
if hasattr(self, attribute_name) is False:
raise ValueError('Instance has to have an attribute named: {}'.format(attribute_name))
self.attribute_dict = attribute_dict
linear_coefficient_matrix = self.linear_coefficients['matrix']
number_of_companions = len(self.model_parameters)
self.number_of_companions = number_of_companions
model_name = 'k{:d}'.format(number_of_companions)
if self.relative_orbit:
# assert hasattr(self, 'relative_astrometry')
assert self.relative_coordinate_system is not None
T = self.data.epoch_data
# parameters of first companion
theta_0 = self.model_parameters[0]
required_parameters = ['offset_alphastar_mas', 'offset_delta_mas', 'absolute_plx_mas',
'muRA_mas', 'muDE_mas']
theta_names = theta_0.keys()
for parameter_name in required_parameters:
if parameter_name not in theta_names:
raise ValueError('Model parameter {} has to be set!'.format(parameter_name))
# if ('plx_abs_mas' in theta_names) & ('plx_corr_mas' in theta_names):
# theta_0['plx_mas']= theta_0['plx_abs_mas'] + ['plx_corr_mas']
if 'parallax_correction_mas' in theta_names:
parallax_for_ppm_mas = theta_0['absolute_plx_mas'] - theta_0['parallax_correction_mas']
else:
parallax_for_ppm_mas = theta_0['absolute_plx_mas']
# compute positions at measurement dates according to best-fit model p (no dcr)
ppm_parameters = np.array([theta_0['offset_alphastar_mas'], theta_0['offset_delta_mas'],
parallax_for_ppm_mas, theta_0['muRA_mas'], theta_0['muDE_mas']])
if self.include_ppm:
self.ppm_model = np.array(np.dot(linear_coefficient_matrix[0:len(ppm_parameters), :].T, ppm_parameters)).flatten()
elif self.relative_orbit:
self.ppm_model = np.zeros(len(T))
else:
# these are only the positional offsets
self.ppm_model = np.array(np.dot(linear_coefficient_matrix[0:2, :].T, ppm_parameters[0:2])).flatten()
if ('esinw' in theta_names):
# self.ecc, self.omega_deg = mcmc_helpers.decode_eccentricity_omega(theta_0['esinw'], theta_0['ecosw'])
for p in range(number_of_companions):
self.model_parameters[p]['ecc'], self.model_parameters[p]['omega_deg'] = \
mcmc_helpers.decode_eccentricity_omega(self.model_parameters[p]['esinw'], self.model_parameters[p]['ecosw'])
if ('m2sini' in theta_names):
for p in range(number_of_companions):
self.model_parameters[p]['m2_MJ'], self.model_parameters[p]['i_deg'] = \
mcmc_helpers.decode_eccentricity_omega(self.model_parameters[p]['m2sini'], self.model_parameters[p]['m2cosi'])
if 'rho_mas' in theta_names:
if 'd_mas' in theta_names:
dcr_parameters = np.array([theta_0['rho_mas'], theta_0['d_mas']])
else:
dcr_parameters = np.array([theta_0['rho_mas']])
# compute measured positions (dcr-corrected)
if linear_coefficient_matrix.shape[0] == 7:
dcr = np.dot(linear_coefficient_matrix[5:7, :].T, dcr_parameters)
elif linear_coefficient_matrix.shape[0] == 6:
dcr = linear_coefficient_matrix[5, :] * dcr_parameters
elif linear_coefficient_matrix.shape[0] <= 5:
dcr = np.zeros(linear_coefficient_matrix.shape[1])
else:
dcr = np.zeros(linear_coefficient_matrix.shape[1])
self.DCR = dcr
for p in range(number_of_companions):
theta_p = self.model_parameters[p]
if 'm2_MS' in theta_names:
theta_p['m2_MJ'] = theta_p['m2_MS'] * MS_kg / MJ_kg
tmporb = OrbitSystem(attribute_dict=theta_p)
if self.relative_orbit:
orbit_model = tmporb.relative_orbit_fast(np.array(T['MJD']), np.array(T['spsi']),
np.array(T['cpsi']),
shift_omega_by_pi=True,
coordinate_system=self.relative_coordinate_system)
else:
orbit_model = tmporb.photocenter_orbit(np.array(T['MJD']),np.array(T['spsi']),
np.array(T['cpsi']))
# orbit_model = tmporb.pjGetBarycentricAstrometricOrbitFast(np.array(T['MJD']),
# np.array(T['spsi']),
# np.array(T['cpsi']))
setattr(self, 'orbit_system_companion_{:d}'.format(p), tmporb)
setattr(self, 'orbit_model_%d' % (p), orbit_model)
if number_of_companions == 1:
self.orbit_system = self.orbit_system_companion_0
self.orbit_model = self.orbit_model_0
else:
self.orbit_model = self.orbit_model_0 + self.orbit_model_1
if self.residuals is None:
residuals = np.array(T['da_mas']) - self.orbit_model - self.DCR - self.ppm_model
else:
residuals = self.residuals
if np.any(np.isnan(residuals)):
raise ValueError('NaN found in residuals')
self.ppm_meas = np.array(T['da_mas']) - self.DCR - self.orbit_model
self.orb_meas = np.array(T['da_mas']) - self.DCR - self.ppm_model
for p in range(number_of_companions):
if number_of_companions == 1:
tmp_orb_meas = self.orb_meas
elif p == 0:
tmp_orb_meas = np.array(T['da_mas']) - self.DCR - self.ppm_model - self.orbit_model_1
elif p == 1:
tmp_orb_meas = np.array(T['da_mas']) - self.DCR - self.ppm_model - self.orbit_model_0
setattr(self, 'orb_{:d}_meas'.format(p), tmp_orb_meas)
# compute epoch averages
medi = np.unique(T['OB'])
self.medi = medi
self.n_epoch = len(self.medi)
self.t_MJD_epoch = np.zeros(self.n_epoch)
average_quantities_1d = 'stdResidualX errResidualX Xmean_ppm Xmean_orb parfXmean ' \
'DCR_Xmean ACC_Xmean meanResidualX x_e_laz sx_star_laz mean_cpsi mean_spsi'.split()
for p in range(number_of_companions):
average_quantities_1d += ['Xmean_orb_{:d}'.format(p)]
for attribute in average_quantities_1d:
setattr(self, attribute, np.zeros(len(medi)))
if '2d' in self.data_type:
for attribute in average_quantities_1d:
setattr(self, attribute.replace('X', 'Y').replace('x_', 'y_'), np.zeros(len(medi)))
outlier_1D_index = np.array([])
if self.data_type == 'gaia_2d':
self.xi = self.data.xi
self.yi = self.data.yi
for jj, epoch in enumerate(self.medi):
tmpidx = np.where(T['OB'] == epoch)[0]
if '2d' in self.data_type:
tmpIndexX = np.intersect1d(self.xi, tmpidx)
tmpIndexY = np.intersect1d(self.yi, tmpidx)
elif self.data_type == '1d':
tmpIndexX = tmpidx
self.t_MJD_epoch[jj] = np.mean(T['MJD'][tmpIndexX])
self.mean_cpsi[jj] = np.mean(T['cpsi'][tmpIndexX])
self.mean_spsi[jj] = np.mean(T['spsi'][tmpIndexX])
self.Xmean_ppm[jj] = np.average(self.ppm_meas[tmpIndexX],
weights=1. / (np.array(T['sigma_da_mas'])[tmpIndexX] ** 2.))
self.Xmean_orb[jj] = np.average(self.orb_meas[tmpIndexX],
weights=1. / (T['sigma_da_mas'][tmpIndexX] ** 2.))
if np.any(np.isnan(self.Xmean_ppm)):
raise ValueError('NaN found in Xmean_ppm')
if np.any(np.isnan(self.Xmean_orb)):
raise ValueError('NaN found in Xmean_orb')
if '2d' in self.data_type:
self.Ymean_ppm[jj] = np.average(self.ppm_meas[tmpIndexY],
weights=1. / (T['sigma_da_mas'][tmpIndexY] ** 2.))
self.Ymean_orb[jj] = np.average(self.orb_meas[tmpIndexY],
weights=1. / (T['sigma_da_mas'][tmpIndexY] ** 2.))
for p in range(number_of_companions):
getattr(self, 'Xmean_orb_{:d}'.format(p))[jj] = np.average(
getattr(self, 'orb_{:d}_meas'.format(p))[tmpIndexX],
weights=1. / (T['sigma_da_mas'][tmpIndexX] ** 2.))
# if self.data_type == '2d':
if '2d' in self.data_type:
getattr(self, 'Ymean_orb_{:d}'.format(p))[jj] = np.average(
getattr(self, 'orb_{:d}_meas'.format(p))[tmpIndexY],
weights=1. / (T['sigma_da_mas'][tmpIndexY] ** 2.))
self.DCR_Xmean[jj] = np.average(self.DCR[tmpIndexX])
self.meanResidualX[jj] = np.average(residuals[tmpIndexX], weights=1. / (T['sigma_da_mas'][tmpIndexX] ** 2.))
self.parfXmean[jj] = np.average(T['ppfact'][tmpIndexX])
self.stdResidualX[jj] = np.std(residuals[tmpIndexX]) if len(tmpIndexX)>1 else T['sigma_da_mas'][tmpIndexX]
if '2d' in self.data_type:
self.DCR_Ymean[jj] = np.average(self.DCR[tmpIndexY])
self.meanResidualY[jj] = np.average(residuals[tmpIndexY], weights=1. / (T['sigma_da_mas'][tmpIndexY] ** 2.))
self.parfYmean[jj] = np.average(T['ppfact'][tmpIndexY])
self.stdResidualY[jj] = np.std(residuals[tmpIndexY]) if len(tmpIndexY)>1 else T['sigma_da_mas'][tmpIndexY]
# on the fly inter-epoch outlier detection
outliers = {}
outliers['x'] = {}
outliers['x']['index'] = tmpIndexX
outliers['x']['std_residual'] = self.stdResidualX[jj]
if '2d' in self.data_type:
outliers['y'] = {}
outliers['y']['index'] = tmpIndexY
outliers['y']['std_residual'] = self.stdResidualY[jj]
is_outlier = []
for key in outliers.keys():
# boolean array
if self.absolute_threshold is not None:
is_outlier = (np.abs(residuals[outliers[key]['index']] - np.mean(residuals[outliers[key]['index']])) > self.outlier_sigma_threshold * outliers[key]['std_residual']) | (
np.abs(residuals[outliers[key]['index']] - np.mean(residuals[outliers[key]['index']])) > self.absolute_threshold)
elif self.outlier_sigma_threshold is not None:
is_outlier = np.abs(residuals[outliers[key]['index']] - np.mean(residuals[outliers[key]['index']])) > self.outlier_sigma_threshold * outliers[key]['std_residual']
if any(is_outlier):
tmp_1D_index = np.where(is_outlier)[0]
print('Detected {} {}-residual outliers ({:2.1f} sigma) in epoch {} (1-indexed) '.format(
len(tmp_1D_index), key, self.outlier_sigma_threshold, epoch), end='')
print(np.abs(residuals[outliers[key]['index']] - np.mean(residuals[outliers[key]['index']]))[tmp_1D_index], end='')
# 1/0
for ii in tmp_1D_index:
print(' {:.12f}'.format(T['MJD'][outliers[key]['index'][ii]]), end=',')
print()
outlier_1D_index = np.hstack((outlier_1D_index, outliers[key]['index'][tmp_1D_index]))
self.errResidualX[jj] = self.stdResidualX[jj] / np.sqrt(len(tmpIndexX))
if '2d' in self.data_type:
self.errResidualY[jj] = self.stdResidualY[jj] / np.sqrt(len(tmpIndexY))
# % from Lazorenko writeup:
self.x_e_laz[jj] = np.sum(residuals[tmpIndexX] / (T['sigma_da_mas'][tmpIndexX] ** 2.)) / np.sum(
1 / (T['sigma_da_mas'][tmpIndexX] ** 2.))
self.sx_star_laz[jj] = 1 / np.sqrt(np.sum(1 / (T['sigma_da_mas'][tmpIndexX] ** 2.)))
if '2d' in self.data_type:
self.y_e_laz[jj] = np.sum(residuals[tmpIndexY] / (T['sigma_da_mas'][tmpIndexY] ** 2.)) / np.sum(
1 / (T['sigma_da_mas'][tmpIndexY] ** 2.))
self.sy_star_laz[jj] = 1 / np.sqrt(np.sum(1 / (T['sigma_da_mas'][tmpIndexY] ** 2.)))
if len(outlier_1D_index) != 0:
print('MJD of outliers:')
for ii in np.unique(outlier_1D_index.astype(int)):
print('{:.12f}'.format(T['MJD'][ii]), end=',')
print()
self.outlier_1D_index = np.array(outlier_1D_index).astype(int)
# compute chi squared values
if self.data_type == '1d':
self.chi2_naive = np.sum([self.meanResidualX ** 2 / self.errResidualX ** 2])
self.chi2_laz = np.sum([self.x_e_laz ** 2 / self.errResidualX ** 2])
self.chi2_star_laz = np.sum([self.x_e_laz ** 2 / self.sx_star_laz ** 2])
elif '2d' in self.data_type:
self.chi2_naive = np.sum(
[self.meanResidualX ** 2 / self.errResidualX ** 2, self.meanResidualY ** 2 / self.errResidualY ** 2])
self.chi2_laz = np.sum(
[self.x_e_laz ** 2 / self.errResidualX ** 2, self.y_e_laz ** 2 / self.errResidualY ** 2])
self.chi2_star_laz = np.sum(
[self.x_e_laz ** 2 / self.sx_star_laz ** 2, self.y_e_laz ** 2 / self.sy_star_laz ** 2])
# fixed 2018-08-18 JSA
if self.data_type == '1d':
self.nFree_ep = len(medi) * 1 - (linear_coefficient_matrix.shape[0] + number_of_companions*7)
elif '2d' in self.data_type:
self.nFree_ep = len(medi) * 2 - (linear_coefficient_matrix.shape[0] + number_of_companions*7)
self.chi2_laz_red = self.chi2_laz / self.nFree_ep
self.chi2_star_laz_red = self.chi2_star_laz / self.nFree_ep
self.chi2_naive_red = self.chi2_naive / self.nFree_ep
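# Bookkeeping behind the reduced chi-square values above (a note, not a change to the
# computation): with N epoch averages per coordinate, the number of effective free data
# points is N (1d) or 2*N (2d), and the model consumes one parameter per row of the
# linear coefficient matrix plus 7 Keplerian parameters per companion, i.e.
#     nFree_ep = N * n_coordinates - (n_linear_rows + 7 * n_companions)
# so that chi2_*_red = chi2_* / nFree_ep.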
self.epoch_omc_std_X = np.std(self.meanResidualX)
if self.data_type == '1d':
self.epoch_omc_std = self.epoch_omc_std_X
self.epoch_precision_mean = np.mean([self.errResidualX])
elif '2d' in self.data_type:
self.epoch_omc_std_Y = np.std(self.meanResidualY)
self.epoch_omc_std = np.std([self.meanResidualX, self.meanResidualY])
self.epoch_precision_mean = np.mean([self.errResidualX, self.errResidualY])
self.residuals = residuals
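# Example (sketch): constructing the plotter from an upstream fit result. Only the keys
# required by __init__ are shown; the variable names (theta_0, coefficient_matrix, iad)
# are placeholders for objects produced by the fitting code.
#
#   plotter = AstrometricOrbitPlotter(attribute_dict={
#       'model_parameters': [theta_0],                      # one dict per companion
#       'linear_coefficients': {'matrix': coefficient_matrix},
#       'data': iad,                                        # provides .epoch_data (and .xi/.yi for 2d data)
#       'data_type': '1d',
#       'scan_angle_definition': 'gaia'})
#   plotter.print_residual_statistics()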
def epoch_parameters(self):
"""Return structure with epoch mean parameters to facilitate e.g. detection limit computation.
Returns
-------
"""
cat = Table()
cat['MJD'] = self.t_MJD_epoch
cat['RA*_mas'] = self.Xmean_ppm
cat['DE_mas'] = self.Ymean_ppm
cat['sRA*_mas'] = self.errResidualX
cat['sDE_mas'] = self.errResidualY
cat['OB'] = self.medi
cat['frame'] = self.medi
iad = ImagingAstrometryData(cat, data_type=self.data_type)
iad.RA_deg = self.orbit_system.RA_deg
iad.Dec_deg = self.orbit_system.DE_deg
iad.set_five_parameter_coefficients()
iad.set_data_1D()
# covariance matrix
S_mean = np.mat(np.diag(1. / np.power(iad.data_1D['sigma_da_mas'], 2)))
# mean signal/abscissa
M_mean = np.mat(iad.data_1D['da_mas'])
# coefficient matrix
C_mean = iad.five_parameter_coefficients_array
mean_dict = {'covariance_matrix': S_mean,
'signal': M_mean,
'coefficient_matrix': C_mean,
'iad': iad
}
# return C_mean, S_mean, M_mean
return mean_dict
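# Example (sketch): the returned dictionary can be passed straight to a linear fit, in the
# same way DetectionLimit.prepare_reference_dataset does further below:
#
#   mean_dict = plotter.epoch_parameters()
#   res = linearfit.LinearFit(mean_dict['signal'], mean_dict['covariance_matrix'],
#                             mean_dict['coefficient_matrix'])
#   res.fit()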
def print_residual_statistics(self):
"""Print statistics to stdout."""
print('='*100)
print('Epoch residual RMS X %3.3f mas' % (self.epoch_omc_std_X))
if self.data_type == '2d':
print('Epoch residual RMS Y %3.3f mas' % (self.epoch_omc_std_Y))
print('Epoch residual RMS %3.3f mas' % (self.epoch_omc_std))
print('Degrees of freedom %d' % (self.nFree_ep))
for elm in ['chi2_laz_red', 'chi2_star_laz_red', 'chi2_naive_red']:
print('reduced chi^2 : %3.2f (%s)' % (getattr(self, elm), elm))
print('Epoch precision (naive)', self.epoch_precision_mean)
if self.data_type == '1d':
# print('Epoch precision (x_e_laz)'),
# print(np.mean([self.sx_star_laz], axis=0))
print('Average precision (naive) %3.3f mas' % (np.mean([self.errResidualX])))
print('Average precision (x_e_laz) %3.3f mas' % (np.mean([self.sx_star_laz])))
elif '2d' in self.data_type:
# print('Epoch precision (x_e_laz)'),
# print(np.mean([self.sx_star_laz, self.sy_star_laz], axis=0))
print('Average precision (naive) %3.3f mas' % (np.mean([self.errResidualX, self.errResidualY])))
print('Average precision (x_e_laz) %3.3f mas' % (np.mean([self.sx_star_laz, self.sy_star_laz])))
print('='*100)
def astrometric_signal_to_noise_epoch(self, amplitude_mas):
"""Return astrometric SNR for epochs (FOV transists not CCD transits)"""
if self.data_type == '1d':
median_uncertainty_mas = np.median([self.errResidualX])
astrometric_snr = amplitude_mas * np.sqrt(self.n_epoch)/median_uncertainty_mas
return astrometric_snr
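# Worked example (1d epoch data assumed): an amplitude of 0.5 mas observed over 20 epochs
# with a median epoch uncertainty of 0.3 mas yields 0.5 * sqrt(20) / 0.3 ~ 7.5, i.e. the
# SNR grows with the square root of the number of field-of-view transits.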
def plot(self, argument_dict=None):
"""Make the astrometric orbit plots.
Parameters
----------
argument_dict : dict
"""
# set defaults
if argument_dict is not None:
default_argument_dict = {'arrow_length_factor': 1.,
'horizons_file_seed': None,
'frame_omc_description': 'default',
'orbit_description': 'default',
'scan_angle_definition': 'gaia',
'orbit_signal_description': 'default',
'ppm_description': 'default',
'epoch_omc_description': 'default',
'name_seed': 'star',
'make_1d_overview_figure': True,
'make_condensed_summary_figure': True,
'frame_residual_panel': False,
'arrow_offset_x': 40.,
'arrow_offset_y': 0.,
'save_plot': False,
'orbit_only_panel': False,
'make_xy_residual_figure': False,
'make_ppm_figure': False,
'omc_panel': True,
'plot_dir': os.getcwd(),
}
for key, value in default_argument_dict.items():
if key not in argument_dict.keys():
argument_dict[key] = value
if argument_dict['ppm_description'] == 'default':
argument_dict['ppm_description'] = '$\\varpi={:2.3f}$ mas\n$\\mu_\\mathrm{{ra^\\star}}={' \
':2.3f}$ mas/yr\n$\\mu_\\mathrm{{dec}}={:2.3f}$ mas/yr'.format(
self.model_parameters[0]['absolute_plx_mas'], self.model_parameters[0]['muRA_mas'],
self.model_parameters[0]['muDE_mas'])
if argument_dict['epoch_omc_description'] == 'default':
argument_dict['epoch_omc_description'] = '$N_e={}$, $N_f={}$,\n$\\Delta t={:.0f}$ d, DOF$_\\mathrm{{eff}}$={},\n' \
'$\\Sigma_\\mathrm{{O-C,epoch}}$={:2.3f} mas\n$\\bar\\sigma_\\Lambda$={:2.3f} mas'.format(
len(np.unique(self.data.epoch_data['OB'])), len(self.data.epoch_data),
np.ptp(self.data.epoch_data['MJD']), self.nFree_ep, self.epoch_omc_std,
self.epoch_precision_mean)
if argument_dict['frame_omc_description'] == 'default':
argument_dict['frame_omc_description'] = '$N_f={}/{}$, $\\Sigma_\\mathrm{{O-C,frame}}$={:2.3f} mas\n' \
'$\\bar\\sigma_\\Lambda$={:2.3f} mas'.format(
len(self.data.epoch_data), self.data.n_original_frames, np.std(self.residuals), np.mean(self.data.epoch_data['sigma_da_mas']))
if 'excess_noise' in argument_dict.keys():
argument_dict['frame_omc_description'] += '\nexN = {:2.2f}, mF = {:2.0f}'.format(
argument_dict['excess_noise'], argument_dict['merit_function'])
if argument_dict['orbit_signal_description'] == 'default':
argument_dict['orbit_signal_description'] = '$\\Sigma_\\mathrm{{Signal,epoch}}$={:2.3f} mas'.format(
np.std(self.Xmean_orb))
# loop over number of companions
for p in range(self.number_of_companions):
if (argument_dict['orbit_description'][p] == 'default') and (self.model_parameters[p]['solution_type'] in ['Acceleration7', 'Acceleration9']):
argument_dict['tmp_orbit_description'] = '{}'.format(self.model_parameters[p]['solution_type'])
elif (argument_dict['orbit_description'][p] == 'default'):
argument_dict['tmp_orbit_description'] = '$P={:2.3f}$ d\n$e={:2.3f}$\n$\\alpha={:2.3f}$ mas\n$i={:2.3f}$ deg\n$\\omega={:2.3f}$ deg\n$\\Omega={:2.3f}$ deg\n$M_1={:2.3f}$ Msun\n$M_2={:2.1f}$ Mjup'.format(self.model_parameters[p]['P_day'], self.model_parameters[p]['ecc'], getattr(self, 'orbit_system_companion_{:d}'.format(p)).alpha_mas, self.model_parameters[p]['i_deg'], self.model_parameters[p]['omega_deg'], self.model_parameters[p]['OMEGA_deg'], self.model_parameters[p]['m1_MS'], self.model_parameters[p]['m2_MJ'])
else:
argument_dict['tmp_orbit_description'] = argument_dict['orbit_description'][p]
theta_p = self.model_parameters[p]
theta_names = theta_p.keys()
if self.model_parameters[p]['solution_type'] in ['Acceleration7', 'Acceleration9']:
name_seed_2 = argument_dict['name_seed'] + '_{}'.format(self.model_parameters[p]['solution_type'])
else:
name_seed_2 = argument_dict['name_seed'] + '_companion{:d}'.format(p)
if 'm2_MS' in theta_names:
theta_p['m2_MJ'] = theta_p['m2_MS'] * MS_kg / MJ_kg
orb = OrbitSystem(attribute_dict=theta_p)
if getattr(orb, 'Tref_MJD') is None:
raise UserWarning('Reference time was not set.')
# PPM plot and residuals
if argument_dict['make_ppm_figure']:
n_rows = 2
n_columns = 1
fig = pl.figure(figsize=(6, 8), facecolor='w', edgecolor='k')
pl.clf()
# PPM panel
pl.subplot(n_rows, n_columns, 1)
self.insert_ppm_plot(orb, argument_dict)
pl.axis('equal')
ax = plt.gca()
ax.invert_xaxis()
pl.xlabel('Offset in Right Ascension (mas)')
pl.ylabel('Offset in Declination (mas)')
if self.title is not None:
pl.title(self.title)
pl.subplot(n_rows, n_columns, 2)
self.insert_epoch_residual_plot(orb, argument_dict)
plt.tight_layout()
pl.show()
if argument_dict['save_plot']:
figure_file_name = os.path.join(argument_dict['plot_dir'],
'ppm_{}.pdf'.format(
name_seed_2.replace('.', 'p')))
fig.savefig(figure_file_name, transparent=True, bbox_inches='tight',
pad_inches=0.05)
# 1D astrometry overview figure
if argument_dict['make_1d_overview_figure']:
n_rows = 3
n_columns = 2
fig = pl.figure(figsize=(14, 9), facecolor='w', edgecolor='k')
pl.clf()
# PPM panel
pl.subplot(n_rows, n_columns, 1)
self.insert_ppm_plot(orb, argument_dict)
pl.axis('equal')
ax = plt.gca()
ax.invert_xaxis()
pl.xlabel('Offset in Right Ascension (mas)')
pl.ylabel('Offset in Declination (mas)')
if self.title is not None:
pl.title(self.title)
# orbit panel
pl.subplot(n_rows-1, n_columns, 3)
self.insert_orbit_plot(orb, argument_dict)
pl.axis('equal')
ax = plt.gca()
ax.invert_xaxis()
pl.xlabel('Offset in Right Ascension (mas)')
pl.ylabel('Offset in Declination (mas)')
pl.subplot(n_rows, n_columns, 2)
self.insert_orbit_timeseries_plot(orb, argument_dict)
pl.subplot(n_rows, n_columns, 4)
self.insert_orbit_epoch_residuals_plot(orb, argument_dict)
pl.subplot(n_rows, n_columns, 6)
self.insert_orbit_frame_residuals_plot(orb, argument_dict, direction='x')
pl.xlabel('MJD - {:3.1f}'.format(orb.Tref_MJD))
# fig.tight_layout(h_pad=0.0)
pl.show()
if argument_dict['save_plot']:
figure_file_name = os.path.join(argument_dict['plot_dir'],
'orbit_1d_summary_{}.png'.format(
name_seed_2.replace('.', 'p')))
try:
fig.savefig(figure_file_name, transparent=False, bbox_inches='tight',
pad_inches=0.05)
except ValueError:
print('WARNING: Could not save {}'.format(figure_file_name))
##################################################
# TRIPLE PANEL FIGURE (PPM + ORBIT + EPOCH RESIDUALS)
# plot PPM and residuals
if argument_dict['make_condensed_summary_figure']:
if argument_dict['frame_residual_panel']:
fig = pl.figure(figsize=(6, 9), facecolor='w', edgecolor='k')
n_panels = 3
else:
fig = pl.figure(figsize=(6, 6), facecolor='w', edgecolor='k')
n_panels = 2
pl.clf()
# PPM panel
pl.subplot(n_panels, 1, 1)
self.insert_ppm_plot(orb, argument_dict)
pl.axis('equal')
ax = plt.gca()
ax.invert_xaxis()
pl.xlabel('Offset in Right Ascension (mas)')
pl.ylabel('Offset in Declination (mas)')
if self.title is not None:
pl.title(self.title)
# orbit panel
pl.subplot(n_panels, 1, 2)
self.insert_orbit_plot(orb, argument_dict)
pl.axis('equal')
ax = plt.gca()
ax.invert_xaxis()
pl.xlabel('Offset in Right Ascension (mas)')
pl.ylabel('Offset in Declination (mas)')
# frame residual panel
if argument_dict['frame_residual_panel']:
pl.subplot(n_panels, 1, 3)
self.insert_epoch_residual_plot(orb, argument_dict)
plt.tight_layout()
pl.show()
if argument_dict['save_plot']:
figure_file_name = os.path.join(argument_dict['plot_dir'], 'ppm_orbit_{}.pdf'.format(name_seed_2.replace('.', 'p')))
fig.savefig(figure_file_name, transparent=True, bbox_inches='tight', pad_inches=0.05)
##################################################
##################################################
# ORBIT only
if argument_dict['orbit_only_panel']:
fig = pl.figure(figsize=(8, 8), facecolor='w', edgecolor='k')
pl.clf()
self.insert_orbit_plot(orb, argument_dict)
if self.title is not None:
pl.title(self.title)
pl.axis('equal')
ax = plt.gca()
ax.invert_xaxis()
pl.xlabel('Offset in Right Ascension (mas)')
pl.ylabel('Offset in Declination (mas)')
pl.show()
if argument_dict['save_plot']:
figure_file_name = os.path.join(argument_dict['plot_dir'], 'orbit_only_{}.pdf'.format(name_seed_2.replace('.', 'p')))
fig.savefig(figure_file_name, transparent=True, bbox_inches='tight', pad_inches=0.05)
##################################################
##################################################
# FIGURE SHOWING RA AND Dec OFFSETS AND RESIDUALS
if argument_dict['make_xy_residual_figure']:
if self.data_type == '1d':
n_columns = 1
elif self.data_type == '2d':
n_columns = 2
if argument_dict['frame_residual_panel']:
n_rows = 3
elif argument_dict['omc_panel'] is False:
n_rows = 1
else:
n_rows = 2
fig, axes = pl.subplots(n_rows, n_columns, sharex=True, sharey=False, figsize=(n_columns*4.0, n_rows*2.5), facecolor='w',
edgecolor='k', squeeze=False)
self.insert_orbit_timeseries_plot(orb, argument_dict, ax=axes[0][0])
if self.data_type == '2d':
self.insert_orbit_timeseries_plot(orb, argument_dict, direction='y', ax=axes[0][1])
if self.title is not None:
fig.suptitle(self.title)
if argument_dict['omc_panel']:
self.insert_orbit_epoch_residuals_plot(orb, argument_dict, ax=axes[1][0])
if self.data_type == '2d':
self.insert_orbit_epoch_residuals_plot(orb, argument_dict, direction='y', ax=axes[1][1])
if argument_dict['frame_residual_panel']:
self.insert_orbit_frame_residuals_plot(orb, argument_dict, direction='x', ax=axes[2][0])
if self.data_type == '2d':
self.insert_orbit_frame_residuals_plot(orb, argument_dict, direction='y',
ax=axes[2][1])
axes[-1][0].set_xlabel('MJD - %3.1f' % orb.Tref_MJD)
labels = axes[-1][0].get_xticklabels()
plt.setp(labels, rotation=30)
if self.data_type == '2d':
axes[-1][1].set_xlabel('MJD - %3.1f' % orb.Tref_MJD)
labels = axes[-1][1].get_xticklabels()
plt.setp(labels, rotation=30)
# if self.title is None:
# fig.tight_layout(pad=0.0)
# plt.tight_layout()
# pl.subplots_adjust(right=1.5)
pl.show()
if argument_dict['save_plot']:
if argument_dict['frame_residual_panel']:
figure_file_name = os.path.join(argument_dict['plot_dir'], 'orbit_time_{}_frameres.pdf'.format(name_seed_2.replace('.', 'p')))
else:
# figure_file_name = os.path.join(argument_dict['plot_dir'],
# 'orbit_time_{}.pdf'.format(name_seed_2.replace('.', 'p')))
figure_file_name = os.path.join(argument_dict['plot_dir'],
'orbit_time_{}.png'.format(name_seed_2.replace('.', 'p')))
fig.savefig(figure_file_name, transparent=True, bbox_inches='tight', pad_inches=0.05)
# if argument_dict['make_relative_orbit_figure']:
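# Example (sketch): typical call producing the 1d overview figure. All keys are part of
# the defaults defined at the top of plot(); the values are illustrative.
#
#   plotter.plot(argument_dict={'name_seed': 'star',
#                               'make_1d_overview_figure': True,
#                               'save_plot': True,
#                               'plot_dir': os.getcwd()})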
def insert_ppm_plot(self, orb, argument_dict):
"""Plot the PPM model curve and the orbit-substracted, epoch-averaged measurements.
Parameters
----------
orb
argument_dict
Returns
-------
"""
t_curve_mjd_2d = np.sort(np.tile(self.t_curve_MJD, 2))
ppm_curve = orb.ppm(t_curve_mjd_2d, offsetRA_mas=orb.offset_alphastar_mas,
offsetDE_mas=orb.offset_delta_mas,
horizons_file_seed=argument_dict['horizons_file_seed'])
pl.plot(ppm_curve[0], ppm_curve[1], 'k-')
if self.data_type == '2d':
pl.plot(self.Xmean_ppm, self.Ymean_ppm, 'ko')
plt.annotate('', xy=(float(orb.muRA_mas) * argument_dict['arrow_length_factor'] + argument_dict['arrow_offset_x'],
float(orb.muDE_mas) * argument_dict['arrow_length_factor'] + argument_dict['arrow_offset_y']),
xytext=(0. + argument_dict['arrow_offset_x'], 0. + argument_dict['arrow_offset_y']),
arrowprops=dict(arrowstyle="->", facecolor='black'), size=30)
if argument_dict['ppm_description'] is not None:
ax = pl.gca()
pl.text(0.01, 0.99, argument_dict['ppm_description'], horizontalalignment='left',
verticalalignment='top', transform=ax.transAxes)
def insert_orbit_timeseries_plot(self, orb, argument_dict, direction='x', ax=None):
"""Plot the residual signal after removal of parallax, proper motion, linear terms."""
if ax is None:
ax = pl.gca()
ax.axhline(y=0, color='0.5', ls=':', zorder=-50)
if direction=='x':
ax.plot(self.t_MJD_epoch - orb.Tref_MJD, self.Xmean_orb, 'ko')
ax.errorbar(self.t_MJD_epoch - orb.Tref_MJD, self.Xmean_orb, yerr=self.errResidualX,
fmt='none', ecolor='k')
if argument_dict['orbit_signal_description'] is not None:
pl.text(0.01, 0.99, argument_dict['orbit_signal_description'], horizontalalignment='left',
verticalalignment='top', transform=ax.transAxes)
if self.data_type == '1d':
ax.set_ylabel('Offset along scan (mas)')
# ax.set_title(self.title)
elif self.data_type == '2d':
timestamps_1D, cpsi_curve, spsi_curve, xi_curve, yi_curve = get_cpsi_spsi_for_2Dastrometry(
self.t_curve_MJD, scan_angle_definition=argument_dict['scan_angle_definition'])
# orbit_curve = orb.pjGetBarycentricAstrometricOrbitFast(timestamps_1D, spsi_curve,
# cpsi_curve)
if self.relative_orbit:
orbit_curve = orb.relative_orbit_fast(timestamps_1D, spsi_curve, cpsi_curve,
shift_omega_by_pi=True,
coordinate_system=self.relative_coordinate_system)
else:
orbit_curve = orb.photocenter_orbit(timestamps_1D, spsi_curve,
cpsi_curve)
phi1_curve = orbit_curve[xi_curve]
phi2_curve = orbit_curve[yi_curve]
if direction=='x':
ax.plot(self.t_curve_MJD - orb.Tref_MJD, phi1_curve, 'k-')
ax.set_ylabel('Offset in RA/Dec (mas)')
elif direction=='y':
ax.plot(self.t_MJD_epoch - orb.Tref_MJD, self.Ymean_orb, 'ko')
ax.errorbar(self.t_MJD_epoch - orb.Tref_MJD, self.Ymean_orb, yerr=self.errResidualY,
fmt='none', ecolor='k')
ax.plot(self.t_curve_MJD - orb.Tref_MJD, phi2_curve, 'k-')
# ax.set_ylabel('Offset in Dec (mas)')
def insert_orbit_epoch_residuals_plot(self, orb, argument_dict, direction='x', ax=None):
"""
Parameters
----------
orb
argument_dict
direction
ax
Returns
-------
"""
if ax is None:
ax = pl.gca()
if direction=='x':
ax.plot(self.t_MJD_epoch - orb.Tref_MJD, self.meanResidualX, 'ko')
ax.errorbar(self.t_MJD_epoch - orb.Tref_MJD, self.meanResidualX,
yerr=self.errResidualX, fmt='none', ecolor='k')
ax.axhline(y=0, color='0.5', ls='--', zorder=-50)
ax.set_ylabel('O-C (mas)')
if argument_dict['epoch_omc_description'] is not None:
pl.text(0.01, 0.99, argument_dict['epoch_omc_description'], horizontalalignment='left',
verticalalignment='top', transform=ax.transAxes)
elif direction=='y':
ax.plot(self.t_MJD_epoch - orb.Tref_MJD, self.meanResidualY, 'ko')
ax.errorbar(self.t_MJD_epoch - orb.Tref_MJD, self.meanResidualY,
yerr=self.errResidualY, fmt='none', ecolor='k')
ax.axhline(y=0, color='0.5', ls='--', zorder=-50)
# ax.set_ylabel('O-C (mas)')
def insert_orbit_frame_residuals_plot(self, orb, argument_dict, direction='x', ax=None):
"""
Parameters
----------
orb
argument_dict
direction
ax
Returns
-------
"""
if ax is None:
ax = pl.gca()
if self.data_type == '1d':
ax.plot(self.data.epoch_data['MJD'] - orb.Tref_MJD, self.residuals, 'ko', mfc='k', ms=4)
ax.errorbar(self.data.epoch_data['MJD'] - orb.Tref_MJD, self.residuals, yerr=self.data.epoch_data['sigma_da_mas'], fmt='none', ecolor='k')
ax.axhline(y=0, color='0.5', ls='--', zorder=-50)
# 1/0
if len(self.outlier_1D_index) != 0:
ax.plot(self.data.epoch_data['MJD'][self.outlier_1D_index] - orb.Tref_MJD, self.residuals[self.outlier_1D_index], 'ko', mfc='b',
ms=4)
# 1/0
ax.errorbar(np.array(self.data.epoch_data['MJD'])[self.outlier_1D_index] - orb.Tref_MJD, self.residuals[self.outlier_1D_index],
yerr=np.array(self.data.epoch_data['sigma_da_mas'])[self.outlier_1D_index], fmt='none', ecolor='b')
if argument_dict['frame_omc_description'] is not None:
pl.text(0.01, 0.99, argument_dict['frame_omc_description'], horizontalalignment='left',
verticalalignment='top', transform=ax.transAxes)
elif self.data_type == '2d':
if direction=='x':
tmp_index = self.xi
elif direction=='y':
tmp_index = self.yi
# mfc = 'none'
mec= '0.4'
mfc = mec
marker='.'
alpha = 0.5
ax.plot(self.data.epoch_data['MJD'][tmp_index] - orb.Tref_MJD, self.residuals[tmp_index], mec=mec, mfc=mfc, marker=marker, ls='none', alpha=alpha)
ax.axhline(y=0, color='0.5', ls='--', zorder=-50)
ax.set_ylabel('Frame O-C (mas)')
def insert_epoch_residual_plot(self, orb, argument_dict):
"""Plot the epoch-average residuals.
Parameters
----------
orb
argument_dict
Returns
-------
"""
epochTime = self.t_MJD_epoch - orb.Tref_MJD
epochOrdinateLabel = 'MJD - {:3.1f}'.format(orb.Tref_MJD)
if self.data_type == '2d':
x_residual_color = '0.7'
else:
x_residual_color = 'k'
pl.plot(epochTime, self.meanResidualX, 'ko', color=x_residual_color)
pl.errorbar(epochTime, self.meanResidualX, yerr=self.errResidualX, fmt='none',
ecolor=x_residual_color)
if self.data_type == '2d':
pl.plot(epochTime, self.meanResidualY, 'ko')
pl.errorbar(epochTime, self.meanResidualY, yerr=self.errResidualY, fmt='none', ecolor='k')
plt.axhline(y=0, color='0.5', ls='--', zorder=-50)
pl.ylabel('O-C (mas)')
pl.xlabel(epochOrdinateLabel)
if argument_dict['epoch_omc_description'] is not None:
ax = plt.gca()
pl.text(0.01, 0.99, argument_dict['epoch_omc_description'], horizontalalignment='left',
verticalalignment='top', transform=ax.transAxes)
def insert_orbit_plot(self, orb, argument_dict):
"""Add orbit to current figure.
Returns
-------
"""
timestamps_1D, cpsi_curve, spsi_curve, xi_curve, yi_curve = get_cpsi_spsi_for_2Dastrometry(self.t_curve_MJD, scan_angle_definition=argument_dict['scan_angle_definition'])
t_epoch_MJD, cpsi_epoch, spsi_epoch, xi_epoch, yi_epoch = get_cpsi_spsi_for_2Dastrometry(self.t_MJD_epoch, scan_angle_definition=argument_dict['scan_angle_definition'])
t_frame_mjd, cpsi_frame, spsi_frame, xi_frame, yi_frame = get_cpsi_spsi_for_2Dastrometry(np.array(self.data.epoch_data['MJD']), scan_angle_definition=argument_dict['scan_angle_definition'])
if orb.solution_type in ['Acceleration7', 'Acceleration9']:
orbit_curve = orb.astrometric_acceleration(timestamps_1D, spsi_curve, cpsi_curve)
phi1_curve = orbit_curve[xi_curve]
phi2_curve = orbit_curve[yi_curve]
orbit_epoch = orb.astrometric_acceleration(t_epoch_MJD, spsi_epoch, cpsi_epoch)
phi1_model_epoch = orbit_epoch[xi_epoch]
phi2_model_epoch = orbit_epoch[yi_epoch]
orbit_frame = orb.astrometric_acceleration(t_frame_mjd, spsi_frame, cpsi_frame)
phi1_model_frame = orbit_frame[xi_frame]
phi2_model_frame = orbit_frame[yi_frame]
else:
# actual orbit
if self.relative_orbit:
orbit_curve = orb.relative_orbit_fast(timestamps_1D, spsi_curve, cpsi_curve, shift_omega_by_pi=True,
coordinate_system=self.relative_coordinate_system)
else:
orbit_curve = orb.photocenter_orbit(timestamps_1D, spsi_curve, cpsi_curve)
phi1_curve = orbit_curve[xi_curve]
phi2_curve = orbit_curve[yi_curve]
if self.relative_orbit:
orbit_epoch = orb.relative_orbit_fast(t_epoch_MJD, spsi_epoch, cpsi_epoch, shift_omega_by_pi=True,
coordinate_system=self.relative_coordinate_system)
else:
orbit_epoch = orb.photocenter_orbit(t_epoch_MJD, spsi_epoch, cpsi_epoch)
phi1_model_epoch = orbit_epoch[xi_epoch]
phi2_model_epoch = orbit_epoch[yi_epoch]
if self.relative_orbit:
orbit_frame = orb.relative_orbit_fast(t_frame_mjd, spsi_frame, cpsi_frame, shift_omega_by_pi=True,
coordinate_system=self.relative_coordinate_system)
else:
orbit_frame = orb.photocenter_orbit(t_frame_mjd, spsi_frame, cpsi_frame)
phi1_model_frame = orbit_frame[xi_frame]
phi2_model_frame = orbit_frame[yi_frame]
# show periastron
if 1:
t_periastron_mjd, cpsi_periastron, spsi_periastron, xi_periastron, yi_periastron = get_cpsi_spsi_for_2Dastrometry(orb.Tp_day, scan_angle_definition=argument_dict['scan_angle_definition'])
if self.relative_orbit:
orbit_periastron = orb.relative_orbit_fast(t_periastron_mjd, spsi_periastron, cpsi_periastron,
shift_omega_by_pi=True,
coordinate_system=self.relative_coordinate_system)
else:
orbit_periastron = orb.photocenter_orbit(t_periastron_mjd, spsi_periastron, cpsi_periastron)
# orbit_periastron = orb.pjGetBarycentricAstrometricOrbitFast(t_periastron_mjd, spsi_periastron, cpsi_periastron)
phi1_model_periastron = orbit_periastron[xi_periastron]
phi2_model_periastron = orbit_periastron[yi_periastron]
pl.plot([0, phi1_model_periastron], [0, phi2_model_periastron], marker='.', ls='-', lw=0.5, color='0.5')
pl.plot(phi1_model_periastron, phi2_model_periastron, marker='s', color='0.5', mfc='0.5')
pl.plot(phi1_curve, phi2_curve, ls='-', lw=1.5, color='0.5')
pl.plot(phi1_model_epoch, phi2_model_epoch, marker='o', color='0.7', ms=5, mfc='none', ls='')
if self.data_type in ['1d', 'gaia_2d']:
if argument_dict['scan_angle_definition'] == 'hipparcos':
frame_residual_alphastar_along_scan = self.data.epoch_data['cpsi'] * self.residuals
frame_residual_delta_along_scan = self.data.epoch_data['spsi'] * self.residuals
epoch_residual_alphastar_along_scan = self.mean_cpsi * self.meanResidualX
epoch_residual_delta_along_scan = self.mean_spsi * self.meanResidualX
elif argument_dict['scan_angle_definition'] == 'gaia':
frame_residual_alphastar_along_scan = self.data.epoch_data['spsi'] * self.residuals
frame_residual_delta_along_scan = self.data.epoch_data['cpsi'] * self.residuals
epoch_residual_alphastar_along_scan = self.mean_spsi * self.meanResidualX
epoch_residual_delta_along_scan = self.mean_cpsi * self.meanResidualX
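# Projection used above (sketch): an along-scan residual r at scan angle psi is mapped to
# the sky plane as (d_alphastar, d_delta) = (cos(psi)*r, sin(psi)*r) for the hipparcos
# convention and (sin(psi)*r, cos(psi)*r) for the gaia convention, which is what the
# cpsi/spsi factors applied to self.residuals and self.meanResidualX implement.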
frame_residual_color = '0.8'
pl.plot(phi1_model_frame + frame_residual_alphastar_along_scan,
phi2_model_frame + frame_residual_delta_along_scan, marker='o',
color=frame_residual_color, ms=4, mfc=frame_residual_color,
mec=frame_residual_color, ls='')
pl.plot(phi1_model_epoch + epoch_residual_alphastar_along_scan,
phi2_model_epoch + epoch_residual_delta_along_scan, marker='o', color='k',
ms=5, ls='')
# plot epoch-level error-bars
for jj in range(len(self.meanResidualX)):
if argument_dict['scan_angle_definition'] == 'hipparcos':
x1 = phi1_model_epoch[jj] + self.mean_cpsi[jj] * (self.meanResidualX[jj] + self.errResidualX[jj])
x2 = phi1_model_epoch[jj] + self.mean_cpsi[jj] * (self.meanResidualX[jj] - self.errResidualX[jj])
y1 = phi2_model_epoch[jj] + self.mean_spsi[jj] * (self.meanResidualX[jj] + self.errResidualX[jj])
y2 = phi2_model_epoch[jj] + self.mean_spsi[jj] * (self.meanResidualX[jj] - self.errResidualX[jj])
elif argument_dict['scan_angle_definition'] == 'gaia':
x1 = phi1_model_epoch[jj] + self.mean_spsi[jj] * (self.meanResidualX[jj] + self.errResidualX[jj])
x2 = phi1_model_epoch[jj] + self.mean_spsi[jj] * (self.meanResidualX[jj] - self.errResidualX[jj])
y1 = phi2_model_epoch[jj] + self.mean_cpsi[jj] * (self.meanResidualX[jj] + self.errResidualX[jj])
y2 = phi2_model_epoch[jj] + self.mean_cpsi[jj] * (self.meanResidualX[jj] - self.errResidualX[jj])
pl.plot([x1, x2], [y1, y2], 'k-', lw=1)
# from yorick code
# // psi is the scan angle from north to east (better, from west to north)
# // scanning direction
# dx1_mas = cpsi_obs * myresidual;//*hd.SRES;
# dy1_mas = spsi_obs * myresidual;// *hd.SRES;
elif self.data_type == '2d':
pl.plot(self.Xmean_orb, self.Ymean_orb, 'ko', ms=8)
pl.errorbar(self.Xmean_orb, self.Ymean_orb, xerr=self.errResidualX, yerr=self.errResidualY,
fmt='none', ecolor='0.6', zorder=-49)
for j in range(len(phi1_model_epoch)):
pl.plot([self.Xmean_orb[j], phi1_model_epoch[j]], [self.Ymean_orb[j], phi2_model_epoch[j]],
'k--', color='0.7', zorder=-50)
# show origin
pl.plot(0, 0, 'kx')
if argument_dict['tmp_orbit_description'] is not None:
pl.text(0.01, 0.99, argument_dict['tmp_orbit_description'], horizontalalignment='left',
verticalalignment='top', transform=pl.gca().transAxes)
class AstrometricAccelerationPlotter(AstrometricOrbitPlotter):
""""Class to plot results of astrometric fitting of parallax + proper motion + acceleration terms."""
def __init__(self, attribute_dict=None):
"""
attribute_dict
"""
if attribute_dict is not None:
for key, value in attribute_dict.items():
setattr(self, key, value)
# set defaults
default_dict = {'outlier_sigma_threshold': 3.,
'absolute_threshold': 10.,
'residuals': None,
'scan_angle_definition': 'gaia',
'include_ppm': True,
'title': None,
'verbose': False,
'relative_orbit': False,
}
for key, value in default_dict.items():
if key not in attribute_dict.keys():
setattr(self, key, value)
required_attributes = ['linear_coefficients', 'model_parameters', 'data']
for attribute_name in required_attributes:
if hasattr(self, attribute_name) is False:
raise ValueError('Instance has to have an attribute named: {}'.format(attribute_name))
self.attribute_dict = attribute_dict
self.linear_coefficient_matrix = self.linear_coefficients['matrix']
number_of_companions = len(self.model_parameters)
self.number_of_companions = number_of_companions
# model_name = 'k{:d}'.format(number_of_companions)
def verify(self):
# parameters of first companion
theta_0 = self.model_parameters[0]
required_parameters = ['offset_alphastar_mas', 'offset_delta_mas', 'absolute_plx_mas',
'muRA_mas', 'muDE_mas']
theta_names = theta_0.keys()
for parameter_name in required_parameters:
if parameter_name not in theta_names:
raise ValueError('Model parameter {} has to be set!'.format(parameter_name))
def set_ppm_model(self):
"""Compute PPM model values using given parameters."""
self.verify()
theta_0 = self.model_parameters[0]
T = self.data.epoch_data
# if ('plx_abs_mas' in theta_names) & ('plx_corr_mas' in theta_names):
# theta_0['plx_mas']= theta_0['plx_abs_mas'] + ['plx_corr_mas']
if 'parallax_correction_mas' in theta_0.keys():
parallax_for_ppm_mas = theta_0['absolute_plx_mas'] - theta_0['parallax_correction_mas']
else:
parallax_for_ppm_mas = theta_0['absolute_plx_mas']
# compute positions at measurement dates according to best-fit model p (no dcr)
ppm_parameters = np.array([theta_0['offset_alphastar_mas'], theta_0['offset_delta_mas'],
parallax_for_ppm_mas, theta_0['muRA_mas'], theta_0['muDE_mas']])
if self.include_ppm:
self.ppm_model = np.array(np.dot(self.linear_coefficient_matrix[0:len(ppm_parameters), :].T, ppm_parameters)).flatten()
else:
# these are only the positional offsets
self.ppm_model = np.array(np.dot(self.linear_coefficient_matrix[0:2, :].T, ppm_parameters[0:2])).flatten()
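# Sketch of the linear model evaluated above: the first five rows of the coefficient
# matrix correspond to (offset_alphastar, offset_delta, parallax, muRA, muDE), so with
# C of shape (n_rows, n_abscissa) the along-scan PPM prediction is
#   ppm_model = C[0:5, :].T @ np.array([da_star, dd, plx, muRA, muDE])   # shape (n_abscissa,)
# which is exactly the np.dot(...) call used here and in the parent class.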
def set_dcr_model(self):
"""Compute refraction offsets."""
theta_0 = self.model_parameters[0]
theta_names = theta_0.keys()
if 'rho_mas' in theta_names:
if 'd_mas' in theta_names:
dcr_parameters = np.array([theta_0['rho_mas'], theta_0['d_mas']])
else:
dcr_parameters = np.array([theta_0['rho_mas']])
# compute measured positions (dcr-corrected)
if self.linear_coefficient_matrix.shape[0] == 7:
dcr = np.dot(self.linear_coefficient_matrix[5:7, :].T, dcr_parameters)
elif self.linear_coefficient_matrix.shape[0] == 6:
dcr = self.linear_coefficient_matrix[5, :] * dcr_parameters
elif self.linear_coefficient_matrix.shape[0] <= 5:
dcr = np.zeros(self.linear_coefficient_matrix.shape[1])
else:
dcr = np.zeros(self.linear_coefficient_matrix.shape[1])
self.dcr_model = dcr
def set_acceleration_model(self):
"""The `orbit_model` attribute is overloaded here."""
T = self.data.epoch_data
for p in range(self.number_of_companions):
theta_p = self.model_parameters[p]
tmporb = OrbitSystem(attribute_dict=theta_p)
# print(T['MJD', 'cpsi', 'spsi'])
orbit_model = tmporb.astrometric_acceleration(np.array(T['MJD']), np.array(T['spsi']), np.array(T['cpsi']))
setattr(self, 'orbit_system_companion_{:d}'.format(p), tmporb)
setattr(self, 'orbit_model_%d' % (p), orbit_model)
if self.number_of_companions == 1:
self.orbit_system = self.orbit_system_companion_0
self.orbit_model = self.orbit_model_0
def set_residuals(self):
self.set_acceleration_model()
self.set_dcr_model()
self.set_ppm_model()
# print(self.orbit_model)
# print(self.dcr_model)
# print(self.ppm_model)
T = self.data.epoch_data
self.residuals = np.array(T['da_mas']) - self.orbit_model - self.dcr_model - self.ppm_model
# residuals =
# if self.residuals is None:
# residuals = np.array(T['da_mas']) - self.orbit_model - self.DCR - self.ppm_model
# else:
# residuals = self.residuals
if np.any(np.isnan(self.residuals)):
raise ValueError('NaN found in residuals')
self.ppm_meas = np.array(T['da_mas']) - self.dcr_model - self.orbit_model
self.orb_meas = np.array(T['da_mas']) - self.dcr_model - self.ppm_model
for p in range(self.number_of_companions):
if self.number_of_companions == 1:
tmp_orb_meas = self.orb_meas
setattr(self, 'orb_{:d}_meas'.format(p), tmp_orb_meas)
# compute epoch averages
medi = np.unique(T['OB'])
self.medi = medi
self.n_epoch = len(self.medi)
self.t_MJD_epoch = np.zeros(self.n_epoch)
average_quantities_1d = 'stdResidualX errResidualX Xmean_ppm Xmean_orb parfXmean ' \
'DCR_Xmean ACC_Xmean meanResidualX x_e_laz sx_star_laz mean_cpsi mean_spsi'.split()
for p in range(self.number_of_companions):
average_quantities_1d += ['Xmean_orb_{:d}'.format(p)]
for attribute in average_quantities_1d:
setattr(self, attribute, np.zeros(len(medi)))
if '2d' in self.data_type:
for attribute in average_quantities_1d:
setattr(self, attribute.replace('X', 'Y').replace('x_', 'y_'), np.zeros(len(medi)))
outlier_1D_index = np.array([])
if self.data_type == 'gaia_2d':
self.xi = self.data.xi
self.yi = self.data.yi
for jj, epoch in enumerate(self.medi):
tmpidx = np.where(T['OB'] == epoch)[0]
if '2d' in self.data_type:
tmpIndexX = np.intersect1d(self.xi, tmpidx)
tmpIndexY = np.intersect1d(self.yi, tmpidx)
elif self.data_type == '1d':
tmpIndexX = tmpidx
self.t_MJD_epoch[jj] = np.mean(T['MJD'][tmpIndexX])
self.mean_cpsi[jj] = np.mean(T['cpsi'][tmpIndexX])
self.mean_spsi[jj] = np.mean(T['spsi'][tmpIndexX])
self.Xmean_ppm[jj] = np.average(self.ppm_meas[tmpIndexX],
weights=1. / (np.array(T['sigma_da_mas'])[tmpIndexX] ** 2.))
self.Xmean_orb[jj] = np.average(self.orb_meas[tmpIndexX],
weights=1. / (T['sigma_da_mas'][tmpIndexX] ** 2.))
if np.any(np.isnan(self.Xmean_ppm)):
raise ValueError('NaN found in Xmean_ppm')
if np.any(np.isnan(self.Xmean_orb)):
raise ValueError('NaN found in Xmean_orb')
if '2d' in self.data_type:
self.Ymean_ppm[jj] = np.average(self.ppm_meas[tmpIndexY],
weights=1. / (T['sigma_da_mas'][tmpIndexY] ** 2.))
self.Ymean_orb[jj] = np.average(self.orb_meas[tmpIndexY],
weights=1. / (T['sigma_da_mas'][tmpIndexY] ** 2.))
for p in range(self.number_of_companions):
getattr(self, 'Xmean_orb_{:d}'.format(p))[jj] = np.average(
getattr(self, 'orb_{:d}_meas'.format(p))[tmpIndexX],
weights=1. / (T['sigma_da_mas'][tmpIndexX] ** 2.))
# if self.data_type == '2d':
if '2d' in self.data_type:
getattr(self, 'Ymean_orb_{:d}'.format(p))[jj] = np.average(
getattr(self, 'orb_{:d}_meas'.format(p))[tmpIndexY],
weights=1. / (T['sigma_da_mas'][tmpIndexY] ** 2.))
self.DCR_Xmean[jj] = np.average(self.dcr_model[tmpIndexX])
self.meanResidualX[jj] = np.average(self.residuals[tmpIndexX], weights=1. / (T['sigma_da_mas'][tmpIndexX] ** 2.))
self.parfXmean[jj] = np.average(T['ppfact'][tmpIndexX])
self.stdResidualX[jj] = np.std(self.residuals[tmpIndexX]) if len(tmpIndexX)>1 else T['sigma_da_mas'][tmpIndexX]
if '2d' in self.data_type:
self.DCR_Ymean[jj] = np.average(self.dcr_model[tmpIndexY])
self.meanResidualY[jj] = np.average(self.residuals[tmpIndexY], weights=1. / (T['sigma_da_mas'][tmpIndexY] ** 2.))
self.parfYmean[jj] = np.average(T['ppfact'][tmpIndexY])
self.stdResidualY[jj] = np.std(self.residuals[tmpIndexY]) if len(tmpIndexY)>1 else T['sigma_da_mas'][tmpIndexY]
# on the fly inter-epoch outlier detection
outliers = {}
outliers['x'] = {}
outliers['x']['index'] = tmpIndexX
outliers['x']['std_residual'] = self.stdResidualX[jj]
if '2d' in self.data_type:
outliers['y'] = {}
outliers['y']['index'] = tmpIndexY
outliers['y']['std_residual'] = self.stdResidualY[jj]
is_outlier = []
for key in outliers.keys():
# boolean array
if self.absolute_threshold is not None:
is_outlier = (np.abs(self.residuals[outliers[key]['index']] - np.mean(self.residuals[outliers[key]['index']])) > self.outlier_sigma_threshold * outliers[key]['std_residual']) | (
np.abs(self.residuals[outliers[key]['index']] - np.mean(self.residuals[outliers[key]['index']])) > self.absolute_threshold)
elif self.outlier_sigma_threshold is not None:
is_outlier = np.abs(self.residuals[outliers[key]['index']] - np.mean(self.residuals[outliers[key]['index']])) > self.outlier_sigma_threshold * outliers[key]['std_residual']
if any(is_outlier):
tmp_1D_index = np.where(is_outlier)[0]
print('Detected {} {}-residual outliers ({:2.1f} sigma) in epoch {} (1-indexed) '.format(
len(tmp_1D_index), key, self.outlier_sigma_threshold, epoch), end='')
print(np.abs(self.residuals[outliers[key]['index']] - np.mean(self.residuals[outliers[key]['index']]))[tmp_1D_index], end='')
# 1/0
for ii in tmp_1D_index:
print(' {:.12f}'.format(T['MJD'][outliers[key]['index'][ii]]), end=',')
print()
outlier_1D_index = np.hstack((outlier_1D_index, outliers[key]['index'][tmp_1D_index]))
self.errResidualX[jj] = self.stdResidualX[jj] / np.sqrt(len(tmpIndexX))
if '2d' in self.data_type:
self.errResidualY[jj] = self.stdResidualY[jj] / np.sqrt(len(tmpIndexY))
# % from Lazorenko writeup:
self.x_e_laz[jj] = np.sum(self.residuals[tmpIndexX] / (T['sigma_da_mas'][tmpIndexX] ** 2.)) / np.sum(
1 / (T['sigma_da_mas'][tmpIndexX] ** 2.))
self.sx_star_laz[jj] = 1 / np.sqrt(np.sum(1 / (T['sigma_da_mas'][tmpIndexX] ** 2.)))
if '2d' in self.data_type:
self.y_e_laz[jj] = np.sum(self.residuals[tmpIndexY] / (T['sigma_da_mas'][tmpIndexY] ** 2.)) / np.sum(
1 / (T['sigma_da_mas'][tmpIndexY] ** 2.))
self.sy_star_laz[jj] = 1 / np.sqrt(np.sum(1 / (T['sigma_da_mas'][tmpIndexY] ** 2.)))
if len(outlier_1D_index) != 0:
print('MJD of outliers:')
for ii in np.unique(outlier_1D_index.astype(int)):
print('{:.12f}'.format(T['MJD'][ii]), end=',')
print()
self.outlier_1D_index = np.array(outlier_1D_index).astype(int)
# compute chi squared values
if self.data_type == '1d':
self.chi2_naive = np.sum([self.meanResidualX ** 2 / self.errResidualX ** 2])
self.chi2_laz = np.sum([self.x_e_laz ** 2 / self.errResidualX ** 2])
self.chi2_star_laz = np.sum([self.x_e_laz ** 2 / self.sx_star_laz ** 2])
elif '2d' in self.data_type:
self.chi2_naive = np.sum(
[self.meanResidualX ** 2 / self.errResidualX ** 2, self.meanResidualY ** 2 / self.errResidualY ** 2])
self.chi2_laz = np.sum(
[self.x_e_laz ** 2 / self.errResidualX ** 2, self.y_e_laz ** 2 / self.errResidualY ** 2])
self.chi2_star_laz = np.sum(
[self.x_e_laz ** 2 / self.sx_star_laz ** 2, self.y_e_laz ** 2 / self.sy_star_laz ** 2])
# fixed 2018-08-18 JSA
if self.data_type == '1d':
self.nFree_ep = len(medi) * 1 - (self.linear_coefficient_matrix.shape[0] + self.number_of_companions*7)
elif '2d' in self.data_type:
self.nFree_ep = len(medi) * 2 - (self.linear_coefficient_matrix.shape[0] + self.number_of_companions*7)
self.chi2_laz_red = self.chi2_laz / self.nFree_ep
self.chi2_star_laz_red = self.chi2_star_laz / self.nFree_ep
self.chi2_naive_red = self.chi2_naive / self.nFree_ep
self.epoch_omc_std_X = np.std(self.meanResidualX)
if self.data_type == '1d':
self.epoch_omc_std = self.epoch_omc_std_X
self.epoch_precision_mean = np.mean([self.errResidualX])
elif '2d' in self.data_type:
self.epoch_omc_std_Y = np.std(self.meanResidualY)
self.epoch_omc_std = np.std([self.meanResidualX, self.meanResidualY])
self.epoch_precision_mean = np.mean([self.errResidualX, self.errResidualY])
# self.residuals = residuals
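# Example (sketch): intended workflow for the acceleration plotter. Construction does not
# compute residuals, so call set_residuals() before using the inherited plotting and
# statistics methods. The attribute_dict keys mirror AstrometricOrbitPlotter and the
# variable names are placeholders.
#
#   acc_plotter = AstrometricAccelerationPlotter(attribute_dict={
#       'model_parameters': [theta_acc], 'linear_coefficients': {'matrix': coefficient_matrix},
#       'data': iad, 'data_type': '1d'})
#   acc_plotter.set_residuals()
#   acc_plotter.print_residual_statistics()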
class DetectionLimit(object):
"""Class to support determination of planet detection limits from astrometry."""
def __init__(self, attribute_dict={}):
"""The default attribute values are stored in the hardcoded
dictionary below, which also defines the list of acceptable
attributes.
The content of attribute_dict is transferred to the instance.
Parameters
----------
attribute_dict : dict
"""
self.attribute_dict = attribute_dict
default_dict = {'m1_msun': 1., # primary mass
'absolute_plx_mas': 25., # parallax
'identifier': 'starname', # name
'm2_grid_n': 10, # number of samples across the secondary mass range
'm2_mjup_lower': 1., # lower limit for secondary mass
'm2_mjup_upper': 30., # upper limit for secondary mass
'simulations_per_gridpoint_n': 1000, # number of simulations at any grid point
'period_grid_n': 10, # number of samples across the period range
'period_day_lower': 50., # lower limit of orbital period
'period_day_upper': 1000., # upper limit of orbital period
'out_dir': os.getcwd(),
'overwrite': False
}
# Assign user values as attributes when present, use defaults if not
attribute_keys = attribute_dict.keys()
for key, val in default_dict.items():
if key in attribute_keys:
setattr(self, key, attribute_dict[key])
else:
setattr(self, key, val)
# Warn users if a key in attribute_dict isn't a default attribute
mismatch = [key for key in attribute_dict.keys()
if key not in default_dict.keys()]
if mismatch:
raise KeyError('Key{0} {1} {2} not among the default DetectionLimit attributes'
.format('s' if len(mismatch) > 1 else '',
mismatch,
'are' if len(mismatch) > 1 else 'is'))
self.n_simulations = self.period_grid_n * self.simulations_per_gridpoint_n * self.m2_grid_n # number of planetary systems generated
print('Instantiating DetectionLimit object:')
print('Simulations: total number {} ({} periods, {} secondary masses, {} random realisations each)'.format(
self.n_simulations, self.period_grid_n, self.m2_grid_n, self.simulations_per_gridpoint_n))
print('Simulations: M2 resolution {:3.3f} Mjup'.format((self.m2_mjup_upper - self.m2_mjup_lower) / self.m2_grid_n))
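# Example (sketch): instantiating the detection-limit machinery. All keys shown are part
# of default_dict above; the values are illustrative.
#
#   dl = DetectionLimit(attribute_dict={'m1_msun': 0.8,
#                                       'absolute_plx_mas': 50.,
#                                       'identifier': 'star',
#                                       'm2_grid_n': 20, 'period_grid_n': 20,
#                                       'simulations_per_gridpoint_n': 500,
#                                       'out_dir': os.getcwd()})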
def prepare_reference_dataset(self, xfP, use_mean_epochs=True, horizonsFileSeed=None):
"""
Parameters
----------
xfP
use_mean_epochs
horizonsFileSeed
Returns
-------
"""
if use_mean_epochs: # fastSimu works with epoch averages
# C_mean, S_mean, M_mean = xfP.epoch_parameters()
mean_parameters = xfP.epoch_parameters()
res_mean = linearfit.LinearFit(mean_parameters['signal'], mean_parameters['covariance_matrix'],
mean_parameters['coefficient_matrix'])
res_mean.fit()
self.S_mean = mean_parameters['covariance_matrix']
self.C_mean = mean_parameters['coefficient_matrix']
self.M_mean = mean_parameters['signal']
self.iad = mean_parameters['iad']
self.res_mean = res_mean
# 1/0
#
#
self.tp_mjd = xfP.orbit_system.Tp_day
#
#
# orb_mean = OrbitSystem(P_day=1., ecc=0.0, m1_MS=1.0, m2_MJ=0.0, omega_deg=0., OMEGA_deg=0., i_deg=0.,
# Tp_day=0, RA_deg=xfP.RA_deg, DE_deg=xfP.DE_deg, plx_mas=self.absPlx_mas, muRA_mas=0,
# muDE_mas=0, Tref_MJD=xfP.tref_MJD)
# ppm1dMeas_mean_mas = orb_mean.ppm(xfP.t_MJD_epoch, horizons_file_seed=horizonsFileSeed,
# psi_deg=xfP.psi_deg)
# C_mean = orb_mean.coeffMatrix
# TableC1_mean = Table(C_mean.T, names=('cpsi', 'spsi', 'ppfact', 'tcpsi', 'tspsi'))
# tmp_mean, xi_mean, yi_mean = xfGetMeanParMatrix(xfP)
# S_mean = np.mat(np.diag(1. / np.power(tmp_mean['sigma_da_mas'], 2)))
# M_mean = np.mat(tmp_mean['da_mas'])
# # res_mean = linfit(M_mean, S_mean, C_mean)
# res_mean = linearfit.LinearFit(M_mean, S_mean, C_mean)
# res_mean.fit()
# # res_mean.makeReadableNumbers()
#
# self.TableC1_mean = TableC1_mean
# self.tmp_mean = tmp_mean
# self.res_mean = res_mean
# self.S_mean = S_mean
# self.C_mean = C_mean
# # res_mean.disp()
def run_simulation(self, simu_run=1, log_P_day_grid=True):
"""
Parameters
----------
simu_run
log_P_day_grid
Returns
-------
"""
self.m2_jup_grid = np.linspace(self.m2_mjup_lower, self.m2_mjup_upper, self.m2_grid_n)
if log_P_day_grid:
self.p_day_grid = np.logspace(np.log10(self.period_day_lower),
np.log10(self.period_day_upper),
self.period_grid_n)
else:
self.p_day_grid = np.linspace(self.period_day_lower, self.period_day_upper,
self.period_grid_n)
simu_dir = os.path.join(self.out_dir, 'simu/simu_run{}/'.format(simu_run))
if not os.path.exists(simu_dir):
os.makedirs(simu_dir)
mc_file_name = os.path.join(simu_dir, '{}_detectionLimits_{}_m1{:1.3f}.pkl'.format(
self.identifier, self.n_simulations, self.m1_msun))
mean_residuals = np.zeros((self.n_simulations, len(self.res_mean.residuals)))
mean_residual_rms = np.zeros(self.n_simulations)
if ((not os.path.isfile(mc_file_name)) or (self.overwrite)):
# sample OMEGA space uniformly
OMEGA_deg_vals = np.linspace(0, 359, 360)
simu_OMEGA_deg = np.random.choice(OMEGA_deg_vals, self.n_simulations)
# sample inclination space according to sin(i) probability
i_deg_vals = np.linspace(0, 179, 180)
PDF_i_deg = 1. / 2 * np.sin(np.deg2rad(i_deg_vals))
PDF_i_deg_normed = PDF_i_deg / np.sum(PDF_i_deg)
simu_i_deg = np.random.choice(i_deg_vals, self.n_simulations, p=PDF_i_deg_normed)
simu_M2_jup = np.zeros(self.n_simulations)
temp_M2 = np.zeros(self.m2_grid_n * self.simulations_per_gridpoint_n)
for jj in range(self.m2_grid_n):
tempIdx = np.arange(jj * self.simulations_per_gridpoint_n, (jj + 1) * self.simulations_per_gridpoint_n)
temp_M2[tempIdx] = self.m2_jup_grid[jj] * np.ones(self.simulations_per_gridpoint_n)
simu_P_day = np.zeros(self.n_simulations)
for jj in range(self.period_grid_n):
tempIdx = np.arange(jj * self.simulations_per_gridpoint_n * self.m2_grid_n,
(jj + 1) * self.simulations_per_gridpoint_n * self.m2_grid_n)
simu_P_day[tempIdx] = self.p_day_grid[jj] * np.ones(self.simulations_per_gridpoint_n * self.m2_grid_n)
simu_M2_jup[tempIdx] = temp_M2
# time of periastron passage
simu_tp_mjd = self.tp_mjd + np.random.rand(self.n_simulations) * simu_P_day
# simulate circular orbits only
ecc = 0.
omega_deg = 0.
if 0:
pl.figure(figsize=(10, 10), facecolor='w', edgecolor='k')
pl.clf()
pl.subplot(2, 2, 1)
pl.hist(simu_i_deg)
pl.xlabel('inc')
pl.subplot(2, 2, 2)
pl.hist(simu_OMEGA_deg)
pl.xlabel('OMEGA')
pl.subplot(2, 2, 3)
pl.hist(simu_P_day)
pl.xlabel('Period')
pl.subplot(2, 2, 4)
pl.hist(simu_M2_jup)
pl.xlabel('M2')
pl.show()
print('Running simulations ...')
print('Simulation 0000000')
spsi = np.array(self.iad.data_1D['spsi'])
cpsi = np.array(self.iad.data_1D['cpsi'])
ref_da_mas = np.array(self.M_mean)
ref_omc_mas = self.res_mean.residuals
for j in range(self.n_simulations):
# tot_da_mas = []
# simu_da_mas = []
simu_da_mas = pjGetOrbitFast(P_day=simu_P_day[j], ecc=ecc, m1_MS=self.m1_msun, m2_MJ=simu_M2_jup[j],
omega_deg=omega_deg, OMEGA_deg=simu_OMEGA_deg[j], i_deg=simu_i_deg[j],
T0_day=simu_tp_mjd[j], plx_mas=self.absolute_plx_mas,
t_MJD=np.array(self.iad.data_1D['MJD']), spsi=spsi, cpsi=cpsi)
# orb_simu = OrbitSystem(P_day=simu_P_day[j], ecc=ecc, m1_MS=M1_Msun, m2_MJ = simu_M2_jup[j] , omega_deg=omega_deg, OMEGA_deg=simu_OMEGA_deg[j], i_deg=simu_i_deg[j], Tp_day = simu_tp_mjd[j], RA_deg=RA_deg,DE_deg=DE_deg,plx_mas = plx_mas, muRA_mas=res.p[3][0],muDE_mas=res.p[4][0] )
# simu_da_mas = orb_simu.pjGetOrbitFast(0 , t_MJD = tmp_mean['MJD'], psi_deg = psi_deg )#, verbose=0):
tot_da_mas = ref_da_mas - ref_omc_mas + simu_da_mas # remove noise structure
simu_res = linearfit.LinearFit(np.mat(tot_da_mas), self.S_mean, self.C_mean)
simu_res.fit()
mean_residual_rms[j] = np.std(np.array(simu_res.residuals))
if np.mod(j, 10000) == 0:
print('\b\b\b\b\b\b\b%07d' % j)
# print '\x1b[%07d\r' % j,
pickle.dump((mean_residual_rms), open(mc_file_name, "wb"))
else:
mean_residual_rms = pickle.load(open(mc_file_name, "rb"))
self.mean_residual_rms = mean_residual_rms
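# Example (sketch): end-to-end detection-limit workflow, where xfP is an
# AstrometricOrbitPlotter instance holding the reference solution:
#
#   dl.prepare_reference_dataset(xfP)
#   dl.run_simulation(simu_run=1)
#   dl.plot_simu_results(xfP, confidence_limit=0.997)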
def run_simulation_parallel(self, simulation_run_number=1, log_P_day_grid=True, parallel=True):
"""
parallelized running of simulations, looping through simulated pseudo-orbits
:param simulation_run_number:
:param log_P_day_grid:
:param parallel:
:return:
"""
# directory to write to
simulation_dir = os.path.join(self.dwDir, 'simulation/simulation_run_number%d/' % simulation_run_number)
if not os.path.exists(simulation_dir):
os.makedirs(simulation_dir)
# generate grid of companion masses
self.m2_jup_grid = np.linspace(self.m2_mjup_lower, self.m2_mjup_upper, self.m2_grid_n)
# generate grid of orbital periods (log or linear spacing)
if log_P_day_grid:
self.p_day_grid = np.logspace(np.log10(self.period_day_lower), np.log10(self.period_day_upper),
self.period_grid_n)
else:
self.p_day_grid = np.linspace(self.period_day_lower, self.period_day_upper, self.period_grid_n)
# pickle file to save results
mc_file_name = os.path.join(simulation_dir, 'dw%02d_detectionLimits_%d%s.pkl' % (
self.dwNr, self.n_simulations, ('_MA%1.3f' % self.M1_Msun).replace('.', 'p')))
# meanResiduals = np.zeros((self.n_simulations, len(self.res_mean.omc[0])))
mean_residual_rms = np.zeros(self.n_simulations)
N_sim_within_loop = self.simulations_per_gridpoint_n * self.m2_grid_n
# array to hold results, sliced by orbital period
mean_residual_rms = np.zeros((self.period_grid_n, N_sim_within_loop))
def compute_mean_residual_rms(P_day, ecc, m1_MS, m2_MJ,
omega_deg, OMEGA_deg, i_deg,
T0_day, plx_mas,
t_MJD, spsi, cpsi, ref_da_mas, ref_omc_mas):
simu_da_mas = pjGetOrbitFast(P_day, ecc, m1_MS, m2_MJ,
omega_deg, OMEGA_deg, i_deg,
T0_day, plx_mas,
t_MJD, spsi, cpsi)
tot_da_mas = ref_da_mas - ref_omc_mas + simu_da_mas # remove noise structure
simu_res = linfit(np.mat(tot_da_mas), self.S_mean, self.C_mean)
individual_mean_residual_rms = np.std(np.array(simu_res.omc)[0])
return individual_mean_residual_rms
def return_residual_rms_array(arg):
[P_day, ecc, m1_MS, m2_MJ_array,
omega_deg, OMEGA_deg_array, i_deg_array,
T0_day_array, plx_mas,
t_MJD, spsi, cpsi, ref_da_mas, ref_omc_mas] = arg
n = len(m2_MJ_array)
residual_rms_array = np.zeros(n)
for j in range(n):
residual_rms_array[j] = compute_mean_residual_rms(P_day, ecc, m1_MS, m2_MJ_array[j],
omega_deg, OMEGA_deg_array[j], i_deg_array[j],
T0_day_array[j], plx_mas,
t_MJD, spsi, cpsi, ref_da_mas, ref_omc_mas)
return residual_rms_array
# import numpy as np
# from multiprocessing import Pool
from pathos.multiprocessing import ProcessingPool as Pool
if ((not os.path.isfile(mc_file_name)) or (self.overwrite)):
random_seed = 1234
OMEGA_deg_vals = np.linspace(0, 359, 360)
np.random.seed(random_seed)
simu_OMEGA_deg = np.random.choice(OMEGA_deg_vals, N_sim_within_loop)
i_deg_vals = np.linspace(0, 179, 180)
PDF_i_deg = 1. / 2 * np.sin(np.deg2rad(i_deg_vals))
PDF_i_deg_normed = PDF_i_deg / np.sum(PDF_i_deg)
np.random.seed(random_seed)
simu_i_deg = np.random.choice(i_deg_vals, N_sim_within_loop, p=PDF_i_deg_normed)
simu_M2_jup = np.zeros(N_sim_within_loop)
# temp_M2 = np.zeros(self.m2_grid_n * self.simulations_per_gridpoint_n)
for jj in range(self.m2_grid_n):
tempIdx = np.arange(jj * self.simulations_per_gridpoint_n, (jj + 1) * self.simulations_per_gridpoint_n)
simu_M2_jup[tempIdx] = self.m2_jup_grid[jj] * np.ones(self.simulations_per_gridpoint_n)
# simu_P_day = np.zeros(self.n_simulations)
# for jj in range(self.period_grid_n):
# tempIdx = np.arange(jj * self.simulations_per_gridpoint_n * self.m2_grid_n,
# (jj + 1) * self.simulations_per_gridpoint_n * self.m2_grid_n)
# simu_P_day[tempIdx] = self.p_day_grid[jj] * np.ones(self.simulations_per_gridpoint_n * self.m2_grid_n)
# simu_M2_jup[tempIdx] = temp_M2;
ecc = 0.
omega_deg = 0.
print('Running simulations in parallel...')
spsi = np.array(self.TableC1_mean['spsi'])
cpsi = np.array(self.TableC1_mean['cpsi'])
ref_da_mas = np.array(self.tmp_mean['da_mas'])
ref_omc_mas = self.res_mean.omc[0]
n_processes = 8
pool = Pool(processes=n_processes)
arg_list = []
for jj, P_day in enumerate(self.p_day_grid):
# print('Processing period number %d'%jj)
np.random.seed(random_seed)
simu_T0_day = self.T0_MJD + np.random.rand(N_sim_within_loop) * P_day
arg = [P_day, ecc, self.M1_Msun, simu_M2_jup,
omega_deg, simu_OMEGA_deg, simu_i_deg,
simu_T0_day, self.absPlx_mas,
np.array(self.tmp_mean['MJD']), spsi, cpsi, ref_da_mas, ref_omc_mas]
arg_list.append(arg)
import time
t0 = time.time()
mean_residual_rms = np.array(pool.map(return_residual_rms_array, arg_list))
t1 = time.time()
print('multiprocessing using %d processes finished in %3.3f sec' % (n_processes, t1 - t0))
pool.close()
pickle.dump((mean_residual_rms.flatten()), open(mc_file_name, "wb"))
else:
mean_residual_rms = pickle.load(open(mc_file_name, "rb"))
self.mean_residual_rms = mean_residual_rms.flatten()
def plot_simu_results(self, xfP, factor=1., visplot=True, confidence_limit=0.997,
x_axis_unit='day', semilogx=True, y_data_divisor=None, y_data_factor=1.,
new_figure=True, line_width=2.):
"""
Parameters
----------
xfP
factor
visplot
confidence_limit
x_axis_unit
semilogx
y_data_divisor
y_data_factor
new_figure
line_width
Returns
-------
"""
# if xfP.psi_deg is None:
if xfP.data_type == '2d':
criterion = np.std([xfP.meanResidualX, xfP.meanResidualY]) * factor
else:
criterion = np.std([xfP.meanResidualX]) * factor
print('Detection criterion is %3.3f mas ' % (criterion))
print('Using confidence limit of {:.3f}'.format(confidence_limit))
n_smaller = np.zeros((self.period_grid_n, self.m2_grid_n))
for jj in range(self.period_grid_n):
tempIdx = np.arange(jj * self.simulations_per_gridpoint_n * self.m2_grid_n,
(jj + 1) * self.simulations_per_gridpoint_n * self.m2_grid_n)
for kk in range(self.m2_grid_n):
pix = np.arange(kk * self.simulations_per_gridpoint_n, (kk + 1) * self.simulations_per_gridpoint_n)
n_smaller[jj, kk] = np.sum(self.mean_residual_rms[tempIdx[pix]] <= criterion)
detection_limit = np.zeros((self.period_grid_n, 2))
for jj in range(self.period_grid_n):
try:
limit_index = np.where(n_smaller[jj, :] < self.simulations_per_gridpoint_n * (1 - confidence_limit))[0][0]
try:
M2_val = self.m2_jup_grid[limit_index]
except ValueError:
M2_val = np.max(self.m2_jup_grid)
except IndexError:
M2_val = np.max(self.m2_jup_grid)
detection_limit[jj, :] = [self.p_day_grid[jj], M2_val]
if visplot:
if x_axis_unit == 'day':
x_axis_factor = 1
elif x_axis_unit == 'year':
x_axis_factor = 1. / u.year.to(u.day)
x_axis_label = 'Period ({})'.format(x_axis_unit)
if new_figure:
pl.figure(figsize=(6, 3), facecolor='w', edgecolor='k')
pl.clf()
if semilogx:
if y_data_divisor is not None:
pl.semilogx(detection_limit[:, 0] * x_axis_factor, y_data_divisor/detection_limit[:, 1]*y_data_factor, 'k-', lw=line_width)
else:
pl.semilogx(detection_limit[:, 0] * x_axis_factor, detection_limit[:, 1]*y_data_factor, 'k-', lw=line_width)
else:
if y_data_divisor is not None:
pl.plot(detection_limit[:, 0] * x_axis_factor, y_data_divisor/detection_limit[:, 1]*y_data_factor, 'k-', lw=line_width)
else:
pl.plot(detection_limit[:, 0] * x_axis_factor, detection_limit[:, 1] * y_data_factor, 'k-',
lw=line_width)
pl.title('{:.1f}% confidence limit'.format(confidence_limit * 100))
if y_data_divisor is not None:
pl.ylim((0, y_data_divisor / np.max(self.m2_jup_grid) * y_data_factor))
else:
pl.ylim((0, np.max(self.m2_jup_grid) * y_data_factor))
pl.xlabel(x_axis_label)
if new_figure:
pl.show()
self.detection_limit = detection_limit
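# Worked example of the thresholding above (added note, not in the original code): with
# simulations_per_gridpoint_n = 100 and confidence_limit = 0.997 the cut-off is
# 100 * (1 - 0.997) = 0.3, so a (period, companion-mass) grid cell is flagged as detectable
# only if none of its 100 simulated pseudo-orbits yields a residual RMS below the criterion.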
def plot_rv_data(rv, orbit_system=None, verbose=True, n_orbit=2, estimate_systemic_velocity=False,
data_colour='k', include_degenerate_orbit=False, plot_parameters_ensemble=None):
"""
Parameters
----------
rv
orbit_system
verbose
n_orbit
estimate_systemic_velocity
Returns
-------
"""
rv['jyear'] = [Time(rv['MJD'][i], format='mjd').jyear for i in range(len(rv))]
n_rows = 2
n_columns = 1
fig, axes = pl.subplots(n_rows, n_columns, sharex=True, figsize=(n_rows * 3.5, n_columns * 5.5),
facecolor='w', edgecolor='k', squeeze=False)
if 'rv_mps' in rv.colnames:
basic_unit = 'mps'
conversion_factor = 1
elif 'rv_kmps' in rv.colnames:
basic_unit = 'kmps'
conversion_factor = 1e3
unit_string = {'mps': 'm/s', 'kmps': 'km/s'}
# fig.suptitle(self.title)
# pl.subplot(2,1,1)
axes[0][0].plot(rv['jyear'], rv['rv_{}'.format(basic_unit)], 'ko', label='_', mfc=data_colour)
axes[0][0].errorbar(rv['jyear'], rv['rv_{}'.format(basic_unit)], yerr=rv['sigma_rv_{}'.format(basic_unit)], fmt='none', ecolor=data_colour, label='_')
n_rv = len(rv)
if orbit_system is not None:
# fit systemic velocity
if estimate_systemic_velocity:
rv_mps = orbit_system.compute_radial_velocity(np.array(rv['MJD']))
rv_kmps = rv_mps / 1000.
onesvec = np.ones(n_rv)
C = np.mat([onesvec])
weight = 1. / np.power(np.array(rv['sigma_rv_kmps']), 2)
LHS = np.mat(np.array(rv['rv_kmps']) - rv_kmps)
res = linearfit.LinearFit(LHS, np.diag(weight), C)
res.fit()
gamma_kmps = float(res.p)
gamma_mps = gamma_kmps*1e3
print('Systemic velocity {:2.3f} +/- {:2.3f} km/s'.format(gamma_kmps,
res.p_normalised_uncertainty[0]))
rv['rv_model_kmps'] = rv_kmps + gamma_kmps
orbit_system.gamma_ms = gamma_mps
else:
rv['rv_model_{}'.format(basic_unit)] = orbit_system.compute_radial_velocity(np.array(rv['MJD']))/conversion_factor
gamma_mps = None
# plot RV orbit of primary
time_offset_day = rv['MJD'][0] - orbit_system.Tp_day
orbit_system.plot_rv_orbit(time_offset_day=time_offset_day, n_orbit=n_orbit,
n_curve=10000, axis=axes[0][0], rv_unit=basic_unit)
if plot_parameters_ensemble is not None:
n_curve = 500
n_ensemble = len(plot_parameters_ensemble['offset_alphastar_mas'])
# array to store RVs
rv_ensemble = np.zeros((n_ensemble, n_curve))
# get times at which to sample orbit
t_plot_ensemble_jyear = orbit_system.get_t_plot(time_offset_day=time_offset_day, n_orbit=n_orbit, n_curve=n_curve)
t_plot_ensemble_mjd = orbit_system.get_t_plot(time_offset_day=time_offset_day,
n_orbit=n_orbit, n_curve=n_curve,
format='mjd')
for key in ['m2_MS', 'm_tot_ms', 'P_year', 'a1_mas', 'arel_mas', 'arel_AU']:
if key in plot_parameters_ensemble.keys():
plot_parameters_ensemble.pop(key)
plot_parameters_ensemble['Tref_MJD'] = np.ones(n_ensemble)*orbit_system.Tref_MJD
for index_ensemble in range(n_ensemble):
tmp_system = OrbitSystem({key: samples[index_ensemble] for key, samples in plot_parameters_ensemble.items()})
rv_ensemble[index_ensemble, :] = tmp_system.compute_radial_velocity(t_plot_ensemble_mjd)/1e3
axes[0][0].fill_between(t_plot_ensemble_jyear, np.percentile(rv_ensemble, 15.865, axis=0),
np.percentile(rv_ensemble, 84.134, axis=0), color='0.7')
# 1/0
# orbit_system_ensemble = [OrbitSystem({})]
# for key,
# rv_mps = (self.compute_radial_velocity(t_day))
# 1/0
if include_degenerate_orbit:
orbit_system_degenerate = copy.deepcopy(orbit_system)
orbit_system_degenerate.omega_deg += 180.
orbit_system_degenerate.OMEGA_deg += 180.
orbit_system_degenerate.plot_rv_orbit(time_offset_day=rv['MJD'][0] - orbit_system.Tp_day,
n_orbit=n_orbit, n_curve=1000, axis=axes[0][0],
rv_unit=basic_unit, line_style='--')
residuals = rv['rv_{}'.format(basic_unit)] - rv['rv_model_{}'.format(basic_unit)]
rv_description = '$\\gamma={:2.3f}$ km/s\n$N_\\mathrm{{RV}}={}$\n' \
'$\\Sigma_\\mathrm{{O-C}}$={:2.3f} {}'.format(orbit_system.gamma_ms/1e3, len(rv), np.std(residuals), unit_string[basic_unit])
# plot systemic velocity
axes[0][0].axhline(y=orbit_system.gamma_ms / conversion_factor, color='0.5', ls=':', zorder=-50)
axes[1][0].plot(rv['jyear'], residuals, 'ko', label='_', mfc=data_colour)
axes[1][0].errorbar(rv['jyear'], residuals, yerr=rv['sigma_rv_{}'.format(basic_unit)], fmt='none', ecolor=data_colour, label='_')
axes[1][0].text(0.01, 0.99, rv_description, horizontalalignment='left',
verticalalignment='top', transform=axes[1][0].transAxes)
axes[-1][0].set_xlabel('Time (Julian year)')
# pl.legend()
axes[0][0].set_ylabel('RV ({})'.format(unit_string[basic_unit]))
axes[1][0].set_ylabel('O-C ({})'.format(unit_string[basic_unit]))
axes[1][0].axhline(y=0, color='0.5', ls='--', zorder=-50)
axes[1][0].set_xlabel('Time (Julian year)')
labels = axes[-1][0].get_xticklabels()
plt.setp(labels, rotation=30)
fig.tight_layout(h_pad=0.0)
if verbose:
rv.pprint()
def get_cpsi_spsi_for_2Dastrometry(timestamps_2D, scan_angle_definition='hipparcos'):
"""Return cos(psi) and sin(psi) for regular 2D astrometry, where psi is the scan angle.
For Hipparcos
xi = spsi==0 #index of X coordinates (cpsi = 1) psi = 0 deg
yi = cpsi==0 #index of Y coordinates (spsi = 1) psi = 90 deg
Parameters
----------
timestamps_2D
scan_angle_definition
Returns
-------
"""
# every 2D timestamp is duplicated to obtain the 1D timestamps
try:
timestamps_1D = np.sort(np.hstack((timestamps_2D, timestamps_2D)))
except AttributeError:
raise TypeError('timestamps_2D must be array-like, e.g. a numpy array of epochs')
n_1d = len(timestamps_1D)
# compute cos(psi) and sin(psi) factors assuming orthogonal axes
if scan_angle_definition == 'hipparcos':
spsi = (np.arange(1, n_1d+1)+1) % 2  # first RA then Dec
cpsi = (np.arange(1, n_1d+1)) % 2
# indices of X and Y measurements
xi = np.where(spsi==0)[0] #index of X coordinates (cpsi = 1) psi = 0 deg
yi = np.where(cpsi==0)[0] #index of Y coordinates (spsi = 1) psi = 90 deg
elif scan_angle_definition == 'gaia':
cpsi = (np.arange(1, n_1d+1)+1)%2
spsi = (np.arange(1, n_1d+1) )%2
# indices of X and Y measurements
yi = np.where(spsi==0)[0]
xi = np.where(cpsi==0)[0]
return timestamps_1D, cpsi, spsi, xi, yi
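# Illustration (added, not part of the original module): a minimal usage sketch of the
# function above for two 2D epochs in the 'hipparcos' convention. Each epoch is duplicated
# and spsi/cpsi alternate so that X and Y measurements interleave:
# >>> t1d, cpsi, spsi, xi, yi = get_cpsi_spsi_for_2Dastrometry(np.array([57000., 57001.]))
# >>> spsi, cpsi
# (array([0, 1, 0, 1]), array([1, 0, 1, 0]))
# >>> xi, yi
# (array([0, 2]), array([1, 3]))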
def mass_from_semimajor_axis(a_m, p_day):
"""Return mass term in Kepler's law.
M_0,1,2 = 4 pi^2 a_0,1,2^3 / P^2
Parameters
----------
a_m
p_day
Returns
-------
"""
mass_term = 4 * np.pi**2 * a_m**3/(p_day*day2sec)**2
mass_kg = mass_term / Ggrav
return mass_kg
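# Worked example (added for illustration; values are approximate): for the Earth's orbit,
# a_m = 1.496e11 m and p_day = 365.25 recover the solar mass, since Kepler's third law
# 4 pi^2 a^3 / P^2 = G M gives M ~ 2.0e30 kg.
# >>> mass_from_semimajor_axis(1.496e11, 365.25) / MS_kg   # ~1.0, assuming MS_kg ~ 1.989e30 kg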
def convert_from_linear_to_angular(a_m, absolute_parallax_mas):
"""Convert a linear quantity in meters to a angle in mas, given the absolute parallax.
Parameters
----------
a_m
absolute_parallax_mas
Returns
-------
"""
d_pc = 1./ (absolute_parallax_mas/1000.)
a_rad = np.arctan2(a_m, d_pc*pc_m)
a_mas = a_rad * rad2mas # semimajor axis in mas
return a_mas
def convert_from_angular_to_linear(a_mas, absolute_parallax_mas):
"""Convert a angle in mas to a linear quantity in meters, given the absolute parallax.
Parameters
----------
a_mas
absolute_parallax_mas
Returns
-------
"""
a_rad = a_mas/rad2mas
d_pc = 1. / (absolute_parallax_mas / 1000.)
a_m = np.tan(a_rad) * d_pc*pc_m
# a_m = a_rad * d_pc*pc_m
return a_m
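# Illustration (added): the two conversion helpers above are inverses of each other in the
# small-angle regime. At a parallax of 100 mas (d = 10 pc), an angle of 1 mas corresponds to
# about 0.01 AU ~ 1.5e9 m:
# >>> a_m = convert_from_angular_to_linear(1.0, 100.)   # ~1.5e9 m
# >>> convert_from_linear_to_angular(a_m, 100.)         # ~1.0 mas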
def companion_mass_in_diluted_system(alpha_mas, absolute_parallax_mas, m1_kg, p_day, delta_mag,
numeric_solution=True):
"""Return companion mass given photocenter orbit and delta_mag."""
g_value = Ggrav / (4 * np.pi**2) * (p_day * day2sec)**2
alpha_value = convert_from_angular_to_linear(alpha_mas, absolute_parallax_mas)
beta_value = fractional_luminosity(0, delta_mag)
if numeric_solution:
alpha = alpha_value
m1 = m1_kg
beta = beta_value
g = g_value
zero_equation = lambda m2: g * (m1 + m2) - (alpha / (m2 / (m1 + m2) - beta)) ** 3 # == 0
# scipyfmin minimizes the given function with a given starting value
m2_kg = scipyfmin(zero_equation, m1, disp=False)
return m2_kg
else:
alpha = alpha_value
m1 = m1_kg
beta = beta_value
g = g_value
m2_kg = np.array([-(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)/(3*(27*(alpha**3*m1**2 + beta**3*g*m1**3)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) + np.sqrt(-4*(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)**3 + (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2 + 2*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**2)/2 - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**(1./3)) - (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(3*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) - (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) + np.sqrt(-4*(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)**3 + (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2 + 2*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**2)/2 - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**(1./3)/3, -(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)/(3*((-1./2) - np.sqrt(3)*1j/2)*(27*(alpha**3*m1**2 + beta**3*g*m1**3)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) + np.sqrt(-4*(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)**3 + (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2 + 2*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**2)/2 - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**(1./3)) - (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(3*(beta**3*g - 3*beta**2*g + 3*beta*g 
- g)) - ((-1./2) - np.sqrt(3)*1j/2)*(27*(alpha**3*m1**2 + beta**3*g*m1**3)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) + np.sqrt(-4*(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)**3 + (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2 + 2*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**2)/2 - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**(1./3)/3, -(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)/(3*((-1./2) + np.sqrt(3)*1j/2)*(27*(alpha**3*m1**2 + beta**3*g*m1**3)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) + np.sqrt(-4*(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)**3 + (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2 + 2*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**2)/2 - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**(1./3)) - (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(3*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) - ((-1./2) + np.sqrt(3)*1j/2)*(27*(alpha**3*m1**2 + beta**3*g*m1**3)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) + np.sqrt(-4*(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)**3 + (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2 + 2*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**2)/2 - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**(1./3)/3])
# m2_kg = -(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)/(3*(27*(alpha**3*m1**2 + beta**3*g*m1**3)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) + np.sqrt(-4*(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)**3 + (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2 + 2*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**2)/2 - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**(1/3.)) - (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(3*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) - (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)) + np.sqrt(-4*(-3*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**2/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2)**3 + (27*(alpha**3*m1**2 + beta**3*g*m1**3)/(beta**3*g - 3*beta**2*g + 3*beta*g - g) - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2 + 2*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**2)/2 - 9*(2*alpha**3*m1 + 3*beta**3*g*m1**2 - 3*beta**2*g*m1**2)*(alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)/(2*(beta**3*g - 3*beta**2*g + 3*beta*g - g)**2) + (alpha**3 + 3*beta**3*g*m1 - 6*beta**2*g*m1 + 3*beta*g*m1)**3/(beta**3*g - 3*beta**2*g + 3*beta*g - g)**3)**(1/3.)/3
if 0:
# solve the equation using sympy
alpha = sp.Symbol('alpha')
beta = sp.Symbol('beta')
g = sp.Symbol('g')
m1 = sp.Symbol('m1')
m2 = sp.Symbol('m2')
zero_equation = g * (m1 + m2) - (alpha / (m2/(m1 + m2) - beta))**3 # == 0
res = sp.solvers.solve(zero_equation, m2, check=False)
print(sp.python(res))
for i, sol in enumerate(res):
print('Solution {}'.format(i))
if i == 1:
m2_kg = sol.evalf(subs={g: g_value, m1: m1_kg, beta: beta_value, alpha: alpha_value})
return m2_kg
return m2_kg
def pjGet_m2(m1_kg, a_m, P_day):
"""Return companion mass in kg.
Parameters
----------
m1_kg : float
primary mass in kg
a_m : float
barycentric semimajor axis in meter
P_day : float
orbital period in days
Returns
-------
"""
c = np.abs(4.*np.pi**2.*a_m**3./(P_day*day2sec)**2.)
a = np.sqrt( c / Ggrav ) * m1_kg
b = np.sqrt( c / Ggrav )
m2_kg = (27.*a**2. + 3.*np.sqrt(3.)* np.sqrt(27.*a**4. + 4.*a**3.*b**3.) + 18.*a*b**3. + 2.*b**6.)**(1./3.) / (3.*2.**(1./3.)) - (2.**(1./3.)*(-6.*a*b - b**4.)) / (3.* (27.*a**2. + 3.*np.sqrt(3)*np.sqrt( 27.*a**4. + 4.*a**3.*b**3. ) + 18.*a*b**3. + 2.*b**6.)**(1./3.))+(b**2.)/3.
if 0 == 1:
# from sympy import Eq, Symbol, solve
import sympy as sp
# (a1_detection_mas/1.e3 * AU_m * d_pc)**3 * (4. * np.pi**2.) * (P_day*day2sec)**2. = Ggrav * (m2_MJ * MJ_kg)**3. / ( m1_MS*MS_kg + m2_MJ*MJ_kg )**2.
# m2_MJ = sp.Symbol('m2_MJ')
# P_day = sp.Symbol('P_day')
# a = (a1_detection_mas/1.e3 * AU_m * d_pc)**3 * (4. * np.pi**2.)
# b = a * (P_day*day2sec)**2 / Ggrav
# m2 = m2_MJ * MJ_kg
# m1 = m1_MS*MS_kg
a = sp.Symbol('a')
p = sp.Symbol('p')
G = sp.Symbol('G')
m1 = sp.Symbol('m1')
m2 = sp.Symbol('m2')
# g1 = b - (m2)**3 / ( m1 + m2 )**2
# a_AU = a_m / AU_m # in AU
# a1_mas*d_pc*AU_m / 1e3 = a_m
# p1 = (4. * np.pi**2.)
# p2 = (self.P_day*day2sec)**2
# p = p2/p1*G
# a_m = a1_detection_mas / 1.e3 * d_pc * AU_m
# a = (a1_detection_mas / 1.e3 * d_pc * AU_m)**3
# M/G = m2**3 / ( m1 + m2 )**2
# a = M * p
# g1 = a - M*p
g1 = p * m2**3 / ( m1 + m2 )**2 - a
res = sp.solvers.solve( (g1), (m2))
print(res)
return m2_kg
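# Consistency sketch (added, approximate values): pjGet_m2 inverts Kepler's relation
# m2^3 / (m1 + m2)^2 = 4 pi^2 a1^3 / (G P^2) for the barycentric semimajor axis a1 of the
# primary, so feeding it the output of semimajor_axis_barycentre_linear (defined below)
# should recover the input companion mass:
# >>> a_m = semimajor_axis_barycentre_linear(1., 1., 4332.6)   # Sun + Jupiter, ~7.4e8 m
# >>> pjGet_m2(MS_kg, a_m, 4332.6) / MJ_kg                     # ~1.0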
def semimajor_axis_barycentre_angular(m1_MS, m2_MJ, P_day, plx_mas):
"""Return the semi-major axis, in milliarcseconds, of a primary object's orbit
around the system barycenter.
Parameters
----------
m1_MS : `float`
The mass of the primary, in solar masses.
m2_MJ : `float`
The mass of the secondary, in Jupiter masses.
P_day : `float`
The period of the secondary, in Earth days.
plx_mas : `float`
The parallax of the primary, in milliarcseconds.
Returns
----------
a_barycentre : `float`
The apparent semi-major axis of the primary, in milliarcseconds.
"""
# # mass term for the barycentric orbit of the primary mass
# M = (Ggrav * (m2_MJ * MJ_kg)**3. / (m1_MS * MS_kg + m2_MJ * MJ_kg)**2.)
#
# # semimajor axis of the primary mass in meter
# a_m = (M / (4. * np.pi**2.) * (P_day * day2sec)**2.)**(1./3.)
a_m = semimajor_axis_barycentre_linear(m1_MS, m2_MJ, P_day)
d_pc = 1. / (plx_mas / 1000.)
a_rad = np.arctan2(a_m, d_pc*pc_m)
# semimajor axis in mas
a_mas = a_rad * rad2mas
return a_mas
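# Worked example (added, approximate): a Jupiter-mass companion on a 4332.6-day orbit around
# a solar-mass star with a parallax of 100 mas (d = 10 pc) produces a barycentric wobble of
# roughly half a milliarcsecond:
# >>> semimajor_axis_barycentre_angular(1., 1., 4332.6, 100.)   # ~0.5 mas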
def semimajor_axis_barycentre_linear(m1_MS, m2_MJ, P_day):
"""
Get the semi-major axis, in meters, of a primary object's orbit around the
system barycenter.
Parameters
----------
m1_MS : `float`
The mass of the primary, in solar masses.
m2_MJ : `float`
The mass of the secondary, in Jupiter masses.
P_day : `float`
The period of the secondary, in Earth days.
Returns
----------
a_m_barycentre : `float`
The physical semi-major axis of the primary, in meters.
"""
M = (Ggrav * (m2_MJ * MJ_kg)**3.
/ (m1_MS * MS_kg + m2_MJ * MJ_kg)**2.) # mass term for the barycentric orbit of the primary mass
a_m = (M / (4. * np.pi**2.) * (P_day * day2sec)**2.)**(1./3.) # semimajor axis of the primary mass in m
return a_m
def semimajor_axis_relative_angular(m1_MS, m2_MJ, P_day, plx_mas):
"""
Get the semi-major axis, in milliarcseconds, of a secondary object's orbit
around its primary.
Parameters
----------
m1_MS : `float`
The mass of the primary, in solar masses.
m2_MJ : `float`
The mass of the secondary, in Jupiter masses.
P_day : `float`
The period of the secondary, in Earth days.
plx_mas : `float`
The parallax of the primary, in milliarcseconds.
Returns
----------
a_relative : `float`
The apparent semi-major axis of the secondary, in milliarcseconds.
"""
# a_rel_m = ((Ggrav * (m1_MS * MS_kg + m2_MJ * MJ_kg)
# / 4. / (np.pi**2.)
# * (P_day * day2sec)**2.)**(1./3.))
#M = Ggrav * (m2_MJ * MJ_kg)**3. / ( m1_MS*MS_kg + m2_MJ*MJ_kg )**2. # mass term for the barycentric orbit of the primary mass
#a_m = ( M / (4. * np.pi**2.) * (P_day*day2sec)**2. )**(1./3.) # semimajor axis of the primary mass in m
a_rel_m = semimajor_axis_relative_linear(m1_MS, m2_MJ, P_day)
d_pc = 1./ (plx_mas / 1000.)
a_rel_rad = np.arctan2(a_rel_m, d_pc * pc_m)
a_rel_mas = a_rel_rad * rad2mas # semimajor axis in mas
return a_rel_mas
def semimajor_axis_relative_linear(m1_MS, m2_MJ, P_day):
"""Get the semi-major axis, in meters, of a secondary object's orbit around
its primary.
Parameters
----------
m1_MS : `float`
The mass of the primary, in solar masses.
m2_MJ : `float`
The mass of the secondary, in Jupiter masses.
P_day : `float`
The period of the secondary, in Earth days.
Returns
----------
a_m_relative : `float`
The physical semi-major axis of the secondary, in meters.
"""
a_rel_m = ((Ggrav * (m1_MS * MS_kg + m2_MJ * MJ_kg)
/ 4. / (np.pi**2.)
* (P_day * day2sec)**2.)**(1./3.))
return a_rel_m
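# Worked example (added, approximate): for the Sun-Jupiter pair (P ~ 4332.6 days) the relative
# semimajor axis is about 5.2 AU:
# >>> semimajor_axis_relative_linear(1., 1., 4332.6) / AU_m   # ~5.2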
def secondary_mass_at_detection_limit( m1_MS, Period_day, d_pc, a1_detection_mas ):
"""
formerly pjGet_DetectionLimits
Parameters
----------
m1_MS
Period_day
d_pc
a1_detection_mas
Returns
-------
"""
a_m = a1_detection_mas / 1.e3 * d_pc * AU_m
m1_kg = m1_MS * MS_kg
P_day = Period_day
m2_kg = pjGet_m2( m1_kg, a_m, P_day )
m2_MJ = m2_kg / MJ_kg
return m2_MJ
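# Worked example (added, approximate): a 0.5 mas astrometric detection limit on a 1-year
# period around a solar-mass star at 10 pc corresponds to a companion of roughly 5 Jupiter
# masses (a1 = 0.5 mas at 10 pc is 0.005 AU, and m2/m1 ~ a1/a_rel with a_rel ~ 1 AU):
# >>> secondary_mass_at_detection_limit(1., 365.25, 10., 0.5)   # ~5 M_Jup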
def mean_anomaly(t_mjd, t_periastron_mjd, p_day):
"""Return mean anomaly at time t_mjd.
Parameters
----------
t_mjd : float
time in MJD
t_periastron_mjd : float
Time of periastron passage in MJD
p_day : float
Orbital period in days
Returns
-------
m_deg : float
Mean anomaly
"""
m_deg = np.rad2deg((t_mjd - t_periastron_mjd) * (2 * np.pi)/p_day)
return m_deg
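# Example (added): a quarter period after periastron passage the mean anomaly is 90 degrees;
# note that the result is not wrapped into [0, 360).
# >>> mean_anomaly(50025., 50000., 100.)   # 90.0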
def eccentric_anomaly(ecc, t_mjd, t_periastron_mjd, p_day):
"""
following MIKS-GA4FORS_v0.4/genetic/kepler-genetic.i
Parameters
----------
ecc
t_mjd
t_periastron_mjd
p_day
Returns
-------
"""
m_deg = mean_anomaly(t_mjd, t_periastron_mjd, p_day)
M_rad = np.deg2rad(m_deg)
if np.all(ecc == 0):
return M_rad
else:
E_rad = np.zeros(len(M_rad))
E0_rad = M_rad + ecc*np.sin(M_rad)*(1+ecc*np.cos(M_rad))  # initial value
Enew_rad = E0_rad  # initialise with the starting guess
cnt = 0  # iteration counter
E_rad_tmp = 1000.
while (np.max(np.abs(Enew_rad-E_rad_tmp)) > 1.e-8) & (cnt < 200):
E_rad_tmp = Enew_rad
f = E_rad_tmp - ecc*np.sin(E_rad_tmp) - M_rad
fp = 1 - ecc*np.cos(E_rad_tmp)  # derivative of f with respect to E
fpp = ecc*np.sin(E_rad_tmp)
# Enew_rad = E_rad_tmp - f/fp  (plain Newton step)
# Enew_rad = E_rad_tmp - 2*fp/fpp - sqrt((fp/fpp)^2 + f)  (less robust alternative)
Enew_rad = E_rad_tmp - 2*fp*f/(2*fp**2 - f*fpp)  # Halley's method, converges very well
cnt += 1
E_rad = E_rad_tmp
return E_rad
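# Sanity check (added for illustration): the returned eccentric anomaly satisfies Kepler's
# equation E - ecc*sin(E) = M to within the iteration tolerance. The time argument must be
# array-like because of the len() call above.
# >>> ecc = 0.5
# >>> E = eccentric_anomaly(ecc, np.array([50025.]), 50000., 100.)
# >>> M = np.deg2rad(mean_anomaly(np.array([50025.]), 50000., 100.))
# >>> np.max(np.abs(E - ecc*np.sin(E) - M)) < 1e-6
# True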
def RadialVelocitiesConstants(k1_mps,om_rad,ecc):
alpha_mps = +k1_mps*np.cos(om_rad)
beta_mps = -k1_mps*np.sin(om_rad)
delta_mps = +k1_mps*ecc*np.cos(om_rad)
return np.array([alpha_mps,beta_mps,delta_mps])
def TrueAnomaly(ecc, E_rad):
# BUG FOUND 2016-02-08, NOT SURE WHERE THIS CAME FROM
# theta_rad_tmp = 2.*np.arctan( np.sqrt((1.+ecc)/(1.-ecc))*np.tan(E_rad/2.) )
# theta_rad = np.arctan2( np.cos(theta_rad_tmp), np.sin(theta_rad_tmp) )
theta_rad = 2.*np.arctan( np.sqrt((1.+ecc)/(1.-ecc))*np.tan(E_rad/2.) )
return theta_rad
def RadialVelocitiesKepler(alpha_mps,beta_mps,delta_mps,theta_rad):
Vrad_mps = alpha_mps * np.cos(theta_rad) + beta_mps * np.sin(theta_rad) + delta_mps
return Vrad_mps
def EllipticalRectangularCoordinates(ecc, E_rad):
"""Return rectangular coordinates [X, Y] of the orbit in its own plane, i.e. for i=0 and OMEGA=0.
Parameters
----------
ecc : eccentricity
E_rad : eccentric anomaly in radian
Returns
-------
[X, Y] : position in the orbital plane; the Thiele-Innes constants are needed to project it onto the sky.
See also
--------
eccentric_anomaly
"""
X = np.cos(E_rad) - ecc
Y = np.sqrt(1.-ecc**2)*np.sin(E_rad)
return np.array([X,Y])
def geometric_elements(thiele_innes_parameters):
"""Return geometrical orbit elements a, omega, OMEGA, i.
Parameters
----------
thiele_innes_parameters : array or array of arrays
Array of Thiele Innes constants [A,B,F,G] in milli-arcsecond
Returns
-------
geometric_parameters : array
Orbital elements [a_mas, omega_deg, OMEGA_deg, i_deg]
"""
A = thiele_innes_parameters[0]
B = thiele_innes_parameters[1]
F = thiele_innes_parameters[2]
G = thiele_innes_parameters[3]
p = (A ** 2 + B ** 2 + G ** 2 + F ** 2) / 2.
q = A * G - B * F
a_mas = np.sqrt(p + np.sqrt(p ** 2 - q ** 2))
# i_rad = math.acos(q/(a_mas**2.))
# omega_rad = (math.atan2(B-F,A+G)+math.atan2(-B-F,A-G))/2.;
# OMEGA_rad = (math.atan2(B-F,A+G)-math.atan2(-B-F,A-G))/2.;
i_rad = np.arccos(q / (a_mas ** 2.))
omega_rad = (np.arctan2(B - F, A + G) + np.arctan2(-B - F, A - G)) / 2.
OMEGA_rad = (np.arctan2(B - F, A + G) - np.arctan2(-B - F, A - G)) / 2.
i_deg = np.rad2deg(i_rad)
omega_deg = np.rad2deg(omega_rad)
OMEGA_deg = np.rad2deg(OMEGA_rad)
# OMEGA_deg = np.rad2deg(np.unwrap(OMEGA_rad))
if np.any(np.isnan(a_mas)):
index = np.where(np.isnan(a_mas))[0]
raise RuntimeError('nan detected: {} occurrences'.format(len(index)))
# if isinstance(omega_deg, (list, tuple, np.ndarray)):
# index = np.where(omega_deg < 0.)[0]
# omega_deg[index] += 180.
#
# if isinstance(OMEGA_deg, (list, tuple, np.ndarray)):
# index = np.where(OMEGA_deg < 0.)[0]
# OMEGA_deg[index] += 180.
geometric_parameters = np.array([a_mas, omega_deg, OMEGA_deg, i_deg])
return geometric_parameters
def thiele_innes_constants(geometric_parameters):
"""Return A B F G in mas from the input of the geometrical elements
Parameters
----------
geometric_parameters : array
[a_mas, omega_deg, OMEGA_deg, i_deg]
Returns
-------
thiele_innes_parameters : array
[A, B, F, G] in mas
"""
a_mas = geometric_parameters[0]
omega_rad = np.deg2rad(geometric_parameters[1])
OMEGA_rad = np.deg2rad(geometric_parameters[2])
i_rad = np.deg2rad(geometric_parameters[3])
A = a_mas * (np.cos(OMEGA_rad)*np.cos(omega_rad) - np.sin(OMEGA_rad)*np.sin(omega_rad)*np.cos(i_rad))
B = a_mas * (np.sin(OMEGA_rad)*np.cos(omega_rad) + np.cos(OMEGA_rad)*np.sin(omega_rad)*np.cos(i_rad))
F = a_mas * (-np.cos(OMEGA_rad)*np.sin(omega_rad) - np.sin(OMEGA_rad)*np.cos(omega_rad)*np.cos(i_rad))
G = a_mas * (-np.sin(OMEGA_rad)*np.sin(omega_rad) + np.cos(OMEGA_rad)*np.cos(omega_rad)*np.cos(i_rad))
thiele_innes_parameters = np.array([A, B, F, G])
return thiele_innes_parameters
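# Round-trip sketch (added): thiele_innes_constants and geometric_elements are inverse
# transformations; a_mas and i_deg are recovered exactly, while omega_deg and OMEGA_deg are
# only defined up to the usual 180-degree degeneracy.
# >>> tic = thiele_innes_constants([1.0, 30., 40., 60.])
# >>> a_mas, omega_deg, OMEGA_deg, i_deg = geometric_elements(tic)
# >>> np.allclose([a_mas, i_deg], [1.0, 60.])
# True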
def astrom_signal(t_day, psi_deg, ecc, P_day, Tp_day, TIC):
"""Return the astrometric orbit signal, i.e. the displacement angle in mas along scan angle psi.
Takes the dates and scan-angle orientations of the measurements together with the orbit
parameters (eccentricity, period, time of periastron) and the Thiele-Innes constants TIC = [A, B, F, G].
"""
# legacy note: an earlier implementation used a pseudo-eccentricity parameter,
# ecc = abs(atan(pecc)*2/pi), and packed the parameters as a = [pecc, P_day, Tp_day, A, B, F, G]
psi_rad = np.deg2rad(psi_deg)
# compute eccentric anomaly
E_rad = eccentric_anomaly(ecc, t_day, Tp_day, P_day)
# compute orbit projected on the sky
if np.all(ecc == 0):
X = np.cos(E_rad)
Y = np.sin(E_rad)
else:
X = np.cos(E_rad)-ecc
Y = np.sqrt(1.-ecc**2)*np.sin(E_rad)
#compute phi
# A = TIC[0]
# B = TIC[1]
# F = TIC[2]
# G = TIC[3]
# phi = (A*np.sin(psi_rad)+B*np.cos(psi_rad))*X + (F*np.sin(psi_rad)+G*np.cos(psi_rad))*Y
phi = (TIC[0]*np.sin(psi_rad)+TIC[1]*np.cos(psi_rad))*X + (TIC[2]*np.sin(psi_rad)+TIC[3]*np.cos(psi_rad))*Y
# return np.array(phi)
return phi
def astrom_signalFast(t_day, spsi, cpsi, ecc, P_day, T0_day, TIC, scan_angle_definition='hipparcos'):
"""Return astrometric orbit signal.
Parameters
----------
t_day
spsi
cpsi
ecc
P_day
T0_day
TIC
Returns
-------
phi : numpy array
Orbit signal along scan angle psi.
"""
# compute eccentric anomaly
E_rad = eccentric_anomaly(ecc, t_day, T0_day, P_day)
# compute orbit projected on the sky
if np.all(ecc == 0):
X = np.cos(E_rad)
Y = np.sin(E_rad)
else:
X = np.cos(E_rad)-ecc
Y = np.sqrt(1.-ecc**2)*np.sin(E_rad)
# see Equation 8 in Sahlmann+2011
if scan_angle_definition == 'hipparcos':
phi = (TIC[0]*spsi + TIC[1]*cpsi)*X + (TIC[2]*spsi + TIC[3]*cpsi)*Y
elif scan_angle_definition == 'gaia':
# A B F G
phi = (TIC[0]*cpsi + TIC[1]*spsi)*X + (TIC[2]*cpsi + TIC[3]*spsi)*Y
return phi
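# Usage sketch (added, with made-up orbital elements): generate a 1D along-scan orbit signal
# for a set of regular 2D epochs by combining the helpers defined in this module.
# >>> t_2d = np.linspace(56000., 57000., 20)
# >>> t_1d, cpsi, spsi, xi, yi = get_cpsi_spsi_for_2Dastrometry(t_2d)
# >>> tic = thiele_innes_constants([1.0, 30., 40., 60.])
# >>> phi = astrom_signalFast(t_1d, spsi, cpsi, 0.3, 400., 56100., tic)
# >>> phi[xi], phi[yi]   # RA* and Dec components of the orbit signal in mas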
def get_ephemeris(center='g@399', target='0', start_time=None, stop_time=None, step_size='5d',
verbose=True, out_dir=None, vector_table_output_type=1, output_units='AU-D',
overwrite=False, reference_plane='FRAME'):
"""Query the JPL Horizons web interface to return the X,Y,Z position of the target body
relative to the center body.
Parameters
----------
center : str
Horizons object identifier, default is Earth Center 'g@399'
target : str
Horizons object identifier, default is Solar System Barycenter '0'
start_time : astropy time instance
stop_time : astropy time instance
step_size : string, default is '1d' for 1 day steps
verbose : bool
out_dir : str
vector_table_output_type
output_units
overwrite
reference_plane : str
reference_plane = 'FRAME' is for Earth mean equator and equinox
Returns
-------
xyzdata : astropy table
References
----------
See Horizons_doc.pdf available at https://ssd.jpl.nasa.gov/?horizons#email
Documentation can also be obtained by sending en email with subject "BATCH-LONG" to
[email protected]
"""
global ephemeris_dir
if start_time is None:
start_time = Time(1950.0, format='jyear')
if stop_time is None:
stop_time = Time(2025.0, format='jyear')
if out_dir is not None:
ephemeris_dir = out_dir
if output_units not in ['AU-D', 'KM-S', 'KM-D']:
raise NotImplementedError()
if reference_plane not in ['ECLIPTIC', 'FRAME', 'B']: # last is BODY EQUATOR
raise NotImplementedError()
if vector_table_output_type not in np.arange(6)+1:
raise NotImplementedError()
horizons_file_seed = '{}_{}_{}_{}_{}'.format(center, target, start_time, stop_time, step_size)
out_file = os.path.join(ephemeris_dir, horizons_file_seed + '.txt')
if verbose:
print('Getting ephemeris {}'.format(horizons_file_seed))
if (not os.path.isfile(out_file)) or overwrite:
# run Horizons query
url = "https://ssd.jpl.nasa.gov/horizons_batch.cgi?batch=l&TABLE_TYPE='VECTORS'&CSV_FORMAT='YES'"
url += "&CENTER='{}'".format(center)
url += "&COMMAND='{}'".format(target)
url += "&START_TIME='{}'".format(start_time.isot.split('T')[0])
url += "&STOP_TIME='{}'".format(stop_time.isot.split('T')[0])
url += "&STEP_SIZE='{}'".format(step_size)
url += "&SKIP_DAYLT='NO'"
url += "&OUT_UNITS='{}'".format(output_units)
url += "&VEC_TABLE='{}'".format(vector_table_output_type)
url += "&REF_PLANE='{}'".format(reference_plane)
if verbose:
print(url)
try:
url_stream = urlopen(url)
except HTTPError as e:
print("Unable to open URL:", e)
sys.exit(1)
content = url_stream.read()
url_stream.close()
with open(out_file, 'wb') as ephemeris:
ephemeris.write(content)
xyzdata = read_ephemeris(horizons_file_seed, overwrite=overwrite, ephemeris_path=ephemeris_dir)
return xyzdata
def read_ephemeris(horizons_file_seed, overwrite=False, ephemeris_path=None, verbose=False):
"""
Read ephemeris file obtained from the JPL HORIZONS system
TODO: clean up computation of data_start and data_end
:param horizons_file_seed:
:return:
"""
if ephemeris_path is None:
ephemeris_path = ephemeris_dir
fits_file = os.path.join(ephemeris_path, horizons_file_seed + '_XYZ.fits')
if (not os.path.isfile(fits_file)) or overwrite:
eph_file = os.path.join(ephemeris_path, horizons_file_seed + '.txt')
f_rd = open(eph_file, 'r')
# file_lines = f_rd.readlines()[0].split('\r')
file_lines = f_rd.readlines()
f_rd.close()
# for i in range(len(file_lines)):
# line = file_lines[i]
# print('{} {}'.format(i, line))
# if line.strip()=='':
# print('{} Empty line detected'.format(i))
index_start = [i for i in range(len(file_lines)) if "$$SOE" in file_lines[i]][0]
index_end = [i for i in range(len(file_lines)) if "$$EOE" in file_lines[i]][0]
# n_blank_lines = len([i for i in range(index_start) if (file_lines[i] == '' or file_lines[i] == ' ' or file_lines[i].strip() == '\n')])
n_blank_lines = len([i for i in range(index_start) if (file_lines[i].strip() in ['\n',''])])
# data_start = index_start + 1
data_start = index_start - n_blank_lines + 1
data_end = data_start + index_end - index_start -1
# data_end = index_end - 1
header_start = index_start - n_blank_lines -2
if verbose:
print('Number of blank lines found before data: {}'.format(n_blank_lines))
print('index_start: {}'.format(index_start))
print('index_end: {}'.format(index_end))
print('data_start: {}'.format(data_start))
print('data_end: {}'.format(data_end))
print('header start: {}'.format(header_start))
xyzdata = Table.read(eph_file, format='ascii.basic', delimiter=',', data_start = data_start,
data_end=data_end, guess=False, comment='mycomment95', header_start = header_start)
xyzdata.write(fits_file, format = 'fits', overwrite=True)
# xyzdata = Table.read(eph_file, format='ascii.no_header', delimiter=',', data_start = data_start,
# data_end=data_end, names=('JD','ISO','X','Y','Z','tmp'), guess=False, comment='mycomment95')
# xyzdata['JD','X','Y','Z'].write(fits_file, format = 'fits')
else:
xyzdata = Table.read(fits_file, format = 'fits')
for colname in xyzdata.colnames:
if 'col' in colname:
xyzdata.remove_column(colname)
# xyzdata.rename_column('JDTDB', 'JD')
return xyzdata
def get_parallax_factors(ra_deg, dec_deg, time_jd, horizons_file_seed=None, verbose=False,
instrument=None, overwrite=False):
"""
Parameters
----------
ra_deg : float
Right Ascension in degrees
dec_deg : float
Declination in degrees
time_jd : ndarray
Array of times in Julian Day format
horizons_file_seed : str
Optional input of pre-existing ephemeris file from JPL Horizons
verbose : bool
verbosity
instrument : str
Optional argument when using pre-existing ephemeris file
overwrite : bool
Whether to overwrite existing products
Returns
-------
[parallax_factor_ra, parallax_factor_dec] : ndarray
Arrays holding the parallax factors
"""
ephFactor = -1
ra_rad = np.deg2rad(ra_deg)
de_rad = np.deg2rad(dec_deg)
if instrument is not None:
instr = np.unique(instrument)
Nepoch = len(instrument)
Xip_val = np.zeros(Nepoch)
Yip_val = np.zeros(Nepoch)
Zip_val = np.zeros(Nepoch)
for ins in instr:
idx = np.where( instrument == ins )[0]
if verbose:
print('Getting Parallax factors for %s using Seed: \t%s' % (ins, DEFAULT_EPHEMERIS_DICTIONARY[ins]))
xyzdata = read_ephemeris(DEFAULT_EPHEMERIS_DICTIONARY[ins])
Xip = interp1d(xyzdata['JD'],xyzdata['X'], kind='linear', copy=True, bounds_error=True,fill_value=np.nan)
Yip = interp1d(xyzdata['JD'],xyzdata['Y'], kind='linear', copy=True, bounds_error=True,fill_value=np.nan)
Zip = interp1d(xyzdata['JD'],xyzdata['Z'], kind='linear', copy=True, bounds_error=True,fill_value=np.nan)
try:
Xip_val[idx] = Xip(time_jd[idx])
Yip_val[idx] = Yip(time_jd[idx])
Zip_val[idx] = Zip(time_jd[idx])
except ValueError:
raise ValueError('Error in time interpolation for parallax factors: '
'requested range %3.1f--%3.1f (%s--%s); '
'ephemeris file contains data from %s to %s'
% (np.min(time_jd[idx]), np.max(time_jd[idx]),
Time(np.min(time_jd[idx]), format='jd', scale='utc').iso,
Time(np.max(time_jd[idx]), format='jd', scale='utc').iso,
Time(np.min(xyzdata['JD']), format='jd').iso,
Time(np.max(xyzdata['JD']), format='jd').iso))
parallax_factor_ra = ephFactor* ( Xip_val*np.sin(ra_rad) - Yip_val*np.cos(ra_rad) )
parallax_factor_dec = ephFactor*(( Xip_val*np.cos(ra_rad) + Yip_val*np.sin(ra_rad) )*np.sin(de_rad) - Zip_val*np.cos(de_rad))
else:
if horizons_file_seed is None:
xyzdata = get_ephemeris(verbose=verbose, overwrite=overwrite)
# if verbose:
# print('Getting Parallax factors using Seed: \t%s' % horizons_file_seed)
else:
xyzdata = read_ephemeris(horizons_file_seed)
Xip = interp1d(xyzdata['JDTDB'],xyzdata['X'], kind='linear', copy=True, bounds_error=True,fill_value=np.nan)
Yip = interp1d(xyzdata['JDTDB'],xyzdata['Y'], kind='linear', copy=True, bounds_error=True,fill_value=np.nan)
Zip = interp1d(xyzdata['JDTDB'],xyzdata['Z'], kind='linear', copy=True, bounds_error=True,fill_value=np.nan)
try:
parallax_factor_ra = ephFactor* (Xip(time_jd) * np.sin(ra_rad) - Yip(time_jd) * np.cos(ra_rad))
parallax_factor_dec = ephFactor*((Xip(time_jd) * np.cos(ra_rad) + Yip(time_jd) * np.sin(ra_rad)) * np.sin(de_rad) - Zip(time_jd) * np.cos(de_rad))
except ValueError:
raise ValueError('Error in time interpolation for parallax factors: \n'
'requested range {:3.1f}--{:3.1f} ({}--{})\n'
'available range {:3.1f}--{:3.1f} ({}--{})'.format(np.min(time_jd), np.max(time_jd), Time(np.min(time_jd), format='jd', scale='utc').iso, Time(np.max(time_jd), format='jd', scale='utc').iso, np.min(xyzdata['JDTDB']), np.max(xyzdata['JDTDB']), Time(np.min(xyzdata['JDTDB']), format='jd', scale='utc').iso, Time(np.max(xyzdata['JDTDB']), format='jd', scale='utc').iso
) )
return [parallax_factor_ra, parallax_factor_dec]
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def pjGetOrbitFast(P_day=100, ecc=0, m1_MS=1, m2_MJ = 1, omega_deg=0, OMEGA_deg=0, i_deg=45, T0_day = 0, plx_mas = 25, t_MJD='', spsi='', cpsi='', verbose=0):
# /* DOCUMENT ARV -- simulate fast 1D astrometry for planet detection limits
# written: J. Sahlmann 18 May 2015 ESAC
# */
m2_MS = m2_MJ * MJ2MS
d_pc = 1./ (plx_mas/1000.)
#**************ASTROMETRY********************************************************
M = Ggrav * (m2_MJ * MJ_kg)**3. / ( m1_MS*MS_kg + m2_MJ*MJ_kg )**2. # mass term for the barycentric orbit of the primary mass
a_m = ( M / (4. * np.pi**2.) * (P_day*day2sec)**2. )**(1./3.) # semimajor axis of the primary mass in m
a_rad = np.arctan2(a_m,d_pc*pc_m)
a_mas = a_rad * rad2mas # semimajor axis in mas
TIC = thiele_innes_constants([a_mas , omega_deg , OMEGA_deg, i_deg]) #Thiele-Innes constants
phi1 = astrom_signalFast(t_MJD,spsi,cpsi,ecc,P_day,T0_day,TIC)
return phi1
def dcr_coefficients(aux):
"""Return DCR parameters following Sahlmann+13.
Parameters
----------
aux : astropy table
Table containing columns with predefined names.
Returns
-------
"""
temp = aux['temperature'].data # Celsius
pres = aux['pressure'].data # mbar
f3m = (1. - (temp - 11.) / (273. + 11.)) * (1. + (pres - 744.) / 744.)
# zenith angle
z_rad = np.deg2rad(90. - aux['tel_altitude'].data)
lat_rad = np.deg2rad(aux['geo_latitude'].data)
dec_rad = np.deg2rad(aux['dec'].data)
azi_rad = np.deg2rad(aux['tel_azimuth'].data)
# hour angle
ha_rad = [sla.slalib.sla_pda2h(lat_rad[i], dec_rad[i], azi_rad[i])[0] for i in range(len(dec_rad))]
# parallactic angle
pa_rad = [sla.slalib.sla_pa(ha_rad[i], dec_rad[i], lat_rad[i]) for i in range(len(dec_rad))]
f1xm = f3m * np.tan(z_rad) * np.sin(pa_rad)
f1ym = f3m * np.tan(z_rad) * np.cos(pa_rad)
# % DCR parameter 1
xfactor = 1
yfactor = 1
xDCRfactor = np.array(xfactor * np.mat(f1xm).T).flatten()
yDCRfactor = np.array(yfactor * np.mat(f1ym).T).flatten()
return xDCRfactor, yDCRfactor
class ImagingAstrometryData(object):
"""Structure class for 2D imaging astrometry."""
def __init__(self, data_table, out_dir=None, data_type='2d', time_column_name='MJD',
simbad_object_name=None):
"""
Parameters
----------
data_table
out_dir
data_type
"""
required_data_table_columns = [time_column_name, 'frame', 'OB']
for column_name in required_data_table_columns:
if column_name not in data_table.colnames:
raise ValueError('Input table has to have a column named: {}'.format(column_name))
# sort data table by increasing time
self.time_column_name = time_column_name
self.simbad_object_name = simbad_object_name
self.data_type = data_type
self.scan_angle_definition = 'hipparcos'
data_table.sort(self.time_column_name)
self.data_table = data_table
# self.epoch_data = data_table
self.number_of_frames = len(np.unique(self.data_table['frame']))
self.number_of_observing_blocks = len(np.unique(self.data_table['OB']))
self.observing_time_span_day = np.ptp(data_table[self.time_column_name])
if data_type=='2d':
# unique Julian dates of observations, i.e. of 2D astrometry
self.observing_times_2D_MJD, unique_index = np.unique(np.array(data_table[self.time_column_name]), return_index=True)
self.data_2D = self.data_table[unique_index]
self.number_of_1D_measurements = 2 * len(self.data_2D)
else:
self.data_1D = self.data_table
self.number_of_1D_measurements = len(self.data_1D)
if out_dir is not None:
self.out_dir = out_dir
else:
self.out_dir = os.getcwd()
def __str__(self):
"""Return string describing the instance."""
description = '\nNumber of OBs: \t {}'.format(self.number_of_observing_blocks)
description += '\nNumber of frames / measurements: \t {} / {}'.format(self.number_of_frames,
self.number_of_1D_measurements)
description += '\nObservation time span: \t {:3.1f} days'.format(self.observing_time_span_day)
return description
def set_object_coordinates(self, RA_deg=None, Dec_deg=None, overwrite=False):
if (self.simbad_object_name is None) & (RA_deg is None) & (Dec_deg is None):
raise ValueError('Provide either a Simbad object name or explicit coordinates (RA_deg, Dec_deg)')
elif (RA_deg is not None) & (Dec_deg is not None):
self.RA_deg = RA_deg
self.Dec_deg = Dec_deg
return
elif self.simbad_object_name is not None:
object_string = self.simbad_object_name.replace(' ','')
outFile = os.path.join(self.out_dir,'%s_simbad_parameters.txt' % object_string)
if (not(os.path.isfile(outFile))) | (overwrite is True):
mySimbad = Simbad()
mySimbad.add_votable_fields('ra(d)','dec(d)','pmdec','pmra','parallax','sptype')
pt = mySimbad.query_object(self.simbad_object_name)
pt.write(outFile, format='ascii.basic',delimiter=',')
else:
pt = Table.read(outFile,format='ascii.basic',delimiter=',')
self.simbad_object_parameters = pt
self.RA_deg = float(self.simbad_object_parameters['RA_d'])
self.Dec_deg = float(self.simbad_object_parameters['DEC_d'])
# for c in ['RA_d','DEC_d','PMDEC','PMRA','PLX_VALUE','SP_TYPE']:
def set_five_parameter_coefficients(self, earth_ephemeris_file_seed=None, verbose=False, reference_epoch_MJD=None, overwrite=False):
"""Set the coefficients of the five linear parameters, i.e. parallax factors and 0,1's for
coordinates.
Parameters
----------
earth_ephemeris_file_seed
verbose
reference_epoch_MJD
overwrite
Returns
-------
"""
required_attributes = ['RA_deg', 'Dec_deg']
for attribute_name in required_attributes:
if hasattr(self, attribute_name) is False:
raise ValueError('Instance has to have an attribute named: {}'.format(attribute_name))
# TODO
# clarify use of tdb here!
observing_times_2D_TDB_JD = Time(self.observing_times_2D_MJD, format='mjd', scale='utc').tdb.jd
# compute parallax factors, this is a 2xN_obs array
observing_parallax_factors = get_parallax_factors(self.RA_deg, self.Dec_deg, observing_times_2D_TDB_JD, horizons_file_seed=earth_ephemeris_file_seed, verbose=verbose, overwrite=overwrite)
# set reference epoch for position and computation of proper motion coefficients tspsi and tcpsi
if reference_epoch_MJD is None:
self.reference_epoch_MJD = np.mean(self.observing_times_2D_MJD)
else:
self.reference_epoch_MJD = reference_epoch_MJD
# time relative to reference epoch in years for proper motion coefficients
observing_relative_time_2D_year = (self.observing_times_2D_MJD - self.reference_epoch_MJD)/year2day
observing_relative_time_1D_year, observing_1D_cpsi, observing_1D_spsi, self.observing_1D_xi, self.observing_1D_yi = get_cpsi_spsi_for_2Dastrometry(observing_relative_time_2D_year)
observing_1D_tcpsi = observing_1D_cpsi * observing_relative_time_1D_year
observing_1D_tspsi = observing_1D_spsi * observing_relative_time_1D_year
observing_1D_ppfact = np.zeros(self.number_of_1D_measurements)
observing_1D_ppfact[self.observing_1D_xi] = observing_parallax_factors[0]
observing_1D_ppfact[self.observing_1D_yi] = observing_parallax_factors[1]
self.five_parameter_coefficients_table = Table(np.array([observing_1D_cpsi,observing_1D_spsi,observing_1D_ppfact,observing_1D_tcpsi,observing_1D_tspsi]).T, names=('cpsi','spsi','ppfact','tcpsi','tspsi'))
self.five_parameter_coefficients_array = np.array([self.five_parameter_coefficients_table[c].data for c in self.five_parameter_coefficients_table.colnames])
self.observing_relative_time_1D_year = observing_relative_time_1D_year
def set_linear_parameter_coefficients(self, earth_ephemeris_file_seed=None, verbose=False, reference_epoch_MJD=None):
if not hasattr(self, 'five_parameter_coefficients'):
self.set_five_parameter_coefficients(earth_ephemeris_file_seed=earth_ephemeris_file_seed, verbose=verbose, reference_epoch_MJD=reference_epoch_MJD)
if ('fx[1]' in self.data_2D.colnames) & ('fx[2]' in self.data_2D.colnames):
# the VLT/FORS2 case with a DCR corrector
tmp_2D = self.data_2D[self.time_column_name,'fx[1]','fy[1]','fx[2]','fy[2]'] #,'RA*_mas','DE_mas','sRA*_mas','sDE_mas','OB','frame']
elif ('fx[1]' in self.data_2D.colnames) & ('fx[2]' not in self.data_2D.colnames):
# for GTC/OSIRIS, Gemini/GMOS-N/GMOS-S, VLT/HAWK-I
tmp_2D = self.data_2D[self.time_column_name, 'fx[1]', 'fy[1]']
elif ('fx[1]' not in self.data_2D.colnames) & ('fx[2]' not in self.data_2D.colnames):
# anything else, e.g. RECONS, there is no DCR correction to be applied
# tmp_2D = self.data_2D[[self.time_column_name]]
self.linear_parameter_coefficients_table = self.five_parameter_coefficients_table
self.linear_parameter_coefficients_array = np.array(
[self.linear_parameter_coefficients_table[c].data for c in
self.linear_parameter_coefficients_table.colnames])
return
tmp_1D = tablevstack( (tmp_2D,tmp_2D) )
tmp_1D.sort(self.time_column_name)
# sign factors to get DCR coefficients right
xfactor = -1
yfactor = 1
if 'fx[1]' in self.data_2D.colnames:
tmp_1D.add_column(Column(name='rho_factor',data=np.zeros(len(tmp_1D))))
tmp_1D['rho_factor'][self.observing_1D_xi] = xfactor * tmp_1D['fx[1]'][self.observing_1D_xi]
tmp_1D['rho_factor'][self.observing_1D_yi] = yfactor * tmp_1D['fy[1]'][self.observing_1D_yi]
if 'fx[2]' in self.data_2D.colnames:
tmp_1D.add_column(Column(name='d_factor',data=np.zeros(len(tmp_1D))))
tmp_1D['d_factor'][self.observing_1D_xi] = xfactor * tmp_1D['fx[2]'][self.observing_1D_xi]
tmp_1D['d_factor'][self.observing_1D_yi] = yfactor * tmp_1D['fy[2]'][self.observing_1D_yi]
if self.instrument == 'FORS2':
self.dcr_parameter_coefficients_table = tmp_1D['rho_factor','d_factor']
else:
self.dcr_parameter_coefficients_table = tmp_1D[['rho_factor']]
self.dcr_parameter_coefficients_array = np.array([self.dcr_parameter_coefficients_table[c].data for c in self.dcr_parameter_coefficients_table.colnames])
self.linear_parameter_coefficients_table = tablehstack((self.five_parameter_coefficients_table, self.dcr_parameter_coefficients_table))
self.linear_parameter_coefficients_array = np.array([self.linear_parameter_coefficients_table[c].data for c in self.linear_parameter_coefficients_table.colnames])
def set_data_1D(self, earth_ephemeris_file_seed=None, verbose=False, reference_epoch_MJD=None):
tmp_2D = self.data_2D[self.time_column_name,'RA*_mas','DE_mas','sRA*_mas','sDE_mas','OB','frame']
tmp_1D = tablevstack( (tmp_2D,tmp_2D) )
tmp_1D.sort(self.time_column_name)
if not hasattr(self, 'linear_parameter_coefficients'):
self.set_linear_parameter_coefficients(earth_ephemeris_file_seed=earth_ephemeris_file_seed, verbose=verbose, reference_epoch_MJD=reference_epoch_MJD)
data_1D = tmp_1D[[self.time_column_name]]
# astrometric measurement ('abscissa') and uncertainty
data_1D.add_column(Column(name='da_mas',data=np.zeros(len(data_1D))))
data_1D.add_column(Column(name='sigma_da_mas',data=np.zeros(len(data_1D))))
data_1D['da_mas'][self.observing_1D_xi] = tmp_1D['RA*_mas'][self.observing_1D_xi]
data_1D['da_mas'][self.observing_1D_yi] = tmp_1D['DE_mas'][self.observing_1D_yi]
data_1D['sigma_da_mas'][self.observing_1D_xi] = tmp_1D['sRA*_mas'][self.observing_1D_xi]
data_1D['sigma_da_mas'][self.observing_1D_yi] = tmp_1D['sDE_mas'][self.observing_1D_yi]
for col in ['OB','frame']:
data_1D[col] = tmp_1D[col]
linear_parameter_coefficients_table = self.linear_parameter_coefficients_table
# linear_parameter_coefficients.remove_column(self.time_column_name)
self.data_1D = tablehstack((data_1D, linear_parameter_coefficients_table))
self.observing_times_1D_MJD = self.data_1D[self.time_column_name].data #np.array(data_table[self.time_column_name])
def get_theta_best_genome(best_genome_file, reference_time_MJD, theta_names, m1_MS, instrument=None,
verbose=False):
"""
:param best_genome_file:
:param reference_time_MJD:
:param theta_names:
:param m1_MS:
:param instrument:
:param verbose:
:return:
"""
parameters = []
best_genome = Table.read(best_genome_file, format='ascii.basic', data_start=2, delimiter=',', guess=False)
if instrument.lower() != 'fors2':
best_genome.remove_column('d_mas')
# if verbose:
if 0:
for i in range(len(best_genome)):
for c in best_genome.colnames:
print('Planet %d: %s \t %3.3f' % (i+1, c, best_genome[c][i]))
thiele_innes_constants = np.array([best_genome[c] for c in ['A','B','F','G']])
a_mas, omega_deg, OMEGA_deg, i_deg = geometric_elements(thiele_innes_constants)
d_pc = 1./ (best_genome['plx_mas'].data.data /1000.)
P_day = best_genome['P_day'].data.data
a_m = a_mas / 1.e3 * d_pc * AU_m
m1_kg = m1_MS * MS_kg
m2_kg = pjGet_m2( m1_kg, a_m, P_day )
# m2_kg = keplerian_secondary_mass( m1_kg, a_m, P_day )
m2_MS = m2_kg / MS_kg
# print(m2_MS)
m2_MJ = m2_kg / MJ_kg
TRef_MJD = reference_time_MJD
# MIKS-GA computes T0 relative to the average time
if verbose:
for i in range(len(best_genome)):
print('Planet %d: Phi0 = %f' % (i+1,best_genome['Tp_day'][i]))
print('Planet %d: m2_MJ = %f' % (i+1, m2_MJ[i]))
best_genome['Tp_day'] += TRef_MJD
best_genome['a_mas'] = a_mas
best_genome['omega_deg'] = omega_deg
best_genome['i_deg'] = i_deg
best_genome['OMEGA_deg'] = OMEGA_deg
best_genome['m1_MS'] = m1_MS
best_genome['m2_MS'] = m2_MS
# col_list = theta_names #np.array(['P_day','ecc','m1_MS','m2_MS','omega_deg','Tp_day','dRA0_mas','dDE0_mas','plx_mas','muRA_mas','muDE_mas','rho_mas','d_mas','OMEGA_deg','i_deg'])
for i in range(len(best_genome)):
# generate dictionary
theta = {c: best_genome[c][i] for c in best_genome.colnames}
parameters.append(theta)
if verbose:
for i in range(len(best_genome)):
theta = parameters[i]
for key,value in theta.items():
print('Planet %d: Adopted: %s \t %3.3f' % (i, key, value))
# return theta_best_genome
return parameters
| [] | [] | ["EPHEMERIS_DIRECTORY"] | [] | ["EPHEMERIS_DIRECTORY"] | python | 1 | 0 | |
python/Flask-ui/routes.py | import os
from flask import Flask, render_template, request, session, redirect, url_for, make_response, Response, send_from_directory, abort, send_file
from models import db, User
from forms import SignupForm, Loginform
def runserver():
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://localhost/learningflask'
db.init_app(app)
app.secret_key = "development-key"
@app.route("/")
@app.route("/home")
@app.route("/about")
@app.route("/bootstrap")
def basic_pages():
return make_response(open('static/index.html').read())
#def home():
# return render_template("index.html")
@app.route("/version")
def version():
return make_response(open('static/versions-mapping.json').read())
#@app.route("/about")
#def about():
# return render_template("../static/partials/about.html")
@app.route("/signup", methods=["GET", "POST"])
def signup():
if 'email' in session:
return redirect(url_for('apppage'))
form = SignupForm()
if request.method == "POST":
if form.validate() == False:
return render_template("../static/partials/signup.html", form=form)
else:
newuser = User(form.first_name.data, form.last_name.data, form.email.data, form.password.data)
db.session.add(newuser)
db.session.commit()
session['email'] = newuser.email
return redirect(url_for('apppage'))
elif request.method == "GET":
return render_template("../static/partials/signup.html", form=form)
@app.route("/login", methods=["GET", "POST"])
def login():
if 'email' in session:
return redirect(url_for('apppage'))
form = Loginform()
if request.method == "POST":
if form.validate() == False:
return render_template("../static/partials/login.html", form=form)
else:
email = form.email.data
password = form.password.data
user = User.query.filter_by(email=email).first()
if user is not None and user.check_password(password):
session['email'] = form.email.data
return redirect(url_for('apppage'))
else:
return redirect(url_for('login'))
elif request.method == "GET":
return render_template("../static/partials/login.html", form=form)
@app.route("/logout")
def logout():
session.pop('email', None)
return redirect(url_for('home'))
#@app.route("/main")
#def main():
# return render_template("app.html")
#@app.route("/partials/london")
#def london(london):
# return send_file("/partials/")
#
#@app.route("/home/<location>")
#def app_location(location):
# return render_template("/static/partials/{}.html".format(location))
@app.route("/apppage")
def apppage():
if 'email' not in session:
return redirect(url_for('login'))
return render_template("../static/partials/app.html")
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'img/favicon.ico')
#@app.errorhandler(404)
#def page_not_found(#):
# return render_template('404.html'), 404
if __name__ == '__main__':
app.run(debug=True)
| [] | [] | ["PORT"] | [] | ["PORT"] | python | 1 | 0 | |
main.go | package main
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
log "github.com/Sirupsen/logrus"
"github.com/mitchellh/go-homedir"
)
const (
DEFAULT_GOLANG = "1.10.3"
DEFAULT_GOOS = runtime.GOOS
DEFAULT_GOARCH = runtime.GOARCH
DEFAULT_DOWNLOAD_BASE = "https://storage.googleapis.com/golang/"
EXTRACTED_CANARY = "go-extracted"
SHA_EXTENSION = ".sha256"
RUNGO_VERSION = "0.0.8"
)
func main() {
verbose := os.Getenv("RUNGO_VERBOSE")
if verbose != "" {
log.SetLevel(log.DebugLevel)
} else {
log.SetLevel(log.InfoLevel)
}
log.SetFormatter(&log.TextFormatter{DisableColors: true})
log.Debugf("Starting rungo version %s", RUNGO_VERSION)
// Find the version requested
version := findVersion()
// Find the user's home directory
homeDir, err := homedir.Dir()
if err != nil {
log.Fatalf("Failed to determine home directory: %v", err)
}
// baseDir of all file operations for this go version
baseDir := filepath.Join(homeDir, DEFAULT_HOME_INSTALL_LOCATION, version)
// Form URL to download golangArchive
downloadBase := os.Getenv("RUNGO_DOWNLOAD_BASE")
if downloadBase == "" {
downloadBase = DEFAULT_DOWNLOAD_BASE
}
fileUrl := downloadBase + fmt.Sprintf(DEFAULT_ARCHIVE_NAME, version, DEFAULT_GOOS, DEFAULT_GOARCH)
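	// DEFAULT_HOME_INSTALL_LOCATION and DEFAULT_ARCHIVE_NAME are assumed to be declared in
	// another file of this package (e.g. a per-platform constants file); they are not shown here.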
// Location on the filesystem to store the golang archive
golangArchive := filepath.Join(baseDir, path.Base(fileUrl))
sha256sum, err := fetchSha256(fileUrl+SHA_EXTENSION, golangArchive+SHA_EXTENSION)
if err != nil {
log.Fatalf("Failed to fetch sha256: %v", err)
}
err = downloadFile(fileUrl, sha256sum, golangArchive)
if err != nil {
log.Fatalf("Failed to download: %v", err)
}
// Extract golang archive
canaryFile := filepath.Join(baseDir, EXTRACTED_CANARY) // File that signals extraction has already occurred
if fileExists(canaryFile) {
log.Debugf("Skipping extraction due to presence of canary at %q", canaryFile)
} else {
// Remove extracted canary, if exists
_ = os.Remove(filepath.Join(baseDir, EXTRACTED_CANARY))
err = extractFile(golangArchive, baseDir)
if err != nil {
log.Fatalf("Failed to extract: %v", err)
}
ioutil.WriteFile(canaryFile, []byte(""), 0755)
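		// Note that the canary write error is ignored; a missing canary only causes the
		// archive to be re-extracted on the next run.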
log.Debugf("Successfully extracted %q", golangArchive)
}
// Run go command
setGoRoot(baseDir)
binary := filepath.Base(os.Args[0])
if binary == "rungo" {
binary = "go"
} else if binary == "rungo.exe" {
binary = "go.exe"
}
err = runGo(binary, baseDir, os.Args[1:])
if err != nil {
log.Fatalf("command failed: %v", err)
}
}
| ["\"RUNGO_VERBOSE\"", "\"RUNGO_DOWNLOAD_BASE\""] | [] | ["RUNGO_DOWNLOAD_BASE", "RUNGO_VERBOSE"] | [] | ["RUNGO_DOWNLOAD_BASE", "RUNGO_VERBOSE"] | go | 2 | 0 | |
tests/cmd/stability/main.go | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"net/http"
_ "net/http/pprof"
"os"
"time"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/tests"
"github.com/pingcap/tidb-operator/tests/pkg/apimachinery"
"github.com/pingcap/tidb-operator/tests/pkg/client"
"github.com/pingcap/tidb-operator/tests/pkg/fixture"
"github.com/pingcap/tidb-operator/tests/pkg/metrics"
"github.com/pingcap/tidb-operator/tests/slack"
"github.com/robfig/cron"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/component-base/logs"
"k8s.io/kubernetes/test/e2e/framework/log"
)
var cfg *tests.Config
var certCtx *apimachinery.CertContext
var upgradeVersions []string
func init() {
client.RegisterFlags()
}
func main() {
logs.InitLogs()
defer logs.FlushLogs()
go func() {
if err := http.ListenAndServe(":6060", nil); err != nil {
log.Failf(err.Error())
}
}()
metrics.StartServer()
cfg = tests.ParseConfigOrDie()
upgradeVersions = cfg.GetUpgradeTidbVersionsOrDie()
ns := os.Getenv("NAMESPACE")
var err error
certCtx, err = apimachinery.SetupServerCert(ns, tests.WebhookServiceName)
if err != nil {
panic(err)
}
go tests.StartValidatingAdmissionWebhookServerOrDie(certCtx)
c := cron.New()
if err := c.AddFunc("0 0 10 * * *", func() {
slack.NotifyAndCompletedf("Succeed %d times in the past 24 hours.", slack.SuccessCount)
slack.SuccessCount = 0
}); err != nil {
panic(err)
}
go c.Start()
wait.Forever(run, 5*time.Minute)
}
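// run executes one full pass of the stability scenario; wait.Forever above re-invokes it
// with a 5 minute pause between passes.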
func run() {
cli, kubeCli, asCli, aggrCli, apiExtCli := client.NewCliOrDie()
ocfg := newOperatorConfig()
cluster1 := newTidbClusterConfig("ns1", "cluster1")
cluster2 := newTidbClusterConfig("ns2", "cluster2")
cluster3 := newTidbClusterConfig("ns2", "cluster3")
directRestoreCluster1 := newTidbClusterConfig("ns1", "restore1")
fileRestoreCluster1 := newTidbClusterConfig("ns1", "file-restore1")
directRestoreCluster2 := newTidbClusterConfig("ns2", "restore2")
fileRestoreCluster2 := newTidbClusterConfig("ns2", "file-restore2")
onePDCluster1 := newTidbClusterConfig("ns1", "one-pd-cluster-1")
onePDCluster2 := newTidbClusterConfig("ns2", "one-pd-cluster-2")
onePDCluster1.Clustrer.Spec.PD.Replicas = 1
onePDCluster2.Clustrer.Spec.PD.Replicas = 1
allClusters := []*tests.TidbClusterConfig{
cluster1,
cluster2,
cluster3,
directRestoreCluster1,
fileRestoreCluster1,
directRestoreCluster2,
fileRestoreCluster2,
onePDCluster1,
onePDCluster2,
}
deployedClusters := make([]*tests.TidbClusterConfig, 0)
addDeployedClusterFn := func(cluster *tests.TidbClusterConfig) {
for _, tc := range deployedClusters {
if tc.Namespace == cluster.Namespace && tc.ClusterName == cluster.ClusterName {
return
}
}
deployedClusters = append(deployedClusters, cluster)
}
fta := tests.NewFaultTriggerAction(cli, kubeCli, cfg)
fta.CheckAndRecoverEnvOrDie()
oa := tests.NewOperatorActions(cli, kubeCli, asCli, aggrCli, apiExtCli, tests.DefaultPollInterval, ocfg, cfg, allClusters, nil, nil)
oa.CheckK8sAvailableOrDie(nil, nil)
oa.LabelNodesOrDie()
go oa.RunEventWorker()
oa.CleanOperatorOrDie(ocfg)
oa.DeployOperatorOrDie(ocfg)
crdUtil := tests.NewCrdTestUtil(cli, kubeCli, asCli, kubeCli.AppsV1())
log.Logf(fmt.Sprintf("allclusters: %v", allClusters))
crdUtil.CleanResourcesOrDie("tc", "ns1")
crdUtil.CleanResourcesOrDie("tc", "ns2")
crdUtil.CleanResourcesOrDie("pvc", "ns1")
crdUtil.CleanResourcesOrDie("pvc", "ns2")
crdUtil.CleanResourcesOrDie("secret", "ns1")
crdUtil.CleanResourcesOrDie("secret", "ns2")
crdUtil.CleanResourcesOrDie("pod", "ns1")
crdUtil.CleanResourcesOrDie("pod", "ns2")
caseFn := func(clusters []*tests.TidbClusterConfig, onePDCluster *tests.TidbClusterConfig, backupTargets []tests.BackupTarget, upgradeVersion string) {
// check env
fta.CheckAndRecoverEnvOrDie()
oa.CheckK8sAvailableOrDie(nil, nil)
//deploy and clean the one-pd-cluster
onePDTC := onePDCluster.Clustrer
crdUtil.CreateTidbClusterOrDie(onePDTC)
crdUtil.WaitTidbClusterReadyOrDie(onePDTC, 60*time.Minute)
crdUtil.DeleteTidbClusterOrDie(onePDTC)
// deploy
for _, cluster := range clusters {
tc := cluster.Clustrer
crdUtil.CreateTidbClusterOrDie(tc)
secret := buildSecret(cluster)
crdUtil.CreateSecretOrDie(secret)
addDeployedClusterFn(cluster)
}
for _, cluster := range clusters {
tc := cluster.Clustrer
crdUtil.WaitTidbClusterReadyOrDie(tc, 60*time.Minute)
crdUtil.CheckDisasterToleranceOrDie(tc)
oa.BeginInsertDataToOrDie(cluster)
}
log.Logf("clusters deployed and checked")
slack.NotifyAndCompletedf("clusters deployed and checked, ready to run stability test")
// upgrade
namespace := os.Getenv("NAMESPACE")
oa.RegisterWebHookAndServiceOrDie(ocfg.WebhookConfigName, namespace, ocfg.WebhookServiceName, certCtx)
for _, cluster := range clusters {
cluster.Clustrer.Spec.Version = upgradeVersion
crdUtil.UpdateTidbClusterOrDie(cluster.Clustrer)
crdUtil.WaitTidbClusterReadyOrDie(cluster.Clustrer, 60*time.Minute)
}
log.Logf("clusters upgraded in checked")
// configuration change
for _, cluster := range clusters {
cluster.Clustrer.Spec.PD.Replicas = int32(cfg.PDMaxReplicas)
cluster.Clustrer.Spec.TiKV.Config.Set("server.grpc-concurrency", cfg.TiKVGrpcConcurrency)
cluster.Clustrer.Spec.TiDB.Config.Set("token-limit", cfg.TiDBTokenLimit)
crdUtil.UpdateTidbClusterOrDie(cluster.Clustrer)
crdUtil.WaitTidbClusterReadyOrDie(cluster.Clustrer, 60*time.Minute)
}
oa.CleanWebHookAndServiceOrDie(ocfg.WebhookConfigName)
log.Logf("clusters configurations updated in checked")
for _, cluster := range clusters {
crdUtil.CheckDisasterToleranceOrDie(cluster.Clustrer)
}
log.Logf("clusters DisasterTolerance checked")
//stop node
physicalNode, node, faultTime := fta.StopNodeOrDie()
oa.EmitEvent(nil, fmt.Sprintf("StopNode: %s on %s", node, physicalNode))
oa.CheckFailoverPendingOrDie(deployedClusters, node, &faultTime)
oa.CheckFailoverOrDie(deployedClusters, node)
time.Sleep(3 * time.Minute)
fta.StartNodeOrDie(physicalNode, node)
oa.EmitEvent(nil, fmt.Sprintf("StartNode: %s on %s", node, physicalNode))
oa.WaitPodOnNodeReadyOrDie(deployedClusters, node)
oa.CheckRecoverOrDie(deployedClusters)
for _, cluster := range deployedClusters {
crdUtil.WaitTidbClusterReadyOrDie(cluster.Clustrer, 30*time.Minute)
}
log.Logf("clusters node stopped and restarted checked")
slack.NotifyAndCompletedf("stability test: clusters node stopped and restarted checked")
// truncate tikv sst file
oa.TruncateSSTFileThenCheckFailoverOrDie(clusters[0], 5*time.Minute)
log.Logf("clusters truncate sst file and checked failover")
slack.NotifyAndCompletedf("stability test: clusters truncate sst file and checked failover")
// delete pd data
oa.DeletePDDataThenCheckFailoverOrDie(clusters[0], 5*time.Minute)
log.Logf("cluster[%s/%s] DeletePDDataThenCheckFailoverOrDie success", clusters[0].Namespace, clusters[0].ClusterName)
slack.NotifyAndCompletedf("stability test: DeletePDDataThenCheckFailoverOrDie success")
// stop one etcd
faultEtcd := tests.SelectNode(cfg.ETCDs)
fta.StopETCDOrDie(faultEtcd)
defer fta.StartETCDOrDie(faultEtcd)
time.Sleep(3 * time.Minute)
oa.CheckEtcdDownOrDie(ocfg, deployedClusters, faultEtcd)
fta.StartETCDOrDie(faultEtcd)
log.Logf("clusters stop on etcd and restart")
// stop all etcds
fta.StopETCDOrDie()
time.Sleep(10 * time.Minute)
fta.StartETCDOrDie()
oa.CheckEtcdDownOrDie(ocfg, deployedClusters, "")
log.Logf("clusters stop all etcd and restart")
// stop all kubelets
fta.StopKubeletOrDie()
time.Sleep(10 * time.Minute)
fta.StartKubeletOrDie()
oa.CheckKubeletDownOrDie(ocfg, deployedClusters, "")
log.Logf("clusters stop all kubelets and restart")
// stop all kube-proxy and k8s/operator/tidbcluster is available
fta.StopKubeProxyOrDie()
oa.CheckKubeProxyDownOrDie(ocfg, clusters)
fta.StartKubeProxyOrDie()
log.Logf("clusters stop all kube-proxy and restart")
// stop all kube-scheduler pods
for _, physicalNode := range cfg.APIServers {
for _, vNode := range physicalNode.Nodes {
fta.StopKubeSchedulerOrDie(vNode.IP)
}
}
oa.CheckKubeSchedulerDownOrDie(ocfg, clusters)
for _, physicalNode := range cfg.APIServers {
for _, vNode := range physicalNode.Nodes {
fta.StartKubeSchedulerOrDie(vNode.IP)
}
}
log.Logf("clusters stop all kube-scheduler and restart")
// stop all kube-controller-manager pods
for _, physicalNode := range cfg.APIServers {
for _, vNode := range physicalNode.Nodes {
fta.StopKubeControllerManagerOrDie(vNode.IP)
}
}
oa.CheckKubeControllerManagerDownOrDie(ocfg, clusters)
for _, physicalNode := range cfg.APIServers {
for _, vNode := range physicalNode.Nodes {
fta.StartKubeControllerManagerOrDie(vNode.IP)
}
}
log.Logf("clusters stop all kube-controller and restart")
// stop one kube-apiserver pod
faultApiServer := tests.SelectNode(cfg.APIServers)
log.Logf("fault ApiServer Node name = %s", faultApiServer)
fta.StopKubeAPIServerOrDie(faultApiServer)
defer fta.StartKubeAPIServerOrDie(faultApiServer)
time.Sleep(3 * time.Minute)
oa.CheckOneApiserverDownOrDie(ocfg, clusters, faultApiServer)
fta.StartKubeAPIServerOrDie(faultApiServer)
log.Logf("clusters stop one kube-apiserver and restart")
time.Sleep(time.Minute)
// stop all kube-apiserver pods
for _, physicalNode := range cfg.APIServers {
for _, vNode := range physicalNode.Nodes {
fta.StopKubeAPIServerOrDie(vNode.IP)
}
}
oa.CheckAllApiserverDownOrDie(ocfg, clusters)
for _, physicalNode := range cfg.APIServers {
for _, vNode := range physicalNode.Nodes {
fta.StartKubeAPIServerOrDie(vNode.IP)
}
}
log.Logf("clusters stop all kube-apiserver and restart")
time.Sleep(time.Minute)
}
// before operator upgrade
preUpgrade := []*tests.TidbClusterConfig{
cluster1,
cluster2,
}
backupTargets := []tests.BackupTarget{
{
TargetCluster: directRestoreCluster1,
IsAdditional: false,
IncrementalType: tests.DbTypeTiDB,
},
}
if ocfg.Tag != "v1.0.0" {
backupTargets = append(backupTargets, tests.BackupTarget{
TargetCluster: fileRestoreCluster1,
IsAdditional: true,
IncrementalType: tests.DbTypeFile,
})
}
caseFn(preUpgrade, onePDCluster1, backupTargets, upgradeVersions[0])
// after operator upgrade
if cfg.UpgradeOperatorImage != "" && cfg.UpgradeOperatorTag != "" {
ocfg.Image = cfg.UpgradeOperatorImage
ocfg.Tag = cfg.UpgradeOperatorTag
oa.UpgradeOperatorOrDie(ocfg)
postUpgrade := []*tests.TidbClusterConfig{
cluster3,
cluster1,
cluster2,
}
v := upgradeVersions[0]
if len(upgradeVersions) == 2 {
v = upgradeVersions[1]
}
postUpgradeBackupTargets := []tests.BackupTarget{
{
TargetCluster: directRestoreCluster2,
IsAdditional: false,
IncrementalType: tests.DbTypeTiDB,
},
}
if ocfg.Tag != "v1.0.0" {
postUpgradeBackupTargets = append(postUpgradeBackupTargets, tests.BackupTarget{
TargetCluster: fileRestoreCluster2,
IsAdditional: true,
IncrementalType: tests.DbTypeFile,
})
}
// caseFn(postUpgrade, restoreCluster2, tidbUpgradeVersion)
caseFn(postUpgrade, onePDCluster2, postUpgradeBackupTargets, v)
}
for _, cluster := range allClusters {
oa.StopInsertDataTo(cluster)
}
slack.SuccessCount++
slack.NotifyAndCompletedf("Succeed stability onetime")
log.Logf("################## Stability test finished at: %v\n\n\n\n", time.Now().Format(time.RFC3339))
}
func newOperatorConfig() *tests.OperatorConfig {
return &tests.OperatorConfig{
Namespace: "pingcap",
ReleaseName: "operator",
Image: cfg.OperatorImage,
Tag: cfg.OperatorTag,
ControllerManagerReplicas: tests.IntPtr(2),
SchedulerImage: "gcr.io/google-containers/hyperkube",
SchedulerReplicas: tests.IntPtr(2),
Features: []string{
"StableScheduling=true",
},
LogLevel: "2",
WebhookServiceName: tests.WebhookServiceName,
WebhookSecretName: "webhook-secret",
WebhookConfigName: "webhook-config",
ImagePullPolicy: v1.PullAlways,
TestMode: true,
WebhookEnabled: true,
PodWebhookEnabled: false,
StsWebhookEnabled: true,
}
}
func newTidbClusterConfig(ns, clusterName string) *tests.TidbClusterConfig {
tidbVersion := cfg.GetTiDBVersionOrDie()
topologyKey := "rack"
tc := fixture.GetTidbCluster(ns, clusterName, tidbVersion)
tc.Spec.ConfigUpdateStrategy = v1alpha1.ConfigUpdateStrategyRollingUpdate
return &tests.TidbClusterConfig{
Namespace: ns,
ClusterName: clusterName,
OperatorTag: cfg.OperatorTag,
PDImage: fmt.Sprintf("pingcap/pd:%s", tidbVersion),
TiKVImage: fmt.Sprintf("pingcap/tikv:%s", tidbVersion),
TiDBImage: fmt.Sprintf("pingcap/tidb:%s", tidbVersion),
PumpImage: fmt.Sprintf("pingcap/tidb-binlog:%s", tidbVersion),
UserName: "root",
Password: "",
InitSecretName: fmt.Sprintf("%s-set-secret", clusterName),
BackupSecretName: fmt.Sprintf("%s-backup-secret", clusterName),
BackupName: "backup",
Resources: map[string]string{
"pd.resources.limits.cpu": "1000m",
"pd.resources.limits.memory": "2Gi",
"pd.resources.requests.cpu": "200m",
"pd.resources.requests.memory": "1Gi",
"tikv.resources.limits.cpu": "8000m",
"tikv.resources.limits.memory": "16Gi",
"tikv.resources.requests.cpu": "1000m",
"tikv.resources.requests.memory": "2Gi",
"tidb.resources.limits.cpu": "8000m",
"tidb.resources.limits.memory": "8Gi",
"tidb.resources.requests.cpu": "500m",
"tidb.resources.requests.memory": "1Gi",
"monitor.persistent": "true",
"discovery.image": cfg.OperatorImage,
"tikv.defaultcfBlockCacheSize": "8GB",
"tikv.writecfBlockCacheSize": "2GB",
"pvReclaimPolicy": "Delete",
},
Args: map[string]string{
"binlog.drainer.workerCount": "1024",
"binlog.drainer.txnBatch": "512",
},
Monitor: true,
BlockWriteConfig: cfg.BlockWriter,
TopologyKey: topologyKey,
ClusterVersion: tidbVersion,
EnableConfigMapRollout: true,
Clustrer: tc,
}
}
func buildSecret(info *tests.TidbClusterConfig) *corev1.Secret {
backupSecret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: info.BackupSecretName,
Namespace: info.Namespace,
},
Data: map[string][]byte{
"user": []byte(info.UserName),
"password": []byte(info.Password),
},
Type: corev1.SecretTypeOpaque,
}
return &backupSecret
}
| ["\"NAMESPACE\"", "\"NAMESPACE\""] | [] | ["NAMESPACE"] | [] | ["NAMESPACE"] | go | 1 | 0 | |
DNA2VEC/train.py | # Use GPU mode, otherwise training will never finish
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from model import get_model
import numpy as np
import keras
from keras.callbacks import Callback
from datetime import datetime
from sklearn.metrics import roc_auc_score, average_precision_score
from sklearn.model_selection import train_test_split
'''
2021-04-11 16:53:06.007063: E tensorflow/stream_executor/dnn.cc:616] CUDNN_STATUS_INTERNAL_ERROR
in tensorflow/stream_executor/cuda/cuda_dnn.cc(2011): 'cudnnRNNBackwardData( cudnn.handle(), rnn_desc.handle(),
model_dims.max_seq_length, output_desc.handles(), output_data.opaque(), output_desc.handles(), output_backprop_data.opaque(),
output_h_desc.handle(), output_h_backprop_data.opaque(), output_c_desc.handle(), output_c_backprop_data.opaque(),
rnn_desc.params_handle(), params.opaque(), input_h_desc.handle(), input_h_data.opaque(), input_c_desc.handle(),
input_c_data.opaque(), input_desc.handles(), input_backprop_data->opaque(), input_h_desc.handle(), input_h_backprop_data->opaque(),
input_c_desc.handle(), input_c_backprop_data->opaque(), workspace.opaque(), workspace.size(), reserve_space_data->opaque(), reserve_space_data->size())'
2021-04-11 16:53:06.007530: W tensorflow/core/framework/op_kernel.cc:1767] OP_REQUIRES failed at cudnn_rnn_ops.cc:1922:
Internal: Failed to call ThenRnnBackward with model config: [rnn_mode, rnn_input_mode, rnn_direction_mode]: 3, 0, 0 ,
[num_layers, input_size, num_units, dir_count, max_seq_length, batch_size, cell_num_units]: [1, 64, 50, 1, 100, 32, 0]
2021-04-11 16:53:06.007077: F tensorflow/stream_executor/cuda/cuda_dnn.cc:190] Check failed: status == CUDNN_STATUS_SUCCESS (7 vs. 0)Failed to set cuDNN stream.
Solution: allow GPU memory growth (see below).
'''
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior() # disable for tensorFlow V2
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
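# Enabling memory growth makes TensorFlow allocate GPU memory on demand instead of grabbing
# it all up front, which works around the CUDNN_STATUS_INTERNAL_ERROR described above.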
##############################
#
# Loss/accuracy curve visualization
#
##############################
from matplotlib import pyplot as plt
class PlotProgress(keras.callbacks.Callback):
def __init__(self, entity = ['loss', 'accuracy']):
self.entity = entity
def on_train_begin(self, logs={}):
self.i = 0
self.x = []
self.losses = []
self.val_losses = []
self.accs = []
self.val_accs = []
self.fig = plt.figure()
self.logs = []
def on_epoch_end(self, epoch, logs={}):
self.logs.append(logs)
self.x.append(self.i)
# loss values
self.losses.append(logs.get('{}'.format(self.entity[0])))
self.val_losses.append(logs.get('val_{}'.format(self.entity[0])))
# accuracy values
self.accs.append(logs.get('{}'.format(self.entity[1])))
self.val_accs.append(logs.get('val_{}'.format(self.entity[1])))
self.i += 1
# clear_output(wait=True)
plt.figure(0)
plt.clf() # clear the previous figure
plt.plot(self.x, self.losses, label="{}".format(self.entity[0]))
plt.plot(self.x, self.val_losses, label="val_{}".format(self.entity[0]))
plt.legend()
plt.savefig('loss.jpg')
plt.pause(0.01)
# plt.show()
plt.figure(1)
plt.clf() # clear the previous figure
plt.plot(self.x, self.accs, label="{}".format(self.entity[1]))
plt.plot(self.x, self.val_accs, label="val_{}".format(self.entity[1]))
plt.legend()
plt.savefig('acc.jpg')
plt.pause(0.01)
# plt.show()
class roc_callback(Callback):
def __init__(self, name):
self.name = name
def on_train_begin(self, logs={}):
return
def on_train_end(self, logs={}):
return
def on_epoch_begin(self, epoch, logs={}):
return
def on_epoch_end(self, epoch, logs={}):
self.model.save_weights(
"./model/{0}Model{1}.h5".format(self.name, epoch))
return
def on_batch_begin(self, batch, logs={}):
return
def on_batch_end(self, batch, logs={}):
return
t1 = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
#names = ['GM12878', 'HUVEC', 'HeLa-S3', 'IMR90', 'K562', 'NHEK','all','all-NHEK']
# name=names[0]
# The data used here is the sequence processed by data_processing.py.
'''
names = ['GM12878', 'HUVEC', 'HeLa-S3', 'IMR90', 'K562', 'NHEK']
for name in names:
'''
name = 'X5628FC'
# Data_dir = '/home/ycm/data/%s/' % name
train = np.load('%s_train.npz' % name)
X_en_tra, X_pr_tra, y_tra = train['X_en_tra'], train['X_pr_tra'], train['y_tra']
model = get_model()
model.summary()
print('Training %s cell line specific model ...' % name)
back = roc_callback(name=name)
from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor = 'val_accuracy', patience = 30, restore_best_weights = True)
# live plotting callback
plot_progress = PlotProgress(entity = ['loss', 'accuracy'])
history = model.fit([X_en_tra, X_pr_tra], y_tra, epochs=1000, batch_size=32, validation_split=0.11,
callbacks=[back, early_stopping, plot_progress])
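# validation_split=0.11 holds out the last 11% of the training pairs for validation;
# EarlyStopping monitors val_accuracy and restores the best weights after 30 stagnant epochs.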
t2 = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
model.save('dna2vec_best_model.h5')
print("开始时间:"+t1+"结束时间:"+t2) | []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
score_mask.py | from __future__ import print_function
import os, pdb, sys, glob
# we need to set GPUno first, otherwise may out of memory
stage = int(sys.argv[1])
gpuNO = sys.argv[2]
model_dir = sys.argv[3]
test_dir = sys.argv[4]
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=str(gpuNO)
import StringIO
import scipy.misc
import numpy as np
from skimage.measure import compare_ssim as ssim
from skimage.measure import compare_psnr as psnr
from skimage.color import rgb2gray
# from PIL import Image
import scipy.misc
import tflib
import tflib.inception_score
def l1_mean_dist(x,y):
# return np.sum(np.abs(x-y))
diff = x.astype(float)-y.astype(float)
return np.sum(np.abs(diff))/np.product(x.shape)
def l2_mean_dist(x,y):
# return np.sqrt(np.sum((x-y)**2))
diff = x.astype(float)-y.astype(float)
return np.sqrt(np.sum(diff**2))/np.product(x.shape)
# pdb.set_trace()
if 1==stage:
test_result_dir_x = os.path.join(model_dir, test_dir, 'x_target')
# test_result_dir_x = os.path.join(model_dir, test_dir, 'x')
test_result_dir_G = os.path.join(model_dir, test_dir, 'G')
test_result_dir_mask = os.path.join(model_dir, test_dir, 'mask')
score_path = os.path.join(model_dir, test_dir, 'score_mask.txt')
types = ('*.jpg', '*.png') # the tuple of file types
x_files = []
G_files = []
mask_files = []
for files in types:
x_files.extend(glob.glob(os.path.join(test_result_dir_x, files)))
G_files.extend(glob.glob(os.path.join(test_result_dir_G, files)))
mask_files.extend(glob.glob(os.path.join(test_result_dir_mask, files)))
x_target_list = []
for path in x_files:
x_target_list.append(scipy.misc.imread(path))
G_list = []
for path in G_files:
G_list.append(scipy.misc.imread(path))
mask_target_list = []
for path in mask_files:
mask_target_list.append(scipy.misc.imread(path))
N = len(G_files)
##################### SSIM ##################
ssim_G_x = []
psnr_G_x = []
L1_mean_G_x = []
L2_mean_G_x = []
# x_0_255 = utils_wgan.unprocess_image(x_fixed, 127.5, 127.5)
x_0_255 = x_target_list
for i in xrange(N):
# G_gray = rgb2gray((G_list[i]/127.5-1).clip(min=-1,max=1))
# x_target_gray = rgb2gray((x_target_list[i]/127.5-1).clip(min=-1,max=1))
# gray image, [0,1]
# G_gray = rgb2gray((G_list[i]).clip(min=0,max=255))
# x_target_gray = rgb2gray((x_target_list[i]).clip(min=0,max=255))
# ssim_G_x.append(ssim(G_gray, x_target_gray, data_range=x_target_gray.max()-x_target_gray.min(), multichannel=False))
# psnr_G_x.append(psnr(im_true=x_target_gray, im_test=G_gray, data_range=x_target_gray.max()-x_target_gray.min()))
# L1_mean_G_x.append(l1_mean_dist(G_gray, x_target_gray))
# L2_mean_G_x.append(l2_mean_dist(G_gray, x_target_gray))
# color image
# ssim_G_x.append(ssim(G_list[i], x_target_list[i], multichannel=True))
masked_G_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G_list[i])
masked_x_target_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*x_target_list[i])
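# The 8-bit mask (0/255) is rescaled to [0,1] and broadcast over the colour channels, so
# pixels outside the mask are zeroed in both images before SSIM/PSNR/L1/L2 are computed.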
ssim_G_x.append(ssim(masked_G_array, masked_x_target_array, multichannel=True))
psnr_G_x.append(psnr(im_true=masked_x_target_array, im_test=masked_G_array))
L1_mean_G_x.append(l1_mean_dist(masked_G_array, masked_x_target_array))
L2_mean_G_x.append(l2_mean_dist(masked_G_array, masked_x_target_array))
# pdb.set_trace()
ssim_G_x_mean = np.mean(ssim_G_x)
ssim_G_x_std = np.std(ssim_G_x)
psnr_G_x_mean = np.mean(psnr_G_x)
psnr_G_x_std = np.std(psnr_G_x)
L1_G_x_mean = np.mean(L1_mean_G_x)
L1_G_x_std = np.std(L1_mean_G_x)
L2_G_x_mean = np.mean(L2_mean_G_x)
L2_G_x_std = np.std(L2_mean_G_x)
print('ssim_G_x_mean: %f\n' % ssim_G_x_mean)
print('ssim_G_x_std: %f\n' % ssim_G_x_std)
print('psnr_G_x_mean: %f\n' % psnr_G_x_mean)
print('psnr_G_x_std: %f\n' % psnr_G_x_std)
print('L1_G_x_mean: %f\n' % L1_G_x_mean)
print('L1_G_x_std: %f\n' % L1_G_x_std)
print('L2_G_x_mean: %f\n' % L2_G_x_mean)
print('L2_G_x_std: %f\n' % L2_G_x_std)
# ##################### Inception score ##################
# # IS_G_mean, IS_G_std = tflib.inception_score.get_inception_score(G_list)
# G_list_masked = [np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G_list[i]) for i in range(len(G_list))]
# IS_G_mean, IS_G_std = tflib.inception_score.get_inception_score(G_list_masked)
# print('IS_G_mean: %f\n' % IS_G_mean)
# print('IS_G_std: %f\n' % IS_G_std)
# with open(score_path, 'w') as f:
# f.write('Image number: %d\n' % N)
# f.write('ssim: %.5f +- %.5f ' % (ssim_G_x_mean, ssim_G_x_std))
# f.write('IS: %.5f +- %.5f ' % (IS_G_mean, IS_G_std))
# f.write('psnr: %.5f +- %.5f ' % (psnr_G_x_mean, psnr_G_x_std))
# f.write('L1: %.5f +- %.5f ' % (L1_G_x_mean, L1_G_x_std))
# f.write('L2: %.5f +- %.5f' % (L2_G_x_mean, L2_G_x_std))
## IS of fake data
G_list_masked = [np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G_list[i]) for i in range(len(G_list))]
IS_G_mean, IS_G_std = tflib.inception_score.get_inception_score(G_list_masked)
print('IS_G_mean: %f\n' % IS_G_mean)
print('IS_G_std: %f\n' % IS_G_std)
with open(score_path, 'w') as f:
f.write('Image number: %d\n' % N)
f.write('IS: %.5f +- %.5f ' % (IS_G_mean, IS_G_std))
## IS of real data
# x_target_list_masked = [np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*x_target_list[i]) for i in range(len(x_target_list))]
# IS_G_mean, IS_G_std = tflib.inception_score.get_inception_score(x_target_list_masked)
# print('IS_G_mean: %f\n' % IS_G_mean)
# print('IS_G_std: %f\n' % IS_G_std)
# with open(score_path+'_x_target', 'w') as f:
# f.write('Image number: %d\n' % N)
# f.write('IS: %.5f +- %.5f ' % (IS_G_mean, IS_G_std))
elif 2==stage:
test_result_dir_x = os.path.join(model_dir, test_dir, 'x_target')
test_result_dir_G1 = os.path.join(model_dir, test_dir, 'G1')
test_result_dir_G2 = os.path.join(model_dir, test_dir, 'G2')
test_result_dir_mask = os.path.join(model_dir, test_dir, 'mask')
score_path = os.path.join(model_dir, test_dir, 'score_mask.txt') #
types = ('*.jpg', '*.png') # the tuple of file types
x_files = []
G1_files = []
G2_files = []
mask_files = []
for files in types:
x_files.extend(glob.glob(os.path.join(test_result_dir_x, files)))
G1_files.extend(glob.glob(os.path.join(test_result_dir_G1, files)))
G2_files.extend(glob.glob(os.path.join(test_result_dir_G2, files)))
mask_files.extend(glob.glob(os.path.join(test_result_dir_mask, files)))
x_target_list = []
for path in x_files:
x_target_list.append(scipy.misc.imread(path))
G1_list = []
for path in G1_files:
G1_list.append(scipy.misc.imread(path))
G2_list = []
for path in G2_files:
G2_list.append(scipy.misc.imread(path))
mask_target_list = []
for path in mask_files:
mask_target_list.append(scipy.misc.imread(path))
##################### SSIM G1 ##################
N = len(x_files)
ssim_G_x = []
psnr_G_x = []
L1_mean_G_x = []
L2_mean_G_x = []
# x_0_255 = utils_wgan.unprocess_image(x_fixed, 127.5, 127.5)
# x_0_255 = x_target_list
for i in xrange(N):
# G1_gray = rgb2gray((G1_list[i]/127.5-1).clip(min=-1,max=1))
# x_target_gray = rgb2gray((x_target_list[i]/127.5-1).clip(min=-1,max=1))
# gray image, [0,1]
# G1_gray = rgb2gray((G1_list[i]).clip(min=0,max=255))
# x_target_gray = rgb2gray((x_target_list[i]).clip(min=0,max=255))
# ssim_G_x.append(ssim(G_gray, x_target_gray, data_range=x_target_gray.max()-x_target_gray.min(), multichannel=False))
# psnr_G_x.append(psnr(im_true=x_target_gray, im_test=G1_gray, data_range=x_target_gray.max()-x_target_gray.min()))
# L1_mean_G_x.append(l1_mean_dist(G1_gray, x_target_gray))
# L2_mean_G_x.append(l2_mean_dist(G1_gray, x_target_gray))
# color image
# ssim_G_x.append(ssim(G1_list[i], x_target_list[i], multichannel=True))
masked_G1_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G1_list[i])
masked_x_target_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*x_target_list[i])
ssim_G_x.append(ssim(masked_G1_array, masked_x_target_array, multichannel=True))
psnr_G_x.append(psnr(im_true=masked_x_target_array, im_test=masked_G1_array))
L1_mean_G_x.append(l1_mean_dist(masked_G1_array, masked_x_target_array))
L2_mean_G_x.append(l2_mean_dist(masked_G1_array, masked_x_target_array))
# pdb.set_trace()
ssim_G1_x_mean = np.mean(ssim_G_x)
ssim_G1_x_std = np.std(ssim_G_x)
psnr_G1_x_mean = np.mean(psnr_G_x)
psnr_G1_x_std = np.std(psnr_G_x)
L1_G1_x_mean = np.mean(L1_mean_G_x)
L1_G1_x_std = np.std(L1_mean_G_x)
L2_G1_x_mean = np.mean(L2_mean_G_x)
L2_G1_x_std = np.std(L2_mean_G_x)
print('ssim_G1_x_mean: %f\n' % ssim_G1_x_mean)
print('ssim_G1_x_std: %f\n' % ssim_G1_x_std)
print('psnr_G1_x_mean: %f\n' % psnr_G1_x_mean)
print('psnr_G1_x_std: %f\n' % psnr_G1_x_std)
print('L1_G1_x_mean: %f\n' % L1_G1_x_mean)
print('L1_G1_x_std: %f\n' % L1_G1_x_std)
print('L2_G1_x_mean: %f\n' % L2_G1_x_mean)
print('L2_G1_x_std: %f\n' % L2_G1_x_std)
##################### SSIM G2 ##################
N = len(x_files)
ssim_G_x = []
psnr_G_x = []
L1_mean_G_x = []
L2_mean_G_x = []
# x_0_255 = utils_wgan.unprocess_image(x_fixed, 127.5, 127.5)
# x_0_255 = x_target_list
for i in xrange(N):
# G2_gray = rgb2gray((G2_list[i]/127.5-1).clip(min=-1,max=1))
# x_target_gray = rgb2gray((x_target_list[i]/127.5-1).clip(min=-1,max=1))
# gray image, [0,1]
# G2_gray = rgb2gray((G2_list[i]).clip(min=0,max=255))
# x_target_gray = rgb2gray((x_target_list[i]).clip(min=0,max=255))
# ssim_G_x.append(ssim(G_gray, x_target_gray, data_range=x_target_gray.max()-x_target_gray.min(), multichannel=False))
# psnr_G_x.append(psnr(im_true=x_target_gray, im_test=G2_gray, data_range=x_target_gray.max()-x_target_gray.min()))
# L1_mean_G_x.append(l1_mean_dist(G2_gray, x_target_gray))
# L2_mean_G_x.append(l2_mean_dist(G2_gray, x_target_gray))
# color image
# ssim_G_x.append(ssim(G2_list[i], x_target_list[i], multichannel=True))
masked_G2_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G2_list[i])
masked_x_target_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*x_target_list[i])
ssim_G_x.append(ssim(masked_G2_array, masked_x_target_array, multichannel=True))
psnr_G_x.append(psnr(im_true=masked_x_target_array, im_test=masked_G2_array))
L1_mean_G_x.append(l1_mean_dist(masked_G2_array, masked_x_target_array))
L2_mean_G_x.append(l2_mean_dist(masked_G2_array, masked_x_target_array))
# pdb.set_trace()
ssim_G2_x_mean = np.mean(ssim_G_x)
ssim_G2_x_std = np.std(ssim_G_x)
psnr_G2_x_mean = np.mean(psnr_G_x)
psnr_G2_x_std = np.std(psnr_G_x)
L1_G2_x_mean = np.mean(L1_mean_G_x)
L1_G2_x_std = np.std(L1_mean_G_x)
L2_G2_x_mean = np.mean(L2_mean_G_x)
L2_G2_x_std = np.std(L2_mean_G_x)
print('ssim_G2_x_mean: %f\n' % ssim_G2_x_mean)
print('ssim_G2_x_std: %f\n' % ssim_G2_x_std)
print('psnr_G2_x_mean: %f\n' % psnr_G2_x_mean)
print('psnr_G2_x_std: %f\n' % psnr_G2_x_std)
print('L1_G2_x_mean: %f\n' % L1_G2_x_mean)
print('L1_G2_x_std: %f\n' % L1_G2_x_std)
print('L2_G2_x_mean: %f\n' % L2_G2_x_mean)
print('L2_G2_x_std: %f\n' % L2_G2_x_std)
##################### Inception score ##################
G1_list_masked = [np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G1_list[i]) for i in range(len(G1_list))]
G2_list_masked = [np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G2_list[i]) for i in range(len(G2_list))]
# IS_G1_mean, IS_G1_std = tflib.inception_score.get_inception_score(G1_list)
IS_G1_mean, IS_G1_std = tflib.inception_score.get_inception_score(G1_list_masked)
print('IS_G1_mean: %f\n' % IS_G1_mean)
print('IS_G1_std: %f\n' % IS_G1_std)
# IS_G2_mean, IS_G2_std = tflib.inception_score.get_inception_score(G2_list)
IS_G2_mean, IS_G2_std = tflib.inception_score.get_inception_score(G2_list_masked)
print('IS_G2_mean: %f\n' % IS_G2_mean)
print('IS_G2_std: %f\n' % IS_G2_std)
with open(score_path, 'w') as f:
f.write('N: %d ' % N)
f.write('ssimG1: %.5f +- %.5f ' % (ssim_G1_x_mean, ssim_G1_x_std))
f.write('ISG1: %.5f +- %.5f ' % (IS_G1_mean, IS_G1_std))
f.write('psnrG1: %.5f +- %.5f ' % (psnr_G1_x_mean, psnr_G1_x_std))
f.write('L1G1: %.5f +- %.5f ' % (L1_G1_x_mean, L1_G1_x_std))
f.write('L2G1: %.5f +- %.5f ' % (L2_G1_x_mean, L2_G1_x_std))
f.write('ssimG2: %.5f +- %.5f ' % (ssim_G2_x_mean, ssim_G2_x_std))
f.write('ISG2: %.5f +- %.5f ' % (IS_G2_mean, IS_G2_std))
f.write('psnrG2: %.5f +- %.5f ' % (psnr_G2_x_mean, psnr_G2_x_std))
f.write('L1G2: %.5f +- %.5f ' % (L1_G2_x_mean, L1_G2_x_std))
f.write('L2G2: %.5f +- %.5f' % (L2_G2_x_mean, L2_G2_x_std))
# f.write('ssim_std: %f ' % ssim_G_x_std)
# f.write('IS_mean: %f ' % IS_G_mean)
# f.write('IS_std: %f ' % IS_G_std)
# f.write('psnr_mean: %f ' % psnr_G_x_mean)
# f.write('psnr_std: %f' % psnr_G_x_std)
| [] | [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
cmd/influxd/run/command.go | // Package run is the run (default) subcommand for the influxd command.
package run
import (
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
_ "net/http/pprof"
"os"
"path/filepath"
"runtime"
"strconv"
"time"
"github.com/influxdata/influxdb/logger"
"go.uber.org/zap"
)
const logo = `
8888888 .d888 888 8888888b. 888888b.
888 d88P" 888 888 "Y88b 888 "88b
888 888 888 888 888 888 .88P
888 88888b. 888888 888 888 888 888 888 888 888 8888888K.
888 888 "88b 888 888 888 888 Y8bd8P' 888 888 888 "Y88b
888 888 888 888 888 888 888 X88K 888 888 888 888
888 888 888 888 888 Y88b 888 .d8""8b. 888 .d88P 888 d88P
8888888 888 888 888 888 "Y88888 888 888 8888888P" 8888888P"
`
// Command represents the command executed by "influxd run".
type Command struct {
Version string
Branch string
Commit string
BuildTime string
closing chan struct{}
pidfile string
Closed chan struct{}
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
Logger *zap.Logger
Server *Server
// How to get environment variables. Normally set to os.Getenv, except for tests.
Getenv func(string) string
}
// NewCommand return a new instance of Command.
func NewCommand() *Command {
return &Command{
closing: make(chan struct{}),
Closed: make(chan struct{}),
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
Logger: zap.NewNop(),
}
}
// Run parses the config from args and runs the server.
func (cmd *Command) Run(args ...string) error {
// Parse the command line flags.
options, err := cmd.ParseFlags(args...)
if err != nil {
return err
}
config, err := cmd.ParseConfig(options.GetConfigPath())
if err != nil {
return fmt.Errorf("parse config: %s", err)
}
// Apply any environment variables on top of the parsed config
if err := config.ApplyEnvOverrides(cmd.Getenv); err != nil {
return fmt.Errorf("apply env config: %v", err)
}
// Validate the configuration.
if err := config.Validate(); err != nil {
return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err)
}
var logErr error
if cmd.Logger, logErr = config.Logging.New(cmd.Stderr); logErr != nil {
// assign the default logger
cmd.Logger = logger.New(cmd.Stderr)
}
// Attempt to run pprof on :6060 before startup if debug pprof enabled.
if config.HTTPD.DebugPprofEnabled {
runtime.SetBlockProfileRate(int(1 * time.Second))
runtime.SetMutexProfileFraction(1)
go func() { http.ListenAndServe("localhost:6060", nil) }()
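		// Any error returned by the debug listener is dropped here; the pprof endpoint is best-effort.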
}
// Print sweet InfluxDB logo.
if !config.Logging.SuppressLogo && logger.IsTerminal(cmd.Stdout) {
fmt.Fprint(cmd.Stdout, logo)
}
// Mark start-up in log.
cmd.Logger.Info("InfluxDB starting",
zap.String("version", cmd.Version),
zap.String("branch", cmd.Branch),
zap.String("commit", cmd.Commit))
cmd.Logger.Info("Go runtime",
zap.String("version", runtime.Version()),
zap.Int("maxprocs", runtime.GOMAXPROCS(0)))
// If there was an error on startup when creating the logger, output it now.
if logErr != nil {
cmd.Logger.Error("Unable to configure logger", zap.Error(logErr))
}
// Write the PID file.
if err := cmd.writePIDFile(options.PIDFile); err != nil {
return fmt.Errorf("write pid file: %s", err)
}
cmd.pidfile = options.PIDFile
if config.HTTPD.PprofEnabled {
// Turn on block and mutex profiling.
runtime.SetBlockProfileRate(int(1 * time.Second))
runtime.SetMutexProfileFraction(1) // Collect every sample
}
// Create server from config and start it.
buildInfo := &BuildInfo{
Version: cmd.Version,
Commit: cmd.Commit,
Branch: cmd.Branch,
Time: cmd.BuildTime,
}
s, err := NewServer(config, buildInfo)
if err != nil {
return fmt.Errorf("create server: %s", err)
}
s.Logger = cmd.Logger
s.CPUProfile = options.CPUProfile
s.MemProfile = options.MemProfile
if err := s.Open(); err != nil {
return fmt.Errorf("open server: %s", err)
}
cmd.Server = s
// Begin monitoring the server's error channel.
go cmd.monitorServerErrors()
return nil
}
// Close shuts down the server.
func (cmd *Command) Close() error {
defer close(cmd.Closed)
defer cmd.removePIDFile()
close(cmd.closing)
if cmd.Server != nil {
return cmd.Server.Close()
}
return nil
}
func (cmd *Command) monitorServerErrors() {
newLogger := log.New(cmd.Stderr, "", log.LstdFlags)
for {
select {
case err := <-cmd.Server.Err():
newLogger.Println(err)
case <-cmd.closing:
return
}
}
}
func (cmd *Command) removePIDFile() {
if cmd.pidfile != "" {
if err := os.Remove(cmd.pidfile); err != nil {
cmd.Logger.Error("Unable to remove pidfile", zap.Error(err))
}
}
}
// ParseFlags parses the command line flags from args and returns an options set.
func (cmd *Command) ParseFlags(args ...string) (Options, error) {
var options Options
fs := flag.NewFlagSet("", flag.ContinueOnError)
fs.StringVar(&options.ConfigPath, "config", "", "")
fs.StringVar(&options.PIDFile, "pidfile", "", "")
// Ignore hostname option.
_ = fs.String("hostname", "", "")
fs.StringVar(&options.CPUProfile, "cpuprofile", "", "")
fs.StringVar(&options.MemProfile, "memprofile", "", "")
fs.Usage = func() { fmt.Fprintln(cmd.Stderr, usage) }
if err := fs.Parse(args); err != nil {
return Options{}, err
}
return options, nil
}
// writePIDFile writes the process ID to path.
func (cmd *Command) writePIDFile(path string) error {
// Ignore if path is not set.
if path == "" {
return nil
}
// Ensure the required directory structure exists.
if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {
return fmt.Errorf("mkdir: %s", err)
}
// Retrieve the PID and write it.
pid := strconv.Itoa(os.Getpid())
if err := ioutil.WriteFile(path, []byte(pid), 0666); err != nil {
return fmt.Errorf("write file: %s", err)
}
return nil
}
// ParseConfig parses the config at path.
// It returns a demo configuration if path is blank.
func (cmd *Command) ParseConfig(path string) (*Config, error) {
// Use demo configuration if no config path is specified.
if path == "" {
cmd.Logger.Info("No configuration provided, using default settings")
return NewDemoConfig()
}
cmd.Logger.Info("Loading configuration file", zap.String("path", path))
config := NewConfig()
if err := config.FromTomlFile(path); err != nil {
return nil, err
}
return config, nil
}
const usage = `Runs the InfluxDB server.
Usage: influxd run [flags]
-config <path>
Set the path to the configuration file.
This defaults to the environment variable INFLUXDB_CONFIG_PATH,
~/.influxdb/influxdb.conf, or /etc/influxdb/influxdb.conf if a file
is present at any of these locations.
Disable the automatic loading of a configuration file using
the null device (such as /dev/null).
-pidfile <path>
Write process ID to a file.
-cpuprofile <path>
Write CPU profiling information to a file.
-memprofile <path>
Write memory usage information to a file.`
// Options represents the command line options that can be parsed.
type Options struct {
ConfigPath string
PIDFile string
CPUProfile string
MemProfile string
}
// GetConfigPath returns the config path from the options.
// It will return a path by searching in this order:
// 1. The CLI option in ConfigPath
// 2. The environment variable INFLUXDB_CONFIG_PATH
// 3. The first influxdb.conf file on the path:
// - ~/.influxdb
// - /etc/influxdb
func (opt *Options) GetConfigPath() string {
if opt.ConfigPath != "" {
if opt.ConfigPath == os.DevNull {
return ""
}
return opt.ConfigPath
} else if envVar := os.Getenv("INFLUXDB_CONFIG_PATH"); envVar != "" {
return envVar
}
for _, path := range []string{
os.ExpandEnv("${HOME}/.influxdb/influxdb.conf"),
"/etc/influxdb/influxdb.conf",
} {
if _, err := os.Stat(path); err == nil {
return path
}
}
return ""
}
| ["\"INFLUXDB_CONFIG_PATH\""] | [] | ["INFLUXDB_CONFIG_PATH"] | [] | ["INFLUXDB_CONFIG_PATH"] | go | 1 | 0 | |
src/TestConsole.go | package main
import (
"bufio"
"bytes"
"fmt"
"os"
)
func main() {
DisplayNetCoreEnvironment()
CleanTheDatabase()
StartTheTimer()
StartTheReportingServices()
WaitForExit()
}
// DisplayNetCoreEnvironment Scan user input
func DisplayNetCoreEnvironment() {
fmt.Printf("***Running with ASPNETCORE_ENVIRONMENT = %s***\n", os.Getenv("ASPNETCORE_ENVIRONMENT"))
}
// CleanTheDatabase asks the user if the db should be cleaned and cleans it, if so
func CleanTheDatabase() {
var startTimer string = ScanUserKeyInput("\nDo you want the database to be cleaned:", []byte(`yYnN`), "n")
if startTimer == "y" || startTimer == "Y" {
fmt.Println("Cleaning the database...")
}
}
// StartTheTimer asks the user if the timer should be started and starts it, if so
func StartTheTimer() {
var startTimer string = ScanUserKeyInput("\nDo you want the timer service to be started:", []byte(`yYnN`), "n")
if startTimer == "y" || startTimer == "Y" {
fmt.Println("Starting the timer...")
}
}
// StartTheReportingServices starts the reporting services
func StartTheReportingServices() {
fmt.Println("\nStarting the reporting services...\n")
}
// WaitForExit waits for the console test app to exit
func WaitForExit() {
ScanUserKeyInput("\nPress x or X to exit", []byte(`xX`), "")
}
// ScanUserKeyInput scans user input
func ScanUserKeyInput(prompt string, acceptedInputs []byte, defaultAnswer string) string {
scanner := bufio.NewScanner(os.Stdin)
fmt.Println(prompt)
var answer string = defaultAnswer
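	// answer keeps the default unless the user enters one of the accepted keys below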
for scanner.Scan() {
input := scanner.Bytes()
if len(input) != 0 && bytes.Contains(acceptedInputs, input) != false {
answer = string(input)
break
}
}
return answer
}
| ["\"ASPNETCORE_ENVIRONMENT\""] | [] | ["ASPNETCORE_ENVIRONMENT"] | [] | ["ASPNETCORE_ENVIRONMENT"] | go | 1 | 0 | |
readthedocs/vcs_support/base.py | import logging
from collections import namedtuple
import os
from os.path import basename
import subprocess
from django.template.defaultfilters import slugify
log = logging.getLogger(__name__)
class VCSVersion(object):
"""
Represents a Version (tag or branch) in a VCS.
This class should only be instantiated in BaseVCS subclasses.
It can act as a context manager to temporarily switch to this tag (eg to
build docs for this tag).
"""
def __init__(self, repository, identifier, verbose_name):
self.repository = repository
self.identifier = identifier
self.verbose_name = verbose_name
def __repr__(self):
return "<VCSVersion: %s:%s" % (self.repository.repo_url,
self.verbose_name)
class VCSProject(namedtuple("VCSProject",
"name default_branch working_dir repo_url")):
"""Transient object to encapsulate a projects stuff"""
pass
class BaseCLI(object):
"""
Helper class for CLI-heavy classes.
"""
log_tmpl = 'VCS[{ident}]: {args}'
def __call__(self, *args):
return self.run(args)
def run(self, *args):
"""
:param bits: list of command and args. See `subprocess` docs
"""
process = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.working_dir, shell=False,
env=self.env)
log.info(self.log_tmpl.format(ident=basename(self.working_dir),
args=' '.join(args)))
stdout, stderr = process.communicate()
log.info(self.log_tmpl.format(ident=basename(self.working_dir),
args=stdout))
return (process.returncode, stdout, stderr)
@property
def env(self):
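# Returns a copy of the current environment; subclasses may override this to inject
# extra variables (credentials, locale overrides, etc.) into the spawned VCS process.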
return os.environ.copy()
class BaseVCS(BaseCLI):
"""
Base for VCS Classes.
Built on top of the BaseCLI.
"""
supports_tags = False # Whether this VCS supports tags or not.
supports_branches = False # Whether this VCS supports branches or not.
contribution_backends = []
#==========================================================================
# General methods
#==========================================================================
def __init__(self, project, version):
self.default_branch = project.default_branch
self.name = project.name
self.repo_url = project.repo_url
self.working_dir = project.working_dir
def check_working_dir(self):
if not os.path.exists(self.working_dir):
os.makedirs(self.working_dir)
def update(self):
"""
If self.working_dir is already a valid local copy of the repository,
update the repository, else create a new local copy of the repository.
"""
self.check_working_dir()
#==========================================================================
# Tag / Branch related methods
# These methods only apply if supports_tags = True and/or
# support_branches = True
#==========================================================================
@property
def tags(self):
"""
Returns a list of VCSVersion objects. See VCSVersion for more
information.
"""
raise NotImplementedError
@property
def branches(self):
"""
Returns a list of VCSVersion objects. See VCSVersion for more
information.
"""
raise NotImplementedError
def checkout(self, identifier=None):
"""
Set the state to the given identifier.
If identifier is None, checkout to the latest revision.
The type and format of identifier may change from VCS to VCS, so each
backend is responsible to understand it's identifiers.
"""
self.check_working_dir()
#==========================================================================
# Contribution related methods
# These methods only apply if supports_contribution = True
#==========================================================================
def get_contribution_backend(self):
"""
Returns a contribution backend or None for this repository. The backend
is detected via the repository URL.
"""
for backend in self.contribution_backends:
if backend.accepts(self.repo_url):
return backend(self)
return None
class BaseContributionBackend(BaseCLI):
"""
Base class for contribution backends.
The main purpose of this base class is to define the API.
"""
def __init__(self, repo):
self.repo = repo
self.slug = slugify(repo.name)
self.default_branch = repo.default_branch
self.repo_url = repo.repo_url
self.working_dir = repo.working_dir
@classmethod
def accepts(cls, url):
"""
Classmethod that checks if a given repository URL is supported by this
backend.
"""
return False
def get_branch_file(self, branch, filename):
"""
Returns the contents of a file as it is in the specified branch.
"""
raise NotImplementedError
def set_branch_file(self, branch, filename, contents, comment=''):
"""
Saves the file in the specified branch.
"""
raise NotImplementedError
def push_branch(self, branch, title='', comment=''):
"""
Pushes a branch upstream.
"""
raise NotImplementedError
def _open_file(self, filename, mode='r'):
return open(os.path.join(self.repo.working_dir, filename), mode)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
handlers_drive.go | package main
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"time"
"github.com/prasmussen/gdrive/auth"
"github.com/prasmussen/gdrive/cli"
"github.com/prasmussen/gdrive/drive"
)
const ClientId = "250872367727-ahm3gic1bto0fi2itr6qmtb6g29egi67.apps.googleusercontent.com"
const ClientSecret = "XeKt2cj78gPT4-OwPH_rtrdM"
const TokenFilename = "token_v2.json"
const DefaultCacheFileName = "file_cache.json"
func listHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).List(drive.ListFilesArgs{
Out: os.Stdout,
MaxFiles: args.Int64("maxFiles"),
NameWidth: args.Int64("nameWidth"),
Query: args.String("query"),
SortOrder: args.String("sortOrder"),
SkipHeader: args.Bool("skipHeader"),
SizeInBytes: args.Bool("sizeInBytes"),
AbsPath: args.Bool("absPath"),
})
checkErr(err)
}
func listChangesHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListChanges(drive.ListChangesArgs{
Out: os.Stdout,
PageToken: args.String("pageToken"),
MaxChanges: args.Int64("maxChanges"),
Now: args.Bool("now"),
NameWidth: args.Int64("nameWidth"),
SkipHeader: args.Bool("skipHeader"),
})
checkErr(err)
}
func downloadHandler(ctx cli.Context) {
args := ctx.Args()
checkDownloadArgs(args)
err := newDrive(args).Download(drive.DownloadArgs{
Out: os.Stdout,
Id: args.String("fileId"),
Force: args.Bool("force"),
Skip: args.Bool("skip"),
Path: args.String("path"),
Delete: args.Bool("delete"),
Recursive: args.Bool("recursive"),
Stdout: args.Bool("stdout"),
Progress: progressWriter(args.Bool("noProgress")),
Timeout: durationInSeconds(args.Int64("timeout")),
})
checkErr(err)
}
func downloadQueryHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).DownloadQuery(drive.DownloadQueryArgs{
Out: os.Stdout,
Query: args.String("query"),
Force: args.Bool("force"),
Skip: args.Bool("skip"),
Recursive: args.Bool("recursive"),
Path: args.String("path"),
Progress: progressWriter(args.Bool("noProgress")),
})
checkErr(err)
}
func downloadSyncHandler(ctx cli.Context) {
args := ctx.Args()
cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName)
err := newDrive(args).DownloadSync(drive.DownloadSyncArgs{
Out: os.Stdout,
Progress: progressWriter(args.Bool("noProgress")),
Path: args.String("path"),
RootId: args.String("fileId"),
DryRun: args.Bool("dryRun"),
DeleteExtraneous: args.Bool("deleteExtraneous"),
Timeout: durationInSeconds(args.Int64("timeout")),
Resolution: conflictResolution(args),
Comparer: NewCachedMd5Comparer(cachePath),
})
checkErr(err)
}
func downloadRevisionHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).DownloadRevision(drive.DownloadRevisionArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
RevisionId: args.String("revId"),
Force: args.Bool("force"),
Stdout: args.Bool("stdout"),
Path: args.String("path"),
Progress: progressWriter(args.Bool("noProgress")),
Timeout: durationInSeconds(args.Int64("timeout")),
})
checkErr(err)
}
func uploadHandler(ctx cli.Context) {
args := ctx.Args()
checkUploadArgs(args)
err := newDrive(args).Upload(drive.UploadArgs{
Out: os.Stdout,
Progress: progressWriter(args.Bool("noProgress")),
Path: args.String("path"),
Name: args.String("name"),
Description: args.String("description"),
Parents: args.StringSlice("parent"),
Mime: args.String("mime"),
Recursive: args.Bool("recursive"),
Share: args.Bool("share"),
Delete: args.Bool("delete"),
ChunkSize: args.Int64("chunksize"),
Timeout: durationInSeconds(args.Int64("timeout")),
})
checkErr(err)
}
func uploadStdinHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).UploadStream(drive.UploadStreamArgs{
Out: os.Stdout,
In: os.Stdin,
Name: args.String("name"),
Description: args.String("description"),
Parents: args.StringSlice("parent"),
Mime: args.String("mime"),
Share: args.Bool("share"),
ChunkSize: args.Int64("chunksize"),
Timeout: durationInSeconds(args.Int64("timeout")),
Progress: progressWriter(args.Bool("noProgress")),
})
checkErr(err)
}
func uploadSyncHandler(ctx cli.Context) {
args := ctx.Args()
cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName)
err := newDrive(args).UploadSync(drive.UploadSyncArgs{
Out: os.Stdout,
Progress: progressWriter(args.Bool("noProgress")),
Path: args.String("path"),
RootId: args.String("fileId"),
DryRun: args.Bool("dryRun"),
DeleteExtraneous: args.Bool("deleteExtraneous"),
ChunkSize: args.Int64("chunksize"),
Timeout: durationInSeconds(args.Int64("timeout")),
Resolution: conflictResolution(args),
Comparer: NewCachedMd5Comparer(cachePath),
})
checkErr(err)
}
func updateHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Update(drive.UpdateArgs{
Out: os.Stdout,
Id: args.String("fileId"),
Path: args.String("path"),
Name: args.String("name"),
Description: args.String("description"),
Parents: args.StringSlice("parent"),
Mime: args.String("mime"),
Progress: progressWriter(args.Bool("noProgress")),
ChunkSize: args.Int64("chunksize"),
Timeout: durationInSeconds(args.Int64("timeout")),
})
checkErr(err)
}
func infoHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Info(drive.FileInfoArgs{
Out: os.Stdout,
Id: args.String("fileId"),
SizeInBytes: args.Bool("sizeInBytes"),
})
checkErr(err)
}
func importHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Import(drive.ImportArgs{
Mime: args.String("mime"),
Out: os.Stdout,
Path: args.String("path"),
Parents: args.StringSlice("parent"),
Progress: progressWriter(args.Bool("noProgress")),
})
checkErr(err)
}
func exportHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Export(drive.ExportArgs{
Out: os.Stdout,
Id: args.String("fileId"),
Mime: args.String("mime"),
PrintMimes: args.Bool("printMimes"),
Force: args.Bool("force"),
})
checkErr(err)
}
func listRevisionsHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListRevisions(drive.ListRevisionsArgs{
Out: os.Stdout,
Id: args.String("fileId"),
NameWidth: args.Int64("nameWidth"),
SizeInBytes: args.Bool("sizeInBytes"),
SkipHeader: args.Bool("skipHeader"),
})
checkErr(err)
}
func mkdirHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Mkdir(drive.MkdirArgs{
Out: os.Stdout,
Name: args.String("name"),
Description: args.String("description"),
Parents: args.StringSlice("parent"),
})
checkErr(err)
}
func shareHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Share(drive.ShareArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
Role: args.String("role"),
Type: args.String("type"),
Email: args.String("email"),
Domain: args.String("domain"),
Discoverable: args.Bool("discoverable"),
})
checkErr(err)
}
func shareListHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListPermissions(drive.ListPermissionsArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
})
checkErr(err)
}
func shareRevokeHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).RevokePermission(drive.RevokePermissionArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
PermissionId: args.String("permissionId"),
})
checkErr(err)
}
func deleteHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Delete(drive.DeleteArgs{
Out: os.Stdout,
Id: args.String("fileId"),
Recursive: args.Bool("recursive"),
})
checkErr(err)
}
func listSyncHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListSync(drive.ListSyncArgs{
Out: os.Stdout,
SkipHeader: args.Bool("skipHeader"),
})
checkErr(err)
}
func listRecursiveSyncHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListRecursiveSync(drive.ListRecursiveSyncArgs{
Out: os.Stdout,
RootId: args.String("fileId"),
SkipHeader: args.Bool("skipHeader"),
PathWidth: args.Int64("pathWidth"),
SizeInBytes: args.Bool("sizeInBytes"),
SortOrder: args.String("sortOrder"),
})
checkErr(err)
}
func deleteRevisionHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).DeleteRevision(drive.DeleteRevisionArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
RevisionId: args.String("revId"),
})
checkErr(err)
}
func aboutHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).About(drive.AboutArgs{
Out: os.Stdout,
SizeInBytes: args.Bool("sizeInBytes"),
})
checkErr(err)
}
func aboutImportHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).AboutImport(drive.AboutImportArgs{
Out: os.Stdout,
})
checkErr(err)
}
func aboutExportHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).AboutExport(drive.AboutExportArgs{
Out: os.Stdout,
})
checkErr(err)
}
func getOauthClient(args cli.Arguments) (*http.Client, error) {
if args.String("refreshToken") != "" && args.String("accessToken") != "" {
ExitF("Access token not needed when refresh token is provided")
}
if args.String("refreshToken") != "" {
return auth.NewRefreshTokenClient(ClientId, ClientSecret, args.String("refreshToken")), nil
}
if args.String("accessToken") != "" {
return auth.NewAccessTokenClient(ClientId, ClientSecret, args.String("accessToken")), nil
}
configDir := getConfigDir(args)
if args.String("serviceAccount") != "" {
serviceAccountPath := ConfigFilePath(configDir, args.String("serviceAccount"))
serviceAccountClient, err := auth.NewServiceAccountClient(serviceAccountPath)
if err != nil {
return nil, err
}
return serviceAccountClient, nil
}
tokenPath := ConfigFilePath(configDir, TokenFilename)
return auth.NewFileSourceClient(ClientId, ClientSecret, tokenPath, authCodePrompt)
}
func getConfigDir(args cli.Arguments) string {
// Use dir from environment var if present
if os.Getenv("GDRIVE_CONFIG_DIR") != "" {
return os.Getenv("GDRIVE_CONFIG_DIR")
}
return args.String("configDir")
}
func newDrive(args cli.Arguments) *drive.Drive {
oauth, err := getOauthClient(args)
if err != nil {
ExitF("Failed getting oauth client: %s", err.Error())
}
client, err := drive.New(oauth)
if err != nil {
ExitF("Failed getting drive: %s", err.Error())
}
return client
}
func authCodePrompt(url string) func() string {
return func() string {
fmt.Println("Authentication needed")
fmt.Println("Go to the following url in your browser:")
fmt.Printf("%s\n\n", url)
fmt.Print("Enter verification code: ")
var code string
if _, err := fmt.Scan(&code); err != nil {
fmt.Printf("Failed reading code: %s", err.Error())
}
return code
}
}
func progressWriter(discard bool) io.Writer {
if discard {
return ioutil.Discard
}
return os.Stderr
}
func durationInSeconds(seconds int64) time.Duration {
return time.Second * time.Duration(seconds)
}
func conflictResolution(args cli.Arguments) drive.ConflictResolution {
keepLocal := args.Bool("keepLocal")
keepRemote := args.Bool("keepRemote")
keepLargest := args.Bool("keepLargest")
if (keepLocal && keepRemote) || (keepLocal && keepLargest) || (keepRemote && keepLargest) {
ExitF("Only one conflict resolution flag can be given")
}
if keepLocal {
return drive.KeepLocal
}
if keepRemote {
return drive.KeepRemote
}
if keepLargest {
return drive.KeepLargest
}
return drive.NoResolution
}
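// The sketch below is a hypothetical illustration, not part of the original
// gdrive source: it restates the flag-to-resolution mapping enforced by
// conflictResolution above. Only one of keepLocal/keepRemote/keepLargest may
// be set; with none set the sync reports conflicts instead of resolving them.
func exampleConflictFlags(keepLocal, keepRemote, keepLargest bool) drive.ConflictResolution {
	switch {
	case keepLocal:
		return drive.KeepLocal
	case keepRemote:
		return drive.KeepRemote
	case keepLargest:
		return drive.KeepLargest
	default:
		return drive.NoResolution
	}
}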
func checkUploadArgs(args cli.Arguments) {
if args.Bool("recursive") && args.Bool("delete") {
ExitF("--delete is not allowed for recursive uploads")
}
if args.Bool("recursive") && args.Bool("share") {
ExitF("--share is not allowed for recursive uploads")
}
}
func checkDownloadArgs(args cli.Arguments) {
if args.Bool("recursive") && args.Bool("delete") {
ExitF("--delete is not allowed for recursive downloads")
}
}
| [
"\"GDRIVE_CONFIG_DIR\"",
"\"GDRIVE_CONFIG_DIR\""
]
| []
| [
"GDRIVE_CONFIG_DIR"
]
| [] | ["GDRIVE_CONFIG_DIR"] | go | 1 | 0 | |
src/sage_docbuild/__init__.py | """
The documentation builder
It is the starting point for building documentation, and is
responsible for figuring out what to build and with which options. The
actual documentation build for each individual document is then done
in a subprocess call to sphinx, see :func:`builder_helper`.
* The builder can be configured in build_options.py
* The sphinx subprocesses are configured in conf.py
"""
# ****************************************************************************
# Copyright (C) 2008-2009 Mike Hansen <[email protected]>
# 2009-2010 Mitesh Patel <[email protected]>
# 2009-2015 J. H. Palmieri <[email protected]>
# 2009 Carl Witty <[email protected]>
# 2010-2017 Jeroen Demeyer <[email protected]>
# 2012 William Stein <[email protected]>
# 2012-2014 Nicolas M. Thiery <[email protected]>
# 2012-2015 André Apitzsch <[email protected]>
# 2012 Florent Hivert <[email protected]>
# 2013-2014 Volker Braun <[email protected]>
# 2013 R. Andrew Ohana <[email protected]>
# 2015 Thierry Monteil <[email protected]>
# 2015 Marc Mezzarobba <[email protected]>
# 2015 Travis Scrimshaw <tscrim at ucdavis.edu>
# 2016-2017 Frédéric Chapoton <[email protected]>
# 2016 Erik M. Bray <[email protected]>
# 2017 Kwankyu Lee <[email protected]>
# 2017 François Bissey <[email protected]>
# 2018 Julian Rüth <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
import logging
import optparse
import os
import pickle
import re
import shutil
import subprocess
import sys
import time
import types
import warnings
logger = logging.getLogger(__name__)
import sphinx.util.console
import sphinx.ext.intersphinx
import sage.all
from sage.misc.cachefunc import cached_method
from sage.misc.misc import sage_makedirs
from sage.env import SAGE_DOC_SRC, SAGE_DOC, SAGE_SRC, DOT_SAGE
from .build_options import (LANGUAGES, SPHINXOPTS, PAPER, OMIT,
PAPEROPTS, ALLSPHINXOPTS, NUM_THREADS, WEBSITESPHINXOPTS,
INCREMENTAL_BUILD, ABORT_ON_ERROR)
##########################################
# Parallel Building Ref Manual #
##########################################
def build_ref_doc(args):
doc = args[0]
lang = args[1]
format = args[2]
kwds = args[3]
args = args[4:]
if format == 'inventory': # you must not use the inventory to build the inventory
kwds['use_multidoc_inventory'] = False
getattr(ReferenceSubBuilder(doc, lang), format)(*args, **kwds)
##########################################
# Builders #
##########################################
def builder_helper(type):
"""
Returns a function which builds the documentation for
output type ``type``.
TESTS:
Check that :trac:`25161` has been resolved::
sage: from sage_docbuild import DocBuilder, setup_parser
sage: DocBuilder._options = setup_parser().parse_args([])[0] # builder_helper needs _options to be set
sage: import sage_docbuild.sphinxbuild
sage: def raiseBaseException():
....: raise BaseException("abort pool operation")
sage: original_runsphinx, sage_docbuild.sphinxbuild.runsphinx = sage_docbuild.sphinxbuild.runsphinx, raiseBaseException
sage: from sage_docbuild import builder_helper, build_ref_doc
sage: from sage_docbuild import _build_many as build_many
sage: helper = builder_helper("html")
sage: try:
....: build_many(build_ref_doc, [("docname", "en", "html", {})])
....: except Exception as E:
....: "Non-exception during docbuild: abort pool operation" in str(E)
True
"""
def f(self, *args, **kwds):
output_dir = self._output_dir(type)
options = ALLSPHINXOPTS
if self.name == 'website':
# WEBSITESPHINXOPTS is either empty or " -A hide_pdf_links=1 "
options += WEBSITESPHINXOPTS
if kwds.get('use_multidoc_inventory', True):
options += ' -D multidoc_first_pass=0'
else:
options += ' -D multidoc_first_pass=1'
build_command = '-b %s -d %s %s %s %s'%(type, self._doctrees_dir(),
options, self.dir,
output_dir)
logger.debug(build_command)
# Run Sphinx with Sage's special logger
sys.argv = ["sphinx-build"] + build_command.split()
from .sphinxbuild import runsphinx
try:
runsphinx()
except Exception:
if ABORT_ON_ERROR:
raise
except BaseException as e:
# We need to wrap a BaseException that is not an Exception in a
# regular Exception. Otherwise multiprocessing.Pool.get hangs, see
# #25161
if ABORT_ON_ERROR:
raise Exception("Non-exception during docbuild: %s"%(e,), e)
if "/latex" in output_dir:
logger.warning("LaTeX file written to {}".format(output_dir))
else:
logger.warning(
"Build finished. The built documents can be found in {}".
format(output_dir))
f.is_output_format = True
return f
class DocBuilder(object):
def __init__(self, name, lang='en'):
"""
INPUT:
- ``name`` - the name of a subdirectory in SAGE_DOC_SRC, such as
'tutorial' or 'bordeaux_2008'
- ``lang`` - (default "en") the language of the document.
"""
doc = name.split(os.path.sep)
if doc[0] in LANGUAGES:
lang = doc[0]
doc.pop(0)
self.name = os.path.join(*doc)
self.lang = lang
self.dir = os.path.join(SAGE_DOC_SRC, self.lang, self.name)
def _output_dir(self, type):
"""
Returns the directory where the output of type ``type`` is stored.
If the directory does not exist, then it will automatically be
created.
EXAMPLES::
sage: from sage_docbuild import DocBuilder
sage: b = DocBuilder('tutorial')
sage: b._output_dir('html')
'.../html/en/tutorial'
"""
d = os.path.join(SAGE_DOC, type, self.lang, self.name)
sage_makedirs(d)
return d
def _doctrees_dir(self):
"""
Returns the directory where the doctrees are stored. If the
directory does not exist, then it will automatically be
created.
EXAMPLES::
sage: from sage_docbuild import DocBuilder
sage: b = DocBuilder('tutorial')
sage: b._doctrees_dir()
'.../doctrees/en/tutorial'
"""
d = os.path.join(SAGE_DOC, 'doctrees', self.lang, self.name)
sage_makedirs(d)
return d
def _output_formats(self):
"""
Returns a list of the possible output formats.
EXAMPLES::
sage: from sage_docbuild import DocBuilder
sage: b = DocBuilder('tutorial')
sage: b._output_formats()
['changes', 'html', 'htmlhelp', 'inventory', 'json', 'latex', 'linkcheck', 'pickle', 'web']
"""
#Go through all the attributes of self and check to
#see which ones have an 'is_output_format' attribute. These
#are the ones created with builder_helper.
output_formats = []
for attr in dir(self):
if hasattr(getattr(self, attr), 'is_output_format'):
output_formats.append(attr)
output_formats.sort()
return output_formats
def pdf(self):
"""
Builds the PDF files for this document. This is done by first
(re)-building the LaTeX output, going into that LaTeX
directory, and running 'make all-pdf' there.
EXAMPLES::
sage: from sage_docbuild import DocBuilder
sage: b = DocBuilder('tutorial')
sage: b.pdf() #not tested
"""
self.latex()
tex_dir = self._output_dir('latex')
pdf_dir = self._output_dir('pdf')
if self.name == 'reference':
# recover maths in tex, undoing what Sphinx did (trac #29993)
tex_file = os.path.join(tex_dir, 'reference.tex')
with open(tex_file) as f:
ref = f.read()
ref = re.sub(r'\\textbackslash{}', r'\\', ref)
ref = re.sub(r'\\textbackslash{}', r'\\', ref)
ref = re.sub(r'\\{', r'{', ref)
ref = re.sub(r'\\}', r'}', ref)
ref = re.sub(r'\\_', r'_', ref)
ref = re.sub(r'\\textasciicircum{}', r'^', ref)
with open(tex_file, 'w') as f:
f.write(ref)
make_target = "cd '%s' && $MAKE %s && mv -f *.pdf '%s'"
error_message = "failed to run $MAKE %s in %s"
command = 'all-pdf'
if subprocess.call(make_target%(tex_dir, command, pdf_dir), shell=True):
raise RuntimeError(error_message%(command, tex_dir))
logger.warning("Build finished. The built documents can be found in %s", pdf_dir)
def clean(self, *args):
shutil.rmtree(self._doctrees_dir())
output_formats = list(args) if args else self._output_formats()
for format in output_formats:
shutil.rmtree(self._output_dir(format), ignore_errors=True)
html = builder_helper('html')
pickle = builder_helper('pickle')
web = pickle
json = builder_helper('json')
htmlhelp = builder_helper('htmlhelp')
latex = builder_helper('latex')
changes = builder_helper('changes')
linkcheck = builder_helper('linkcheck')
# import the customized builder for object.inv files
inventory = builder_helper('inventory')
from .utils import build_many as _build_many
def build_many(target, args, processes=None):
"""
Thin wrapper around `sage_docbuild.utils.build_many` which uses the
docbuild settings ``NUM_THREADS`` and ``ABORT_ON_ERROR``.
"""
if processes is None:
processes = NUM_THREADS
try:
_build_many(target, args, processes=processes)
except BaseException as exc:
if ABORT_ON_ERROR:
raise
##########################################
# Parallel Building Other Docs #
##########################################
def build_other_doc(args):
document = args[0]
name = args[1]
kwds = args[2]
args = args[3:]
logger.warning("\nBuilding %s.\n" % document)
getattr(get_builder(document), name)(*args, **kwds)
class AllBuilder(object):
"""
A class used to build all of the documentation.
"""
def __getattr__(self, attr):
"""
For any attributes not explicitly defined, we just go through
all of the documents and call their attr. For example,
'AllBuilder().json()' will go through all of the documents
and call the json() method on their builders.
"""
from functools import partial
return partial(self._wrapper, attr)
def _wrapper(self, name, *args, **kwds):
"""
This is the function which goes through all of the documents
and does the actual building.
"""
start = time.time()
docs = self.get_all_documents()
refs = [x for x in docs if x.endswith('reference')]
others = [x for x in docs if not x.endswith('reference')]
# Build the reference manual twice to resolve references. That is,
# build once with the inventory builder to construct the intersphinx
# inventory files, and then build the second time for real. So the
# first build should be as fast as possible.
logger.warning("\nBuilding reference manual, first pass.\n")
for document in refs:
getattr(get_builder(document), 'inventory')(*args, **kwds)
logger.warning("Building reference manual, second pass.\n")
sage_makedirs(os.path.join(SAGE_DOC, "html", "en", "reference", "_static"))
for document in refs:
getattr(get_builder(document), name)(*args, **kwds)
# build the other documents in parallel
L = [(doc, name, kwds) + args for doc in others]
# Trac #31344: Work around crashes from multiprocessing
if sys.platform == 'darwin':
for target in L:
build_other_doc(target)
else:
build_many(build_other_doc, L)
logger.warning("Elapsed time: %.1f seconds."%(time.time()-start))
logger.warning("Done building the documentation!")
def get_all_documents(self):
"""
Returns a list of all of the documents. A document is a directory within one of
the language subdirectories of SAGE_DOC_SRC specified by the global LANGUAGES
variable.
EXAMPLES::
sage: from sage_docbuild import AllBuilder
sage: documents = AllBuilder().get_all_documents()
sage: 'en/tutorial' in documents
True
sage: documents[0] == 'en/reference'
True
"""
documents = []
for lang in LANGUAGES:
for document in os.listdir(os.path.join(SAGE_DOC_SRC, lang)):
if (document not in OMIT
and os.path.isdir(os.path.join(SAGE_DOC_SRC, lang, document))):
documents.append(os.path.join(lang, document))
# Ensure that the reference guide is compiled first so that links from
# the other documents to it are correctly resolved.
if 'en/reference' in documents:
documents.remove('en/reference')
documents.insert(0, 'en/reference')
return documents
class WebsiteBuilder(DocBuilder):
def html(self):
"""
After we've finished building the website index page, we copy
everything one directory up. Then we call
:meth:`create_html_redirects`.
"""
DocBuilder.html(self)
html_output_dir = self._output_dir('html')
for f in os.listdir(html_output_dir):
src = os.path.join(html_output_dir, f)
dst = os.path.join(html_output_dir, '..', f)
if os.path.isdir(src):
shutil.rmtree(dst, ignore_errors=True)
shutil.copytree(src, dst)
else:
shutil.copy2(src, dst)
def create_html_redirects(self):
"""
Writes a number of small HTML files; these are files which used to
contain the main content of the reference manual before splitting the
manual into multiple documents. After the split, those files have
moved, so in each old location, write a file which redirects to the new
version. (This is so old URLs to pieces of the reference manual still
open the correct files.)
"""
from sage.misc.superseded import deprecation
deprecation(29993, "This method was created in trac #6495 for backward compatibility. Not necessary anymore.")
# The simple html template which will cause a redirect to the correct file.
html_template = """<html><head>
<meta HTTP-EQUIV="REFRESH" content="0; url=%s">
</head><body></body></html>"""
reference_dir = os.path.abspath(os.path.join(self._output_dir('html'),
'..', 'reference'))
reference_builder = ReferenceBuilder('reference')
refdir = os.path.join(SAGE_DOC_SRC, 'en', 'reference')
for document in reference_builder.get_all_documents(refdir):
# path is the directory above reference dir
path = os.path.abspath(os.path.join(reference_dir, '..'))
# the name of the subdocument
document_name = document.split('/')[1]
# the sage directory within a subdocument, for example
# local/share/doc/sage/html/en/reference/algebras/sage
sage_directory = os.path.join(path, document, 'sage')
# Walk through all of the files in the sage_directory
for dirpath, dirnames, filenames in os.walk(sage_directory):
# a string like reference/algebras/sage/algebras
short_path = dirpath[len(path)+1:]
# a string like sage/algebras
shorter_path = os.path.join(*short_path.split(os.sep)[2:])
# make the shorter path directory
try:
os.makedirs(os.path.join(reference_dir, shorter_path))
except OSError:
pass
for filename in filenames:
if not filename.endswith('html'):
continue
# the name of the html file we are going to create
redirect_filename = os.path.join(reference_dir, shorter_path, filename)
# the number of levels up we need to use in the relative url
levels_up = len(shorter_path.split(os.sep))
# the relative url that we will redirect to
redirect_url = "/".join(['..']*levels_up + [document_name, shorter_path, filename])
# write the html file which performs the redirect
with open(redirect_filename, 'w') as f:
print(redirect_filename)
f.write(html_template % redirect_url)
def clean(self):
"""
When we clean the output for the website index, we need to
remove all of the HTML that were placed in the parent
directory.
"""
html_output_dir = self._output_dir('html')
parent_dir = os.path.realpath(os.path.join(html_output_dir, '..'))
for filename in os.listdir(html_output_dir):
parent_filename = os.path.join(parent_dir, filename)
if not os.path.exists(parent_filename):
continue
if os.path.isdir(parent_filename):
shutil.rmtree(parent_filename, ignore_errors=True)
else:
os.unlink(parent_filename)
DocBuilder.clean(self)
class ReferenceBuilder(AllBuilder):
"""
This class builds the reference manual. It uses DocBuilder to
build the top-level page and ReferenceSubBuilder for each
sub-component.
"""
def __init__(self, name, lang='en'):
"""
Records the reference manual's name, in case it's not
identical to 'reference'.
"""
AllBuilder.__init__(self)
doc = name.split(os.path.sep)
if doc[0] in LANGUAGES:
lang = doc[0]
doc.pop(0)
self.name = doc[0]
self.lang = lang
def _output_dir(self, type, lang='en'):
"""
Return the directory where the output of type ``type`` is stored.
If the directory does not exist, then it will automatically be
created.
EXAMPLES::
sage: from sage_docbuild import ReferenceBuilder
sage: b = ReferenceBuilder('reference')
sage: b._output_dir('html')
'.../html/en/reference'
"""
d = os.path.join(SAGE_DOC, type, lang, self.name)
sage_makedirs(d)
return d
def _refdir(self, lang):
return os.path.join(SAGE_DOC_SRC, lang, self.name)
def _build_bibliography(self, lang, format, *args, **kwds):
"""
Build the bibliography only
The bibliography references.aux is referenced by the other
manuals and needs to be built first.
"""
refdir = self._refdir(lang)
references = [
(doc, lang, format, kwds) + args for doc in self.get_all_documents(refdir)
if doc == 'reference/references'
]
build_many(build_ref_doc, references)
def _build_everything_except_bibliography(self, lang, format, *args, **kwds):
"""
Build the entire reference manual except the bibliography
"""
refdir = self._refdir(lang)
non_references = [
(doc, lang, format, kwds) + args for doc in self.get_all_documents(refdir)
if doc != 'reference/references'
]
build_many(build_ref_doc, non_references)
def _wrapper(self, format, *args, **kwds):
"""
Builds reference manuals. For each language, it builds the
top-level document and its components.
"""
for lang in LANGUAGES:
refdir = self._refdir(lang)
if not os.path.exists(refdir):
continue
logger.info('Building bibliography')
self._build_bibliography(lang, format, *args, **kwds)
logger.info('Bibliography finished, building dependent manuals')
self._build_everything_except_bibliography(lang, format, *args, **kwds)
# The html refman must be built at the end to ensure correct
# merging of indexes and inventories.
# Sphinx is run here in the current process (not in a
# subprocess) and the IntersphinxCache gets populated to be
# used for the second pass of the reference manual and for
# the other documents.
getattr(DocBuilder(self.name, lang), format)(*args, **kwds)
# PDF: we need to build master index file which lists all
# of the PDF file. So we create an html file, based on
# the file index.html from the "website" target.
if format == 'pdf':
# First build the website page. This only takes a few seconds.
getattr(get_builder('website'), 'html')()
website_dir = os.path.join(SAGE_DOC, 'html', 'en', 'website')
output_dir = self._output_dir(format, lang)
# Install in output_dir a symlink to the directory containing static files.
try:
os.symlink(os.path.join(website_dir, '_static'), os.path.join(output_dir, '_static'))
except FileExistsError:
pass
# Now modify website's index.html page and write it to
# output_dir.
with open(os.path.join(website_dir, 'index.html')) as f:
html = f.read().replace('Documentation', 'Reference')
html_output_dir = os.path.dirname(website_dir)
html = html.replace('http://www.sagemath.org',
os.path.join(html_output_dir, 'index.html'))
# From index.html, we want the preamble and the tail.
html_end_preamble = html.find('<h1>Sage Reference')
html_bottom = html.rfind('</table>') + len('</table>')
# For the content, we modify doc/en/reference/index.rst, which
# has two parts: the body and the table of contents.
with open(os.path.join(SAGE_DOC_SRC, lang, 'reference', 'index.rst')) as f:
rst = f.read()
# Get rid of todolist and miscellaneous rst markup.
rst = rst.replace('.. _reference-manual:\n\n', '')
rst = re.sub(r'\\\\', r'\\', rst)
# Replace rst links with html links. There are three forms:
#
# `blah`__ followed by __ LINK
#
# `blah <LINK>`_
#
# :doc:`blah <module/index>`
#
# Change the first and the second forms to
#
# <a href="LINK">blah</a>
#
# Change the third form to
#
# <a href="module/module.pdf">blah <img src="_static/pdf.png" /></a>
#
rst = re.sub(r'`([^`\n]*)`__.*\n\n__ (.*)',
r'<a href="\2">\1</a>.', rst)
rst = re.sub(r'`([^<\n]*)\s+<(.*)>`_',
r'<a href="\2">\1</a>', rst)
rst = re.sub(r':doc:`([^<]*?)\s+<(.*)/index>`',
r'<a href="\2/\2.pdf">\1 <img src="_static/pdf.png"/></a>', rst)
# Body: add paragraph <p> markup.
start = rst.rfind('*\n') + 1
end = rst.find('\nUser Interfaces')
rst_body = rst[start:end]
rst_body = rst_body.replace('\n\n', '</p>\n<p>')
# TOC: don't include the indices
start = rst.find('\nUser Interfaces')
end = rst.find('Indices and Tables')
rst_toc = rst[start:end]
# change * to <li>; change rst headers to html headers
rst_toc = re.sub(r'\*(.*)\n',
r'<li>\1</li>\n', rst_toc)
rst_toc = re.sub(r'\n([A-Z][a-zA-Z, ]*)\n[=]*\n',
r'</ul>\n\n\n<h2>\1</h2>\n\n<ul>\n', rst_toc)
rst_toc = re.sub(r'\n([A-Z][a-zA-Z, ]*)\n[-]*\n',
r'</ul>\n\n\n<h3>\1</h3>\n\n<ul>\n', rst_toc)
# now write the file.
with open(os.path.join(output_dir, 'index.html'), 'w') as new_index:
new_index.write(html[:html_end_preamble])
new_index.write('<h1> Sage Reference Manual (PDF version)'+ '</h1>')
new_index.write(rst_body)
new_index.write('<ul>')
new_index.write(rst_toc)
new_index.write('</ul>\n\n')
new_index.write(html[html_bottom:])
logger.warning('''
PDF documents have been created in subdirectories of
%s
Alternatively, you can open
%s
for a webpage listing all of the documents.''' % (output_dir,
os.path.join(output_dir,
'index.html')))
def get_all_documents(self, refdir):
"""
Returns a list of all reference manual components to build.
We add a component name if it's a subdirectory of the manual's
directory and contains a file named 'index.rst'.
We return the largest component (most subdirectory entries)
first since they will take the longest to build.
EXAMPLES::
sage: from sage_docbuild import ReferenceBuilder
sage: b = ReferenceBuilder('reference')
sage: refdir = os.path.join(os.environ['SAGE_DOC_SRC'], 'en', b.name)
sage: sorted(b.get_all_documents(refdir))
['reference/algebras',
'reference/arithgroup',
...,
'reference/valuations']
"""
documents = []
for doc in os.listdir(refdir):
directory = os.path.join(refdir, doc)
if os.path.exists(os.path.join(directory, 'index.rst')):
n = len(os.listdir(directory))
documents.append((-n, os.path.join(self.name, doc)))
return [ doc[1] for doc in sorted(documents) ]
class ReferenceSubBuilder(DocBuilder):
"""
This class builds sub-components of the reference manual. It is
responsible for making sure that the auto generated reST files for the
Sage library are up to date.
When building any output, we must first go through and check
to see if we need to update any of the autogenerated reST
files. There are two cases where this would happen:
1. A new module gets added to one of the toctrees.
2. The actual module gets updated and possibly contains a new
title.
"""
def __init__(self, *args, **kwds):
DocBuilder.__init__(self, *args, **kwds)
self._wrap_builder_helpers()
def _wrap_builder_helpers(self):
from functools import partial, update_wrapper
for attr in dir(self):
if hasattr(getattr(self, attr), 'is_output_format'):
f = partial(self._wrapper, attr)
f.is_output_format = True
update_wrapper(f, getattr(self, attr))
setattr(self, attr, f)
def _wrapper(self, build_type, *args, **kwds):
"""
This is the wrapper around the builder_helper methods that
goes through and makes sure things are up to date.
"""
# Force regeneration of all modules if the inherited
# and/or underscored members options have changed.
cache = self.get_cache()
force = False
try:
if (cache['option_inherited'] != self._options.inherited or
cache['option_underscore'] != self._options.underscore):
logger.info("Detected change(s) in inherited and/or underscored members option(s).")
force = True
except KeyError:
force = True
cache['option_inherited'] = self._options.inherited
cache['option_underscore'] = self._options.underscore
self.save_cache()
# After "sage -clone", refresh the reST file mtimes in
# environment.pickle.
if self._options.update_mtimes:
logger.info("Checking for reST file mtimes to update...")
self.update_mtimes()
if force:
# Write reST files for all modules from scratch.
self.clean_auto()
for module_name in self.get_all_included_modules():
self.write_auto_rest_file(module_name)
else:
# Write reST files for new and updated modules.
for module_name in self.get_new_and_updated_modules():
self.write_auto_rest_file(module_name)
# Copy over the custom reST files from _sage
_sage = os.path.join(self.dir, '_sage')
if os.path.exists(_sage):
logger.info("Copying over custom reST files from %s ...", _sage)
shutil.copytree(_sage, os.path.join(self.dir, 'sage'))
getattr(DocBuilder, build_type)(self, *args, **kwds)
def cache_filename(self):
"""
Return the filename where the pickle of the reference cache
is stored.
"""
return os.path.join(self._doctrees_dir(), 'reference.pickle')
@cached_method
def get_cache(self):
"""
Retrieve the reference cache which contains the options previously used
by the reference builder.
If it doesn't exist, then we just return an empty dictionary. If it
is corrupted, return an empty dictionary.
"""
filename = self.cache_filename()
if not os.path.exists(filename):
return {}
with open(self.cache_filename(), 'rb') as file:
try:
cache = pickle.load(file)
except Exception:
logger.debug("Cache file '%s' is corrupted; ignoring it..." % filename)
cache = {}
else:
logger.debug("Loaded the reference cache: %s", filename)
return cache
def save_cache(self):
"""
Pickle the current reference cache for later retrieval.
"""
cache = self.get_cache()
with open(self.cache_filename(), 'wb') as file:
pickle.dump(cache, file)
logger.debug("Saved the reference cache: %s", self.cache_filename())
def get_sphinx_environment(self):
"""
Returns the Sphinx environment for this project.
"""
from sphinx.environment import BuildEnvironment
class FakeConfig(object):
values = tuple()
class FakeApp(object):
def __init__(self, dir):
self.srcdir = dir
self.config = FakeConfig()
env_pickle = os.path.join(self._doctrees_dir(), 'environment.pickle')
try:
with open(env_pickle, 'rb') as f:
env = pickle.load(f)
env.app = FakeApp(self.dir)
env.config.values = env.app.config.values
logger.debug("Opened Sphinx environment: %s", env_pickle)
return env
except IOError as err:
logger.debug("Failed to open Sphinx environment: %s", err)
def update_mtimes(self):
"""
Updates the modification times for reST files in the Sphinx
environment for this project.
"""
env = self.get_sphinx_environment()
if env is not None:
for doc in env.all_docs:
env.all_docs[doc] = time.time()
logger.info("Updated %d reST file mtimes", len(env.all_docs))
# This is the only place we need to save (as opposed to
# load) Sphinx's pickle, so we do it right here.
env_pickle = os.path.join(self._doctrees_dir(),
'environment.pickle')
# When cloning a new branch (see
# SAGE_LOCAL/bin/sage-clone), we hard link the doc output.
# To avoid making unlinked, potentially inconsistent
# copies of the environment, we *don't* use
# env.topickle(env_pickle), which first writes a temporary
# file. We adapt sphinx.environment's
# BuildEnvironment.topickle:
# remove unpicklable attributes
env.set_warnfunc(None)
del env.config.values
with open(env_pickle, 'wb') as picklefile:
# remove potentially pickling-problematic values from config
for key, val in vars(env.config).items():
if key.startswith('_') or isinstance(val, (types.ModuleType,
types.FunctionType,
type)):
del env.config[key]
pickle.dump(env, picklefile, pickle.HIGHEST_PROTOCOL)
logger.debug("Saved Sphinx environment: %s", env_pickle)
def get_modified_modules(self):
"""
Returns an iterator for all the modules that have been modified
since the documentation was last built.
"""
env = self.get_sphinx_environment()
if env is None:
logger.debug("Stopped check for modified modules.")
return
try:
added, changed, removed = env.get_outdated_files(False)
logger.info("Sphinx found %d modified modules", len(changed))
except OSError as err:
logger.debug("Sphinx failed to determine modified modules: %s", err)
return
for name in changed:
# Only pay attention to files in a directory sage/... In
# particular, don't treat a file like 'sagetex.rst' in
# doc/en/reference/misc as an autogenerated file: see
# #14199.
if name.startswith('sage' + os.sep):
yield name
def print_modified_modules(self):
"""
Prints a list of all the modules that have been modified since
the documentation was last built.
"""
for module_name in self.get_modified_modules():
print(module_name)
def get_all_rst_files(self, exclude_sage=True):
"""
Returns an iterator for all rst files which are not
autogenerated.
"""
for directory, subdirs, files in os.walk(self.dir):
if exclude_sage and directory.startswith(os.path.join(self.dir, 'sage')):
continue
for filename in files:
if not filename.endswith('.rst'):
continue
yield os.path.join(directory, filename)
def get_all_included_modules(self):
"""
Returns an iterator for all modules which are included in the
reference manual.
"""
for filename in self.get_all_rst_files():
for module in self.get_modules(filename):
yield module
def get_new_and_updated_modules(self):
"""
Return an iterator for all new and updated modules that appear in
the toctrees, and remove obsolete old modules.
"""
env = self.get_sphinx_environment()
if env is None:
all_docs = {}
else:
all_docs = env.all_docs
new_modules = []
updated_modules = []
old_modules = []
for module_name in self.get_all_included_modules():
docname = module_name.replace('.', os.path.sep)
if docname not in all_docs:
new_modules.append(module_name)
yield module_name
continue
# get the modification timestamp of the reST doc for the module
mtime = all_docs[docname]
try:
with warnings.catch_warnings():
# primarily intended to ignore deprecation warnings
warnings.simplefilter("ignore")
__import__(module_name)
except ImportError as err:
logger.error("Warning: Could not import %s %s", module_name, err)
raise
module_filename = sys.modules[module_name].__file__
if (module_filename.endswith('.pyc') or module_filename.endswith('.pyo')):
source_filename = module_filename[:-1]
if (os.path.exists(source_filename)): module_filename = source_filename
newtime = os.path.getmtime(module_filename)
if newtime > mtime:
updated_modules.append(module_name)
yield module_name
else: # keep good old module
old_modules.append(module_name)
removed_modules = []
for docname in all_docs.keys():
if docname.startswith('sage' + os.path.sep):
module_name = docname.replace(os.path.sep, '.')
if not (module_name in old_modules or module_name in updated_modules):
try:
os.remove(os.path.join(self.dir, docname) + '.rst')
except OSError: # already removed
pass
logger.debug("Deleted auto-generated reST file %s", docname)
removed_modules.append(module_name)
logger.info("Found %d new modules", len(new_modules))
logger.info("Found %d updated modules", len(updated_modules))
logger.info("Removed %d obsolete modules", len(removed_modules))
def print_new_and_updated_modules(self):
"""
Print all the modules that appear in the toctrees that
are newly included or updated.
"""
for module_name in self.get_new_and_updated_modules():
print(module_name)
def get_modules(self, filename):
"""
Given a filename for a reST file, return an iterator for
all of the autogenerated reST files that it includes.
"""
# Create the regular expression used to detect an autogenerated file
auto_re = re.compile(r'^\s*(..\/)*(sage(nb)?\/[\w\/]*)\s*$')
# Read the lines
with open(filename) as f:
lines = f.readlines()
for line in lines:
match = auto_re.match(line)
if match:
yield match.group(2).replace(os.path.sep, '.')
def get_module_docstring_title(self, module_name):
"""
Returns the title of the module from its docstring.
"""
#Try to import the module
try:
__import__(module_name)
except ImportError as err:
logger.error("Warning: Could not import %s %s", module_name, err)
return "UNABLE TO IMPORT MODULE"
module = sys.modules[module_name]
#Get the docstring
doc = module.__doc__
if doc is None:
doc = module.doc if hasattr(module, 'doc') else ""
#Extract the title
i = doc.find('\n')
if i != -1:
return doc[i+1:].lstrip().splitlines()[0]
else:
return doc
def auto_rest_filename(self, module_name):
"""
Returns the name of the file associated to a given module
EXAMPLES::
sage: from sage_docbuild import ReferenceSubBuilder
sage: ReferenceSubBuilder("reference").auto_rest_filename("sage.combinat.partition")
'.../doc/en/reference/sage/combinat/partition.rst'
"""
return self.dir + os.path.sep + module_name.replace('.',os.path.sep) + '.rst'
def write_auto_rest_file(self, module_name):
"""
Writes the autogenerated reST file for module_name.
"""
if not module_name.startswith('sage'):
return
filename = self.auto_rest_filename(module_name)
sage_makedirs(os.path.dirname(filename))
title = self.get_module_docstring_title(module_name)
if title == '':
logger.error("Warning: Missing title for %s", module_name)
title = "MISSING TITLE"
with open(filename, 'w') as outfile:
# Don't doctest the autogenerated file.
outfile.write(".. nodoctest\n\n")
# Now write the actual content.
outfile.write(".. _%s:\n\n"%(module_name.replace(".__init__","")))
outfile.write(title + '\n')
outfile.write('='*len(title) + "\n\n")
outfile.write('.. This file has been autogenerated.\n\n')
inherited = ':inherited-members:' if self._options.inherited else ''
automodule = '''
.. automodule:: %s
:members:
:undoc-members:
:show-inheritance:
%s
'''
outfile.write(automodule % (module_name, inherited))
def clean_auto(self):
"""
Remove all autogenerated reST files.
"""
try:
shutil.rmtree(os.path.join(self.dir, 'sage'))
logger.debug("Deleted auto-generated reST files in: %s",
os.path.join(self.dir, 'sage'))
except OSError:
pass
def get_unincluded_modules(self):
"""
Returns an iterator for all the modules in the Sage library
which are not included in the reference manual.
"""
#Make a dictionary of the included modules
included_modules = {}
for module_name in self.get_all_included_modules():
included_modules[module_name] = True
base_path = os.path.join(SAGE_SRC, 'sage')
for directory, subdirs, files in os.walk(base_path):
for filename in files:
if not (filename.endswith('.py') or
filename.endswith('.pyx')):
continue
path = os.path.join(directory, filename)
#Create the module name
module_name = path[len(base_path):].replace(os.path.sep, '.')
module_name = 'sage' + module_name
module_name = module_name[:-4] if module_name.endswith('pyx') else module_name[:-3]
#Exclude __init__ and 'all' modules -- we don't want them in the manual
if module_name.endswith('__init__') or module_name.endswith('all'):
continue
if module_name not in included_modules:
yield module_name
def print_unincluded_modules(self):
"""
Prints all of the modules which are not included in the Sage
reference manual.
"""
for module_name in self.get_unincluded_modules():
print(module_name)
def print_included_modules(self):
"""
Prints all of the modules that are included in the Sage reference
manual.
"""
for module_name in self.get_all_included_modules():
print(module_name)
class SingleFileBuilder(DocBuilder):
"""
This is the class used to build the documentation for a single
user-specified file. If the file is called 'foo.py', then the
documentation is built in ``DIR/foo/`` if the user passes the
command line option "-o DIR", or in ``DOT_SAGE/docbuild/foo/``
otherwise.
"""
def __init__(self, path):
"""
INPUT:
- ``path`` - the path to the file for which documentation
should be built
"""
self.lang = 'en'
self.name = 'single_file'
path = os.path.abspath(path)
# Create docbuild and relevant subdirectories, e.g.,
# the static and templates directories in the output directory.
# By default, this is DOT_SAGE/docbuild/MODULE_NAME, but can
# also be specified at the command line.
module_name = os.path.splitext(os.path.basename(path))[0]
latex_name = module_name.replace('_', r'\\_')
if self._options.output_dir:
base_dir = os.path.join(self._options.output_dir, module_name)
if os.path.exists(base_dir):
logger.warning('Warning: Directory %s exists. It is safer to build in a new directory.' % base_dir)
else:
base_dir = os.path.join(DOT_SAGE, 'docbuild', module_name)
try:
shutil.rmtree(base_dir)
except OSError:
pass
self.dir = os.path.join(base_dir, 'source')
sage_makedirs(os.path.join(self.dir, "static"))
sage_makedirs(os.path.join(self.dir, "templates"))
# Write self.dir/conf.py
conf = r"""# This file is automatically generated by {}, do not edit!
import sys, os
sys.path.append({!r})
from sage.docs.conf import *
html_static_path = [] + html_common_static_path
project = 'Documentation for {}'
release = 'unknown'
name = {!r}
html_title = project
html_short_title = project
htmlhelp_basename = name
extensions.remove('multidocs') # see #29651
extensions.remove('inventory_builder')
latex_domain_indices = False
latex_documents = [
('index', name + '.tex', 'Documentation for {}',
'unknown', 'manual'),
]
""".format(__file__, self.dir, module_name, module_name, latex_name)
if 'SAGE_DOC_UNDERSCORE' in os.environ:
conf += r"""
def setup(app):
app.connect('autodoc-skip-member', skip_member)
"""
with open(os.path.join(self.dir, 'conf.py'), 'w') as conffile:
conffile.write(conf)
# Write self.dir/index.rst
title = 'Docs for file %s' % path
heading = title + "\n" + ("=" * len(title))
index = r"""{}
.. This file is automatically generated by {}, do not edit!
.. automodule:: {}
:members:
:undoc-members:
:show-inheritance:
""".format(heading, __file__, module_name)
with open(os.path.join(self.dir, 'index.rst'), 'w') as indexfile:
indexfile.write(index)
# Create link from original file to self.dir. Note that we
# append self.dir to sys.path in conf.py. This is reasonably
# safe (but not perfect), since we just created self.dir.
try:
os.symlink(path, os.path.join(self.dir, os.path.basename(path)))
except OSError:
pass
def _output_dir(self, type):
"""
Return the directory where the output of type ``type`` is stored.
If the directory does not exist, then it will automatically be
created.
"""
base_dir = os.path.split(self.dir)[0]
d = os.path.join(base_dir, "output", type)
sage_makedirs(d)
return d
def _doctrees_dir(self):
"""
Returns the directory where the doctrees are stored. If the
directory does not exist, then it will automatically be
created.
"""
return self._output_dir('doctrees')
def get_builder(name):
"""
Returns an appropriate *Builder object for the document ``name``.
DocBuilder and its subclasses do all the real work in building the
documentation.
"""
if name == 'all':
return AllBuilder()
elif name.endswith('reference'):
return ReferenceBuilder(name)
elif 'reference' in name and os.path.exists(os.path.join(SAGE_DOC_SRC, 'en', name)):
return ReferenceSubBuilder(name)
elif name.endswith('website'):
return WebsiteBuilder(name)
elif name.startswith('file='):
path = name[5:]
if path.endswith('.sage') or path.endswith('.pyx'):
raise NotImplementedError('Building documentation for a single file only works for Python files.')
return SingleFileBuilder(path)
elif name in get_documents() or name in AllBuilder().get_all_documents():
return DocBuilder(name)
else:
print("'%s' is not a recognized document. Type 'sage --docbuild -D' for a list"%name)
print("of documents, or 'sage --docbuild --help' for more help.")
sys.exit(1)
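# Hypothetical illustration, not part of the original module: how a document
# name passed on the command line is turned into a builder. The names used
# below ('tutorial', 'file=/path/to/foo.py') are examples only.
def _example_get_builder():
    # An ordinary document gets a plain DocBuilder...
    tutorial = get_builder('tutorial')
    tutorial.html()  # runs Sphinx and writes SAGE_DOC/html/en/tutorial
    # ...while 'file=...' documents go through SingleFileBuilder.
    single = get_builder('file=/path/to/foo.py')
    single.html()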
def format_columns(lst, align='<', cols=None, indent=4, pad=3, width=80):
"""
Utility function that formats a list as a simple table and returns
a Unicode string representation. The number of columns is
computed from the other options, unless it's passed as a keyword
argument. For help on Python's string formatter, see
http://docs.python.org/library/string.html#format-string-syntax
"""
# Can we generalize this (efficiently) to other / multiple inputs
# and generators?
size = max(map(len, lst)) + pad
if cols is None:
import math
cols = math.trunc((width - indent) / size)
s = " " * indent
for i in range(len(lst)):
if i != 0 and i % cols == 0:
s += "\n" + " " * indent
s += "{0:{1}{2}}".format(lst[i], align, size)
s += "\n"
return s
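# Hypothetical illustration, not part of the original module: format_columns
# lays a list of names out in fixed-width columns, which is how the
# help_documents/help_formats listings below are produced. The sample list is
# made up.
def _example_format_columns():
    sample = ['tutorial', 'reference', 'developer', 'constructions', 'faq']
    # With the defaults (width=80, indent=4, pad=3) this prints the names in
    # equally sized, left-aligned columns.
    print(format_columns(sample))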
def help_usage(s="", compact=False):
"""
Appends and returns a brief usage message for the Sage
documentation builder. If 'compact' is False, the function adds a
final newline character.
"""
s += "sage --docbuild [OPTIONS] DOCUMENT (FORMAT | COMMAND)"
if not compact:
s += "\n"
return s
def help_description(s="", compact=False):
"""
Appends and returns a brief description of the Sage documentation
builder. If 'compact' is False, the function adds a final newline
character.
"""
s += "Build or return information about Sage documentation.\n\n"
s += " DOCUMENT name of the document to build\n"
s += " FORMAT document output format\n"
s += " COMMAND document-specific command\n\n"
s += "Note that DOCUMENT may have the form 'file=/path/to/FILE',\n"
s += "which builds the documentation for the specified file.\n\n"
s += "A DOCUMENT and either a FORMAT or a COMMAND are required,\n"
s += "unless a list of one or more of these is requested."
if not compact:
s += "\n"
return s
def help_examples(s=""):
"""
Appends and returns some usage examples for the Sage documentation
builder.
"""
s += "Examples:\n"
s += " sage --docbuild -FDC all\n"
s += " sage --docbuild constructions pdf\n"
s += " sage --docbuild reference html -jv3\n"
s += " sage --docbuild --mathjax tutorial html\n"
s += " sage --docbuild reference print_unincluded_modules\n"
s += " sage --docbuild developer -j html --sphinx-opts -q,-aE --verbose 2"
return s
def get_documents():
"""
Returns a list of document names the Sage documentation builder
will accept as command-line arguments.
"""
all_b = AllBuilder()
docs = all_b.get_all_documents()
docs = [(d[3:] if d[0:3] == 'en/' else d) for d in docs]
return docs
def help_documents(s=""):
"""
Appends and returns a tabular list of documents, including a
shortcut 'all' for all documents, available to the Sage
documentation builder.
"""
docs = get_documents()
s += "DOCUMENTs:\n"
s += format_columns(docs + ['all (!)'])
s += "(!) Builds everything.\n\n"
if 'reference' in docs:
s+= "Other valid document names take the form 'reference/DIR', where\n"
s+= "DIR is a subdirectory of SAGE_DOC_SRC/en/reference/.\n"
s+= "This builds just the specified part of the reference manual.\n"
s += "DOCUMENT may also have the form 'file=/path/to/FILE', which builds\n"
s += "the documentation for the specified file.\n"
return s
def get_formats():
"""
Returns a list of output formats the Sage documentation builder
will accept on the command-line.
"""
tut_b = DocBuilder('en/tutorial')
formats = tut_b._output_formats()
formats.remove('html')
return ['html', 'pdf'] + formats
def help_formats(s=""):
"""
Appends and returns a tabular list of output formats available to
the Sage documentation builder.
"""
s += "FORMATs:\n"
s += format_columns(get_formats())
return s
def help_commands(name='all', s=""):
"""
Appends and returns a tabular list of commands, if any, the Sage
documentation builder can run on the indicated document. The
default is to list all commands for all documents.
"""
# To do: Generate the lists dynamically, using class attributes,
# as with the Builders above.
command_dict = { 'reference' : [
'print_included_modules', 'print_modified_modules (*)',
'print_unincluded_modules', 'print_new_and_updated_modules (*)',
] }
for doc in command_dict:
if name == 'all' or doc == name:
s += "COMMANDs for the DOCUMENT '" + doc + "':\n"
s += format_columns(command_dict[doc])
s += "(*) Since the last build.\n"
return s
def help_message_long(option, opt_str, value, parser):
"""
Prints an extended help message for the Sage documentation builder
and exits.
"""
help_funcs = [ help_usage, help_description, help_documents,
help_formats, help_commands, parser.format_option_help,
help_examples ]
for f in help_funcs:
print(f())
sys.exit(0)
def help_message_short(option=None, opt_str=None, value=None, parser=None,
error=False):
"""
Prints a help message for the Sage documentation builder. The
message includes command-line usage and a list of options. The
message is printed only on the first call. If error is True
during this call, the message is printed only if the user hasn't
requested a list (e.g., documents, formats, commands).
"""
if not hasattr(parser.values, 'printed_help'):
if error is True:
if not hasattr(parser.values, 'printed_list'):
parser.print_help()
else:
parser.print_help()
setattr(parser.values, 'printed_help', 1)
def help_wrapper(option, opt_str, value, parser):
"""
A helper wrapper for command-line options to the Sage
documentation builder that print lists, such as document names,
formats, and document-specific commands.
"""
if option.dest == 'commands':
print(help_commands(value), end="")
if option.dest == 'documents':
print(help_documents(), end="")
if option.dest == 'formats':
print(help_formats(), end="")
if option.dest == 'all_documents':
if value == 'en/reference' or value == 'reference':
b = ReferenceBuilder('reference')
refdir = os.path.join(os.environ['SAGE_DOC_SRC'], 'en', b.name)
s = b.get_all_documents(refdir)
# Put the bibliography first, because it needs to be built first:
s.remove('reference/references')
s.insert(0, 'reference/references')
elif value == 'all':
s = get_documents()
# Put the reference manual first, because it needs to be built first:
s.remove('reference')
s.insert(0, 'reference')
else:
raise ValueError("argument for --all-documents must be either 'all'"
" or 'reference'")
for d in s:
print(d)
setattr(parser.values, 'printed_list', 1)
class IndentedHelpFormatter2(optparse.IndentedHelpFormatter, object):
"""
Custom help formatter class for optparse's OptionParser.
"""
def format_description(self, description):
"""
Returns a formatted description, preserving any original
explicit new line characters.
"""
if description:
lines_in = description.split('\n')
lines_out = [self._format_text(line) for line in lines_in]
return "\n".join(lines_out) + "\n"
else:
return ""
def format_heading(self, heading):
"""
Returns a formatted heading using the superclass' formatter.
If the heading is 'options' (ignoring case), the function converts
it to ALL CAPS. This allows us to match the heading 'OPTIONS' with
the same token in the builder's usage message.
"""
if heading.lower() == 'options':
heading = "OPTIONS"
return super(IndentedHelpFormatter2, self).format_heading(heading)
def setup_parser():
"""
Sets up and returns a command-line OptionParser instance for the
Sage documentation builder.
"""
# Documentation: http://docs.python.org/library/optparse.html
parser = optparse.OptionParser(add_help_option=False,
usage=help_usage(compact=True),
formatter=IndentedHelpFormatter2(),
description=help_description(compact=True))
# Standard options. Note: We use explicit option.dest names
# to avoid ambiguity.
standard = optparse.OptionGroup(parser, "Standard")
standard.add_option("-h", "--help",
action="callback", callback=help_message_short,
help="show a help message and exit")
standard.add_option("-H", "--help-all",
action="callback", callback=help_message_long,
help="show an extended help message and exit")
standard.add_option("-D", "--documents", dest="documents",
action="callback", callback=help_wrapper,
help="list all available DOCUMENTs")
standard.add_option("-F", "--formats", dest="formats",
action="callback", callback=help_wrapper,
help="list all output FORMATs")
standard.add_option("-C", "--commands", dest="commands",
type="string", metavar="DOC",
action="callback", callback=help_wrapper,
help="list all COMMANDs for DOCUMENT DOC; use 'all' to list all")
standard.add_option("-i", "--inherited", dest="inherited",
default=False, action="store_true",
help="include inherited members in reference manual; may be slow, may fail for PDF output")
standard.add_option("-u", "--underscore", dest="underscore",
default=False, action="store_true",
help="include variables prefixed with '_' in reference manual; may be slow, may fail for PDF output")
standard.add_option("-j", "--mathjax", "--jsmath", dest="mathjax",
action="store_true",
help="render math using MathJax; FORMATs: html, json, pickle, web")
standard.add_option("--no-plot", dest="no_plot",
action="store_true",
help="do not include graphics auto-generated using the '.. plot' markup")
standard.add_option("--include-tests-blocks", dest="skip_tests", default=True,
action="store_false",
help="include TESTS blocks in the reference manual")
standard.add_option("--no-pdf-links", dest="no_pdf_links",
action="store_true",
help="do not include PDF links in DOCUMENT 'website'; FORMATs: html, json, pickle, web")
standard.add_option("--warn-links", dest="warn_links",
default=False, action="store_true",
help="issue a warning whenever a link is not properly resolved; equivalent to '--sphinx-opts -n' (sphinx option: nitpicky)")
standard.add_option("--check-nested", dest="check_nested",
action="store_true",
help="check picklability of nested classes in DOCUMENT 'reference'")
standard.add_option("-N", "--no-colors", dest="color", default=True,
action="store_false",
help="do not color output; does not affect children")
standard.add_option("-q", "--quiet", dest="verbose",
action="store_const", const=0,
help="work quietly; same as --verbose=0")
standard.add_option("-v", "--verbose", dest="verbose",
type="int", default=1, metavar="LEVEL",
action="store",
help="report progress at LEVEL=0 (quiet), 1 (normal), 2 (info), or 3 (debug); does not affect children")
standard.add_option("-o", "--output", dest="output_dir", default=None,
metavar="DIR", action="store",
help="if DOCUMENT is a single file ('file=...'), write output to this directory")
parser.add_option_group(standard)
# Advanced options.
advanced = optparse.OptionGroup(parser, "Advanced",
"Use these options with care.")
advanced.add_option("-S", "--sphinx-opts", dest="sphinx_opts",
type="string", metavar="OPTS",
action="store",
help="pass comma-separated OPTS to sphinx-build")
advanced.add_option("-U", "--update-mtimes", dest="update_mtimes",
default=False, action="store_true",
help="before building reference manual, update modification times for auto-generated reST files")
advanced.add_option("-k", "--keep-going", dest="keep_going",
default=False, action="store_true",
help="Do not abort on errors but continue as much as possible after an error")
advanced.add_option("--all-documents", dest="all_documents",
type="str", metavar="ARG",
action="callback", callback=help_wrapper,
help="if ARG is 'reference', list all subdocuments"
" of en/reference. If ARG is 'all', list all main"
" documents")
parser.add_option_group(advanced)
return parser
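# Hypothetical illustration, not part of the original module: the parser built
# above expects a DOCUMENT plus a FORMAT (or COMMAND) after the options, which
# is exactly what main() unpacks below. The command line here is an example.
def _example_parse_command_line():
    parser = setup_parser()
    options, args = parser.parse_args(['--no-pdf-links', 'website', 'html'])
    return options, args   # args == ['website', 'html']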
def setup_logger(verbose=1, color=True):
r"""
Set up a Python Logger instance for the Sage documentation builder. The
optional arguments set the logger's level and message format.
EXAMPLES::
sage: from sage_docbuild import setup_logger, logger
sage: setup_logger()
sage: type(logger)
<class 'logging.Logger'>
"""
# Set up colors. Adapted from sphinx.cmdline.
import sphinx.util.console as c
if not color or not sys.stdout.isatty() or not c.color_terminal():
c.nocolor()
# Available colors: black, darkgray, (dark)red, dark(green),
# brown, yellow, (dark)blue, purple, fuchsia, turquoise, teal,
# lightgray, white. Available styles: reset, bold, faint,
# standout, underline, blink.
# Set up log record formats.
format_std = "%(message)s"
formatter = logging.Formatter(format_std)
# format_debug = "%(module)s #%(lineno)s %(funcName)s() %(message)s"
fields = ['%(module)s', '#%(lineno)s', '%(funcName)s()', '%(message)s']
colors = ['darkblue', 'darkred', 'brown', 'reset']
styles = ['reset', 'reset', 'reset', 'reset']
format_debug = ""
for i in range(len(fields)):
format_debug += c.colorize(styles[i], c.colorize(colors[i], fields[i]))
if i != len(fields):
format_debug += " "
# Note: There's also Handler.setLevel(). The argument is the
# lowest severity message that the respective logger or handler
# will pass on. The default levels are DEBUG, INFO, WARNING,
# ERROR, and CRITICAL. We use "WARNING" for normal verbosity and
# "ERROR" for quiet operation. It's possible to define custom
# levels. See the documentation for details.
if verbose == 0:
logger.setLevel(logging.ERROR)
if verbose == 1:
logger.setLevel(logging.WARNING)
if verbose == 2:
logger.setLevel(logging.INFO)
if verbose == 3:
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(format_debug)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
class IntersphinxCache:
"""
Replace sphinx.ext.intersphinx.fetch_inventory by an in-memory
cached version.
"""
def __init__(self):
self.inventories = {}
self.real_fetch_inventory = sphinx.ext.intersphinx.fetch_inventory
sphinx.ext.intersphinx.fetch_inventory = self.fetch_inventory
def fetch_inventory(self, app, uri, inv):
"""
Return the result of ``sphinx.ext.intersphinx.fetch_inventory()``
from a cache if possible. Otherwise, call
``sphinx.ext.intersphinx.fetch_inventory()`` and cache the result.
"""
t = (uri, inv)
try:
return self.inventories[t]
except KeyError:
i = self.real_fetch_inventory(app, uri, inv)
self.inventories[t] = i
return i
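# Hypothetical illustration, not part of the original module: creating an
# IntersphinxCache monkey-patches sphinx.ext.intersphinx.fetch_inventory, so
# every (uri, inv) pair is fetched at most once per docbuild process. main()
# below relies on this when building the many reference sub-documents.
def _example_intersphinx_cache():
    cache = IntersphinxCache()
    # From here on, repeated intersphinx lookups reuse cache.inventories
    # instead of re-reading each objects.inv file.
    return cache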
def main():
# Parse the command-line.
parser = setup_parser()
options, args = parser.parse_args()
DocBuilder._options = options
# Get the name and type (target format) of the document we are
# trying to build.
try:
name, type = args
except ValueError:
help_message_short(parser=parser, error=True)
sys.exit(1)
# Set up module-wide logging.
setup_logger(options.verbose, options.color)
def excepthook(*exc_info):
logger.error('Error building the documentation.', exc_info=exc_info)
if INCREMENTAL_BUILD:
logger.error('''
Note: incremental documentation builds sometimes cause spurious
error messages. To be certain that these are real errors, run
"make doc-clean" first and try again.''')
sys.excepthook = excepthook
# Process selected options.
#
# MathJax: this check usually has no practical effect, since
# SAGE_DOC_MATHJAX is set to "True" by the script sage-env.
# To disable MathJax, set SAGE_DOC_MATHJAX to "no" or "False".
if options.mathjax or (os.environ.get('SAGE_DOC_MATHJAX', 'no') != 'no'
and os.environ.get('SAGE_DOC_MATHJAX', 'no') != 'False'):
os.environ['SAGE_DOC_MATHJAX'] = 'True'
if options.check_nested:
os.environ['SAGE_CHECK_NESTED'] = 'True'
if options.underscore:
os.environ['SAGE_DOC_UNDERSCORE'] = "True"
global ALLSPHINXOPTS, WEBSITESPHINXOPTS, ABORT_ON_ERROR
if options.sphinx_opts:
ALLSPHINXOPTS += options.sphinx_opts.replace(',', ' ') + " "
if options.no_pdf_links:
WEBSITESPHINXOPTS = " -A hide_pdf_links=1 "
if options.warn_links:
ALLSPHINXOPTS += "-n "
if options.no_plot:
os.environ['SAGE_SKIP_PLOT_DIRECTIVE'] = 'yes'
if options.skip_tests:
os.environ['SAGE_SKIP_TESTS_BLOCKS'] = 'True'
ABORT_ON_ERROR = not options.keep_going
# Delete empty directories. This is needed in particular for empty
# directories due to "git checkout" which never deletes empty
# directories it leaves behind. See Trac #20010.
for dirpath, dirnames, filenames in os.walk(SAGE_DOC_SRC, topdown=False):
if not dirnames + filenames:
logger.warning('Deleting empty directory {0}'.format(dirpath))
os.rmdir(dirpath)
# Set up Intersphinx cache
C = IntersphinxCache()
builder = getattr(get_builder(name), type)
builder()
| []
| []
| [
"SAGE_DOC_UNDERSCORE",
"SAGE_DOC_MATHJAX",
"SAGE_DOC_SRC",
"SAGE_SKIP_TESTS_BLOCKS",
"SAGE_CHECK_NESTED",
"SAGE_SKIP_PLOT_DIRECTIVE"
]
| [] | ["SAGE_DOC_UNDERSCORE", "SAGE_DOC_MATHJAX", "SAGE_DOC_SRC", "SAGE_SKIP_TESTS_BLOCKS", "SAGE_CHECK_NESTED", "SAGE_SKIP_PLOT_DIRECTIVE"] | python | 6 | 0 | |
common/sas.go | package common
import (
"bytes"
"context"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"sync"
"time"
)
// ParseConnectionString parses the given connection string into a key-value map and
// returns an error if any of the required keys is missing.
func ParseConnectionString(cs string, require ...string) (map[string]string, error) {
m := map[string]string{}
for _, s := range strings.Split(cs, ";") {
if s == "" {
continue
}
kv := strings.SplitN(s, "=", 2)
if len(kv) != 2 {
return nil, errors.New("malformed connection string")
}
m[kv[0]] = kv[1]
}
for _, k := range require {
if s := m[k]; s == "" {
return nil, fmt.Errorf("%s is required", k)
}
}
return m, nil
}
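// Illustrative usage of ParseConnectionString (values below are placeholders, not real credentials):
//
//	m, err := ParseConnectionString(
//		"HostName=example.azure-devices.net;DeviceId=dev1;SharedAccessKey=c2VjcmV0",
//		"HostName", "DeviceId", "SharedAccessKey",
//	)
//	// on success, m["HostName"] == "example.azure-devices.net"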
func GetEdgeModuleEnvironmentVariables() (map[string]string, error) {
m := map[string]string{}
require := []string{
"ContainerHostName",
"IOTHubHostName",
"GatewayHostName",
"DeviceID",
"ModuleID",
"GenerationID",
"WorkloadAPI",
"APIVersion",
}
m["ContainerHostName"] = os.Getenv("HOSTNAME")
m["IOTHubHostName"] = os.Getenv("IOTEDGE_IOTHUBHOSTNAME")
m["GatewayHostName"] = os.Getenv("IOTEDGE_GATEWAYHOSTNAME")
m["DeviceID"] = os.Getenv("IOTEDGE_DEVICEID")
m["ModuleID"] = os.Getenv("IOTEDGE_MODULEID")
m["GenerationID"] = os.Getenv("IOTEDGE_MODULEGENERATIONID")
m["WorkloadAPI"] = os.Getenv("IOTEDGE_WORKLOADURI")
m["APIVersion"] = os.Getenv("IOTEDGE_APIVERSION")
for _, k := range require {
if s := m[k]; s == "" {
return nil, fmt.Errorf("%s is required", k)
}
}
return m, nil
}
// NewSharedAccessKey creates a new shared access key for subsequent token generation.
func NewSharedAccessKey(hostname, policy, key string) *SharedAccessKey {
return &SharedAccessKey{
HostName: hostname,
SharedAccessKeyName: policy,
SharedAccessKey: key,
}
}
// SharedAccessKey is SAS token generator.
type SharedAccessKey struct {
HostName string
SharedAccessKeyName string
SharedAccessKey string
}
// Token generates a shared access signature for the named resource and lifetime.
func (c *SharedAccessKey) Token(
resource string, lifetime time.Duration,
) (*SharedAccessSignature, error) {
return NewSharedAccessSignature(
resource, c.SharedAccessKeyName, c.SharedAccessKey, time.Now().Add(lifetime),
)
}
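// Sketch of typical SharedAccessKey use (hostname, policy, key and resource are
// placeholder values; req stands for an *http.Request being authorized):
//
//	sak := NewSharedAccessKey("example.azure-devices.net", "device", "<base64 key>")
//	sas, err := sak.Token("example.azure-devices.net/devices/dev1", time.Hour)
//	if err == nil {
//		req.Header.Set("Authorization", sas.String())
//	}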
// NewSharedAccessSignature initializes a new shared access signature
// and generates signature fields based on the given input.
func NewSharedAccessSignature(
resource, policy, key string, expiry time.Time,
) (*SharedAccessSignature, error) {
sig, err := mksig(resource, key, expiry)
if err != nil {
return nil, err
}
return &SharedAccessSignature{
Sr: resource,
Sig: sig,
Se: expiry,
Skn: policy,
}, nil
}
func mksig(sr, key string, se time.Time) (string, error) {
b, err := base64.StdEncoding.DecodeString(key)
if err != nil {
return "", err
}
h := hmac.New(sha256.New, b)
if _, err := fmt.Fprintf(h, "%s\n%d", url.QueryEscape(sr), se.Unix()); err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
}
// SharedAccessSignature is a shared access signature instance.
type SharedAccessSignature struct {
Sr string
Sig string
Se time.Time
Skn string
}
// String converts the signature to a token string.
func (sas *SharedAccessSignature) String() string {
s := "SharedAccessSignature " +
"sr=" + url.QueryEscape(sas.Sr) +
"&sig=" + url.QueryEscape(sas.Sig) +
"&se=" + url.QueryEscape(strconv.FormatInt(sas.Se.Unix(), 10))
if sas.Skn != "" {
s += "&skn=" + url.QueryEscape(sas.Skn)
}
return s
}
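// For reference, String() produces a token of the general shape below (all values are
// placeholders; skn is omitted when the policy name is empty):
//
//	SharedAccessSignature sr=example.azure-devices.net%2Fdevices%2Fdev1&sig=<base64 hmac>&se=1700000000&skn=device
//
// where sig is the base64-encoded HMAC-SHA256 of "<escaped sr>\n<unix expiry>" computed in mksig.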
// EDGE MODULE AUTOMATIC AUTHENTICATION
// TokenFromEdge generates a shared access signature for the named resource and lifetime using the Workload API sign endpoint
func (c *SharedAccessKey) TokenFromEdge(
workloadURI, module, genid, resource string, lifetime time.Duration,
) (*SharedAccessSignature, error) {
return NewSharedAccessSignatureFromEdge(
workloadURI, module, genid, resource, time.Now().Add(lifetime),
)
}
// NewSharedAccessSignatureFromEdge initializes a new shared access signature
// and generates signature fields via the Edge workload API sign endpoint.
func NewSharedAccessSignatureFromEdge(
workloadURI, module, genid, resource string, expiry time.Time,
) (*SharedAccessSignature, error) {
sig, err := mksigViaEdge(workloadURI, resource, module, genid, expiry)
if err != nil {
return nil, err
}
return &SharedAccessSignature{
Sr: resource,
Sig: sig,
Se: expiry,
}, nil
}
func mksigViaEdge(workloadURI, resource, module, genid string, se time.Time) (string, error) {
data := url.QueryEscape(resource) + "\n" + strconv.FormatInt(se.Unix(), 10)
request := &EdgeSignRequestPayload{
Data: base64.StdEncoding.EncodeToString([]byte(data)),
}
return edgeSignRequest(workloadURI, module, genid, request)
}
// EdgeSignRequestPayload is a placeholder object for sign requests.
type EdgeSignRequestPayload struct {
KeyID string `json:"keyId"`
Algo string `json:"algo"`
Data string `json:"data"`
}
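// For illustration, the JSON body posted to the workload sign endpoint looks like the
// following (the data value is a placeholder for the base64 of
// "<escaped resource>\n<unix expiry>"; keyId and algo are defaulted by Validate when omitted):
//
//	{"keyId": "primary", "algo": "HMACSHA256", "data": "aHR0cHMuLi4="}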
// Validate the properties on EdgeSignRequestPayload
func (esrp *EdgeSignRequestPayload) Validate() error {
if len(esrp.Algo) < 1 {
esrp.Algo = "HMACSHA256"
}
if len(esrp.KeyID) < 1 {
esrp.KeyID = "primary"
}
if len(esrp.Data) < 1 {
return fmt.Errorf("sign request: no data provided")
}
return nil
}
// EdgeSignRequestResponse is a container struct for the response.
type EdgeSignRequestResponse struct {
Digest string `json:"digest"`
Message string `json:"message"`
}
var (
sharedUnixHTTPClient http.Client
doOnce sync.Once
)
func setSharedUnixHTTPClient(addrName string) {
doOnce.Do(func() {
sharedUnixHTTPClient = http.Client{
Transport: &http.Transport{
DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial("unix", addrName)
},
},
}
})
}
func edgeSignRequest(workloadURI, name, genid string, payload *EdgeSignRequestPayload) (string, error) {
esrr := EdgeSignRequestResponse{}
// validate payload properties
err := payload.Validate()
if err != nil {
return "", fmt.Errorf("sign: unable to sign request: %w", err)
}
payloadJSON, _ := json.Marshal(payload)
// catch unix domain sockets URIs
if strings.Contains(workloadURI, "unix://") {
addr, err := net.ResolveUnixAddr("unix", strings.TrimPrefix(workloadURI, "unix://"))
if err != nil {
fmt.Printf("Failed to resolve: %v\n", err)
return "", err
}
setSharedUnixHTTPClient(addr.Name)
response, err := sharedUnixHTTPClient.Post("http://iotedge"+fmt.Sprintf("/modules/%s/genid/%s/sign?api-version=2018-06-28", name, genid), "text/plain", bytes.NewBuffer(payloadJSON))
if err != nil {
return "", fmt.Errorf("sign: unable to sign request (resp): %w", err)
}
defer response.Body.Close()
body, err := io.ReadAll(response.Body)
if err != nil {
return "", fmt.Errorf("sign: unable to sign request (read): %w", err)
}
err = json.Unmarshal(body, &esrr)
if err != nil {
return "", fmt.Errorf("sign: unable to sign request (unm): %w", err)
}
} else {
// format uri string for base uri
uri := fmt.Sprintf("%smodules/%s/genid/%s/sign?api-version=2018-06-28", workloadURI, name, genid)
// get http response and handle error
resp, err := http.Post(uri, "text/plain", bytes.NewBuffer(payloadJSON))
if err != nil {
return "", fmt.Errorf("sign: unable to sign request (resp): %w", err)
}
defer resp.Body.Close()
// read response
body, err := io.ReadAll(resp.Body)
if err != nil {
return "", fmt.Errorf("sign: unable to sign request (read): %w", err)
}
err = json.Unmarshal(body, &esrr)
if err != nil {
return "", fmt.Errorf("sign: unable to sign request (unm): %w", err)
}
}
// if error returned from WorkloadAPI
if len(esrr.Message) > 0 {
return "", fmt.Errorf("sign: unable to sign request: %s", esrr.Message)
}
return esrr.Digest, nil
}
| [
"\"HOSTNAME\"",
"\"IOTEDGE_IOTHUBHOSTNAME\"",
"\"IOTEDGE_GATEWAYHOSTNAME\"",
"\"IOTEDGE_DEVICEID\"",
"\"IOTEDGE_MODULEID\"",
"\"IOTEDGE_MODULEGENERATIONID\"",
"\"IOTEDGE_WORKLOADURI\"",
"\"IOTEDGE_APIVERSION\""
]
| []
| [
"IOTEDGE_IOTHUBHOSTNAME",
"IOTEDGE_APIVERSION",
"IOTEDGE_WORKLOADURI",
"HOSTNAME",
"IOTEDGE_MODULEID",
"IOTEDGE_DEVICEID",
"IOTEDGE_MODULEGENERATIONID",
"IOTEDGE_GATEWAYHOSTNAME"
]
| [] | ["IOTEDGE_IOTHUBHOSTNAME", "IOTEDGE_APIVERSION", "IOTEDGE_WORKLOADURI", "HOSTNAME", "IOTEDGE_MODULEID", "IOTEDGE_DEVICEID", "IOTEDGE_MODULEGENERATIONID", "IOTEDGE_GATEWAYHOSTNAME"] | go | 8 | 0 | |
plugin/plugin_shim_test.go | package plugin_test
import (
"os"
"os/exec"
"path/filepath"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
)
var _ = Describe("Command", func() {
var (
validPluginPath string
OLD_PLUGINS_DIR string
)
BeforeEach(func() {
OLD_PLUGINS_DIR = os.Getenv("CF_PLUGINS_DIR")
dir, err := os.Getwd()
Expect(err).NotTo(HaveOccurred())
fullDir := filepath.Join(dir, "..", "fixtures", "config", "main-plugin-test-config")
err = os.Setenv("CF_PLUGINS_DIR", fullDir)
Expect(err).NotTo(HaveOccurred())
validPluginPath = filepath.Join(dir, "..", "fixtures", "plugins", "test_1.exe")
})
AfterEach(func() {
err := os.Setenv("CF_PLUGINS_DIR", OLD_PLUGINS_DIR)
Expect(err).NotTo(HaveOccurred())
})
Describe(".ServeCommand", func() {
XIt("prints a warning if a plugin does not implement the rpc interface", func() {
//This would seem like a valid test, but the plugin itself will not compile
})
It("Exits with status 1 if it cannot ping the host port passed as an argument", func() {
args := []string{"0", "0"}
session, err := Start(exec.Command(validPluginPath, args...), GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
Eventually(session, 2).Should(Exit(1))
})
})
})
| [
"\"CF_PLUGINS_DIR\""
]
| []
| [
"CF_PLUGINS_DIR"
]
| [] | ["CF_PLUGINS_DIR"] | go | 1 | 0 | |
tests/unit/db/unit_t_db_base.py | #!/usr/bin/env python
# Copyright (c) 2015 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
_unit_t_db_base_
unit_t_db_base module - The base class for all unit tests that target a db
The unit tests are set to execute by default against a CouchDB instance.
In order to run the unit tests against a Cloudant instance, set the
RUN_CLOUDANT_TESTS environment variable to something.
example: export RUN_CLOUDANT_TESTS=1
Other valid environment variables:
CLOUDANT_ACCOUNT: Set this to the Cloudant account that you wish to connect to.
- This is used for Cloudant tests only.
example: export CLOUDANT_ACCOUNT=account
DB_USER: Set this to the username to connect with.
- Optional for CouchDB tests. If omitted then a user will be created before
tests are executed in CouchDB.
- Mandatory for Cloudant tests.
example: export DB_USER=user
DB_PASSWORD: Set this to the password for the user specified.
example: export DB_PASSWORD=password
DB_URL: Optionally set this to override the construction of the database URL.
example: export DB_URL=https://account.cloudant.com
"""
import unittest
import requests
import os
import uuid
from cloudant.account import CouchDB, Cloudant
from ... import unicode_
class UnitTestDbBase(unittest.TestCase):
"""
The base class for all unit tests targeting a database
"""
@classmethod
def setUpClass(cls):
"""
        If targeting CouchDB, set up a CouchDB instance; otherwise do nothing.
Note: Admin Party is currently unsupported so we must create a
CouchDB user for tests to function with a CouchDB instance if one is
not provided.
"""
if os.environ.get('RUN_CLOUDANT_TESTS') is None:
if os.environ.get('DB_URL') is None:
os.environ['DB_URL'] = 'http://127.0.0.1:5984'
if os.environ.get('DB_USER') is None:
os.environ['DB_USER_CREATED'] = '1'
os.environ['DB_USER'] = 'user-{0}'.format(
unicode_(uuid.uuid4())
)
os.environ['DB_PASSWORD'] = 'password'
resp = requests.put(
'{0}/_config/admins/{1}'.format(
os.environ['DB_URL'],
os.environ['DB_USER']
),
data='"{0}"'.format(os.environ['DB_PASSWORD'])
)
resp.raise_for_status()
@classmethod
def tearDownClass(cls):
"""
If necessary, clean up CouchDB instance once all tests are complete.
"""
if (os.environ.get('RUN_CLOUDANT_TESTS') is None and
os.environ.get('DB_USER_CREATED') is not None):
resp = requests.delete(
'{0}://{1}:{2}@{3}/_config/admins/{4}'.format(
os.environ['DB_URL'].split('://', 1)[0],
os.environ['DB_USER'],
os.environ['DB_PASSWORD'],
os.environ['DB_URL'].split('://', 1)[1],
os.environ['DB_USER']
)
)
del os.environ['DB_USER_CREATED']
del os.environ['DB_USER']
resp.raise_for_status()
def setUp(self):
"""
Set up test attributes for unit tests targeting a database
"""
if os.environ.get('RUN_CLOUDANT_TESTS') is None:
self.user = os.environ['DB_USER']
self.pwd = os.environ['DB_PASSWORD']
self.url = os.environ['DB_URL']
self.client = CouchDB(self.user, self.pwd, url=self.url)
else:
self.account = os.environ.get('CLOUDANT_ACCOUNT')
self.user = os.environ.get('DB_USER')
self.pwd = os.environ.get('DB_PASSWORD')
self.url = os.environ.get(
'DB_URL',
'https://{0}.cloudant.com'.format(self.account))
self.client = Cloudant(
self.user,
self.pwd,
url=self.url,
x_cloudant_user=self.account)
def tearDown(self):
"""
Ensure the client is new for each test
"""
del self.client
def db_set_up(self):
"""
Set up test attributes for Database tests
"""
self.client.connect()
self.test_dbname = self.dbname()
self.db = self.client._DATABASE_CLASS(self.client, self.test_dbname)
self.db.create()
def db_tear_down(self):
"""
Reset test attributes for each test
"""
self.db.delete()
self.client.disconnect()
del self.test_dbname
del self.db
def dbname(self, database_name='db'):
return '{0}-{1}'.format(database_name, unicode_(uuid.uuid4()))
def populate_db_with_documents(self, doc_count=100):
docs = [
{'_id': 'julia{0:03d}'.format(i), 'name': 'julia', 'age': i}
for i in range(doc_count)
]
return self.db.bulk_docs(docs)
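# Illustrative subclass usage (class and test names below are hypothetical):
#
#     class DatabaseTests(UnitTestDbBase):
#         def setUp(self):
#             super(DatabaseTests, self).setUp()
#             self.db_set_up()
#         def tearDown(self):
#             self.db_tear_down()
#             super(DatabaseTests, self).tearDown()
#         def test_bulk_insert(self):
#             self.assertEqual(len(self.populate_db_with_documents(10)), 10)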
| []
| []
| [
"DB_PASSWORD",
"DB_USER_CREATED",
"RUN_CLOUDANT_TESTS",
"CLOUDANT_ACCOUNT",
"DB_USER",
"DB_URL"
]
| [] | ["DB_PASSWORD", "DB_USER_CREATED", "RUN_CLOUDANT_TESTS", "CLOUDANT_ACCOUNT", "DB_USER", "DB_URL"] | python | 6 | 0 | |
back-end/logger.py | # ------------------------------------------------------------------#
# Service: gin-proc
# Project: GIN - https://gin.g-node.org
# Documentation: https://github.com/G-Node/gin-proc/blob/master/docs
# Package: Logger
# ------------------------------------------------------------------#
import logging
import os
from datetime import datetime
def level():
if 'DEBUG' in os.environ and os.environ['DEBUG']:
return logging.DEBUG
else:
return logging.INFO
FORMAT = "%(asctime)s:%(levelname)s:%(message)s"
if 'LOG_DIR' in os.environ:
LOG = True
FILENAME = os.environ['LOG_DIR']
logging.basicConfig(filename=FILENAME, format=FORMAT, level=level())
else:
LOG = False
logging.basicConfig(format=FORMAT, level=level())
def log(function, message):
if LOG:
if function == 'warning':
logging.warning(message)
elif function == 'debug':
logging.debug(message)
elif function == 'error':
logging.error(message)
elif function == 'critical':
logging.critical(message)
elif function == 'info':
logging.info(message)
elif function == 'exception':
logging.exception(message)
else:
if function != "debug" or os.environ.get("DEBUG", None):
print("{1}: [{0}] {2}".format(function.upper(), datetime.now(),
message))
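# Example usage (output shown is illustrative): with LOG_DIR unset, messages are printed
# to stdout; debug messages appear only when the DEBUG environment variable is set.
#
#     log('info', 'service started')
#     # -> 2019-06-01 12:00:00.000000: [INFO] service started
#     log('debug', 'payload received')   # silent unless DEBUG is set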
| []
| []
| [
"LOG_DIR",
"DEBUG"
]
| [] | ["LOG_DIR", "DEBUG"] | python | 2 | 0 | |
Packs/CortexDataLake/Integrations/CortexDataLake/CortexDataLake.py | """ IMPORTS """
from CommonServerPython import *
import os
import re
import requests
import json
from pancloud import QueryService, Credentials, exceptions
import base64
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from typing import Dict, Any, List, Tuple, Callable
from tempfile import gettempdir
from dateutil import parser
import demistomock as demisto
from datetime import timedelta
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBAL CONSTS '''
ACCESS_TOKEN_CONST = 'access_token' # guardrails-disable-line
REFRESH_TOKEN_CONST = 'refresh_token' # guardrails-disable-line
EXPIRES_IN = 'expires_in'
INSTANCE_ID_CONST = 'instance_id'
API_URL_CONST = 'api_url'
REGISTRATION_ID_CONST = 'reg_id'
ENCRYPTION_KEY_CONST = 'auth_key'
FIRST_FAILURE_TIME_CONST = 'first_failure_time'
LAST_FAILURE_TIME_CONST = 'last_failure_time'
DEFAULT_API_URL = 'https://api.us.cdl.paloaltonetworks.com'
MINUTES_60 = 60 * 60
SECONDS_30 = 30
FETCH_TABLE_HR_NAME = {
"firewall.threat": "Cortex Firewall Threat",
"firewall.file_data": "Cortex Firewall File Data"
}
BAD_REQUEST_REGEX = r'^Error in API call \[400\].*'
class Client(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
def __init__(self, token_retrieval_url, registration_id, use_ssl, proxy, refresh_token, enc_key):
headers = get_x_content_info_headers()
headers['Authorization'] = registration_id
headers['Accept'] = 'application/json'
super().__init__(base_url=token_retrieval_url, headers=headers, verify=use_ssl, proxy=proxy)
self.refresh_token = refresh_token
self.enc_key = enc_key
self.use_ssl = use_ssl
# Trust environment settings for proxy configuration
self.trust_env = proxy
self._set_access_token()
def _set_access_token(self):
"""
        Checks if an access token exists in the integration context and sets it on the object properties; if not, a new
        token is generated and saved in the integration context along with the query api_url and the instance_id
Returns:
None
"""
integration_context = demisto.getIntegrationContext()
access_token = integration_context.get(ACCESS_TOKEN_CONST)
valid_until = integration_context.get(EXPIRES_IN)
if access_token and valid_until:
if int(time.time()) < valid_until:
self.access_token = access_token
self.api_url = integration_context.get(API_URL_CONST, DEFAULT_API_URL)
self.instance_id = integration_context.get(INSTANCE_ID_CONST)
return
demisto.debug(f'access token time: {valid_until} expired/none. Will call oproxy')
access_token, api_url, instance_id, refresh_token, expires_in = self._oproxy_authorize()
updated_integration_context = {
ACCESS_TOKEN_CONST: access_token,
EXPIRES_IN: int(time.time()) + expires_in - SECONDS_30,
API_URL_CONST: api_url,
INSTANCE_ID_CONST: instance_id
}
if refresh_token:
updated_integration_context.update({REFRESH_TOKEN_CONST: refresh_token})
demisto.setIntegrationContext(updated_integration_context)
self.access_token = access_token
self.api_url = api_url
self.instance_id = instance_id
def _oproxy_authorize(self) -> Tuple[Any, Any, Any, Any, int]:
oproxy_response = self._get_access_token_with_backoff_strategy()
access_token = oproxy_response.get(ACCESS_TOKEN_CONST)
api_url = oproxy_response.get('url')
refresh_token = oproxy_response.get(REFRESH_TOKEN_CONST)
instance_id = oproxy_response.get(INSTANCE_ID_CONST)
# In case the response has EXPIRES_IN key with empty string as value, we need to make sure we don't try to cast
# an empty string to an int.
expires_in = int(oproxy_response.get(EXPIRES_IN, MINUTES_60) or 0)
if not access_token or not api_url or not instance_id:
raise DemistoException(f'Missing attribute in response: access_token, instance_id or api are missing.\n'
f'Oproxy response: {oproxy_response}')
return access_token, api_url, instance_id, refresh_token, expires_in
def _get_access_token_with_backoff_strategy(self) -> dict:
""" Implements a backoff strategy for retrieving an access token.
Raises an exception if the call is within one of the time windows, otherwise fetches the access token
        Returns: The oproxy response, or raises a DemistoException
"""
self._backoff_strategy(demisto.getIntegrationContext())
return self._get_access_token()
@staticmethod
def _backoff_strategy(integration_context: dict):
""" Implements a backoff strategy for retrieving an access token. Logic as follows:
- First 60 minutes check for access token once every 1 minute max.
- Next 47 hours check for access token once every 10 minute max.
- After 48 hours check for access token once every 60 minutes max.
Args:
integration_context: The integration context
"""
err_msg = 'We have found out that your recent attempts to authenticate against the CDL server have failed. ' \
'Therefore we have limited the number of calls that the CDL integration performs. ' \
'If you wish to try authenticating again, please run the `cdl-reset-authentication-timeout` ' \
'command and retry. If you choose not to reset the authentication timeout, the next attempt can be ' \
'done in {} {}.'
first_failure_time = integration_context.get(FIRST_FAILURE_TIME_CONST)
last_failure_time = integration_context.get(LAST_FAILURE_TIME_CONST)
now_datetime = datetime.utcnow()
demisto.debug(f'CDL - First failure time: {first_failure_time}')
demisto.debug(f'CDL - Last failure time: {last_failure_time}')
        demisto.debug(f'CDL - Current time: {now_datetime}')
if first_failure_time and last_failure_time:
first_failure_datetime = datetime.fromisoformat(first_failure_time)
last_failure_datetime = datetime.fromisoformat(last_failure_time)
time_from_first_failure = now_datetime - first_failure_datetime
time_from_last_failure = now_datetime - last_failure_datetime
if time_from_first_failure < timedelta(hours=1):
window = timedelta(minutes=1)
if time_from_last_failure < window:
raise DemistoException(err_msg.format(window - time_from_last_failure, 'seconds'))
elif time_from_first_failure < timedelta(hours=48):
window = timedelta(minutes=10)
if time_from_last_failure < window:
raise DemistoException(err_msg.format(window - time_from_last_failure, 'minutes'))
else:
window = timedelta(minutes=60)
if time_from_last_failure < window:
raise DemistoException(err_msg.format(window - time_from_last_failure, 'minutes'))
def _get_access_token(self) -> dict:
""" Performs an http request to oproxy-cdl access token endpoint
        In case of failure, handles the error; otherwise resets the failure counters and returns the response
        Returns: The oproxy response, or raises a DemistoException
"""
demisto.debug('CDL - Fetching access token')
try:
oproxy_response = self._http_request('POST',
'/cdl-token',
json_data={'token': get_encrypted(self.refresh_token, self.enc_key)},
timeout=(60 * 3, 60 * 3),
retries=3,
backoff_factor=10,
status_list_to_retry=[400])
except DemistoException as e:
if re.match(BAD_REQUEST_REGEX, str(e)):
demisto.error('The request to retrieve the access token has failed with 400 status code.')
demisto.setIntegrationContext(self._cache_failure_times(demisto.getIntegrationContext()))
raise e
self.reset_failure_times()
return oproxy_response
@staticmethod
def _cache_failure_times(integration_context: dict) -> dict:
""" Updates the failure times in case of an error with 400 status code.
Args:
integration_context: The integration context
Returns:
The updated integration context
"""
current_time = datetime.utcnow().isoformat()
times_dict = {LAST_FAILURE_TIME_CONST: current_time}
if not integration_context.get(FIRST_FAILURE_TIME_CONST):
# first failure
times_dict[FIRST_FAILURE_TIME_CONST] = current_time
integration_context.update(times_dict)
return integration_context
@staticmethod
def reset_failure_times():
""" Resets the time failure counters: FIRST_FAILURE_TIME_CONST & LAST_FAILURE_TIME_CONST
"""
integration_context = demisto.getIntegrationContext()
for failure_time_key in (FIRST_FAILURE_TIME_CONST, LAST_FAILURE_TIME_CONST):
if failure_time_key in integration_context:
del integration_context[failure_time_key]
demisto.setIntegrationContext(integration_context)
def query_loggings(self, query: str) -> Tuple[List[dict], list]:
"""
This function handles all the querying of Cortex Logging service
Args:
query: The sql string query.
Returns:
A list of records according to the query
"""
query_data = {'query': self.add_instance_id_to_query(query),
'language': 'csql'}
query_service = self.initial_query_service()
response = query_service.create_query(query_params=query_data, enforce_json=True)
query_result = response.json()
if not response.ok:
status_code = response.status_code
try:
                # For some error responses the messages are in query_result['errors'],
                # and for others they are simply in query_result itself
errors = query_result.get('errors', query_result)
error_message = ''.join([message.get('message') for message in errors])
except AttributeError:
error_message = query_result
raise DemistoException(f'Error in query to Cortex Data Lake [{status_code}] - {error_message}')
try:
raw_results = [r.json() for r in query_service.iter_job_results(job_id=query_result.get('jobId'),
result_format='valuesDictionary',
max_wait=2000)]
except exceptions.HTTPError as e:
raise DemistoException(f'Received error {str(e)} when querying logs.')
extended_results: List[Dict] = []
for result in raw_results:
page = result.get('page', {})
data = page.get('result', {}).get('data', [])
if data:
extended_results.extend(data)
return extended_results, raw_results
def initial_query_service(self) -> QueryService:
credentials = Credentials(
access_token=self.access_token,
verify=self.use_ssl
)
query_service = QueryService(
url=self.api_url,
credentials=credentials,
trust_env=self.trust_env
)
return query_service
def add_instance_id_to_query(self, query: str) -> str:
"""
On apollo v2 all table names must have the instance_id at the top of their hierarchy.
This function adds the instance_id to the query.
For example:
For the query "SELECT * FROM `test`" with instance_id=1234 this function will return "SELECT * FROM `1234.test`"
Args:
query: A query for CDL
Returns:
A query with instance_id
"""
FIND_FROM_STATEMENT_REGEX_PATTERN = r'(?i)FROM `'
query = re.sub(FIND_FROM_STATEMENT_REGEX_PATTERN, f'FROM `{self.instance_id}.', query)
return query
''' HELPER FUNCTIONS '''
def human_readable_time_from_epoch_time(epoch_time: int, utc_time: bool = False):
"""
    Divides the epoch time by 1e6 since the epoch format has 6 trailing zeroes (microseconds).
    Incidents need the time in UTC format (ending in 'Z'), but the SQL syntax cannot parse a
    UTC-formatted date well, so adding the suffix is controlled by the utc_time parameter.
    Args:
        utc_time: A boolean that states whether to add the 'Z' at the end of the date string
epoch_time: Epoch time as it is in the raw_content
Returns:
human readable time in the format of '1970-01-01T02:00:00'
"""
result = datetime.fromtimestamp(epoch_time / 1e6).isoformat() if epoch_time else None
if result:
result += 'Z' if utc_time else ''
return result
def add_milliseconds_to_epoch_time(epoch_time):
"""
Add 1 millisecond so we would not get duplicate incidents.
Args:
epoch_time: Epoch time as it is in the raw_content
Returns:
epoch_time with 1 more millisecond.
"""
epoch_time = int(epoch_time / 1000 + 1) / 1000
return epoch_time
def epoch_to_timestamp_and_add_milli(epoch_time: int):
"""
Create human readable time in the format of '1970-01-01T02:00:00.000Z'
Args:
epoch_time: Epoch time as it is in the raw_content
Returns:
human readable time in the format of '1970-01-01T02:00:00.000Z'
"""
epoch_time = add_milliseconds_to_epoch_time(epoch_time)
epoch_time_str = datetime.fromtimestamp(epoch_time).isoformat(timespec='milliseconds') + "Z"
return epoch_time_str
def common_context_transformer(row_content):
"""
This function retrieves data from a row of raw data into context path locations
Args:
row_content: a dict representing raw data of a row
Returns:
a dict with context paths and their corresponding value
"""
return {
'Action': row_content.get('action', {}).get('value'),
'App': row_content.get('app'),
'Protocol': row_content.get('protocol', {}).get('value'),
'DestinationIP': row_content.get('dest_ip', {}).get('value'),
'RuleMatched': row_content.get('rule_matched'),
'CharacteristicOfApp': row_content.get('characteristics_of_app'),
'LogSourceName': row_content.get('log_source_name'),
'IsNat': row_content.get('is_nat'),
'NatDestinationPort': row_content.get('nat_dest_port'),
'NatDestination': row_content.get('nat_dest', {}).get('value'),
'NatSource': row_content.get('nat_source', {}).get('value'),
'SourceIP': row_content.get('source_ip', {}).get('value'),
'AppCategory': row_content.get('app_category'),
'SourceLocation': row_content.get('source_location'),
'DestinationLocation': row_content.get('dest_location'),
'FileSHA256': row_content.get('file_sha_256'),
'FileName': row_content.get('file_name'),
'TimeGenerated': human_readable_time_from_epoch_time(row_content.get('time_generated', 0))
}
def traffic_context_transformer(row_content: dict) -> dict:
"""
This function retrieves data from a row of raw data into context path locations
Args:
row_content: a dict representing raw data of a row
Returns:
a dict with context paths and their corresponding value
"""
return {
'Action': row_content.get('action', {}).get('value'),
'RiskOfApp': row_content.get('risk_of_app'),
'NatSourcePort': row_content.get('nat_source_port'),
'SessionID': row_content.get('session_id'),
'Packets': row_content.get('packets_total'),
'CharacteristicOfApp': row_content.get('characteristics_of_app'),
'App': row_content.get('app'),
'Vsys': row_content.get('vsys'),
'IsNat': row_content.get('is_nat'),
'LogTime': human_readable_time_from_epoch_time(row_content.get('log_time', 0)),
'SubcategoryOfApp': row_content.get('app_sub_category'),
'Protocol': row_content.get('protocol', {}).get('value'),
'NatDestinationPort': row_content.get('nat_dest_port'),
'DestinationIP': row_content.get('dest_ip', {}).get('value'),
'NatDestination': row_content.get('nat_dest', {}).get('value'),
'RuleMatched': row_content.get('rule_matched'),
'DestinationPort': row_content.get('dest_port'),
'TotalTimeElapsed': row_content.get('total_time_elapsed'),
'LogSourceName': row_content.get('log_source_name'),
'Subtype': row_content.get('sub_type', {}).get('value'),
'Users': row_content.get('users'),
'TunneledApp': row_content.get('tunneled_app'),
'IsPhishing': row_content.get('is_phishing'),
'SessionEndReason': row_content.get('session_end_reason', {}).get('value'),
'NatSource': row_content.get('nat_source', {}).get('value'),
'SourceIP': row_content.get('source_ip', {}).get('value'),
'SessionStartIP': human_readable_time_from_epoch_time(row_content.get('session_start_time', 0)),
'TimeGenerated': human_readable_time_from_epoch_time(row_content.get('time_generated', 0)),
'AppCategory': row_content.get('app_category'),
'SourceLocation': row_content.get('source_location'),
'DestinationLocation': row_content.get('dest_location'),
'LogSourceID': row_content.get('log_source_id'),
'TotalBytes': row_content.get('bytes_total'),
'VsysID': row_content.get('vsys_id'),
'ToZone': row_content.get('to_zone'),
'URLCategory': row_content.get('url_category', {}).get('value'),
'SourcePort': row_content.get('source_port'),
'Tunnel': row_content.get('tunnel', {}).get('value'),
'SourceDeviceHost': row_content.get('source_device_host'),
'DestDeviceHost': row_content.get('dest_device_host')
}
def threat_context_transformer(row_content: dict) -> dict:
"""
This function retrieves data from a row of raw data into context path locations
Args:
row_content: a dict representing raw data of a row
Returns:
a dict with context paths and their corresponding value
"""
return {
'SessionID': row_content.get('session_id'),
'Action': row_content.get('action', {}).get('value'),
'App': row_content.get('app'),
'IsNat': row_content.get('is_nat'),
'SubcategoryOfApp': row_content.get('app_sub_category'),
'PcapID': row_content.get('pcap_id'),
'NatDestination': row_content.get('nat_dest', {}).get('value'),
'Flags': row_content.get('flags'),
'DestinationPort': row_content.get('dest_port'),
'ThreatID': row_content.get('threat_id'),
'NatSource': row_content.get('nat_source', {}).get('value'),
'IsURLDenied': row_content.get('is_url_denied'),
'Users': row_content.get('users'),
'TimeGenerated': human_readable_time_from_epoch_time(row_content.get('time_generated', 0)),
'IsPhishing': row_content.get('is_phishing'),
'AppCategory': row_content.get('app_category'),
'SourceLocation': row_content.get('source_location'),
'DestinationLocation': row_content.get('dest_location'),
'ToZone': row_content.get('to_zone'),
'RiskOfApp': row_content.get('risk_of_app'),
'NatSourcePort': row_content.get('nat_source_port'),
'CharacteristicOfApp': row_content.get('characteristics_of_app'),
'FromZone': row_content.get('from_zone'),
'Vsys': row_content.get('vsys'),
'Protocol': row_content.get('protocol', {}).get('value'),
'NatDestinationPort': row_content.get('nat_dest_port'),
'DestinationIP': row_content.get('dest_ip', {}).get('value'),
'SourceIP': row_content.get('source_ip', {}).get('value'),
'RuleMatched': row_content.get('rule_matched'),
'ThreatCategory': row_content.get('threat_category', {}).get('value'),
'LogSourceName': row_content.get('log_source_name'),
'Subtype': row_content.get('sub_type', {}).get('value'),
'Direction': row_content.get('direction_of_attack', {}).get('value'),
'FileName': row_content.get('file_name'),
'VendorSeverity': row_content.get('vendor_severity', {}).get('value'),
'LogTime': human_readable_time_from_epoch_time(row_content.get('log_time', 0)),
'LogSourceID': row_content.get('log_source_id'),
'VsysID': row_content.get('vsys_id'),
'URLDomain': row_content.get('url_domain'),
'URLCategory': row_content.get('url_category', {}).get('value'),
'SourcePort': row_content.get('source_port'),
'FileSHA256': row_content.get('file_sha_256'),
'SourceDeviceHost': row_content.get('source_device_host'),
'DestDeviceHost': row_content.get('dest_device_host')
}
def url_context_transformer(row_content: dict) -> dict:
"""
This function retrieves data from a row of raw data into context path locations
Args:
row_content: a dict representing raw data of a row
Returns:
a dict with context paths and their corresponding value
"""
return {
'SessionID': row_content.get('session_id'),
'Action': row_content.get('action', {}).get('value'),
'App': row_content.get('app'),
'PcapID': row_content.get('pcap_id'),
'DestinationPort': row_content.get('dest_port'),
'AppCategory': row_content.get('app_category'),
'AppSubcategory': row_content.get('app_sub_category'),
'SourceLocation': row_content.get('source_location'),
'DestinationLocation': row_content.get('dest_location'),
'ToZone': row_content.get('to_zone'),
'FromZone': row_content.get('from_zone'),
'Protocol': row_content.get('protocol', {}).get('value'),
'DestinationIP': row_content.get('dest_ip', {}).get('value'),
'SourceIP': row_content.get('source_ip', {}).get('value'),
'RuleMatched': row_content.get('rule_matched'),
'ThreatCategory': row_content.get('threat_category', {}).get('value'),
'ThreatName': row_content.get('threat_name'),
'Subtype': row_content.get('sub_type', {}).get('value'),
'LogTime': human_readable_time_from_epoch_time(row_content.get('log_time', 0)),
'LogSourceName': row_content.get('log_source_name'),
'Denied': row_content.get('is_url_denied'),
'Category': row_content.get('url_category', {}).get('value'),
'SourcePort': row_content.get('source_port'),
'URL': row_content.get('url_domain'),
'URI': row_content.get('uri'),
'ContentType': row_content.get('content_type'),
'HTTPMethod': row_content.get('http_method', {}).get('value'),
'Severity': row_content.get('severity'),
'UserAgent': row_content.get('user_agent'),
'RefererProtocol': row_content.get('referer_protocol', {}).get('value'),
'RefererPort': row_content.get('referer_port'),
'RefererFQDN': row_content.get('referer_fqdn'),
'RefererURL': row_content.get('referer_url_path'),
'SrcUser': row_content.get('source_user'),
'SrcUserInfo': row_content.get('source_user_info'),
'DstUser': row_content.get('dest_user'),
'DstUserInfo': row_content.get('dest_user_info'),
'TechnologyOfApp': row_content.get('technology_of_app'),
'SourceDeviceHost': row_content.get('source_device_host'),
'DestDeviceHost': row_content.get('dest_device_host')
}
def files_context_transformer(row_content: dict) -> dict:
"""
This function retrieves data from a row of raw data into context path locations
Args:
row_content: a dict representing raw data of a row
Returns:
a dict with context paths and their corresponding value
"""
return {
'Action': row_content.get('action', {}).get('value'),
'App': row_content.get('app'),
'AppCategory': row_content.get('app_category'),
'AppSubcategory': row_content.get('app_sub_category'),
'CharacteristicOfApp': row_content.get('characteristics_of_app'),
'DestinationIP': row_content.get('dest_ip', {}).get('value'),
'CloudHostname': row_content.get('cloud_hostname'),
'CountOfRepeats': row_content.get('count_of_repeats'),
'CustomerID': row_content.get('customer_id'),
'DestinationLocation': row_content.get('dest_location'),
'DestinationPort': row_content.get('dest_port'),
'DirectionOfAttack': row_content.get('direction_of_attack', {}).get('value'),
'FileID': row_content.get('file_id'),
'FileName': row_content.get('file_name'),
'FileType': row_content.get('file_type'),
'Flags': row_content.get('flags'),
'FromZone': row_content.get('from_zone'),
'Http2Connection': row_content.get('http2_connection'),
'InboundIf': row_content.get('inbound_if', {}).get('value'),
'IngestionTime': human_readable_time_from_epoch_time(row_content.get('ingestion_time', 0)),
'IsCaptivePortal': row_content.get('is_captive_portal'),
'IsClientToServer': row_content.get('is_client_to_server'),
'IsContainer': row_content.get('is_container'),
'IsDecryptMirror': row_content.get('is_decrypt_mirror'),
'IsDupLog': row_content.get('is_dup_log'),
'IsExported': row_content.get('is_exported'),
'IsForwarded': row_content.get('is_forwarded'),
'IsMptcpOn': row_content.get('is_mptcp_on'),
'IsNat': row_content.get('is_nat'),
'IsNonStdDestPort': row_content.get('is_non_std_dest_port'),
'IsPacketCapture': row_content.get('is_packet_capture'),
'IsPhishing': row_content.get('is_phishing'),
'IsPrismaBranch': row_content.get('is_prisma_branch'),
'IsPrismaMobile': row_content.get('is_prisma_mobile'),
'IsProxy': row_content.get('is_proxy'),
'IsReconExcluded': row_content.get('is_recon_excluded'),
'IsSaasApp': row_content.get('is_saas_app'),
'IsServerToClient': row_content.get('is_server_to_client'),
'IsSymReturn': row_content.get('is_sym_return'),
'IsTransaction': row_content.get('is_transaction'),
'IsTunnelInspected': row_content.get('is_tunnel_inspected'),
'IsUrlDenied': row_content.get('is_url_denied'),
'LogSet': row_content.get('log_set'),
'LogSource': row_content.get('log_source'),
'LogSourceID': row_content.get('log_source_id'),
'LogSourceName': row_content.get('log_source_name'),
'LogTime': human_readable_time_from_epoch_time(row_content.get('log_time', 0)),
'LogType': row_content.get('log_type', {}).get('value'),
'NatDestination': row_content.get('nat_dest', {}).get('value'),
'NatSource': row_content.get('nat_source', {}).get('value'),
'NatDestinationPort': row_content.get('nat_dest_port'),
'NatSourcePort': row_content.get('nat_source_port'),
'OutboundIf': row_content.get('outbound_if', {}).get('value'),
'PcapID': row_content.get('pcap_id'),
'Protocol': row_content.get('protocol', {}).get('value'),
'RecordSize': row_content.get('record_size'),
'ReportID': row_content.get('report_id'),
'RiskOfApp': row_content.get('risk_of_app'),
'RuleMatched': row_content.get('rule_matched'),
'RuleMatchedUuid': row_content.get('rule_matched_uuid'),
'SanctionedStateOfApp': row_content.get('sanctioned_state_of_app'),
'SequenceNo': row_content.get('sequence_no'),
'SessionID': row_content.get('session_id'),
'Severity': row_content.get('severity'),
'SourceIP': row_content.get('source_ip', {}).get('value'),
'Subtype': row_content.get('sub_type', {}).get('value'),
'TechnologyOfApp': row_content.get('technology_of_app'),
'TimeGenerated': human_readable_time_from_epoch_time(row_content.get('time_generated', 0)),
'ToZone': row_content.get('to_zone'),
'Tunnel': row_content.get('tunnel', {}).get('value'),
'TunneledApp': row_content.get('tunneled_app'),
'URLCategory': row_content.get('url_category', {}).get('value'),
'FileSHA256': row_content.get('file_sha_256'),
'Vsys': row_content.get('vsys'),
'VsysID': row_content.get('vsys_id'),
'VendorName': row_content.get('vendor_name'),
'VendorSeverity': row_content.get('vendor_severity', {}).get('value')
}
def records_to_human_readable_output(fields: str, table_name: str, results: list) -> str:
"""
This function gets all relevant data for the human readable output of a specific table.
By design if the user queries all fields of the table (i.e. enters '*' in the query) than the outputs
shown in the war room will be the same for each query - the outputs will be the headers list in the code.
If the user selects different fields in the query than those fields will be shown to the user.
Args:
fields: The field of the table named table_name
table_name: The name of the table
results: The results needs to be shown
Returns:
A markdown table of the outputs
"""
filtered_results: list = []
if fields == '*':
for result in results:
filtered_result = {
'Source Address': result.get('source_ip', {}).get('value'),
'Destination Address': result.get('dest_ip', {}).get('value'),
'Application': result.get('app'),
'Action': result.get('action', {}).get('value'),
'RuleMatched': result.get('rule_matched'),
'TimeGenerated': human_readable_time_from_epoch_time(result.get('time_generated')),
'FileID': result.get('file_id'),
'FileName': result.get('file_name'),
'FileType': result.get('file_type')
}
filtered_results.append(filtered_result)
else:
for result in results:
filtered_result = {}
for root in result.keys():
parsed_tree: dict = parse_tree_by_root_to_leaf_paths(root, result[root])
filtered_result.update(parsed_tree)
filtered_results.append(filtered_result)
return tableToMarkdown(f'Logs {table_name} table', filtered_results, removeNull=True)
def parse_tree_by_root_to_leaf_paths(root: str, body) -> dict:
"""
This function receives a dict (root and a body) and parses it according to the upcoming example:
Input: root = 'a', body = {'b': 2, 'c': 3, 'd': {'e': 5, 'f': 6, 'g': {'h': 8, 'i': 9}}}.
So the dict is {'a': {'b': 2, 'c': 3, 'd': {'e': 5, 'f': 6, 'g': {'h': 8, 'i': 9}}}}
The expected output is {'a.b': 2, 'a.c': 3, 'a.d.e': 5, 'a.d.f': 6, 'a.d.g.h': 8, 'a.d.g.i': 9}
    Basically, given a tree this function creates a dict whose keys are all
    root-to-leaf paths and whose corresponding values are the values in the leaves
Please note that the implementation is similar to DFS on trees (which means we don't have to check for visited
nodes since there are no cycles)
Args:
root: The root string
body: The body of the root
Returns:
The parsed tree
"""
parsed_tree: dict = {}
help_stack: list = [(root, body)]
while help_stack:
node: tuple = help_stack.pop()
root_to_node_path: str = node[0]
body = node[1]
if isinstance(body, dict):
for key, value in body.items():
# for each node we append a tuple of it's body and the path from the root to it
help_stack.append((root_to_node_path + '.' + key, value))
elif isinstance(body, list):
for element in body:
help_stack.append((root_to_node_path, element))
else:
parsed_tree[root_to_node_path] = body
return parsed_tree
def build_where_clause(args: dict) -> str:
"""
    This function transforms the relevant entries of the args dict into the WHERE part of a SQL query
Args:
args: The arguments dict
Returns:
A string represents the where part of a SQL query
"""
args_dict = {
'source_ip': 'source_ip.value',
'dest_ip': 'dest_ip.value',
'rule_matched': 'rule_matched',
'from_zone': 'from_zone',
'to_zone': 'to_zone',
'source_port': 'source_port',
'dest_port': 'dest_port',
'action': 'action.value',
'file_sha_256': 'file_sha_256',
'file_name': 'file_name',
'app': 'app',
'app_category': 'app_category',
'dest_device_port': 'dest_device_port',
'dest_edl': 'dest_edl',
'dest_dynamic_address_group': 'dest_dynamic_address_group',
'dest_location': 'dest_location',
'dest_user': 'dest_user',
'file_type': 'file_type',
'is_server_to_client': 'is_server_to_client',
'is_url_denied': 'is_url_denied',
'log_type': 'log_type',
'nat_dest': 'nat_dest',
'nat_dest_port': 'nat_dest_port',
'nat_source': 'nat_source',
'nat_source_port': 'nat_source_port',
'rule_matched_uuid': 'rule_matched_uuid',
'severity': 'severity',
'source_device_host': 'source_device_host',
'source_edl': 'source_edl',
'source_dynamic_address_group': 'source_dynamic_address_group',
'source_location': 'source_location',
'source_user': 'source_user',
'sub_type': 'sub_type.value',
'time_generated': 'time_generated',
'url_category': 'url_category',
'url_domain': 'url_domain'
}
if args.get('ip') and (args.get('source_ip') or args.get('dest_ip')):
raise DemistoException('Error: "ip" argument cannot appear with either "source_ip" nor "dest_ip"')
if args.get('port') and (args.get('source_port') or args.get('dest_port')):
raise DemistoException('Error: "port" argument cannot appear with either "source_port" nor "dest_port"')
non_string_keys = {'dest_port', 'source_port'}
if 'query' in args:
# if query arg is supplied than we just need to parse it and only it
return args['query'].strip()
where_clause = ''
if args.get('ip'):
ips = argToList(args.pop('ip'))
# Creating a query for ip argument using source ip and dest ip
where_clause += '(' + ' OR '.join(f'source_ip.value = "{ip}" OR dest_ip.value = "{ip}"' for ip in ips) + ')'
if any(args.get(key) for key in args_dict) or args.get('port') or args.get('url'):
where_clause += ' AND '
if args.get('port'):
ports = argToList(args.pop('port'))
# Creating a query for port argument using source port and dest port
where_clause += '(' + ' OR '.join(f'source_port = {port} OR dest_port = {port}' for port in ports) + ')'
if any(args.get(key) for key in args_dict):
where_clause += ' AND '
if args.get('url'):
urls = argToList(args.pop('url'))
# Creating a query for url argument using uri and referer
where_clause += '(' + ' OR '.join(f'uri LIKE "%{url}%" OR referer LIKE "%{url}%"' for url in urls) + ')'
if any(args.get(key) for key in args_dict):
where_clause += ' AND '
# We want to add only keys that are part of the query
string_query_fields = {key: value for key, value in args.items() if key in args_dict and key not in non_string_keys}
or_statements = []
for key, values in string_query_fields.items():
string_values_list: list = argToList(values)
field = args_dict[key]
or_statements.append(' OR '.join([f'{field} = "{value}"' for value in string_values_list]))
# ports are digested as ints and cannot be sent as strings
non_string_query_fields = {key: value for key, value in args.items() if key in non_string_keys}
for key, values in non_string_query_fields.items():
non_string_values_list: list = argToList(values)
field = args_dict[key]
or_statements.append(' OR '.join([f'{field} = {value}' for value in non_string_values_list]))
where_clause += ' AND '.join([f'({or_statement})' for or_statement in or_statements if or_statement])
return where_clause
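# Example (illustrative values): build_where_clause({'source_ip': '1.1.1.1', 'dest_port': '443'})
# returns '(source_ip.value = "1.1.1.1") AND (dest_port = 443)'; build_query() below then
# appends the time_generated window and the LIMIT clause.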
def get_encrypted(auth_id: str, key: str) -> str:
"""
Args:
auth_id (str): auth_id from oproxy
key (str): key from oproxy
Returns:
The encrypted auth_id with the time it was encrypted using AESGCM algorithm
"""
def create_nonce() -> bytes:
return os.urandom(12)
def encrypt(string: str, enc_key: str) -> bytes:
"""
Args:
enc_key (str):
string (str):
Returns:
bytes:
"""
# String to bytes
decoded_key = base64.b64decode(enc_key)
# Create key
aes_gcm = AESGCM(decoded_key)
# Create nonce
nonce = create_nonce()
# Create ciphered data
data = string.encode()
ct = aes_gcm.encrypt(nonce, data, None)
return base64.b64encode(nonce + ct)
now = int(time.time())
return encrypt(f'{now}:{auth_id}', key).decode('utf-8')
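# Note on the scheme above (informational sketch, not part of this integration): the
# plaintext is "<epoch seconds>:<auth_id>", encrypted with AES-GCM using a random 12-byte
# nonce that is prepended to the ciphertext before base64 encoding, so a receiver holding
# the same key could reverse it roughly as follows:
#
#     raw = base64.b64decode(token)
#     nonce, ciphertext = raw[:12], raw[12:]
#     plaintext = AESGCM(base64.b64decode(key)).decrypt(nonce, ciphertext, None)
#     timestamp, auth_id = plaintext.decode().split(':', 1)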
def prepare_fetch_incidents_query(fetch_timestamp: str,
fetch_severity: list,
fetch_table: str,
fetch_subtype: list,
fetch_fields: str,
fetch_limit: str) -> str:
"""
Prepares the SQL query for fetch incidents command
Args:
fetch_limit: Indicates how many incidents should be queried
fetch_timestamp: The date from which threat logs should be queried
fetch_severity: Severity associated with the incident.
fetch_subtype: Identifies the log subtype.
fetch_table: Identifies the fetch type.
        fetch_fields: Fields to fetch from the table.
Returns:
SQL query that matches the arguments
"""
query = f'SELECT {fetch_fields} FROM `{fetch_table}` ' # guardrails-disable-line
query += f'WHERE time_generated Between TIMESTAMP("{fetch_timestamp}") ' \
f'AND CURRENT_TIMESTAMP'
if fetch_subtype and 'all' not in fetch_subtype:
sub_types = [f'sub_type.value = "{sub_type}"' for sub_type in fetch_subtype]
query += f' AND ({" OR ".join(sub_types)})'
if fetch_severity and 'all' not in fetch_severity:
severities = [f'vendor_severity.value = "{severity}"' for severity in fetch_severity]
query += f' AND ({" OR ".join(severities)})'
query += f' ORDER BY time_generated ASC LIMIT {fetch_limit}'
return query
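# For illustration, with fetch_fields='*', fetch_table='firewall.threat',
# fetch_subtype=['url'], fetch_severity=['Critical'] and fetch_limit='10', the generated
# query looks like the following (timestamp value is a placeholder, wrapped here for readability):
#
#     SELECT * FROM `firewall.threat` WHERE time_generated Between
#     TIMESTAMP("2021-01-01 00:00:00") AND CURRENT_TIMESTAMP
#     AND (sub_type.value = "url") AND (vendor_severity.value = "Critical")
#     ORDER BY time_generated ASC LIMIT 10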
def convert_log_to_incident(log: dict, fetch_table: str) -> dict:
time_generated = log.get('time_generated', 0)
occurred = human_readable_time_from_epoch_time(time_generated, utc_time=True)
incident = {
'name': FETCH_TABLE_HR_NAME[fetch_table],
'rawJSON': json.dumps(log, ensure_ascii=False),
'occurred': occurred
}
return incident
''' COMMANDS FUNCTIONS '''
def test_module(client: Client, fetch_table, fetch_fields, is_fetch):
if not is_fetch:
# fetch params not to be tested (won't be used)
fetch_fields = '*'
fetch_table = 'firewall.traffic'
query = f'SELECT {fetch_fields} FROM `{fetch_table}` limit 1'
client.query_loggings(query)
return_outputs('ok')
def query_logs_command(args: dict, client: Client) -> Tuple[str, Dict[str, List[dict]], List[Dict[str, Any]]]:
"""
Return the result of querying the Logging service
"""
query = args.get('query', '')
limit = args.get('limit', '')
transform_results = argToBoolean(args.get('transform_results', 'true'))
if 'limit' not in query.lower():
query += f' LIMIT {limit}'
records, raw_results = client.query_loggings(query)
table_name = get_table_name(query)
output_results = records if not transform_results else [common_context_transformer(record) for record in records]
human_readable = tableToMarkdown('Logs ' + table_name + ' table', output_results, removeNull=True)
ec = {
'CDL.Logging': output_results
}
return human_readable, ec, raw_results
def get_table_name(query: str) -> str:
"""
Table name is stored in log_type attribute of the records
Args:
query: Query string, i.e SELECT * FROM firewall.threat LIMIT 1
Returns:
The query's table name
"""
find_table_name_from_query = r'(FROM `)(\w+.\w+)(`)'
search_result = re.search(find_table_name_from_query, query)
if search_result:
return search_result.group(2)
return "Unrecognized table name"
def get_critical_logs_command(args: dict, client: Client) -> Tuple[str, Dict[str, List[dict]], List[Dict[str, Any]]]:
"""
Queries Cortex Logging according to a pre-set query
"""
logs_amount = args.get('limit')
query_start_time, query_end_time = query_timestamp(args)
query = 'SELECT * FROM `firewall.threat` WHERE severity = "Critical" ' # guardrails-disable-line
query += f'AND time_generated BETWEEN TIMESTAMP("{query_start_time}") AND ' \
f'TIMESTAMP("{query_end_time}") LIMIT {logs_amount}'
records, raw_results = client.query_loggings(query)
transformed_results = [threat_context_transformer(record) for record in records]
human_readable = tableToMarkdown('Logs threat table', transformed_results, removeNull=True)
ec = {
'CDL.Logging.Threat': transformed_results
}
return human_readable, ec, raw_results
def query_timestamp(args: dict) -> Tuple[datetime, datetime]:
start_time = args.get('start_time', '')
end_time = args.get('end_time', '')
time_range = args.get('time_range', '')
if time_range:
query_start_time, query_end_time = parse_date_range(time_range)
else:
# parses user input to datetime object
query_start_time = parser.parse(start_time)
# if end_time is not given- will be replaced with current time
query_end_time = parser.parse(end_time) if end_time else datetime.fromtimestamp(time.time())
return query_start_time.replace(microsecond=0), query_end_time.replace(microsecond=0)
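# Example (illustrative): query_timestamp({'time_range': '2 hours'}) returns the pair
# (now - 2 hours, now), while query_timestamp({'start_time': '2021-01-01T00:00:00'})
# pairs that start time with the current time; both values have microseconds stripped.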
def get_social_applications_command(args: dict,
client: Client) -> Tuple[str, Dict[str, List[dict]], List[Dict[str, Any]]]:
""" Queries Cortex Logging according to a pre-set query """
logs_amount = args.get('limit')
query_start_time, query_end_time = query_timestamp(args)
query = 'SELECT * FROM `firewall.traffic` WHERE app_sub_category = "social-networking" ' # guardrails-disable-line
query += f' AND time_generated BETWEEN TIMESTAMP("{query_start_time}") AND ' \
f'TIMESTAMP("{query_end_time}") LIMIT {logs_amount}'
records, raw_results = client.query_loggings(query)
transformed_results = [traffic_context_transformer(record) for record in records]
human_readable = tableToMarkdown('Logs traffic table', transformed_results, removeNull=True)
ec = {
'CDL.Logging.Traffic': transformed_results
}
return human_readable, ec, raw_results
def search_by_file_hash_command(args: dict, client: Client) -> Tuple[str, Dict[str, List[dict]], List[Dict[str, Any]]]:
"""
Queries Cortex Logging according to a pre-set query
"""
logs_amount = args.get('limit')
file_hash = args.get('SHA256')
query_start_time, query_end_time = query_timestamp(args)
query = f'SELECT * FROM `firewall.threat` WHERE file_sha_256 = "{file_hash}" ' # guardrails-disable-line
query += f'AND time_generated BETWEEN TIMESTAMP("{query_start_time}") AND ' \
f'TIMESTAMP("{query_end_time}") LIMIT {logs_amount}'
records, raw_results = client.query_loggings(query)
transformed_results = [threat_context_transformer(record) for record in records]
human_readable = tableToMarkdown('Logs threat table', transformed_results, removeNull=True)
ec = {
'CDL.Logging.Threat': transformed_results
}
return human_readable, ec, raw_results
def query_traffic_logs_command(args: dict, client: Client) -> Tuple[str, dict, List[Dict[str, Any]]]:
"""
The function of the command that queries firewall.traffic table
Returns: a Demisto's entry with all the parsed data
"""
table_name: str = 'traffic'
context_transformer_function = traffic_context_transformer
table_context_path: str = 'CDL.Logging.Traffic'
return query_table_logs(args, client, table_name, context_transformer_function, table_context_path)
def query_threat_logs_command(args: dict, client: Client) -> Tuple[str, dict, List[Dict[str, Any]]]:
"""
The function of the command that queries firewall.threat table
Returns: a Demisto's entry with all the parsed data
"""
query_table_name: str = 'threat'
context_transformer_function = threat_context_transformer
table_context_path: str = 'CDL.Logging.Threat'
return query_table_logs(args, client, query_table_name, context_transformer_function, table_context_path)
def query_url_logs_command(args: dict, client: Client) -> Tuple[str, dict, List[Dict[str, Any]]]:
"""
The function of the command that queries firewall.url table
Returns: a Demisto's entry with all the parsed data
"""
query_table_name: str = 'url'
context_transformer_function = url_context_transformer
table_context_path: str = 'CDL.Logging.URL'
return query_table_logs(args, client, query_table_name, context_transformer_function, table_context_path)
def query_file_data_command(args: dict, client: Client) -> Tuple[str, dict, List[Dict[str, Any]]]:
query_table_name: str = 'file_data'
context_transformer_function = files_context_transformer
table_context_path: str = 'CDL.Logging.File'
return query_table_logs(args, client, query_table_name, context_transformer_function, table_context_path)


def query_table_logs(args: dict,
                     client: Client,
                     table_name: str,
                     context_transformer_function: Callable[[dict], dict],
                     table_context_path: str) -> Tuple[str, dict, List[Dict[str, Any]]]:
    """
    Generic function that gets all the data needed for a specific Cortex table and acts as a
    regular command function
Args:
args: demisto args
client: The client
table_name: the name of the table in Cortex
context_transformer_function: the context transformer function to parse the data
table_context_path: the context path where the parsed data should be located
"""
fields, query = build_query(args, table_name)
results, raw_results = client.query_loggings(query)
outputs = [context_transformer_function(record) for record in results]
human_readable = records_to_human_readable_output(fields, table_name, results)
context_outputs: dict = {table_context_path: outputs}
return human_readable, context_outputs, raw_results
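
# Minimal sketch of how a further table-specific command could delegate to query_table_logs.
# The `decryption` table name, transformer and context path below are assumptions for illustration
# and are not part of this integration:
#
#   def query_decryption_logs_command(args: dict, client: Client) -> Tuple[str, dict, List[Dict[str, Any]]]:
#       return query_table_logs(args, client, 'decryption', decryption_context_transformer, 'CDL.Logging.Decryption')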


def build_query(args, table_name):
fields = args.get('fields', 'all')
fields = '*' if 'all' in fields else fields
where = build_where_clause(args)
query_start_time, query_end_time = query_timestamp(args)
timestamp_limitation = f'time_generated BETWEEN TIMESTAMP("{query_start_time}") AND ' \
f'TIMESTAMP("{query_end_time}") '
limit = args.get('limit', '5')
where += f' AND {timestamp_limitation}' if where else timestamp_limitation
query = f'SELECT {fields} FROM `firewall.{table_name}` WHERE {where} LIMIT {limit}'
return fields, query
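
# Illustrative example (argument values assumed, not taken from the integration): with
# args = {'fields': 'all', 'limit': '5'} and a time range resolved by query_timestamp,
# build_query returns ('*', query) where query looks roughly like:
#   SELECT * FROM `firewall.traffic` WHERE time_generated BETWEEN TIMESTAMP("2021-01-01 00:00:00")
#   AND TIMESTAMP("2021-01-02 00:00:00")  LIMIT 5
# Any filters produced by build_where_clause are placed before the timestamp restriction.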


def fetch_incidents(client: Client,
first_fetch_timestamp: str,
fetch_severity: list,
fetch_table: str,
fetch_subtype: list,
fetch_fields: str,
fetch_limit: str,
last_run: dict) -> Tuple[Dict[str, str], list]:
last_fetched_event_timestamp = last_run.get('lastRun')
if last_fetched_event_timestamp:
last_fetched_event_timestamp = parser.parse(last_fetched_event_timestamp)
else:
last_fetched_event_timestamp, _ = parse_date_range(first_fetch_timestamp)
last_fetched_event_timestamp = last_fetched_event_timestamp.replace(microsecond=0)
query = prepare_fetch_incidents_query(last_fetched_event_timestamp, fetch_severity, fetch_table,
fetch_subtype, fetch_fields, fetch_limit)
demisto.debug('Query being fetched: {}'.format(query))
records, _ = client.query_loggings(query)
if not records:
return {'lastRun': str(last_fetched_event_timestamp)}, []
incidents = [convert_log_to_incident(record, fetch_table) for record in records]
max_fetched_event_timestamp = max(records, key=lambda record: record.get('time_generated', 0)).get('time_generated',
0)
next_run = {'lastRun': epoch_to_timestamp_and_add_milli(max_fetched_event_timestamp)}
return next_run, incidents
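
# Sketch of the fetch cycle (the exact record fields depend on the table being fetched): each run
# queries logs newer than last_run['lastRun'], converts them with convert_log_to_incident, and
# returns a next_run of the form
#   {'lastRun': '<timestamp of the newest fetched record plus one millisecond>'}
# which is persisted via demisto.setLastRun in main() and used as the lower bound of the next run.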


''' EXECUTION CODE '''


def main():
os.environ['PAN_CREDENTIALS_DBFILE'] = os.path.join(gettempdir(), 'pancloud_credentials.json')
params = demisto.params()
registration_id_and_url = params.get(REGISTRATION_ID_CONST).split('@')
if len(registration_id_and_url) != 2:
token_retrieval_url = "https://oproxy.demisto.ninja" # guardrails-disable-line
else:
token_retrieval_url = registration_id_and_url[1]
registration_id = registration_id_and_url[0]
    # If there's a stored token in the integration context, use it: it is newer than the one in the params
refresh_token = demisto.getIntegrationContext().get(REFRESH_TOKEN_CONST) or params.get(REFRESH_TOKEN_CONST)
enc_key = params.get(ENCRYPTION_KEY_CONST)
use_ssl = not params.get('insecure', False)
proxy = params.get('proxy', False)
args = demisto.args()
fetch_table = params.get('fetch_table')
fetch_fields = params.get('fetch_fields') or '*'
command = demisto.command()
LOG(f'command is {command}')
# needs to be executed before creating a Client
if command == 'cdl-reset-authentication-timeout':
Client.reset_failure_times()
return_outputs(readable_output="Caching mechanism failure time counters have been successfully reset.")
return
client = Client(token_retrieval_url, registration_id, use_ssl, proxy, refresh_token, enc_key)
try:
if command == 'test-module':
test_module(client, fetch_table, fetch_fields, params.get('isFetch'))
elif command == 'cdl-query-logs':
return_outputs(*query_logs_command(args, client))
elif command == 'cdl-get-critical-threat-logs':
return_outputs(*get_critical_logs_command(args, client))
elif command == 'cdl-get-social-applications':
return_outputs(*get_social_applications_command(args, client))
elif command == 'cdl-search-by-file-hash':
return_outputs(*search_by_file_hash_command(args, client))
elif command == 'cdl-query-traffic-logs':
return_outputs(*query_traffic_logs_command(args, client))
elif command == 'cdl-query-threat-logs':
return_outputs(*query_threat_logs_command(args, client))
elif command == 'cdl-query-url-logs':
return_outputs(*query_url_logs_command(args, client))
elif command == 'cdl-query-file-data':
return_outputs(*query_file_data_command(args, client))
elif command == 'fetch-incidents':
first_fetch_timestamp = params.get('first_fetch_timestamp', '24 hours').strip()
fetch_severity = params.get('firewall_severity')
fetch_table = params.get('fetch_table')
fetch_fields = params.get('fetch_fields') or '*'
fetch_subtype = params.get('firewall_subtype')
fetch_limit = params.get('limit')
last_run = demisto.getLastRun()
next_run, incidents = fetch_incidents(client,
first_fetch_timestamp,
fetch_severity,
fetch_table,
fetch_subtype,
fetch_fields,
fetch_limit,
last_run)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
except Exception as e:
error_message = str(e)
return_error(error_message)


if __name__ in ('__main__', 'builtins'):
main()
| [] | [] | ["PAN_CREDENTIALS_DBFILE"] | [] | ["PAN_CREDENTIALS_DBFILE"] | python | 1 | 0 |